From cac1ea3a584a429596418ad8c5895c90038cd8ac Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 24 Nov 2021 10:47:37 +0100 Subject: [PATCH 001/628] Initial prototype --- finn-rtllib/swg/swg_hdl_template.v | 184 +++ src/finn/custom_op/fpgadataflow/__init__.py | 4 + .../convolutioninputgenerator_rtl.py | 1016 +++++++++++++++++ ...est_fpgadataflow_convinputgenerator_rtl.py | 265 +++++ 4 files changed, 1469 insertions(+) create mode 100755 finn-rtllib/swg/swg_hdl_template.v create mode 100755 src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py create mode 100755 tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v new file mode 100755 index 0000000000..b0e00ea4d2 --- /dev/null +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -0,0 +1,184 @@ +// ============================================================== +// RTL generated by Vivado(TM) HLS - High-Level Synthesis from C, C++ and OpenCL +// Version: 2020.1 +// Copyright (C) 1986-2020 Xilinx, Inc. All Rights Reserved. +// +// =========================================================== + +`timescale 1 ns / 1 ps +module window_buffer +#( + parameter IN_WIDTH = 1, //c*bit-width + parameter OUT_WIDTH = 1, //c*bit-width*MMV_out + parameter BUFFER_ELEM_TOTAL = 1 +) +( + CLK, + data_in, + shift_enable, + data_out +); + +input CLK; +input [IN_WIDTH-1:0] data_in; +input shift_enable; +output [OUT_WIDTH-1:0] data_out; + +//Input REG to enable simultaneous R/W +reg [IN_WIDTH-1:0] reg_input; + +//REG FIFOs +$GENERATE_REG_FIFOS$ + +//BRAM FIFOs +//todo: generate real BRAM shift buffers if these get too large +$GENERATE_BRAM_FIFOS$ + +//Fixed REG FIFO <-> output mapping +$GENERATE_OUTPUT_MAPPING$ + +//main process +integer i; +always @ (posedge CLK) begin + if (shift_enable) begin + //shift logic + $GENERATE_SHIFT_LOGIC$ + + //shift in new data + reg_input <= data_in; + end +end + +endmodule //window_buffer + +module $TOP_MODULE_NAME$ ( + ap_clk, + ap_rst_n, + in0_V_V_TDATA, + in0_V_V_TVALID, + in0_V_V_TREADY, + out_V_V_TDATA, + out_V_V_TVALID, + out_V_V_TREADY +); + +//parameters +parameter BIT_WIDTH = $BIT_WIDTH$; +parameter SIMD = $SIMD$; //assuming SIMD=C for now +parameter MMV_IN = $MMV_IN$; //assuming MMV_IN=1 for now +parameter MMV_OUT = $MMV_OUT$; //assuming MMV_OUT=K for now +parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; //c*bit-width +parameter BUF_OUT_WIDTH = BUF_IN_WIDTH * MMV_OUT; //c*bit-width*MMV_out + +parameter CYCLES_TOTAL = $CYCLES_TOTAL$; +parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; + +//IO ports +input ap_clk; +input ap_rst_n; +input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; +input in0_V_V_TVALID; +output in0_V_V_TREADY; +output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; +output out_V_V_TVALID; +input out_V_V_TREADY; + +//main buffer instantiation +wire [BUF_IN_WIDTH-1:0] window_buffer_in; +wire [BUF_OUT_WIDTH-1:0] window_buffer_out; +wire window_buffer_shift_enable; +window_buffer +#( + .IN_WIDTH(BUF_IN_WIDTH), + .OUT_WIDTH(BUF_OUT_WIDTH), + .BUFFER_ELEM_TOTAL(BUF_ELEM_TOTAL) +) +window_buffer_inst +( + .CLK(ap_clk), + .data_in(window_buffer_in), + .shift_enable(window_buffer_shift_enable), + .data_out(window_buffer_out) +); + +//FSM state +reg [1:0] state; +parameter STATE_RESET = 0, STATE_OPERATE = 1, S2 = 2; + +//main cycle counter (where either read/write/both happen, resets for each image) +integer cycle; + +//read/write loop state +wire read_state; +wire write_state; + +//output registers +reg out_V_V_TVALID_reg; + +//assign buffer 
control +//todo: if mmv_out < k: might not shift and/or write for multiple read_state cycles +assign window_buffer_shift_enable = (read_state && in0_V_V_TVALID) || write_state; + +//assign I/O ports +assign window_buffer_in = in0_V_V_TDATA; +assign in0_V_V_TREADY = read_state; //accept data whenever read loop wants to read +assign out_V_V_TDATA = window_buffer_out; //out_V_V_TDATA_reg; +assign out_V_V_TVALID = out_V_V_TVALID_reg; + +//read schedule +//todo: generate differently +$GENERATE_READ_SCHEDULE$ + +//write schedule +//todo: generate differently +$GENERATE_WRITE_SCHEDULE$ + +//read process (writing to buffer) +always @ (posedge ap_clk) begin + if (ap_rst_n == 1'b0) begin + state <= STATE_RESET; + end else begin + case (state) + STATE_RESET: begin + state <= STATE_OPERATE; + cycle <= 0; + end + STATE_OPERATE: begin + if (read_state && in0_V_V_TVALID) begin + //read into buffer + //done in concurrent assignment + //count cycle (R) + cycle <= cycle+1; + if (cycle == CYCLES_TOTAL-1) + state <= STATE_RESET; + end else if (write_state && out_V_V_TREADY) begin + cycle <= cycle+1; //count cycle (or W) + if (cycle == CYCLES_TOTAL-1) + state <= STATE_RESET; + end + end + endcase + end +end + +//write process (reading from buffer) +always @ (posedge ap_clk) begin + if (ap_rst_n == 1'b0) begin + end else begin + case (state) + STATE_RESET: begin + end + STATE_OPERATE: begin + if (write_state && out_V_V_TREADY) begin + //write from buffer + //todo: VALID seems to be deasserted 1 cycle too late?! + out_V_V_TVALID_reg <= 1'b1; + end else begin + out_V_V_TVALID_reg <= 1'b0; + end + end + endcase + end +end + +endmodule //ConvolutionInputGenerator1D_0_ConvolutionInputGenerator1D_0 diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py index 417a505898..50746d4834 100644 --- a/src/finn/custom_op/fpgadataflow/__init__.py +++ b/src/finn/custom_op/fpgadataflow/__init__.py @@ -34,6 +34,9 @@ from finn.custom_op.fpgadataflow.convolutioninputgenerator1d import ( ConvolutionInputGenerator1D, ) +from finn.custom_op.fpgadataflow.convolutioninputgenerator_rtl import ( + ConvolutionInputGenerator_rtl, +) from finn.custom_op.fpgadataflow.downsampler import DownSampler from finn.custom_op.fpgadataflow.duplicatestreams_batch import DuplicateStreams_Batch from finn.custom_op.fpgadataflow.fmpadding_batch import FMPadding_Batch @@ -67,6 +70,7 @@ custom_op["StreamingFCLayer_Batch"] = StreamingFCLayer_Batch custom_op["ConvolutionInputGenerator"] = ConvolutionInputGenerator custom_op["ConvolutionInputGenerator1D"] = ConvolutionInputGenerator1D +custom_op["ConvolutionInputGenerator_rtl"] = ConvolutionInputGenerator_rtl custom_op["TLastMarker"] = TLastMarker custom_op["StreamingDataWidthConverter_Batch"] = StreamingDataWidthConverter_Batch custom_op["StreamingFIFO"] = StreamingFIFO diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py new file mode 100755 index 0000000000..9908bbb30d --- /dev/null +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -0,0 +1,1016 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import math +import numpy as np +import os + +from finn.core.datatype import DataType +from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp +from finn.custom_op.general.im2col import compute_conv_output_dim +from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy +from finn.custom_op.general import im2col + +from finn.util.basic import ( + get_rtlsim_trace_depth, + make_build_dir, +) + +try: + from pyverilator import PyVerilator +except ModuleNotFoundError: + PyVerilator = None + +# This operation should only be used for 1D convolutions. Either the +# IFMDim_H or IFMDim_W should be '1', which represents the so-called +# dummy-dimension + +# ONNX i/o tensor shape assumptions for ConvolutionInputGenerator1D: +# input 0 is the input tensor, shape NHWC = (1, IFMDim_H, IFMDim_W, IFMChannels) +# output 0 is the output tensor, shape NHWC: +# = (1, OFMDim_H, OFMDim_W, (ConvKernelDim_H*ConvKernelDim_W)*IFMChannels) + +# note: the actual data layout produced by the hlslib kernels is different +# for depthwise and non-depthwise ops. +# * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) +# * depthwise SWG: (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD) +# see test_fpgadataflow_slidingwindow.py for an example of how to transform +# between the two layouts + + +class ConvolutionInputGenerator_rtl(HLSCustomOp): + """Class that corresponds to one of the 1D finn-hlslib ConvolutionInputGenerator + (sliding window) function variants. Depending on the combination of + attributes (e.g. 
depthwise or not, whether dilation is 0) a different + variant will be picked for the actual HLS implementation.""" + + def __init__(self, onnx_node): + super().__init__(onnx_node) + + def get_nodeattr_types(self): + my_attrs = { + "ConvKernelDim": ("ints", True, []), # [H, W] = [Y, X] + "IFMChannels": ("i", True, 0), + "IFMDim": ("ints", True, []), # [H, W] = [Y, X] + "OFMDim": ("ints", True, []), # [H, W] = [Y, X] + "SIMD": ("i", True, 0), + "Stride": ("ints", True, []), # [H, W] = [Y, X] + "Dilation": ("ints", True, []), # [H, W] = [Y, X] + # FINN DataTypes for inputs, weights, outputs + "inputDataType": ("s", True, ""), + "outputDataType": ("s", True, ""), + "depthwise": ("i", False, 0, {0, 1}), + # FPGA resource type for ConvolutionInputGenerator input buffer + # auto -- let Vivado HLS decide + # block -- use BRAM + # distributed -- use LUTRAM + # ultra -- use URAM + "ram_style": ( + "s", + False, + "distributed", + {"auto", "block", "distributed", "ultra"}, + ), + } + my_attrs.update(super().get_nodeattr_types()) + return my_attrs + + def get_normal_input_shape(self): + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) + return ishape + + def get_folded_input_shape(self): + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + simd = self.get_nodeattr("SIMD") + assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" + wf = int(ifm_ch / simd) + folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) + return folded_ishape + + def get_normal_output_shape(self): + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") + pad = 0 + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) + oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) + return oshape + + def get_folded_output_shape(self): + k_h, k_w = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") + ifm_ch = self.get_nodeattr("IFMChannels") + stride_h, stride_w = self.get_nodeattr("Stride") + dilation_h, dilation_w = self.get_nodeattr("Dilation") + simd = self.get_nodeattr("SIMD") + pad = 0 + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) + assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" + if self.use_parallel_window_output(): + wf = int((ifm_ch) // simd) + folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) + else: + wf = int((k_h * k_w * ifm_ch) // simd) + folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd) + return folded_oshape + + def make_shape_compatible_op(self, model): + exp_ishape = self.get_normal_input_shape() + oshape = self.get_normal_output_shape() + ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) + assert ishape == exp_ishape, "Unexpect input shape for ConvInpGen." 
+ return super().make_const_shape_op(oshape) + + def infer_node_datatype(self, model): + node = self.onnx_node + # data type stays the same + dtype = model.get_tensor_datatype(node.input[0]) + model.set_tensor_datatype(node.output[0], dtype) + + def verify_node(self): + pass + + def get_input_datatype(self): + """Returns FINN DataType of input.""" + return DataType[self.get_nodeattr("inputDataType")] + + def get_output_datatype(self): + """Returns FINN DataType of output.""" + return DataType[self.get_nodeattr("outputDataType")] + + def get_instream_width(self): + ibits = self.get_input_datatype().bitwidth() + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" + in_width = simd * ibits + return in_width + + def get_outstream_width(self): + if self.use_parallel_window_output(): + # feed all window pixels in parallel + k_h, k_w = self.get_nodeattr("ConvKernelDim") + return self.get_instream_width() * k_h * k_w + else: + # if parallel variant not in use: same width for output and input stream + return self.get_instream_width() + + def get_number_output_values(self): + folded_oshape = self.get_folded_output_shape() + num_output_elems = np.prod(folded_oshape[:-1]) + return num_output_elems + + def get_1d_conv_attrs_normalized(self): + # support both (1, D) and (D, 1) cases transparently: + # For the kernel, presenting the input data of size D as + # [H, W] = [Y, X] = [1, D] or [D, 1] + # effectively gives the same result. Because the + # ConvolutionInputGenerator_NonSquare_Dilated(_dws) kernel currently only + # supports dilation>1 along the X-axis and the + # ConvolutionInputGenerator_NonSquare only works for stride>1 along the + # X-axis, we are working with the following assumption: + # the dummy ('1') dimension is the Y-dimension, i.e. + # images and kernels (and their attributes) of dimension + # [H, W] = [Y, X] = [D, 1] or [1, D] are always mapped to [1, D] + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + ofm_dim = self.get_nodeattr("OFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + + # see defines() for an explanation + if ifm_dim[1] == 1: + ifm_dim = ifm_dim[::-1] + ofm_dim = ofm_dim[::-1] + k = k[::-1] + stride = stride[::-1] + dilation = dilation[::-1] + + return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) + + def use_parallel_window_output(self): + # Check if simple "ConvolutionInputGenerator_1D_parallel" variant can be used to + # feed window in parallel to the following layer, enabling full SIMD unfolding. 
+ dilation = self.get_nodeattr("Dilation") + dilation_h, dilation_w = dilation + + #todo: make this configurable via mmv_out instead of an automatic selection + + if self.get_nodeattr("SIMD") == self.get_nodeattr("IFMChannels"): + if self.get_nodeattr("depthwise") == 0: + return True + + return False + + def get_exp_cycles(self): + simd = self.get_nodeattr("SIMD") + ( + ifm_ch, + ifm_dim, + ofm_dim, + k, + stride, + dilation, + ) = self.get_1d_conv_attrs_normalized() + ifm_dim_h, ifm_dim_w = ifm_dim + ofm_dim_h, ofm_dim_w = ofm_dim + k_h, k_w = k + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + + # since mmv != 1 is not supported yet, we set mmv for now to 1 + mmv = 1 + # see https://github.com/Xilinx/finn-hlslib/blob/master/slidingwindow.h + if self.use_parallel_window_output(): + exp_cycles = ifm_dim_w + 1 + else: + cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv + cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) + max_cycles = max(cycles_write_block, cycles_read_block) + exp_cycles = ( + ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles + ) + + return int(exp_cycles) + + def bram_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "block" or ram_style == "auto": + ram_depth = ifm_dim * ifm_ch / simd + if ram_depth <= 512: + ram_width = 36 + elif ram_depth <= 1024: + ram_width = 18 + elif ram_depth <= 2048: + ram_width = 9 + elif ram_depth <= 4096: + ram_width = 4 + elif ram_depth <= 8192: + ram_width = 2 + else: + ram_width = 1 + return int( + (k + stride) + * ( + math.ceil(simd * self.get_input_datatype().bitwidth() / ram_width) + * math.ceil(ifm_dim * ifm_ch / simd / ram_depth) + ) + ) + else: + return 0 + + def lut_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "distributed": + ram_luts = int( + (k + stride) + * ( + simd + * self.get_input_datatype().bitwidth() + * math.ceil(ifm_dim * ifm_ch / simd / 64) + ) + ) + else: + ram_luts = 0 + return 300 + ram_luts + + def uram_estimation(self): + # NOTE: not tested for correctness + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + ifm_dim = np.prod(self.get_nodeattr("IFMDim")) + k = np.prod(self.get_nodeattr("ConvKernelDim")) + stride = np.prod(self.get_nodeattr("Stride")) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "ultra": + return int( + (k + stride) + * ( + math.ceil(simd * self.get_input_datatype().bitwidth() / 64) + * math.ceil(ifm_dim * ifm_ch / simd / 4096) + ) + ) + else: + return 0 + + def execute_node(self, context, graph): + mode = self.get_nodeattr("exec_mode") + node = self.onnx_node + exp_ishape = self.get_normal_input_shape() + exp_oshape = self.get_normal_output_shape() + folded_ishape = self.get_folded_input_shape() + folded_oshape = self.get_folded_output_shape() + + # TODO ensure codegen dir exists + if mode == "cppsim": + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + elif mode == "rtlsim": + code_gen_dir = 
self.get_nodeattr("code_gen_dir_ipgen") + else: + raise Exception( + """Invalid value for attribute exec_mode! Is currently set to: {} + has to be set to one of the following value ("cppsim", "rtlsim")""".format( + mode + ) + ) + + inp = context[node.input[0]] + assert str(inp.dtype) == "float32", "Input datatype is not float32" + assert ( + inp.shape == exp_ishape + ), """Input shape doesn't + match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" + if self.get_input_datatype() == DataType["BIPOLAR"]: + # store bipolar activations as binary + inp = (inp + 1) / 2 + export_idt = DataType["BINARY"] + else: + export_idt = self.get_input_datatype() + # reshape input into folded form + inp = inp.reshape(folded_ishape) + # make copy before saving array + reshaped_input = inp.copy() + np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) + + if mode == "cppsim": + # execute the precompiled model + super().exec_precompiled_singlenode_model() + # load output npy file + super().npy_to_dynamic_output(context) + assert ( + context[node.output[0]].shape == folded_oshape + ), "cppsim \ + did not produce expected ofolded utput shape" + context[node.output[0]] = context[node.output[0]].reshape(*exp_oshape) + elif mode == "rtlsim": + sim = self.get_rtlsim() + nbits = self.get_instream_width() + rtlsim_inp = npy_to_rtlsim_input( + "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + ) + super().reset_rtlsim(sim) + super().toggle_clk(sim) + rtlsim_output = self.rtlsim(sim, rtlsim_inp) + odt = export_idt + target_bits = odt.bitwidth() + packed_bits = self.get_outstream_width() + out_npy_path = "{}/output.npy".format(code_gen_dir) + out_shape = self.get_folded_output_shape() + rtlsim_output_to_npy( + rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits + ) + # load and reshape output + output = np.load(out_npy_path) + output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) + context[node.output[0]] = output + else: + raise Exception( + """Invalid value for attribute exec_mode! Is currently set to: {} + has to be set to one of the following value ("cppsim", "rtlsim")""".format( + mode + ) + ) + # binary -> bipolar if needed + if self.get_output_datatype() == DataType["BIPOLAR"]: + out = context[node.output[0]] + out = 2 * out - 1 + context[node.output[0]] = out + assert ( + context[node.output[0]].shape == exp_oshape + ), """Output + shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch).""" + + def global_includes(self): + self.code_gen_dict["$GLOBALS$"] = ['#include "slidingwindow.h"'] + + def defines(self, var): + numReps = 1 + ( + ifm_ch, + ifm_dim, + ofm_dim, + k, + stride, + dilation, + ) = self.get_1d_conv_attrs_normalized() + simd = self.get_nodeattr("SIMD") + ifm_precision = self.get_input_datatype().bitwidth() + ifm_dim_y, ifm_dim_x = ifm_dim + ofm_dim_y, ofm_dim_x = ofm_dim + k_y, k_x = k + dilation_y, dilation_x = dilation + # For a 1d convolution with stride=[S,1] or [1,S], the finn-hlslib function + # of ConvInpGen must be created with [stride_y, stride_x] = [S, S]. 
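+        # Because the dummy dimension always has stride 1 in the 1D case,
+        # np.prod(stride) evaluates to the real stride S, which is then
+        # passed for both axes below.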
+ # TODO: changes in finn-hlslib (slidingwindow.h) + stride_y = np.prod(stride) + stride_x = np.prod(stride) + + if dilation_x > 1: + assert ( + dilation_y == 1 + ), "Dilation value greater than 1 along y-axis is not yet supported" + self.code_gen_dict["$DEFINES$"] = [ + """ + #define ConvKernelDim1_x {}\n + #define ConvKernelDim1_y {}\n + #define IFMChannels1 {}\n + #define Input_precision1 {}\n + #define IFMDim1_x {}\n + #define IFMDim1_y {}\n + #define OFMDim1_x {}\n + #define OFMDim1_y {}\n + #define SIMD1 {}\n + #define Stride1_x {}\n + #define Stride1_y {}\n + #define Dilation1_x {}\n + #define Dilation1_y {}\n + #define numReps {} + """.format( + k_x, + k_y, + ifm_ch, + ifm_precision, + ifm_dim_x, + ifm_dim_y, + ofm_dim_x, + ofm_dim_y, + simd, + stride_x, + stride_y, + dilation_x, + dilation_y, + numReps, + ) + ] + else: + ofm_dim = self.get_nodeattr("OFMDim") + self.code_gen_dict["$DEFINES$"] = [ + """ + #define ConvKernelDim1_x {}\n + #define ConvKernelDim1_y {}\n + #define IFMChannels1 {}\n + #define Input_precision1 {}\n + #define IFMDim1_x {}\n + #define IFMDim1_y {}\n + #define OFMDim1_x {}\n + #define OFMDim1_y {}\n + #define SIMD1 {}\n + #define Stride1_x {}\n + #define Stride1_y {}\n + #define numReps {} + """.format( + k_x, + k_y, + ifm_ch, + ifm_precision, + ifm_dim_x, + ifm_dim_y, + ofm_dim_x, + ofm_dim_y, + simd, + stride_x, + stride_y, + numReps, + ) + ] + + def read_npy_data(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + dtype = self.get_input_datatype() + if dtype == DataType["BIPOLAR"]: + # use binary for bipolar storage + dtype = DataType["BINARY"] + elem_bits = dtype.bitwidth() + packed_bits = self.get_instream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + elem_hls_type = dtype.get_hls_datatype_str() + npy_type = "float" + npy_in = "%s/input_0.npy" % code_gen_dir + self.code_gen_dict["$READNPYDATA$"] = [] + self.code_gen_dict["$READNPYDATA$"].append( + 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' + % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + ) + + def strm_decl(self): + self.code_gen_dict["$STREAMDECLARATIONS$"] = [] + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + ) + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> out ("out");'.format(self.get_outstream_width()) + ) + + def docompute(self): + ram_style = self.get_nodeattr("ram_style") + map_to_hls_ram_style = { + "auto": "ap_resource_dflt()", + "block": "ap_resource_bram()", + "distributed": "ap_resource_lutram()", + "ultra": "ap_resource_uram()", + } + hls_ram_style = map_to_hls_ram_style[ram_style] + + # check which ConvolutionInputGenerator is needed + if self.use_parallel_window_output(): + hls_call = "ConvolutionInputGenerator_1D_parallel" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{} + (in0, out, numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + else: + hls_call = "ConvolutionInputGenerator_NonSquare" + dilation_h, dilation_w = self.get_nodeattr("Dilation") + if dilation_h > 1 or dilation_w > 1: + hls_call += "_Dilated" + if self.get_nodeattr("depthwise") == 1: + hls_call += "_dws" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{} + (in0, out, numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + elif self.get_nodeattr("depthwise") == 1: + hls_call += "_dws" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{} (in0, out, numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + else: + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{} (in0, out, 
numReps, {});""".format( + hls_call, hls_ram_style + ) + ] + + def dataoutstrm(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + dtype = self.get_output_datatype() + if dtype == DataType["BIPOLAR"]: + # use binary for bipolar storage + dtype = DataType["BINARY"] + elem_bits = dtype.bitwidth() + packed_bits = self.get_outstream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + elem_hls_type = dtype.get_hls_datatype_str() + npy_type = "float" + npy_out = "%s/output.npy" % code_gen_dir + oshape = self.get_folded_output_shape() + oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") + if self.use_parallel_window_output(): + # pass the number of pixels in the folded output to apintstream2npy, needed + # to unpack the ouput correctly and reverse only the inner SIMD dimension + k_h, k_w = self.get_nodeattr("ConvKernelDim") + multi_pixel_out = k_h * k_w + else: + multi_pixel_out = 1 + + self.code_gen_dict["$DATAOUTSTREAM$"] = [ + 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", true, 1, %d);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + oshape_cpp_str, + npy_out, + multi_pixel_out, + ) + ] + + def save_as_npy(self): + self.code_gen_dict["$SAVEASCNPY$"] = [] + + def blackboxfunction(self): + if self.use_parallel_window_output(): + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void {}(hls::stream> &in0, + hls::stream> + &out)""".format( + self.onnx_node.name + ) + ] + else: + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void {}(hls::stream> &in0, + hls::stream> &out)""".format( + self.onnx_node.name + ) + ] + + def pragmas(self): + self.code_gen_dict["$PRAGMAS$"] = ["#pragma HLS INTERFACE axis port=in0"] + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE axis port=out") + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE ap_ctrl_none port=return" + ) + + def generate_hdl(self): + #todo: generate into some code gen dict + f_debug = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_debuginfo.log"), "w") + code_gen_dict = {} + + #-------------------- + # init hyperparameters + # for 1D case: it does not matter if dummy dim is x or y + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + ofm_dim = self.get_nodeattr("OFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + + n = 1 + h, w = ifm_dim + c = 1#ifm_ch not considered atm (always parallelize across c) + k_h, k_w = k + pad = [0,0,0,0] + pad_val = 0 + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + conv_c = 99 + + # init folding config + simd = self.get_nodeattr("SIMD") + mmv_in = 1 + mmv_out = k_h*k_w + + assert simd==ifm_ch, "Constraint violated: SIMD = C" + assert mmv_in==1, "Constraint violated: MMV_IN = 1" + assert mmv_out==k_h*k_w, "Constraint violated: mmv_out = K" + + # how many "unused" registers are allowed between buffer positions that will be accessed in parallel + # example: + # 0: only consecutive access patterns will be implemented in regs, rest in BRAM line buffers + # 2: [0, 3, 6] access pattern is still allowed and will be implemented with 1 7-position shift reg + REG_BRAM_THRESHOLD = 1 + #-------------------- + + in_shape = (n,c,h,w) #NCHW + + in_image = np.empty(in_shape, dtype=int) + + for index, x in np.ndenumerate(in_image): + # "HWC" dummy values + val = int((index[2]+1)*100+(index[3]+1)*10+(index[1]+1)*1) + in_image[index] = val + + in_image_padded = np.pad( + in_image, + ((0, 0), (0, 0), 
(pad[0], pad[2]), (pad[1], pad[3])), + mode="constant", + constant_values=pad_val, + ) + in_shape_padded = in_image_padded.shape + h_padded = in_shape_padded[2] + w_padded = in_shape_padded[3] + + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) + out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) + + f_debug.write("\n"+"in shape " + str(in_shape)) + f_debug.write("\n"+"in shape padded " + str(in_shape_padded)) + f_debug.write("\n"+"conv out shape " + str((n,conv_c,out_dim_h,out_dim_w))) + f_debug.write("\n"+"im2col out shape " + str((n,out_dim_h,out_dim_w,k_h*k_w*c))) + + idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( + in_shape, + k_h, + k_w, + pad, + stride_h, + stride_w, + dilation_h, + dilation_w + ) + + f_debug.write("\n"+"c indices") + f_debug.write("\n"+str(idx_c)) + f_debug.write("\n"+"h indices") + f_debug.write("\n"+str(idx_h)) + f_debug.write("\n"+"w indices") + f_debug.write("\n"+str(idx_w)) + + cols = in_image_padded[:, idx_c, idx_h, idx_w] + cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) + + f_debug.write("\n"+"cols (shape %s)" % str(cols.shape)) + f_debug.write("\n"+str(cols)) + + # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW + out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) + # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) + out_image = out_image.transpose(0, 4, 5, 2, 3, 1) + out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) + + f_debug.write("\n"+"output (shape %s)" % str(out_image.shape)) + f_debug.write("\n"+str(out_image)) + + f_debug.write("\n"+"h indices") + f_debug.write("\n"+str(idx_h)) + f_debug.write("\n"+"w indices") + f_debug.write("\n"+str(idx_w)) + + idx_px = idx_h*w+idx_w + f_debug.write("\n"+"sequential pixel indices") + f_debug.write("\n"+str(idx_px)) + + buffer = [] + buffer_max_size = 0 + # buffer schedule (write from input, read to output) + schedule_write = [] + schedule_read = [] + next_in_px = 0 + + idx_px_relative = idx_px.copy() + + # compute schedule and buffer read pattern + Y, X = idx_px_relative.shape + for x in range(X): + # load missing inputs into buffer + for y in range(Y): + while int(idx_px_relative[y,x]) not in buffer: + buffer.append(next_in_px) + next_in_px += 1 + schedule_write.append(1) + schedule_read.append(0) + + # discard unused buffer elements (assumes in-order access) + oldest_px = min(idx_px_relative[:,x]) + while buffer[0] < oldest_px: + buffer.pop(0) + + # adjust relative buffer index + for y in range(Y): + idx_px_relative[y,x] -= oldest_px + + # record max needed buffer depth + if len(buffer) > buffer_max_size: + buffer_max_size = len(buffer) + + # read from buffer + schedule_read.append(1) + + # simultaneously load next pixel(s) into buffer if there are any left + if next_in_px > (h_padded*w_padded-1): + schedule_write.append(0) + else: + buffer.append(next_in_px) + next_in_px += 1 + schedule_write.append(1) + + + # find buffer access patterns + buffer_access_patterns = [] + for x in range(X): + if idx_px_relative[:,x].tolist() not in buffer_access_patterns: + buffer_access_patterns.append(idx_px_relative[:,x].tolist()) + + + f_debug.write("\n"+"max buffer size observed: %d" %(buffer_max_size)) + f_debug.write("\n"+"output vector elements: relative buffer indices") + f_debug.write("\n"+str(idx_px_relative)) + f_debug.write("\n"+"found %d buffer access patterns:" % len(buffer_access_patterns)) + 
f_debug.write("\n"+str(buffer_access_patterns)) + f_debug.write("\n"+"required parallel-access registers for mmv_out=k: %d" % len(sum(buffer_access_patterns,[]))) + f_debug.write("\n"+"buffer write schedule (%d cycles)" % len(schedule_write)) + f_debug.write("\n"+str(schedule_write)) + f_debug.write("\n"+"writing buffer in %d cycles" % schedule_write.count(1)) + f_debug.write("\n"+"buffer read schedule (%d cycles)" % len(schedule_read)) + f_debug.write("\n"+str(schedule_read)) + f_debug.write("\n"+"reading buffer in %d cycles" % schedule_read.count(1)) + + assert len(schedule_write) == len(schedule_read), "ERROR: Schedules have different lenghts" + cycles_total = len(schedule_write) + + assert schedule_read.count(1) == self.get_number_output_values(), "ERROR: Reading buffer in fewer cycles than expected" + + code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] + code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] + code_gen_dict["$SIMD$"] = [str(simd)] + code_gen_dict["$MMV_IN$"] = [str(mmv_in)] + code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] + code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] + + # determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) + assert len(buffer_access_patterns) == 1, "ERROR: Buffer access pattern is not static" + buf_static_access_pattern = buffer_access_patterns[0] + reg_fifos = [] + bram_fifos = [] + current = [] + for i in range(len(buf_static_access_pattern)): + access_idx = buf_static_access_pattern[i] + if len(current) == 0: + current.append(access_idx) + else: + # assume non-decreasing index order in access pattern + distance = access_idx - max(current) + if not (distance-1 > REG_BRAM_THRESHOLD): + for i in range(distance-1): + # insert dummy into REG FIFO (not read as part of window) + current.append(-1) + # assign this access to same REG FIFO as previous one + current.append(access_idx) + else: + # assign skipped accesses to new BRAM FIFO + bram_fifos.append([-1]*(distance-1)) + # start with new REG FIFO + reg_fifos.append(current) + current = [] + current.append(access_idx) + reg_fifos.append(current) + + f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) + f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) + f_debug.write("\n"+str(reg_fifos)) + f_debug.write("\n"+"%d BRAM FIFOs (line buffers):" % len(bram_fifos)) + f_debug.write("\n"+str(bram_fifos)) + + code_gen_dict["$GENERATE_REG_FIFOS$"] = [] + for i in range(len(reg_fifos)): + code_gen_dict["$GENERATE_REG_FIFOS$"].append( + """parameter reg_fifo_{id}_len = {len}; + reg [IN_WIDTH-1:0] reg_fifo_{id} [reg_fifo_{id}_len-1:0]; + """.format(id=i, len=len(reg_fifos[i]))) + + #todo: generate actual bram shift buffers instead of regs + code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] + for i in range(len(bram_fifos)): + code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( + """parameter bram_fifo_{id}_len = {len}; + reg [IN_WIDTH-1:0] bram_fifo_{id} [bram_fifo_{id}_len-1:0]; + """.format(id=i, len=len(bram_fifos[i]))) + + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] + out_idx = mmv_out-1 + for fifo_id, reg_fifo in enumerate(reg_fifos): + for fifo_idx, access_idx in enumerate(reg_fifo): + if(access_idx != -1): + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + "assign data_out[IN_WIDTH*{out_idx}+:IN_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( + out_idx=out_idx, fifo_id=fifo_id, 
fifo_idx=fifo_idx, access_idx=access_idx + ) + ) + # reversal: out_idx=0 -> oldest buffer element -> highest access_idx + out_idx = out_idx-1 + assert out_idx==-1, "ERROR: Not all output vector elements connected" + + code_gen_dict["$GENERATE_SHIFT_LOGIC$"] = [] + for i in range(len(reg_fifos)): + if i == 0: + # first FIFO containing newest elements -> input comes from input reg + code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( + """for (i=reg_fifo_{fifo_id}_len-1; i>0; i=i-1) + reg_fifo_{fifo_id}[i] <= reg_fifo_{fifo_id}[i-1]; + reg_fifo_{fifo_id}[0] <= reg_input;""".format( + fifo_id=i, + ) + ) + else: + # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) + input_fifo_id = i-1 + code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( + """for (i=reg_fifo_{fifo_id}_len-1; i>0; i=i-1) + reg_fifo_{fifo_id}[i] <= reg_fifo_{fifo_id}[i-1]; + reg_fifo_{fifo_id}[0] <= bram_fifo_{input_fifo_id} [bram_fifo_{input_fifo_id}_len-1];""".format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + ) + for i in range(len(bram_fifos)): + input_fifo_id = i + code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( + """for (i=bram_fifo_{fifo_id}_len-1; i>0; i=i-1) + bram_fifo_{fifo_id}[i] <= bram_fifo_{fifo_id}[i-1]; + bram_fifo_{fifo_id}[0] <= reg_fifo_{input_fifo_id} [reg_fifo_{input_fifo_id}_len-1];""".format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + ) + + # Generate read schedule (when data is read from input, written to buffer) + code_gen_dict["$GENERATE_READ_SCHEDULE$"] = [] + schedule_as_string = "" + #todo: change naming to swap write/read + for i in schedule_write: + if i == 1: + schedule_as_string += "1'b1," + else: + schedule_as_string += "1'b0," + schedule_as_string = schedule_as_string[:-1] # remove trailing ',' + code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( + "localparam [0:{len}-1] READ_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) + ) + code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( + "assign read_state = READ_SCHEDULE[cycle];" + ) + + # Generate write schedule (when data is written to output, read from buffer) + code_gen_dict["$GENERATE_WRITE_SCHEDULE$"] = [] + schedule_as_string = "" + #todo: change naming to swap write/read + for i in schedule_read: + if i == 1: + schedule_as_string += "1'b1," + else: + schedule_as_string += "1'b0," + schedule_as_string = schedule_as_string[:-1] # remove trailing ',' + code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( + "localparam [0:{len}-1] WRITE_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) + ) + code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( + "assign write_state = WRITE_SCHEDULE[cycle];" + ) + + with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: + template = f.read() + + for key in code_gen_dict: + # transform list into long string separated by '\n' + code_gen_line = "\n".join(code_gen_dict[key]) + template = template.replace(key, code_gen_line) + f = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_generated.v"), "w") + f.write(template) + f.close() + f_debug.close() + + def prepare_rtlsim(self): + """Creates a Verilator emulation library for the RTL code generated + for this node, sets the rtlsim_so attribute to its path and returns + a PyVerilator wrapper around it.""" + #modified to use generated verilog instead of HLS output products + + self.generate_hdl() + + if PyVerilator is None: + raise ImportError("Installation of PyVerilator is required.") + verilog_paths = ["/workspace/finn/finn-rtllib/swg/"] + verilog_files = 
["swg_hdl_generated.v"] + # build the Verilator emu library + sim = PyVerilator.build( + verilog_files, + build_dir=make_build_dir("pyverilator_" + self.onnx_node.name + "_"), + verilog_path=verilog_paths, + trace_depth=get_rtlsim_trace_depth(), + top_module_name=self.get_verilog_top_module_name(), + ) + # save generated lib filename in attribute + self.set_nodeattr("rtlsim_so", sim.lib._name) + return sim diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py new file mode 100755 index 0000000000..f7a7241333 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -0,0 +1,265 @@ +# Copyright (c) 2020, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import pytest + +import numpy as np +from onnx import TensorProto, helper + +import finn.core.onnx_exec as oxe +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer +from finn.core.datatype import DataType +from finn.core.modelwrapper import ModelWrapper +from finn.custom_op.general.im2col import compute_conv_output_dim +from finn.custom_op.registry import getCustomOp +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.general import GiveUniqueNodeNames +from finn.util.basic import gen_finn_dt_tensor + + +def make_single_im2col_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt +): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + im2col_node = helper.make_node( + "Im2Col", + ["inp"], + ["outp"], + domain="finn.custom_op.general", + stride=[stride_h, stride_w], + kernel_size=[k_h, k_w], + input_shape=str((1, ifm_dim_h, ifm_dim_w, ifm_ch)), + dilations=[dilation_h, dilation_w], + pad_amount=[0, 0, 0, 0], + pad_value=0, + ) + graph = helper.make_graph( + nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] + ) + + model = helper.make_model(graph, producer_name="im2col-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + return model + + +def make_single_slidingwindow_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw=0 +): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + SlidingWindow_node = helper.make_node( + "ConvolutionInputGenerator_rtl", + ["inp"], + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ifm_dim_h, ifm_dim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=simd, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=idt.name, + outputDataType=odt.name, + depthwise=dw, + ) + graph = helper.make_graph( + nodes=[SlidingWindow_node], + name="slidingwindow_graph", + inputs=[inp], + outputs=[outp], + ) + + model = helper.make_model(graph, producer_name="slidingwindow-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + #DEBUG + swg_node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] + swg_inst = getCustomOp(swg_node) + swg_inst.set_nodeattr("rtlsim_trace", "/workspace/finn/finn-rtllib/swg/swg_test_trace.vcd") + + return model + + +def 
prepare_inputs(input_tensor): + return {"inp": input_tensor} + + +# input datatype +@pytest.mark.parametrize("idt", [DataType["INT4"]]) +# kernel size +@pytest.mark.parametrize("k", [[3, 3]]) +# input dimension +@pytest.mark.parametrize("ifm_dim", [[6, 11]]) +# input channels +@pytest.mark.parametrize("ifm_ch", [2]) +# Stride +@pytest.mark.parametrize("stride", [[1, 2]]) +# Dilation +@pytest.mark.parametrize("dilation", [[1, 2]]) +# execution mode +@pytest.mark.parametrize("exec_mode", ["rtlsim"]) +# input channel parallelism ("SIMD") +@pytest.mark.parametrize("simd", [2]) +# depthwise +@pytest.mark.parametrize("dw", [0]) +# Flip dimensions +@pytest.mark.parametrize("flip", [False]) +@pytest.mark.slow +@pytest.mark.vivado +def test_fpgadataflow_slidingwindow_rtl( + idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw, flip +): + if flip: + k = k[::-1] + ifm_dim = ifm_dim[::-1] + stride = stride[::-1] + dilation = dilation[::-1] + + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + + #if (dilation_h > 1 or dilation_w > 1) and (stride_h > 1 or stride_w > 1): + # pytest.skip( + # """Dilation value greater than 1 and stride greater than 1 + # currently not supported for 1D convolutions""" + # ) + if simd > ifm_ch: + pytest.skip("SIMD cannot be larger than number of input channels") + + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) + ofm_dim = [ofm_dim_h, ofm_dim_w] + + x = gen_finn_dt_tensor(idt, (1, ifm_dim_h, ifm_dim_w, ifm_ch)) + model = make_single_slidingwindow_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + simd=simd, + stride=stride, + dilation=dilation, + idt=idt, + dw=dw, + ) + + if exec_mode == "cppsim": + model = model.transform(SetExecMode("cppsim")) + model = model.transform(PrepareCppSim()) + model = model.transform(CompileCppSim()) + elif exec_mode == "rtlsim": + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + else: + raise Exception("Unknown exec_mode in test_fpgadataflow_slidingwindow") + + # prepare input data + input_dict = prepare_inputs(x) + # execute model + y_produced = oxe.execute_onnx(model, input_dict)["outp"] + golden = make_single_im2col_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + simd=simd, + stride=stride, + dilation=dilation, + idt=idt, + ) + y_expected = oxe.execute_onnx(golden, input_dict)["outp"] + + #DEBUG + print("-------expected:") + print(y_expected) + print("--------produced:") + print(y_produced) + + if dw == 0: + assert (y_produced == y_expected).all() + else: + y_expected = y_expected.reshape( + 1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd + ) + y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) + y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) + assert (y_produced == y_expected).all() + + + # if exec_mode == "rtlsim": + # node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] + # inst = getCustomOp(node) + # cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + # exp_cycles_dict = model.analysis(exp_cycles_per_layer) + # exp_cycles = exp_cycles_dict[node.name] + # assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + # assert exp_cycles != 0 From 
888d69c33c275c99d79fd9ea0bcfcbfaa2a05289 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 25 Jan 2022 21:15:40 +0100 Subject: [PATCH 002/628] Basic custom_op integration, fixed AXIS handshake --- finn-rtllib/swg/swg_hdl_template.v | 95 +++-- .../convolutioninputgenerator_rtl.py | 361 +++++------------- ...est_fpgadataflow_convinputgenerator_rtl.py | 17 +- 3 files changed, 148 insertions(+), 325 deletions(-) diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v index b0e00ea4d2..1950757245 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -6,7 +6,7 @@ // =========================================================== `timescale 1 ns / 1 ps -module window_buffer +module $TOP_MODULE_NAME$_wb #( parameter IN_WIDTH = 1, //c*bit-width parameter OUT_WIDTH = 1, //c*bit-width*MMV_out @@ -76,9 +76,11 @@ parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; //IO ports input ap_clk; input ap_rst_n; +(* X_INTERFACE_PARAMETER = "FREQ_HZ 250000000.000000" *) input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; input in0_V_V_TVALID; output in0_V_V_TREADY; +(* X_INTERFACE_PARAMETER = "FREQ_HZ 250000000.000000" *) output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; output out_V_V_TVALID; input out_V_V_TREADY; @@ -87,7 +89,7 @@ input out_V_V_TREADY; wire [BUF_IN_WIDTH-1:0] window_buffer_in; wire [BUF_OUT_WIDTH-1:0] window_buffer_out; wire window_buffer_shift_enable; -window_buffer +$TOP_MODULE_NAME$_wb #( .IN_WIDTH(BUF_IN_WIDTH), .OUT_WIDTH(BUF_OUT_WIDTH), @@ -102,28 +104,44 @@ window_buffer_inst ); //FSM state -reg [1:0] state; -parameter STATE_RESET = 0, STATE_OPERATE = 1, S2 = 2; +//reg [1:0] state; +//parameter STATE_RESET = 0, STATE_OPERATE = 1, S2 = 2; //main cycle counter (where either read/write/both happen, resets for each image) integer cycle; +integer cycle_last; //read/write loop state wire read_state; wire write_state; +reg write_done; //keep track if W of current cycle was already completed, but we still wait on a R in the same cycle -//output registers -reg out_V_V_TVALID_reg; +wire write_blocked; +assign write_blocked = write_state && !out_V_V_TREADY && !write_done; + +wire read_ok; +// with transition to next cycle: +// want to read can read source is ready (waiting on VALID allowed) +assign read_ok = read_state && !write_blocked && in0_V_V_TVALID; + +wire write_ok; +// with transition to next cycle: +// output is VALID sink is ready sink has already read (we are waiting on source) +assign write_ok = write_state && (out_V_V_TREADY || write_done); + +wire advance; +// includes waiting on W if W-only cycle: wait only on W +assign advance = read_ok || (!read_state && write_ok); //assign buffer control //todo: if mmv_out < k: might not shift and/or write for multiple read_state cycles -assign window_buffer_shift_enable = (read_state && in0_V_V_TVALID) || write_state; +assign window_buffer_shift_enable = advance; //assign I/O ports assign window_buffer_in = in0_V_V_TDATA; -assign in0_V_V_TREADY = read_state; //accept data whenever read loop wants to read -assign out_V_V_TDATA = window_buffer_out; //out_V_V_TDATA_reg; -assign out_V_V_TVALID = out_V_V_TVALID_reg; +assign out_V_V_TDATA = window_buffer_out; +assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) +assign out_V_V_TVALID = ap_rst_n && write_state && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) //read schedule //todo: generate differently @@ -133,52 +151,29 @@ 
$GENERATE_READ_SCHEDULE$ //todo: generate differently $GENERATE_WRITE_SCHEDULE$ -//read process (writing to buffer) +//main process for advancing cycle count always @ (posedge ap_clk) begin if (ap_rst_n == 1'b0) begin - state <= STATE_RESET; + cycle <= 0; + cycle_last <= 0; end else begin - case (state) - STATE_RESET: begin - state <= STATE_OPERATE; + if (advance) begin + write_done <= 1'b0; //reset flag + + //count cycle (completed R or W or both (depending on current cycle)) + cycle_last <= cycle; //cycle last is used to generate write_state (due to how schedule is constructed) + if (cycle == CYCLES_TOTAL-1) cycle <= 0; + else + cycle <= cycle+1; + + end else begin + if (write_ok) begin + // successful W in this cycle, but R still outstanding + write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! end - STATE_OPERATE: begin - if (read_state && in0_V_V_TVALID) begin - //read into buffer - //done in concurrent assignment - //count cycle (R) - cycle <= cycle+1; - if (cycle == CYCLES_TOTAL-1) - state <= STATE_RESET; - end else if (write_state && out_V_V_TREADY) begin - cycle <= cycle+1; //count cycle (or W) - if (cycle == CYCLES_TOTAL-1) - state <= STATE_RESET; - end - end - endcase + end end end -//write process (reading from buffer) -always @ (posedge ap_clk) begin - if (ap_rst_n == 1'b0) begin - end else begin - case (state) - STATE_RESET: begin - end - STATE_OPERATE: begin - if (write_state && out_V_V_TREADY) begin - //write from buffer - //todo: VALID seems to be deasserted 1 cycle too late?! - out_V_V_TVALID_reg <= 1'b1; - end else begin - out_V_V_TVALID_reg <= 1'b0; - end - end - endcase - end -end - endmodule //ConvolutionInputGenerator1D_0_ConvolutionInputGenerator1D_0 diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 9908bbb30d..2e8e8ec75e 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -96,6 +96,7 @@ def get_nodeattr_types(self): "distributed", {"auto", "block", "distributed", "ultra"}, ), + "gen_top_module": ("s", False, ""), } my_attrs.update(super().get_nodeattr_types()) return my_attrs @@ -348,7 +349,12 @@ def execute_node(self, context, graph): # TODO ensure codegen dir exists if mode == "cppsim": - code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + #code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + raise Exception( + """cppsim not possible for RTL SWG""".format( + mode + ) + ) elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: @@ -377,44 +383,27 @@ def execute_node(self, context, graph): reshaped_input = inp.copy() np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) - if mode == "cppsim": - # execute the precompiled model - super().exec_precompiled_singlenode_model() - # load output npy file - super().npy_to_dynamic_output(context) - assert ( - context[node.output[0]].shape == folded_oshape - ), "cppsim \ - did not produce expected ofolded utput shape" - context[node.output[0]] = context[node.output[0]].reshape(*exp_oshape) - elif mode == "rtlsim": - sim = self.get_rtlsim() - nbits = self.get_instream_width() - rtlsim_inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) - super().reset_rtlsim(sim) - super().toggle_clk(sim) - rtlsim_output = self.rtlsim(sim, rtlsim_inp) - odt = export_idt - target_bits = odt.bitwidth() - packed_bits = 
self.get_outstream_width() - out_npy_path = "{}/output.npy".format(code_gen_dir) - out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) - # load and reshape output - output = np.load(out_npy_path) - output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) - context[node.output[0]] = output - else: - raise Exception( - """Invalid value for attribute exec_mode! Is currently set to: {} - has to be set to one of the following value ("cppsim", "rtlsim")""".format( - mode - ) - ) + sim = self.get_rtlsim() + nbits = self.get_instream_width() + rtlsim_inp = npy_to_rtlsim_input( + "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + ) + super().reset_rtlsim(sim) + super().toggle_clk(sim) + rtlsim_output = self.rtlsim(sim, rtlsim_inp) + odt = export_idt + target_bits = odt.bitwidth() + packed_bits = self.get_outstream_width() + out_npy_path = "{}/output.npy".format(code_gen_dir) + out_shape = self.get_folded_output_shape() + rtlsim_output_to_npy( + rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits + ) + # load and reshape output + output = np.load(out_npy_path) + output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) + context[node.output[0]] = output + # binary -> bipolar if needed if self.get_output_datatype() == DataType["BIPOLAR"]: out = context[node.output[0]] @@ -426,244 +415,37 @@ def execute_node(self, context, graph): shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch).""" def global_includes(self): - self.code_gen_dict["$GLOBALS$"] = ['#include "slidingwindow.h"'] + pass def defines(self, var): - numReps = 1 - ( - ifm_ch, - ifm_dim, - ofm_dim, - k, - stride, - dilation, - ) = self.get_1d_conv_attrs_normalized() - simd = self.get_nodeattr("SIMD") - ifm_precision = self.get_input_datatype().bitwidth() - ifm_dim_y, ifm_dim_x = ifm_dim - ofm_dim_y, ofm_dim_x = ofm_dim - k_y, k_x = k - dilation_y, dilation_x = dilation - # For a 1d convolution with stride=[S,1] or [1,S], the finn-hlslib function - # of ConvInpGen must be created with [stride_y, stride_x] = [S, S]. 
- # TODO: changes in finn-hlslib (slidingwindow.h) - stride_y = np.prod(stride) - stride_x = np.prod(stride) - - if dilation_x > 1: - assert ( - dilation_y == 1 - ), "Dilation value greater than 1 along y-axis is not yet supported" - self.code_gen_dict["$DEFINES$"] = [ - """ - #define ConvKernelDim1_x {}\n - #define ConvKernelDim1_y {}\n - #define IFMChannels1 {}\n - #define Input_precision1 {}\n - #define IFMDim1_x {}\n - #define IFMDim1_y {}\n - #define OFMDim1_x {}\n - #define OFMDim1_y {}\n - #define SIMD1 {}\n - #define Stride1_x {}\n - #define Stride1_y {}\n - #define Dilation1_x {}\n - #define Dilation1_y {}\n - #define numReps {} - """.format( - k_x, - k_y, - ifm_ch, - ifm_precision, - ifm_dim_x, - ifm_dim_y, - ofm_dim_x, - ofm_dim_y, - simd, - stride_x, - stride_y, - dilation_x, - dilation_y, - numReps, - ) - ] - else: - ofm_dim = self.get_nodeattr("OFMDim") - self.code_gen_dict["$DEFINES$"] = [ - """ - #define ConvKernelDim1_x {}\n - #define ConvKernelDim1_y {}\n - #define IFMChannels1 {}\n - #define Input_precision1 {}\n - #define IFMDim1_x {}\n - #define IFMDim1_y {}\n - #define OFMDim1_x {}\n - #define OFMDim1_y {}\n - #define SIMD1 {}\n - #define Stride1_x {}\n - #define Stride1_y {}\n - #define numReps {} - """.format( - k_x, - k_y, - ifm_ch, - ifm_precision, - ifm_dim_x, - ifm_dim_y, - ofm_dim_x, - ofm_dim_y, - simd, - stride_x, - stride_y, - numReps, - ) - ] + pass def read_npy_data(self): - code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") - dtype = self.get_input_datatype() - if dtype == DataType["BIPOLAR"]: - # use binary for bipolar storage - dtype = DataType["BINARY"] - elem_bits = dtype.bitwidth() - packed_bits = self.get_instream_width() - packed_hls_type = "ap_uint<%d>" % packed_bits - elem_hls_type = dtype.get_hls_datatype_str() - npy_type = "float" - npy_in = "%s/input_0.npy" % code_gen_dir - self.code_gen_dict["$READNPYDATA$"] = [] - self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) - ) + pass def strm_decl(self): - self.code_gen_dict["$STREAMDECLARATIONS$"] = [] - self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) - ) - self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) - ) + pass def docompute(self): - ram_style = self.get_nodeattr("ram_style") - map_to_hls_ram_style = { - "auto": "ap_resource_dflt()", - "block": "ap_resource_bram()", - "distributed": "ap_resource_lutram()", - "ultra": "ap_resource_uram()", - } - hls_ram_style = map_to_hls_ram_style[ram_style] - - # check which ConvolutionInputGenerator is needed - if self.use_parallel_window_output(): - hls_call = "ConvolutionInputGenerator_1D_parallel" - self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} - (in0, out, numReps, {});""".format( - hls_call, hls_ram_style - ) - ] - else: - hls_call = "ConvolutionInputGenerator_NonSquare" - dilation_h, dilation_w = self.get_nodeattr("Dilation") - if dilation_h > 1 or dilation_w > 1: - hls_call += "_Dilated" - if self.get_nodeattr("depthwise") == 1: - hls_call += "_dws" - self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} - (in0, out, numReps, {});""".format( - hls_call, hls_ram_style - ) - ] - elif self.get_nodeattr("depthwise") == 1: - hls_call += "_dws" - self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} (in0, out, numReps, {});""".format( - hls_call, hls_ram_style - ) - ] - else: - self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} (in0, 
out, numReps, {});""".format( - hls_call, hls_ram_style - ) - ] + pass def dataoutstrm(self): - code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") - dtype = self.get_output_datatype() - if dtype == DataType["BIPOLAR"]: - # use binary for bipolar storage - dtype = DataType["BINARY"] - elem_bits = dtype.bitwidth() - packed_bits = self.get_outstream_width() - packed_hls_type = "ap_uint<%d>" % packed_bits - elem_hls_type = dtype.get_hls_datatype_str() - npy_type = "float" - npy_out = "%s/output.npy" % code_gen_dir - oshape = self.get_folded_output_shape() - oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") - if self.use_parallel_window_output(): - # pass the number of pixels in the folded output to apintstream2npy, needed - # to unpack the ouput correctly and reverse only the inner SIMD dimension - k_h, k_w = self.get_nodeattr("ConvKernelDim") - multi_pixel_out = k_h * k_w - else: - multi_pixel_out = 1 - - self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", true, 1, %d);' - % ( - packed_hls_type, - elem_hls_type, - elem_bits, - npy_type, - oshape_cpp_str, - npy_out, - multi_pixel_out, - ) - ] + pass def save_as_npy(self): - self.code_gen_dict["$SAVEASCNPY$"] = [] + pass def blackboxfunction(self): - if self.use_parallel_window_output(): - self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> - &out)""".format( - self.onnx_node.name - ) - ] - else: - self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out)""".format( - self.onnx_node.name - ) - ] + pass def pragmas(self): - self.code_gen_dict["$PRAGMAS$"] = ["#pragma HLS INTERFACE axis port=in0"] - self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE axis port=out") - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + pass def generate_hdl(self): - #todo: generate into some code gen dict - f_debug = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_debuginfo.log"), "w") + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + f_debug = open(os.path.join(code_gen_dir, "swg_hdl_debuginfo.log"), "w") + #debug: + #f_debug = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_debuginfo.log"), "w") code_gen_dict = {} #-------------------- @@ -844,6 +626,8 @@ def generate_hdl(self): assert schedule_read.count(1) == self.get_number_output_values(), "ERROR: Reading buffer in fewer cycles than expected" code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] + #save top module name so we can refer to it even after this node has been renamed (e.g. 
by GiveUniqueNodeNames(prefix) during MakeZynqProject) + self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] code_gen_dict["$SIMD$"] = [str(simd)] code_gen_dict["$MMV_IN$"] = [str(mmv_in)] @@ -976,8 +760,11 @@ def generate_hdl(self): "localparam [0:{len}-1] WRITE_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) ) code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - "assign write_state = WRITE_SCHEDULE[cycle];" + "assign write_state = WRITE_SCHEDULE[cycle_last];" ) + #code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( + # "assign write_state_next = WRITE_SCHEDULE[cycle_next];" + #) with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: template = f.read() @@ -986,23 +773,33 @@ def generate_hdl(self): # transform list into long string separated by '\n' code_gen_line = "\n".join(code_gen_dict[key]) template = template.replace(key, code_gen_line) - f = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_generated.v"), "w") + + f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_hdl_gen.v"), "w") + #debug: + #f = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_generated.v"), "w") f.write(template) f.close() f_debug.close() + #set ipgen_path and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain + self.set_nodeattr("ipgen_path", code_gen_dir) + self.set_nodeattr("ip_path", code_gen_dir) + def prepare_rtlsim(self): """Creates a Verilator emulation library for the RTL code generated for this node, sets the rtlsim_so attribute to its path and returns a PyVerilator wrapper around it.""" #modified to use generated verilog instead of HLS output products - self.generate_hdl() - if PyVerilator is None: raise ImportError("Installation of PyVerilator is required.") - verilog_paths = ["/workspace/finn/finn-rtllib/swg/"] - verilog_files = ["swg_hdl_generated.v"] + + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + verilog_paths = [code_gen_dir] + verilog_files = [self.get_nodeattr("gen_top_module") + "_hdl_gen.v"] + #debug: + #verilog_paths = ["/workspace/finn/finn-rtllib/swg/"] + #verilog_files = ["swg_hdl_generated.v"] # build the Verilator emu library sim = PyVerilator.build( verilog_files, @@ -1014,3 +811,37 @@ def prepare_rtlsim(self): # save generated lib filename in attribute self.set_nodeattr("rtlsim_so", sim.lib._name) return sim + + + def code_generation_ipi(self): + """Constructs and returns the TCL for node instantiation in Vivado IPI.""" + vlnv = self.get_nodeattr("ip_vlnv") + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + + #cmd = ["create_bd_cell -type ip -vlnv %s %s" % (vlnv, self.onnx_node.name)] + + cmd = ["add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_hdl_gen.v")), + "create_bd_cell -type module -reference %s %s" % (self.get_nodeattr("gen_top_module"), self.onnx_node.name)] + + #update_compile_order -fileset sources_1 + #add_files -norecurse C:/Users/felix/Downloads/swg_hdl_generated.v + #update_compile_order -fileset sources_1 + #create_bd_cell -type module -reference ConvolutionInputGenerator_rtl_0_ConvolutionInputGenerator_rtl_0 ConvolutionInputGene_0 + + return cmd + + def code_generation_ipgen(self, model, fpgapart, clk): + """Generates c++ code and tcl script for ip generation.""" + self.generate_hdl() + + def ipgen_singlenode_code(self): + """Builds the bash script for ip generation using the 
CallHLS from + finn.util.hls.""" + pass + + def code_generation_cppsim(self, model): + """Generates c++ code for simulation (cppsim).""" + pass + + def compile_singlenode_code(self): + pass \ No newline at end of file diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index f7a7241333..0845dc2fca 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -151,15 +151,15 @@ def prepare_inputs(input_tensor): # input datatype @pytest.mark.parametrize("idt", [DataType["INT4"]]) # kernel size -@pytest.mark.parametrize("k", [[3, 3]]) +@pytest.mark.parametrize("k", [[3, 1]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[6, 11]]) +@pytest.mark.parametrize("ifm_dim", [[8, 1]]) # input channels @pytest.mark.parametrize("ifm_ch", [2]) # Stride -@pytest.mark.parametrize("stride", [[1, 2]]) +@pytest.mark.parametrize("stride", [[1, 1]]) # Dilation -@pytest.mark.parametrize("dilation", [[1, 2]]) +@pytest.mark.parametrize("dilation", [[1, 1]]) # execution mode @pytest.mark.parametrize("exec_mode", ["rtlsim"]) # input channel parallelism ("SIMD") @@ -210,17 +210,14 @@ def test_fpgadataflow_slidingwindow_rtl( ) if exec_mode == "cppsim": - model = model.transform(SetExecMode("cppsim")) - model = model.transform(PrepareCppSim()) - model = model.transform(CompileCppSim()) + raise Exception("cppsim not supported in test_fpgadataflow_slidingwindow_rtl") elif exec_mode == "rtlsim": model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP("xc7z020clg400-1", 5)) - model = model.transform(HLSSynthIP()) + model = model.transform(PrepareIP("xc7z020clg400-1", 4)) model = model.transform(PrepareRTLSim()) else: - raise Exception("Unknown exec_mode in test_fpgadataflow_slidingwindow") + raise Exception("Unknown exec_mode in test_fpgadataflow_slidingwindow_rtl") # prepare input data input_dict = prepare_inputs(x) From 748049db049d95ba5d1f2729e9d63e36237ef16c Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Mon, 31 Jan 2022 21:48:15 +0100 Subject: [PATCH 003/628] Basic MMV in/out implementation for a special 1D case --- finn-rtllib/swg/swg_hdl_template.v | 15 ++- .../convolutioninputgenerator_rtl.py | 109 +++++++++++++----- ...est_fpgadataflow_convinputgenerator_rtl.py | 10 +- 3 files changed, 97 insertions(+), 37 deletions(-) diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v index 1950757245..44fd41abab 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -8,8 +8,9 @@ `timescale 1 ns / 1 ps module $TOP_MODULE_NAME$_wb #( - parameter IN_WIDTH = 1, //c*bit-width - parameter OUT_WIDTH = 1, //c*bit-width*MMV_out + parameter IN_WIDTH = 1, //bit-width*C*MMV_in + parameter OUT_ELEM_WIDTH = 1, //bit-width*C + parameter OUT_WIDTH = 1, //bit-width*C*MMV_out parameter BUFFER_ELEM_TOTAL = 1 ) ( @@ -65,10 +66,11 @@ module $TOP_MODULE_NAME$ ( //parameters parameter BIT_WIDTH = $BIT_WIDTH$; parameter SIMD = $SIMD$; //assuming SIMD=C for now -parameter MMV_IN = $MMV_IN$; //assuming MMV_IN=1 for now -parameter MMV_OUT = $MMV_OUT$; //assuming MMV_OUT=K for now -parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; //c*bit-width -parameter BUF_OUT_WIDTH = BUF_IN_WIDTH * MMV_OUT; //c*bit-width*MMV_out +parameter MMV_IN = $MMV_IN$; //assuming MMV_IN=1*M for now +parameter MMV_OUT = $MMV_OUT$; //assuming 
MMV_OUT=K*M for now +parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; //bit-width*C*MMV_in +parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; //bit-width*C +parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; //bit-width*C*MMV_out parameter CYCLES_TOTAL = $CYCLES_TOTAL$; parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; @@ -92,6 +94,7 @@ wire window_buffer_shift_enable; $TOP_MODULE_NAME$_wb #( .IN_WIDTH(BUF_IN_WIDTH), + .OUT_ELEM_WIDTH(BUF_OUT_ELEM_WIDTH), .OUT_WIDTH(BUF_OUT_WIDTH), .BUFFER_ELEM_TOTAL(BUF_ELEM_TOTAL) ) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 2e8e8ec75e..55687aa5d2 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -79,6 +79,7 @@ def get_nodeattr_types(self): "IFMDim": ("ints", True, []), # [H, W] = [Y, X] "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), + "M": ("i", True, 1), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs @@ -111,9 +112,15 @@ def get_folded_input_shape(self): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" wf = int(ifm_ch / simd) - folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) + #folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) + #round up to support ifm_dim % M != 0 + if ifm_dim_w == 1: + folded_ishape = (1, math.ceil(ifm_dim_h/M), ifm_dim_w, wf, int(simd*M)) + else: + folded_ishape = (1, ifm_dim_h, math.ceil(ifm_dim_w/M), wf, int(simd*M)) return folded_ishape def get_normal_output_shape(self): @@ -135,13 +142,18 @@ def get_folded_output_shape(self): stride_h, stride_w = self.get_nodeattr("Stride") dilation_h, dilation_w = self.get_nodeattr("Dilation") simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") pad = 0 ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" if self.use_parallel_window_output(): wf = int((ifm_ch) // simd) - folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) + #folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) + if ofm_dim_w == 1: + folded_oshape = (1, int(ofm_dim_h/M), ofm_dim_w, wf, k_h * k_w * int(simd*M)) + else: + folded_oshape = (1, ofm_dim_h, int(ofm_dim_w/M), wf, k_h * k_w * int(simd*M)) else: wf = int((k_h * k_w * ifm_ch) // simd) folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd) @@ -175,8 +187,9 @@ def get_instream_width(self): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") + M = self.get_nodeattr("M") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" - in_width = simd * ibits + in_width = simd * ibits * M return in_width def get_outstream_width(self): @@ -377,6 +390,15 @@ def execute_node(self, context, graph): export_idt = DataType["BINARY"] else: export_idt = self.get_input_datatype() + + # pad test input stream to work when IFMdim % M != 0 + # during normal operation, the AXI Stream should not care, in the last cycle garbage elements are read but not used + # ToDo: only works for 1D case + mmv_stream_padding_px = int((np.prod(folded_ishape) - 
np.prod(exp_ishape)) / exp_ishape[-1]) + if exp_ishape [2] == 1: + inp = np.pad(inp, ((0,0),(0,mmv_stream_padding_px),(0,0),(0,0)), 'constant') + else: + inp = np.pad(inp, ((0,0),(0,0),(0,mmv_stream_padding_px),(0,0)), 'constant') # reshape input into folded form inp = inp.reshape(folded_ishape) # make copy before saving array @@ -460,22 +482,23 @@ def generate_hdl(self): n = 1 h, w = ifm_dim - c = 1#ifm_ch not considered atm (always parallelize across c) + c = 1 # ifm_ch not considered atm (always parallelize across c) k_h, k_w = k - pad = [0,0,0,0] + pad = [0,0,0,0] # padding happens in separate padding node pad_val = 0 stride_h, stride_w = stride dilation_h, dilation_w = dilation conv_c = 99 # init folding config + M = self.get_nodeattr("M") simd = self.get_nodeattr("SIMD") - mmv_in = 1 - mmv_out = k_h*k_w + mmv_in = 1*M + mmv_out = k_h*k_w*M assert simd==ifm_ch, "Constraint violated: SIMD = C" - assert mmv_in==1, "Constraint violated: MMV_IN = 1" - assert mmv_out==k_h*k_w, "Constraint violated: mmv_out = K" + assert mmv_in==1*M, "Constraint violated: MMV_IN = 1" # *M + assert mmv_out==k_h*k_w*M, "Constraint violated: mmv_out = K" # *M # how many "unused" registers are allowed between buffer positions that will be accessed in parallel # example: @@ -552,7 +575,18 @@ def generate_hdl(self): f_debug.write("\n"+str(idx_w)) idx_px = idx_h*w+idx_w - f_debug.write("\n"+"sequential pixel indices") + f_debug.write("\n"+"sequential pixel indices (shape %s" % str(idx_px.shape)) + f_debug.write("\n"+str(idx_px)) + + output_elem, output_cycles = idx_px.shape + # ToDo: what happens when output_cycles=OFMdim % M != 0 + # ...try to support IFMdim % M != 0 first, so we can work with the usual k=3 where OFMdim = IFMdim - -2 + # the additional garbage input elements that are read in the last cycle are not read by any window anyway + idx_px = idx_px.transpose() + idx_px = idx_px.reshape((int(output_cycles/M), int(output_elem*M))) + idx_px = idx_px.transpose() + + f_debug.write("\n"+"sequential pixel indices, MMV_out grouping (shape %s" % str(idx_px.shape)) f_debug.write("\n"+str(idx_px)) buffer = [] @@ -565,23 +599,29 @@ def generate_hdl(self): idx_px_relative = idx_px.copy() # compute schedule and buffer read pattern - Y, X = idx_px_relative.shape - for x in range(X): + output_elem, output_cycles = idx_px_relative.shape + for x in range(output_cycles): # load missing inputs into buffer - for y in range(Y): + for y in range(output_elem): while int(idx_px_relative[y,x]) not in buffer: - buffer.append(next_in_px) - next_in_px += 1 + # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) + for m in range(M): + buffer.append(next_in_px) + next_in_px += 1 schedule_write.append(1) schedule_read.append(0) # discard unused buffer elements (assumes in-order access) oldest_px = min(idx_px_relative[:,x]) - while buffer[0] < oldest_px: - buffer.pop(0) + #while buffer[0] < oldest_px: + #check whether M elements can be shifted out, not just the single oldest one + while all([buffer[i] < oldest_px for i in range(M)]): + # M buffer elements are shifted out at once + for m in range(M): + buffer.pop(0) # adjust relative buffer index - for y in range(Y): + for y in range(output_elem): idx_px_relative[y,x] -= oldest_px # record max needed buffer depth @@ -595,14 +635,16 @@ def generate_hdl(self): if next_in_px > (h_padded*w_padded-1): schedule_write.append(0) else: - buffer.append(next_in_px) - next_in_px += 1 + # load M inputs at once + for m in range(M): + 
buffer.append(next_in_px) + next_in_px += 1 schedule_write.append(1) # find buffer access patterns buffer_access_patterns = [] - for x in range(X): + for x in range(output_cycles): if idx_px_relative[:,x].tolist() not in buffer_access_patterns: buffer_access_patterns.append(idx_px_relative[:,x].tolist()) @@ -636,10 +678,13 @@ def generate_hdl(self): code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] # determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) + # ToDo: this part doesn't fully account for M (2D buffer) yet assert len(buffer_access_patterns) == 1, "ERROR: Buffer access pattern is not static" buf_static_access_pattern = buffer_access_patterns[0] reg_fifos = [] + reg_fifos_depth = [] bram_fifos = [] + bram_fifos_depth = [] current = [] for i in range(len(buf_static_access_pattern)): access_idx = buf_static_access_pattern[i] @@ -647,6 +692,7 @@ def generate_hdl(self): current.append(access_idx) else: # assume non-decreasing index order in access pattern + # ToDo: this assumption does not hold for M>1 case (2D buffer) distance = access_idx - max(current) if not (distance-1 > REG_BRAM_THRESHOLD): for i in range(distance-1): @@ -657,11 +703,14 @@ def generate_hdl(self): else: # assign skipped accesses to new BRAM FIFO bram_fifos.append([-1]*(distance-1)) + bram_fifos_depth.append((distance-1)/M) # start with new REG FIFO reg_fifos.append(current) + reg_fifos_depth.append(math.ceil((max(current)+1)/M)) current = [] current.append(access_idx) reg_fifos.append(current) + reg_fifos_depth.append(math.ceil((max(current)+1)/M)) f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) @@ -674,7 +723,7 @@ def generate_hdl(self): code_gen_dict["$GENERATE_REG_FIFOS$"].append( """parameter reg_fifo_{id}_len = {len}; reg [IN_WIDTH-1:0] reg_fifo_{id} [reg_fifo_{id}_len-1:0]; - """.format(id=i, len=len(reg_fifos[i]))) + """.format(id=i, len=reg_fifos_depth[i])) #todo: generate actual bram shift buffers instead of regs code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] @@ -682,16 +731,23 @@ def generate_hdl(self): code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( """parameter bram_fifo_{id}_len = {len}; reg [IN_WIDTH-1:0] bram_fifo_{id} [bram_fifo_{id}_len-1:0]; - """.format(id=i, len=len(bram_fifos[i]))) + """.format(id=i, len=bram_fifos_depth[i])) code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] out_idx = mmv_out-1 for fifo_id, reg_fifo in enumerate(reg_fifos): for fifo_idx, access_idx in enumerate(reg_fifo): if(access_idx != -1): + #code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + # "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( + # out_idx=out_idx, fifo_id=fifo_id, fifo_idx=fifo_idx, access_idx=access_idx + # ) + #) code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - "assign data_out[IN_WIDTH*{out_idx}+:IN_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( - out_idx=out_idx, fifo_id=fifo_id, fifo_idx=fifo_idx, access_idx=access_idx + "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}][OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( + out_idx=out_idx, fifo_id=fifo_id, + access_idx=reg_fifos_depth[fifo_id]-1-int((max(reg_fifo)-access_idx)/M), + mmv_idx=(max(reg_fifo)-access_idx)%M ) ) # reversal: out_idx=0 -> oldest buffer element -> highest access_idx @@ -762,9 +818,6 @@ def generate_hdl(self): 
code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( "assign write_state = WRITE_SCHEDULE[cycle_last];" ) - #code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - # "assign write_state_next = WRITE_SCHEDULE[cycle_next];" - #) with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: template = f.read() diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 0845dc2fca..ef1fda8e31 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -90,7 +90,7 @@ def make_single_im2col_modelwrapper( def make_single_slidingwindow_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw=0 + k, ifm_ch, ifm_dim, ofm_dim, simd, m, stride, dilation, idt, dw=0 ): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim @@ -117,6 +117,7 @@ def make_single_slidingwindow_modelwrapper( IFMDim=[ifm_dim_h, ifm_dim_w], OFMDim=[ofm_dim_h, ofm_dim_w], SIMD=simd, + M=m, Stride=[stride_h, stride_w], Dilation=[dilation_h, dilation_w], inputDataType=idt.name, @@ -153,7 +154,7 @@ def prepare_inputs(input_tensor): # kernel size @pytest.mark.parametrize("k", [[3, 1]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[8, 1]]) +@pytest.mark.parametrize("ifm_dim", [[10, 1]]) # input channels @pytest.mark.parametrize("ifm_ch", [2]) # Stride @@ -164,6 +165,8 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("exec_mode", ["rtlsim"]) # input channel parallelism ("SIMD") @pytest.mark.parametrize("simd", [2]) +# in/out MMV ("M") +@pytest.mark.parametrize("m", [1, 2, 4]) # depthwise @pytest.mark.parametrize("dw", [0]) # Flip dimensions @@ -171,7 +174,7 @@ def prepare_inputs(input_tensor): @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_slidingwindow_rtl( - idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw, flip + idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, m, dw, flip ): if flip: k = k[::-1] @@ -203,6 +206,7 @@ def test_fpgadataflow_slidingwindow_rtl( ifm_dim=ifm_dim, ofm_dim=ofm_dim, simd=simd, + m=m, stride=stride, dilation=dilation, idt=idt, From e6baacd473a2a341ede17bf6d1b5561f954346aa Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 15 Mar 2022 12:54:38 +0100 Subject: [PATCH 004/628] Fix for simulation in MMV, 1D case --- .../fpgadataflow/convolutioninputgenerator_rtl.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 55687aa5d2..e0285cd473 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -79,7 +79,7 @@ def get_nodeattr_types(self): "IFMDim": ("ints", True, []), # [H, W] = [Y, X] "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), - "M": ("i", True, 1), + "M": ("i", False, 1), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs @@ -380,10 +380,11 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert ( - inp.shape == exp_ishape - ), """Input shape doesn't - match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" + # disable this check to allow for IFMdim % M != 0 case (see below) where input 
comes from MMV-output capable node + #assert ( + # inp.shape == exp_ishape + #), """Input shape doesn't + #match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" if self.get_input_datatype() == DataType["BIPOLAR"]: # store bipolar activations as binary inp = (inp + 1) / 2 @@ -394,7 +395,7 @@ def execute_node(self, context, graph): # pad test input stream to work when IFMdim % M != 0 # during normal operation, the AXI Stream should not care, in the last cycle garbage elements are read but not used # ToDo: only works for 1D case - mmv_stream_padding_px = int((np.prod(folded_ishape) - np.prod(exp_ishape)) / exp_ishape[-1]) + mmv_stream_padding_px = int((np.prod(folded_ishape) - np.prod(inp.shape)) / exp_ishape[-1]) if exp_ishape [2] == 1: inp = np.pad(inp, ((0,0),(0,mmv_stream_padding_px),(0,0),(0,0)), 'constant') else: From 0f866f1e7123a625f13692ac7cc59c11b530da21 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 22 Mar 2022 11:58:14 +0100 Subject: [PATCH 005/628] Replace microprogramming with controller --- finn-rtllib/swg/swg_hdl_template.v | 206 ++++++++--- .../convolutioninputgenerator_rtl.py | 334 ++++++++++++++++-- 2 files changed, 458 insertions(+), 82 deletions(-) diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v index 44fd41abab..88ef58531a 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -1,11 +1,146 @@ -// ============================================================== -// RTL generated by Vivado(TM) HLS - High-Level Synthesis from C, C++ and OpenCL -// Version: 2020.1 -// Copyright (C) 1986-2020 Xilinx, Inc. All Rights Reserved. -// -// =========================================================== - `timescale 1 ns / 1 ps + +module $TOP_MODULE_NAME$_controller +( + CLK, + cycle, + cmd_read, + cmd_write +); + +input CLK; +input [31:0] cycle; //todo: minimize width or switch to single bit flag/advance wire +output cmd_read; +output cmd_write; + +////code generation part: +//mapping of R/W command values to each state (START, MAIN_1, MAIN_2, INTER_1, INTER_2, END_1, END_2) +localparam [0:6] READ_CMD_MAP = $READ_CMD_MAP$; +localparam [0:6] WRITE_CMD_MAP = $WRITE_CMD_MAP$; + +localparam START_COUNTER = $START_COUNTER$; +localparam LOOP_MAIN_COUNTER = $LOOP_MAIN_COUNTER$; +localparam LOOP_MAIN_1_COUNTER = $LOOP_MAIN_1_COUNTER$; +localparam LOOP_MAIN_2_COUNTER = $LOOP_MAIN_2_COUNTER$; +localparam LOOP_INTER_COUNTER = $LOOP_INTER_COUNTER$; +localparam LOOP_INTER_1_COUNTER = $LOOP_INTER_1_COUNTER$; +localparam LOOP_INTER_2_COUNTER = $LOOP_INTER_2_COUNTER$; +localparam LOOP_END_1_COUNTER = $LOOP_END_1_COUNTER$; +localparam LOOP_END_2_COUNTER = $LOOP_END_2_COUNTER$; +//// + +//state and counters +reg [2:0] state, state_next; +parameter STATE_START = 0, STATE_LOOP_MAIN_1 = 1, STATE_LOOP_MAIN_2 = 2, STATE_LOOP_INTER_1 = 3, STATE_LOOP_INTER_2 = 4, STATE_END_1 = 5, STATE_END_2 = 6; +integer counter_current; //todo: minimize width +integer counter_loop_main; +integer counter_loop_inter; + +assign cmd_read = READ_CMD_MAP[state_next]; //read command indicates read in *upcoming* cycle, due to how schedule is constructed +assign cmd_write = WRITE_CMD_MAP[state]; + +reg cycle_last; +wire cycle_advance; +assign cycle_advance = !(cycle == cycle_last); + +//combinational next state logic +always @ (state, counter_current, counter_loop_main, counter_loop_inter) begin + state_next = state; //default + case (state) + STATE_START: + if (counter_current == START_COUNTER-1) + state_next = STATE_LOOP_MAIN_1; + + 
STATE_LOOP_MAIN_1: + if (counter_current == LOOP_MAIN_1_COUNTER-1) + state_next = STATE_LOOP_MAIN_2; + + STATE_LOOP_MAIN_2: begin + if (counter_current == LOOP_MAIN_2_COUNTER-1) begin + state_next = STATE_LOOP_MAIN_1; + if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin + //no -1 because this counter marks the currently active iteration, not finished iterations + if ((LOOP_INTER_COUNTER != 0) && (counter_loop_inter != LOOP_INTER_COUNTER)) + state_next = STATE_LOOP_INTER_1; + else begin + //there might not be an end sequence -> restart immediately + if (LOOP_END_1_COUNTER != 0) + state_next = STATE_END_1; + else + state_next = STATE_START; + end + end + end + end + + STATE_LOOP_INTER_1: begin + if (counter_current == LOOP_INTER_1_COUNTER-1) begin + if (LOOP_INTER_2_COUNTER != 0) + state_next = STATE_LOOP_INTER_2; + else + state_next = STATE_LOOP_MAIN_1; + end + end + + STATE_LOOP_INTER_2: + if (counter_current == LOOP_INTER_2_COUNTER-1) + state_next = STATE_LOOP_MAIN_1; + + STATE_END_1: begin + if (counter_current == LOOP_END_1_COUNTER-1) begin + if (LOOP_END_2_COUNTER != 0) + state_next = STATE_END_2; + else + state_next = STATE_START; + end + end + + STATE_END_2: + if (counter_current == LOOP_END_2_COUNTER-1) + state_next = STATE_START; + endcase +end + +//sequential logic +always @ (posedge CLK) begin + if (cycle == 0) begin + counter_current <= 0; + counter_loop_main <= 0; + counter_loop_inter <= 0; + cycle_last <= 0; + state <= STATE_START; + end else begin + cycle_last <= cycle; + state <= state_next; + + if (cycle_advance) begin + counter_current <= counter_current+1; + end + + if (state != state_next) begin + counter_current <= 0; + + //count up main loop upon re-entering this loop (not on first enter from start) + if ((state_next == STATE_LOOP_MAIN_1) && (state != STATE_START)) begin + if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin + counter_loop_main <= 0; + end else begin + counter_loop_main <= counter_loop_main+1; + end + end + + if (state_next == STATE_LOOP_INTER_1) begin + if (counter_loop_inter == LOOP_INTER_COUNTER) begin //no -1 because this counter marks the currently active iteration, not finished iterations + counter_loop_inter <= 0; + end else begin + counter_loop_inter <= counter_loop_inter+1; + end + end + end + end +end +endmodule //controller + module $TOP_MODULE_NAME$_wb #( parameter IN_WIDTH = 1, //bit-width*C*MMV_in @@ -63,7 +198,6 @@ module $TOP_MODULE_NAME$ ( out_V_V_TREADY ); -//parameters parameter BIT_WIDTH = $BIT_WIDTH$; parameter SIMD = $SIMD$; //assuming SIMD=C for now parameter MMV_IN = $MMV_IN$; //assuming MMV_IN=1*M for now @@ -71,7 +205,6 @@ parameter MMV_OUT = $MMV_OUT$; //assuming MMV_OUT=K*M for now parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; //bit-width*C*MMV_in parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; //bit-width*C parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; //bit-width*C*MMV_out - parameter CYCLES_TOTAL = $CYCLES_TOTAL$; parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; @@ -106,76 +239,63 @@ window_buffer_inst .data_out(window_buffer_out) ); -//FSM state -//reg [1:0] state; -//parameter STATE_RESET = 0, STATE_OPERATE = 1, S2 = 2; - -//main cycle counter (where either read/write/both happen, resets for each image) -integer cycle; -integer cycle_last; - -//read/write loop state -wire read_state; -wire write_state; +integer cycle; //main cycle counter (where either read/write/both happen, resets for each image) +wire read_cmd; +wire write_cmd; reg write_done; //keep track if W of current cycle was already completed, but 
we still wait on a R in the same cycle +$TOP_MODULE_NAME$_controller +controller_inst +( + .CLK(ap_clk), + .cycle(cycle), + .cmd_read(read_cmd), + .cmd_write(write_cmd) +); + wire write_blocked; -assign write_blocked = write_state && !out_V_V_TREADY && !write_done; +assign write_blocked = write_cmd && !out_V_V_TREADY && !write_done; wire read_ok; // with transition to next cycle: // want to read can read source is ready (waiting on VALID allowed) -assign read_ok = read_state && !write_blocked && in0_V_V_TVALID; +assign read_ok = read_cmd && !write_blocked && in0_V_V_TVALID; wire write_ok; // with transition to next cycle: // output is VALID sink is ready sink has already read (we are waiting on source) -assign write_ok = write_state && (out_V_V_TREADY || write_done); +assign write_ok = write_cmd && (out_V_V_TREADY || write_done); wire advance; -// includes waiting on W if W-only cycle: wait only on W -assign advance = read_ok || (!read_state && write_ok); +// includes waiting on W if W-only cycle: wait only on W no R/W to wait for +assign advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); //assign buffer control -//todo: if mmv_out < k: might not shift and/or write for multiple read_state cycles +//todo: if mmv_out < k: might not shift and/or write for multiple read_cmd cycles assign window_buffer_shift_enable = advance; //assign I/O ports assign window_buffer_in = in0_V_V_TDATA; assign out_V_V_TDATA = window_buffer_out; assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) -assign out_V_V_TVALID = ap_rst_n && write_state && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) - -//read schedule -//todo: generate differently -$GENERATE_READ_SCHEDULE$ - -//write schedule -//todo: generate differently -$GENERATE_WRITE_SCHEDULE$ +assign out_V_V_TVALID = ap_rst_n && write_cmd && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) //main process for advancing cycle count always @ (posedge ap_clk) begin if (ap_rst_n == 1'b0) begin cycle <= 0; - cycle_last <= 0; end else begin if (advance) begin write_done <= 1'b0; //reset flag //count cycle (completed R or W or both (depending on current cycle)) - cycle_last <= cycle; //cycle last is used to generate write_state (due to how schedule is constructed) if (cycle == CYCLES_TOTAL-1) cycle <= 0; else cycle <= cycle+1; - end else begin - if (write_ok) begin - // successful W in this cycle, but R still outstanding - write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! - end - end + end else if (write_ok) // successful W in this cycle, but R still outstanding + write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
end end diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index e0285cd473..a54dea9167 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -505,7 +505,7 @@ def generate_hdl(self): # example: # 0: only consecutive access patterns will be implemented in regs, rest in BRAM line buffers # 2: [0, 3, 6] access pattern is still allowed and will be implemented with 1 7-position shift reg - REG_BRAM_THRESHOLD = 1 + REG_BRAM_THRESHOLD = 9999 #-------------------- in_shape = (n,c,h,w) #NCHW @@ -595,6 +595,10 @@ def generate_hdl(self): # buffer schedule (write from input, read to output) schedule_write = [] schedule_read = [] + + schedule = [] + schedule_prev = '' + next_in_px = 0 idx_px_relative = idx_px.copy() @@ -611,6 +615,12 @@ def generate_hdl(self): next_in_px += 1 schedule_write.append(1) schedule_read.append(0) + if schedule_prev == 'w': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'w')) + schedule_prev = 'w' # discard unused buffer elements (assumes in-order access) oldest_px = min(idx_px_relative[:,x]) @@ -635,12 +645,24 @@ def generate_hdl(self): # simultaneously load next pixel(s) into buffer if there are any left if next_in_px > (h_padded*w_padded-1): schedule_write.append(0) + if schedule_prev == 'r': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'r')) + schedule_prev = 'r' else: # load M inputs at once for m in range(M): buffer.append(next_in_px) next_in_px += 1 schedule_write.append(1) + if schedule_prev == 'wr': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'wr')) + schedule_prev = 'wr' # find buffer access patterns @@ -649,7 +671,198 @@ def generate_hdl(self): if idx_px_relative[:,x].tolist() not in buffer_access_patterns: buffer_access_patterns.append(idx_px_relative[:,x].tolist()) + # from itertools import groupby + # schedule_write_compressed = ''.join('(' + str(k) + ',' + str(sum(1 for x in g)) + '),' for k, g in groupby(schedule_write)) + # schedule_read_compressed = ''.join('(' + str(k) + ',' + str(sum(1 for x in g)) + '),' for k, g in groupby(schedule_read)) + + # analyse schedule + # class sched_gen: + # start_counter = 0 + # start_val = 0 + + # end_last_sequence_counter = 0 + # end_sequence = [] + + # outer_counter = 0 + # outer_sequence_counter = 0 + # outer_sequence_val = 0 + + # inner_counter = 0 + # inner_sequence = [] + + # def __str__(self): + # return "\nstart: %d x %d\n %d x\n %d x %s + %d x %d\nend: %d x %s + %s\n" % ( + # self.start_counter, + # self.start_val, + # self.outer_counter, + # self.inner_counter, + # str(self.inner_sequence), + # self.outer_sequence_counter, + # self.outer_sequence_val, + # self.end_last_sequence_counter, + # str(self.inner_sequence), + # self.end_sequence + # ) + + + # def analyse_schedule(schedule): + # generator = sched_gen() + + # #determine start sequence + # for i, v in enumerate(schedule): + # if i > 0 and v != schedule[i-1]: + # generator.start_counter = i + # generator.start_val = schedule[i-1] + # break + + # #determine inner loop/sequence + # sequence_MAX = 10 + # schedule = schedule[generator.start_counter:] # cut off processed entries + # sequence = [] + # repititions = 0 + # i = 0 + # while i < len(schedule): + # if not sequence: + # sequence.append(schedule[i]) + # i = i+1 + # else: + # # is this a 
beginning of a repitition of the current sequence? + # if i + len(sequence) < len(schedule) and all([schedule[i+offset] == sequence[offset] for offset in range(len(sequence))]): + # repititions = repititions + 1 + # i = i+len(sequence) + # else: + # # did we already count repitions of the sequence? + # sequence_candidate = sequence + sequence * repititions + # sequence_candidate.append(schedule[i]) + # if len(sequence_candidate) < sequence_MAX: + # sequence = sequence_candidate.copy() + # repititions = 0 + # i = i+1 + # else: + # schedule = schedule[i:] # cut off processed entries + # break + # generator.inner_counter = repititions + 1 + # generator.inner_sequence = sequence + + # #determine outer sequence + # for i, v in enumerate(schedule): + # if i > 0 and v != schedule[i-1]: + # generator.outer_sequence_counter = i + # generator.outer_sequence_val = schedule[i-1] + # break + + # schedule = schedule[generator.outer_sequence_counter:] # cut off processed entries + + # sequence_to_compare = generator.inner_sequence * generator.inner_counter + [generator.outer_sequence_val] * generator.outer_sequence_counter + + # generator.outer_counter = 1 + # i = 0 + # while i < len(schedule): + # # is this a beginning of a repitition of the current sequence? + # if i + len(sequence_to_compare) < len(schedule) and all([schedule[i+offset] == sequence_to_compare[offset] for offset in range(len(sequence_to_compare))]): + # generator.outer_counter = generator.outer_counter + 1 + # i = i+len(sequence_to_compare) + # else: + # schedule = schedule[i:] # cut off processed entries + # break + + # #determine end sequence + # #for i, v in enumerate(schedule): + # # if i > 0 and v != schedule[i-1]: + # # generator.end_counter = i + # # generator.end_val = schedule[i-1] + # # break + # sequence = generator.inner_sequence + # repititions = 0 + # i = 0 + # while i < len(schedule): + # # is this a beginning of a repitition of the current sequence? 
+ # if i + len(sequence) < len(schedule) and all([schedule[i+offset] == sequence[offset] for offset in range(len(sequence))]): + # repititions = repititions + 1 + # i = i+len(sequence) + # else: + # schedule = schedule[i:] # cut off processed entries + # break + # generator.end_last_sequence_counter = repititions + + # #remainder + # generator.end_sequence = schedule + + # return generator + + def compact_schedule(schedule): + + # leave first sequence (pre-load) as is + start_sequence = schedule[0] + + loop_sequence_1_counter = 1 + loop_sequence_1 = schedule[1] + + loop_counter = 0 + loop_sequence_2 = None + end_sequence = None + + i = 2 + if i < len(schedule): + loop_sequence_1 += schedule[i] + i += 1 + + while i+1 < len(schedule): + candidate = schedule[i] + schedule[i+1] + if candidate == loop_sequence_1: + loop_sequence_1_counter += 1 + i += 2 + else: + break + + if i < len(schedule): + loop_sequence_2 = schedule[i] + i += 1 + + if i+1 < len(schedule): + candidate = schedule[i] + schedule[i+1] + if candidate != loop_sequence_1: + loop_sequence_2 += schedule[i] + + i -= 1 + loop_sequence_total_len = (int(len(loop_sequence_2)/2)) + loop_sequence_1_counter*(int(len(loop_sequence_1)/2)) + loop_sequence_total = loop_sequence_2 + loop_sequence_1_counter*loop_sequence_1 + while i+loop_sequence_total_len < len(schedule): + candidate = schedule[i] + for x in range (i+1, i+loop_sequence_total_len): + candidate += schedule[x] + + if candidate == loop_sequence_total: + loop_counter += 1 + i += loop_sequence_total_len + else: + break + + else: + if i < len(schedule): + end_sequence = loop_sequence_2 + schedule[i] + i += 1 + loop_sequence_2 = None + else: + end_sequence = loop_sequence_2 + loop_sequence_2 = None + + if i < len(schedule): + end_sequence = schedule[i] + i += 1 + + assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" %(i, len(schedule)) + + return ( + start_sequence, + loop_counter, + loop_sequence_1_counter, + loop_sequence_1, + loop_sequence_2, + end_sequence + ) + f_debug.write("\n"+"max buffer size observed: %d" %(buffer_max_size)) f_debug.write("\n"+"output vector elements: relative buffer indices") f_debug.write("\n"+str(idx_px_relative)) @@ -659,9 +872,21 @@ def generate_hdl(self): f_debug.write("\n"+"buffer write schedule (%d cycles)" % len(schedule_write)) f_debug.write("\n"+str(schedule_write)) f_debug.write("\n"+"writing buffer in %d cycles" % schedule_write.count(1)) + #f_debug.write("\n"+"buffer write schedule COMPRESSED") + #f_debug.write("\n"+str(schedule_write_compressed)) + #f_debug.write("\n"+"buffer write schedule ANALYZED") + #f_debug.write("\n"+str(analyse_schedule(schedule_write))) f_debug.write("\n"+"buffer read schedule (%d cycles)" % len(schedule_read)) f_debug.write("\n"+str(schedule_read)) f_debug.write("\n"+"reading buffer in %d cycles" % schedule_read.count(1)) + #f_debug.write("\n"+"buffer read schedule COMPRESSED") + #f_debug.write("\n"+str(schedule_read_compressed)) + #f_debug.write("\n"+"buffer read schedule ANALYZED") + #f_debug.write("\n"+str(analyse_schedule(schedule_read))) + f_debug.write("\n"+"buffer rw schedule NEW") + f_debug.write("\n"+str(schedule)) + f_debug.write("\n"+"buffer rw schedule NEW compacted") + f_debug.write("\n"+"\nstart_sequence: %s\nloop_counter: %s\nloop_sequence_1_counter: %s\nloop_sequence_1: %s\nloop_sequence_2: %s\nend_sequence: %s\n" % compact_schedule(schedule)) assert len(schedule_write) == len(schedule_read), "ERROR: Schedules have different lenghts" cycles_total = len(schedule_write) @@ -704,7 
+929,7 @@ def generate_hdl(self): else: # assign skipped accesses to new BRAM FIFO bram_fifos.append([-1]*(distance-1)) - bram_fifos_depth.append((distance-1)/M) + bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? # start with new REG FIFO reg_fifos.append(current) reg_fifos_depth.append(math.ceil((max(current)+1)/M)) @@ -787,38 +1012,76 @@ def generate_hdl(self): ) # Generate read schedule (when data is read from input, written to buffer) - code_gen_dict["$GENERATE_READ_SCHEDULE$"] = [] - schedule_as_string = "" - #todo: change naming to swap write/read - for i in schedule_write: - if i == 1: - schedule_as_string += "1'b1," + # code_gen_dict["$GENERATE_READ_SCHEDULE$"] = [] + # schedule_as_string = "" + # #todo: change naming to swap write/read + # for i in schedule_write: + # if i == 1: + # schedule_as_string += "1'b1," + # else: + # schedule_as_string += "1'b0," + # schedule_as_string = schedule_as_string[:-1] # remove trailing ',' + # code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( + # "localparam [0:{len}-1] READ_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) + # ) + # code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( + # "assign read_state = READ_SCHEDULE[cycle];" + # ) + + # # Generate write schedule (when data is written to output, read from buffer) + # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"] = [] + # schedule_as_string = "" + # #todo: change naming to swap write/read + # for i in schedule_read: + # if i == 1: + # schedule_as_string += "1'b1," + # else: + # schedule_as_string += "1'b0," + # schedule_as_string = schedule_as_string[:-1] # remove trailing ',' + # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( + # "localparam [0:{len}-1] WRITE_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) + # ) + # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( + # "assign write_state = WRITE_SCHEDULE[cycle_last];" + # ) + + def convert_tuple(seq): + mapping = {'w': ("1'b1", "1'b0"), + 'r': ("1'b0", "1'b1"), + 'wr':("1'b1", "1'b1"), + 'n': ("1'b0", "1'b0")} + if seq: + if len(seq) == 2: + return (seq[0], mapping[seq[1]], 0, mapping['n']) + if len(seq) == 4: + return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) else: - schedule_as_string += "1'b0," - schedule_as_string = schedule_as_string[:-1] # remove trailing ',' - code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( - "localparam [0:{len}-1] READ_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) - ) - code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( - "assign read_state = READ_SCHEDULE[cycle];" - ) + return (0, mapping['n'], 0, mapping['n']) - # Generate write schedule (when data is written to output, read from buffer) - code_gen_dict["$GENERATE_WRITE_SCHEDULE$"] = [] - schedule_as_string = "" - #todo: change naming to swap write/read - for i in schedule_read: - if i == 1: - schedule_as_string += "1'b1," - else: - schedule_as_string += "1'b0," - schedule_as_string = schedule_as_string[:-1] # remove trailing ',' - code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - "localparam [0:{len}-1] WRITE_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) - ) - code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - "assign write_state = WRITE_SCHEDULE[cycle_last];" - ) + start_sequence,loop_counter,loop_sequence_1_counter,loop_sequence_1,loop_sequence_2,end_sequence = compact_schedule(schedule) + + start_sequence = convert_tuple(start_sequence) + loop_sequence_1 = convert_tuple(loop_sequence_1) + loop_sequence_2 = 
convert_tuple(loop_sequence_2) + end_sequence = convert_tuple(end_sequence) + + code_gen_dict["$START_COUNTER$"]=[str(start_sequence[0])] + code_gen_dict["$LOOP_MAIN_COUNTER$"]=[str(loop_sequence_1_counter)] + code_gen_dict["$LOOP_INTER_COUNTER$"]=[str(loop_counter)] + + code_gen_dict["$LOOP_MAIN_1_COUNTER$"]=[str(loop_sequence_1[0])] + code_gen_dict["$LOOP_MAIN_2_COUNTER$"]=[str(loop_sequence_1[2])] + + code_gen_dict["$LOOP_INTER_1_COUNTER$"]=[str(loop_sequence_2[0])] + code_gen_dict["$LOOP_INTER_2_COUNTER$"]=[str(loop_sequence_2[2])] + + code_gen_dict["$LOOP_END_1_COUNTER$"]=[str(end_sequence[0])] + code_gen_dict["$LOOP_END_2_COUNTER$"]=[str(end_sequence[2])] + + code_gen_dict["$READ_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][0],loop_sequence_1[1][0],loop_sequence_1[3][0],loop_sequence_2[1][0],loop_sequence_2[3][0],end_sequence[1][0],end_sequence[3][0])] + code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: template = f.read() @@ -871,17 +1134,10 @@ def code_generation_ipi(self): """Constructs and returns the TCL for node instantiation in Vivado IPI.""" vlnv = self.get_nodeattr("ip_vlnv") code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - - #cmd = ["create_bd_cell -type ip -vlnv %s %s" % (vlnv, self.onnx_node.name)] cmd = ["add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_hdl_gen.v")), "create_bd_cell -type module -reference %s %s" % (self.get_nodeattr("gen_top_module"), self.onnx_node.name)] - #update_compile_order -fileset sources_1 - #add_files -norecurse C:/Users/felix/Downloads/swg_hdl_generated.v - #update_compile_order -fileset sources_1 - #create_bd_cell -type module -reference ConvolutionInputGenerator_rtl_0_ConvolutionInputGenerator_rtl_0 ConvolutionInputGene_0 - return cmd def code_generation_ipgen(self, model, fpgapart, clk): From 0d2a549cece759414c3d6c1ca3a15a85041374cb Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 23 Mar 2022 13:06:10 +0100 Subject: [PATCH 006/628] Add RAM buffer component --- finn-rtllib/swg/swg_hdl_template.v | 139 +++++++++++++++--- .../convolutioninputgenerator_rtl.py | 84 ++++++----- 2 files changed, 171 insertions(+), 52 deletions(-) diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v index 88ef58531a..98ea1cf9cd 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -1,4 +1,4 @@ -`timescale 1 ns / 1 ps +`timescale 1 ns / 1 ps module $TOP_MODULE_NAME$_controller ( @@ -14,7 +14,7 @@ output cmd_read; output cmd_write; ////code generation part: -//mapping of R/W command values to each state (START, MAIN_1, MAIN_2, INTER_1, INTER_2, END_1, END_2) +//mapping of R/W command values to each state (START, MAIN_1, MAIN_2, INTER_1, INTER_2, END_1, END_2) localparam [0:6] READ_CMD_MAP = $READ_CMD_MAP$; localparam [0:6] WRITE_CMD_MAP = $WRITE_CMD_MAP$; @@ -37,7 +37,7 @@ integer counter_loop_main; integer counter_loop_inter; assign cmd_read = READ_CMD_MAP[state_next]; //read command indicates read in *upcoming* cycle, due to how schedule is constructed -assign cmd_write = WRITE_CMD_MAP[state]; +assign cmd_write = WRITE_CMD_MAP[state]; reg cycle_last; wire cycle_advance; @@ -66,7 +66,7 @@ always @ (state, counter_current, counter_loop_main, 
counter_loop_inter) begin //there might not be an end sequence -> restart immediately if (LOOP_END_1_COUNTER != 0) state_next = STATE_END_1; - else + else state_next = STATE_START; end end @@ -77,7 +77,7 @@ always @ (state, counter_current, counter_loop_main, counter_loop_inter) begin if (counter_current == LOOP_INTER_1_COUNTER-1) begin if (LOOP_INTER_2_COUNTER != 0) state_next = STATE_LOOP_INTER_2; - else + else state_next = STATE_LOOP_MAIN_1; end end @@ -141,6 +141,113 @@ always @ (posedge CLK) begin end endmodule //controller +module $TOP_MODULE_NAME$_reg_buffer +#( + parameter WIDTH = 1, + parameter DEPTH = 1 +) +( + CLK, + shift_enable, + shift_in, + shift_out, + data_out +); + +input CLK, shift_enable; +input [WIDTH-1:0] shift_in; +output [WIDTH-1:0] shift_out; +output [WIDTH*DEPTH-1:0] data_out; + +//UG901 template for SRL inference: +// 32-bit Shift Register +// Rising edge clock +// Active high clock enable +// For-loop based template +// File: shift_registers_1.v +// +//module shift_registers_1 (clk, clken, SI, SO); +//parameter WIDTH = 32; +//input clk, clken, SI; +//output SO; +//reg [WIDTH-1:0] shreg; +// +//integer i; +//always @(posedge clk) +//begin +// if (clken) +// begin +// for (i = 0; i < WIDTH-1; i = i+1) +// shreg[i+1] <= shreg[i]; +// shreg[0] <= SI; +// end +//end +//assign SO = shreg[WIDTH-1]; +//endmodule + +reg [WIDTH-1:0] data [DEPTH-1:0]; + +assign shift_out = data[DEPTH-1]; + +for (genvar e=0; e0; i=i-1) + data[i] <= data[i-1]; + data[0] <= shift_in; + end +end +endmodule //reg_buffer + +module $TOP_MODULE_NAME$_ram_buffer +#( + parameter WIDTH = 1, + parameter DEPTH = 1 +) +( + CLK, + RST, + shift_enable, + shift_in, + shift_out +); + +input CLK, RST, shift_enable; +input [WIDTH-1:0] shift_in; +output [WIDTH-1:0] shift_out; + +reg [WIDTH-1:0] out_reg; +assign shift_out = out_reg; + +integer addr_w, addr_r; //todo: minimize width (as reg), make r addr depend on w + +(* ram_style = "block" *) reg [WIDTH-1:0] ram [DEPTH-1:0]; + +always @(posedge CLK) begin + if (RST == 1'b0) begin + addr_w <= 0; + addr_r <= 1; + end else begin + if (shift_enable) begin + ram[addr_w] <= shift_in; + out_reg <= ram[addr_r]; + + if (addr_w == DEPTH-1) + addr_w <= 0; + else + addr_w <= addr_w + 1; + + if (addr_r == DEPTH-1) + addr_r <= 0; + else + addr_r <= addr_r + 1; + end + end +end +endmodule //ram_buffer + module $TOP_MODULE_NAME$_wb #( parameter IN_WIDTH = 1, //bit-width*C*MMV_in @@ -150,12 +257,13 @@ module $TOP_MODULE_NAME$_wb ) ( CLK, + RST, data_in, shift_enable, data_out ); -input CLK; +input CLK, RST; input [IN_WIDTH-1:0] data_in; input shift_enable; output [OUT_WIDTH-1:0] data_out; @@ -163,24 +271,20 @@ output [OUT_WIDTH-1:0] data_out; //Input REG to enable simultaneous R/W reg [IN_WIDTH-1:0] reg_input; -//REG FIFOs $GENERATE_REG_FIFOS$ -//BRAM FIFOs -//todo: generate real BRAM shift buffers if these get too large $GENERATE_BRAM_FIFOS$ +//Fixed interconnect between linear buffers +$GENERATE_BUFFER_CONNECTION$ + //Fixed REG FIFO <-> output mapping $GENERATE_OUTPUT_MAPPING$ -//main process +//input register logic integer i; always @ (posedge CLK) begin if (shift_enable) begin - //shift logic - $GENERATE_SHIFT_LOGIC$ - - //shift in new data reg_input <= data_in; end end @@ -234,6 +338,7 @@ $TOP_MODULE_NAME$_wb window_buffer_inst ( .CLK(ap_clk), + .RST(ap_rst_n), .data_in(window_buffer_in), .shift_enable(window_buffer_shift_enable), .data_out(window_buffer_out) @@ -291,9 +396,9 @@ always @ (posedge ap_clk) begin //count cycle (completed R or W or both (depending on current 
cycle)) if (cycle == CYCLES_TOTAL-1) cycle <= 0; - else - cycle <= cycle+1; - + else + cycle <= cycle+1; + end else if (write_ok) // successful W in this cycle, but R still outstanding write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! end diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index a54dea9167..cfd6572a8d 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -505,7 +505,7 @@ def generate_hdl(self): # example: # 0: only consecutive access patterns will be implemented in regs, rest in BRAM line buffers # 2: [0, 3, 6] access pattern is still allowed and will be implemented with 1 7-position shift reg - REG_BRAM_THRESHOLD = 9999 + REG_BRAM_THRESHOLD = 8 #-------------------- in_shape = (n,c,h,w) #NCHW @@ -932,11 +932,13 @@ def compact_schedule(schedule): bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? # start with new REG FIFO reg_fifos.append(current) - reg_fifos_depth.append(math.ceil((max(current)+1)/M)) + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) ToDo: fix for M again + reg_fifos_depth.append(len(current)) current = [] current.append(access_idx) reg_fifos.append(current) - reg_fifos_depth.append(math.ceil((max(current)+1)/M)) + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) ToDo fix for M again + reg_fifos_depth.append(len(current)) f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) @@ -947,17 +949,43 @@ def compact_schedule(schedule): code_gen_dict["$GENERATE_REG_FIFOS$"] = [] for i in range(len(reg_fifos)): code_gen_dict["$GENERATE_REG_FIFOS$"].append( - """parameter reg_fifo_{id}_len = {len}; - reg [IN_WIDTH-1:0] reg_fifo_{id} [reg_fifo_{id}_len-1:0]; - """.format(id=i, len=reg_fifos_depth[i])) - - #todo: generate actual bram shift buffers instead of regs + """ + wire [IN_WIDTH-1:0] reg_fifo_{id}_in; + wire [IN_WIDTH-1:0] reg_fifo_{id}_out; + wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; + {name}_reg_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + reg_buffer_inst_{id} + ( + .CLK(CLK), + .shift_enable(shift_enable), + .shift_in(reg_fifo_{id}_in), + .shift_out(reg_fifo_{id}_out), + .data_out(reg_fifo_{id}) + );""".format(name=self.get_verilog_top_module_name(), id=i, len=reg_fifos_depth[i])) + code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] for i in range(len(bram_fifos)): code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( - """parameter bram_fifo_{id}_len = {len}; - reg [IN_WIDTH-1:0] bram_fifo_{id} [bram_fifo_{id}_len-1:0]; - """.format(id=i, len=bram_fifos_depth[i])) + """ + wire [IN_WIDTH-1:0] bram_fifo_{id}_in; + wire [IN_WIDTH-1:0] bram_fifo_{id}_out; + {name}_ram_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + ram_buffer_inst_{id} + ( + .CLK(CLK), + .RST(RST), + .shift_enable(shift_enable), + .shift_in(bram_fifo_{id}_in), + .shift_out(bram_fifo_{id}_out) + );""".format(name=self.get_verilog_top_module_name(), id=i, len=bram_fifos_depth[i])) code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] out_idx = mmv_out-1 @@ -970,46 +998,32 @@ def compact_schedule(schedule): # ) #) code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}][OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( + "assign 
data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( out_idx=out_idx, fifo_id=fifo_id, access_idx=reg_fifos_depth[fifo_id]-1-int((max(reg_fifo)-access_idx)/M), - mmv_idx=(max(reg_fifo)-access_idx)%M + mmv_idx=(max(reg_fifo)-access_idx)%M, + mmv = M ) ) # reversal: out_idx=0 -> oldest buffer element -> highest access_idx out_idx = out_idx-1 assert out_idx==-1, "ERROR: Not all output vector elements connected" - code_gen_dict["$GENERATE_SHIFT_LOGIC$"] = [] + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] for i in range(len(reg_fifos)): if i == 0: # first FIFO containing newest elements -> input comes from input reg - code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( - """for (i=reg_fifo_{fifo_id}_len-1; i>0; i=i-1) - reg_fifo_{fifo_id}[i] <= reg_fifo_{fifo_id}[i-1]; - reg_fifo_{fifo_id}[0] <= reg_input;""".format( - fifo_id=i, - ) - ) + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = reg_input;""".format(fifo_id=i,)) else: # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) input_fifo_id = i-1 - code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( - """for (i=reg_fifo_{fifo_id}_len-1; i>0; i=i-1) - reg_fifo_{fifo_id}[i] <= reg_fifo_{fifo_id}[i-1]; - reg_fifo_{fifo_id}[0] <= bram_fifo_{input_fifo_id} [bram_fifo_{input_fifo_id}_len-1];""".format( - fifo_id=i, input_fifo_id=input_fifo_id - ) - ) + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) for i in range(len(bram_fifos)): input_fifo_id = i - code_gen_dict["$GENERATE_SHIFT_LOGIC$"].append( - """for (i=bram_fifo_{fifo_id}_len-1; i>0; i=i-1) - bram_fifo_{fifo_id}[i] <= bram_fifo_{fifo_id}[i-1]; - bram_fifo_{fifo_id}[0] <= reg_fifo_{input_fifo_id} [reg_fifo_{input_fifo_id}_len-1];""".format( - fifo_id=i, input_fifo_id=input_fifo_id - ) - ) + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) # Generate read schedule (when data is read from input, written to buffer) # code_gen_dict["$GENERATE_READ_SCHEDULE$"] = [] From 0ae77fe724758e33ec7a4cc1773ce8e1f5b608f3 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Sun, 12 Jun 2022 16:49:29 +0200 Subject: [PATCH 007/628] Add conventional implementation style for out_width <= in_width --- finn-rtllib/swg/swg_hdl_template.v | 8 +- finn-rtllib/swg/swg_hdl_template_mmv_1.v | 399 +++++++ finn-rtllib/swg/swg_hdl_template_wrapper.v | 46 + .../convolutioninputgenerator_rtl.py | 1048 ++++++++++------- .../fpgadataflow/convert_to_hls_layers.py | 178 +-- ...est_fpgadataflow_convinputgenerator_rtl.py | 79 +- 6 files changed, 1212 insertions(+), 546 deletions(-) create mode 100644 finn-rtllib/swg/swg_hdl_template_mmv_1.v create mode 100644 finn-rtllib/swg/swg_hdl_template_wrapper.v diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_hdl_template.v index 98ea1cf9cd..89ebb8da51 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_hdl_template.v @@ -223,7 +223,7 @@ assign shift_out = out_reg; integer addr_w, addr_r; //todo: minimize width (as reg), make r addr depend on w -(* ram_style = "block" *) reg [WIDTH-1:0] ram [DEPTH-1:0]; +$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; always @(posedge CLK) begin if (RST == 1'b0) begin @@ -291,7 +291,7 @@ end endmodule //window_buffer 
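For reference, a minimal Python sketch (not part of this patch) of the index arithmetic behind the $GENERATE_OUTPUT_MAPPING$ assignments above: every non-dummy entry of a REG FIFO access pattern is turned into an element offset and a word offset into the flattened reg_fifo vector. The access pattern and M value below are hypothetical toy values.

M = 1                      # MMV factor (hypothetical toy value)
reg_fifo = [0, -1, -1, 3]  # relative access pattern; -1 marks dummy registers
depth = len(reg_fifo)      # reg_fifos_depth entry for M = 1

for access in reg_fifo:
    if access == -1:
        continue  # dummy position, never connected to the output vector
    elem_offset = depth - 1 - int((max(reg_fifo) - access) / M)
    word_offset = (max(reg_fifo) - access) % M
    # corresponds to the generated slice:
    # reg_fifo[elem_offset*M*OUT_ELEM_WIDTH + OUT_ELEM_WIDTH*word_offset +: OUT_ELEM_WIDTH]
    print("relative access %d -> element %d, word %d" % (access, elem_offset, word_offset))

For this toy pattern the two connected accesses map to element offsets 0 and 3, both with word offset 0, since M = 1.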
-module $TOP_MODULE_NAME$ ( +module $TOP_MODULE_NAME$_impl ( ap_clk, ap_rst_n, in0_V_V_TDATA, @@ -315,11 +315,9 @@ parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; //IO ports input ap_clk; input ap_rst_n; -(* X_INTERFACE_PARAMETER = "FREQ_HZ 250000000.000000" *) input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; input in0_V_V_TVALID; output in0_V_V_TREADY; -(* X_INTERFACE_PARAMETER = "FREQ_HZ 250000000.000000" *) output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; output out_V_V_TVALID; input out_V_V_TREADY; @@ -404,4 +402,4 @@ always @ (posedge ap_clk) begin end end -endmodule //ConvolutionInputGenerator1D_0_ConvolutionInputGenerator1D_0 +endmodule //TOP_MODULE_NAME_impl diff --git a/finn-rtllib/swg/swg_hdl_template_mmv_1.v b/finn-rtllib/swg/swg_hdl_template_mmv_1.v new file mode 100644 index 0000000000..670598d9a0 --- /dev/null +++ b/finn-rtllib/swg/swg_hdl_template_mmv_1.v @@ -0,0 +1,399 @@ +`timescale 1 ns / 1 ps + +module $TOP_MODULE_NAME$_controller +( + CLK, + RST, + advance, + addr_incr, + tail_incr +); + +input CLK; +input RST; +input advance; +output [31:0] addr_incr; //todo: minimize width +output [31:0] tail_incr; //todo: minimize width + +////code generation part: +localparam LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$; +localparam LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$; +localparam LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$; +localparam LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$; +localparam LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$; +localparam [31:0] ADDR_INCREMENT_MAP [0:5] = $ADDR_INCREMENT_MAP$; //todo: minimize width +//// + +//state and counters +reg [2:0] state, state_next; +parameter STATE_START = 0, STATE_LOOP_SIMD = 1, STATE_LOOP_KW = 2, STATE_LOOP_KH = 3, STATE_LOOP_W = 4, STATE_LOOP_H = 5; +integer counter_loop_h; //todo: minimize width +integer counter_loop_w; +integer counter_loop_kh; +integer counter_loop_kw; +integer counter_loop_simd; + +assign addr_incr = ADDR_INCREMENT_MAP[state]; + +//combinational logic for tail_incr generation +$TAIL_INCR_GENERATION$ + +//combinational next state logic +always @ (state, counter_loop_simd, counter_loop_kw, counter_loop_kh, counter_loop_w, counter_loop_h) begin + state_next = state; //default + if (state == $INNERMOST_STATE$) begin + if (counter_loop_simd == 0) + if (counter_loop_kw != 0) + state_next = STATE_LOOP_KW; + else + if(counter_loop_kh != 0) + state_next = STATE_LOOP_KH; + else + if(counter_loop_w != 0) + state_next = STATE_LOOP_W; + else + if(counter_loop_h != 0) + state_next = STATE_LOOP_H; + else + state_next = STATE_START; + end else + state_next = $INNERMOST_STATE$; +end + +//sequential logic +always @ (posedge CLK) begin + if (RST == 1'b0) begin + counter_loop_h <= LOOP_H_ITERATIONS; + counter_loop_w <= LOOP_W_ITERATIONS; + counter_loop_kh <= LOOP_KH_ITERATIONS; + counter_loop_kw <= LOOP_KW_ITERATIONS; + counter_loop_simd <= LOOP_SIMD_ITERATIONS; + state <= $INNERMOST_STATE$; //STATE_START; //debug: omit start state to fix timing, maybe omit during FM transition as well + end else begin + if (advance) begin + state <= state_next; + + if (state == $INNERMOST_STATE$) begin + if (counter_loop_simd == 0) begin + counter_loop_simd <= LOOP_SIMD_ITERATIONS; + if (counter_loop_kw == 0) begin + counter_loop_kw <= LOOP_KW_ITERATIONS; + if (counter_loop_kh == 0) begin + counter_loop_kh <= LOOP_KH_ITERATIONS; + if (counter_loop_w == 0) begin + counter_loop_w <= LOOP_W_ITERATIONS; + if (counter_loop_h == 0) begin + counter_loop_h <= LOOP_H_ITERATIONS; + end else + counter_loop_h <= counter_loop_h-1; + end else + counter_loop_w <= 
counter_loop_w-1; + end else + counter_loop_kh <= counter_loop_kh-1; + end else + counter_loop_kw <= counter_loop_kw-1; + end else + counter_loop_simd <= counter_loop_simd-1; + end + end + end +end +endmodule //controller + +module $TOP_MODULE_NAME$_cyclic_buffer_addressable +#( + parameter WIDTH = 1, + parameter DEPTH = 1 +) +( + CLK, + RST, + read_addr, + read_enable, + write_enable, + data_in, + data_out +); + +input CLK, RST, read_enable, write_enable; +input [$clog2(DEPTH)-1:0] read_addr; // absolute (!) read address of cyclic buffer +input [WIDTH-1:0] data_in; +output [WIDTH-1:0] data_out; + +integer addr_w; //todo: minimize width (as reg) + +$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; + +reg [WIDTH-1:0] out_reg; +assign data_out = out_reg; + +always @(posedge CLK) begin + if (RST == 1'b0) begin + addr_w <= 0; + end else begin + if (read_enable) + out_reg <= ram[read_addr]; + + if (write_enable) begin + ram[addr_w] <= data_in; + + if (addr_w == DEPTH-1) + addr_w <= 0; + else + addr_w <= addr_w + 1; + end + end +end +endmodule //cyclic_buffer_addressable + +module $TOP_MODULE_NAME$_impl ( + ap_clk, + ap_rst_n, + in0_V_V_TDATA, + in0_V_V_TVALID, + in0_V_V_TREADY, + out_V_V_TDATA, + out_V_V_TVALID, + out_V_V_TREADY +); + +parameter BIT_WIDTH = $BIT_WIDTH$; +parameter SIMD = $SIMD$; +parameter MMV_IN = $MMV_IN$; +parameter MMV_OUT = $MMV_OUT$; +parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; +parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; +parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; +parameter LAST_READ_ELEM = $LAST_READ_ELEM$; +parameter LAST_WRITE_ELEM = $LAST_WRITE_ELEM$; +parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; +parameter ELEM_PER_WINDOW = $ELEM_PER_WINDOW$; + +//IO ports +input ap_clk; +input ap_rst_n; +input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; +input in0_V_V_TVALID; +output in0_V_V_TREADY; +output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; +output out_V_V_TVALID; +input out_V_V_TREADY; + +//main buffer instantiation +wire [BUF_IN_WIDTH-1:0] window_buffer_in; +wire [BUF_OUT_WIDTH-1:0] window_buffer_out; +wire window_buffer_write_enable; +wire window_buffer_read_enable; +wire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; +$TOP_MODULE_NAME$_cyclic_buffer_addressable +#( + .WIDTH(BUF_IN_WIDTH), + .DEPTH(BUF_ELEM_TOTAL) +) +window_buffer_inst +( + .CLK(ap_clk), + .RST(ap_rst_n), + .read_addr(window_buffer_read_addr), + .read_enable(window_buffer_read_enable), + .write_enable(window_buffer_write_enable), + .data_in(window_buffer_in), + .data_out(window_buffer_out) +); + +//counters to keep track when to read/write +integer newest_buffered_elem; //todo: minimize width +integer newest_buffered_elem_available; //todo: minimize width +integer current_elem; +integer current_elem_available; +integer first_elem_next_window; +integer k; + +reg [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr_reg; +assign window_buffer_read_addr = window_buffer_read_addr_reg; + +//reg write_done; //keep track if W of current cycle was already completed, but we still wait on a R in the same cycle + +wire advance_controller; +wire [31:0] addr_incr; +wire [31:0] tail_incr; + +$TOP_MODULE_NAME$_controller +controller_inst +( + .CLK(ap_clk), + .RST(ap_rst_n), + .advance(advance_controller), + .addr_incr(addr_incr), + .tail_incr(tail_incr) +); + +wire reading_done; +assign reading_done = newest_buffered_elem == LAST_READ_ELEM; + +reg fetching_done; +reg writing_done; //instead of a separate write cycle/element counter, trigger this flag once current_element reaches LAST_WRITE_ELEM +//assign 
writing_done = current_elem == LAST_WRITE_ELEM; + + +wire write_blocked; + +//reg write_prefetch_available; // stores if the write of prefetched data is still outstanding + +wire fetch_cmd; +assign fetch_cmd = !(current_elem > newest_buffered_elem) && !write_blocked && !fetching_done; + + +//determine whether to read/write in this cycle +//wire write_cmd; +//assign write_cmd = write_prefetch_available && !writing_done; +reg write_cmd; + + + +wire read_cmd; +assign read_cmd = + ( + ( + (newest_buffered_elem - BUF_ELEM_TOTAL+1) < first_elem_next_window + &&(newest_buffered_elem - BUF_ELEM_TOTAL+1) < current_elem + ) // (over-)write to buffer if oldest buffered element is no longer needed + || fetching_done + ) //or if fetching is done (e.g. for skipped rows at FM end due to stride) + && !reading_done; //and if there is still an input element left to read + +//todo: optmize (e.g. is < or != more efficient?) +// ToDo: ideally this should point to the oldest elem of the next window, +// to allow reading while still writing the remainder of the current window + + + +assign write_blocked = write_cmd && !out_V_V_TREADY; //&& !write_done; + +wire read_ok; +// with transition to next cycle: +// want to read can read source is ready (waiting on VALID allowed) +assign read_ok = read_cmd && !write_blocked && in0_V_V_TVALID; + +wire write_ok; +// with transition to next cycle: +// output is VALID sink is ready sink has already read (we are waiting on source) +//assign write_ok = write_cmd && (out_V_V_TREADY || write_done); +assign write_ok = write_cmd && out_V_V_TREADY; + +//wire advance; +// includes waiting on W if W-only cycle: wait only on W no R/W to wait for +//assign advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); +//todo: optimize/simplify advance logic for write_done generation + +//assign buffer control +assign window_buffer_write_enable = read_ok; +assign window_buffer_read_enable = fetch_cmd; +assign advance_controller = fetch_cmd; //write_ok + +//assign I/O ports +assign window_buffer_in = in0_V_V_TDATA; +assign out_V_V_TDATA = window_buffer_out; +assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) +assign out_V_V_TVALID = ap_rst_n && write_cmd; //&& !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) + +//main process for advancing counters +always @ (posedge ap_clk) begin + if (ap_rst_n == 1'b0) begin + newest_buffered_elem <= -1; + //newest_buffered_elem_available <= -1; + current_elem <= 0; + //current_elem_available <= 0; + first_elem_next_window <= 0; + k <= 0; + window_buffer_read_addr_reg <= 0; + fetching_done <= 0; + writing_done <= 0; + //write_prefetch_available <= 0; + write_cmd <= 0; + end else begin + if (read_ok) begin + //check if this is the last read cycle (reading_done will be true afterwards) + if ((newest_buffered_elem == LAST_READ_ELEM-1) && writing_done) begin + //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) + //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) + newest_buffered_elem <= -1; + current_elem <= 0; + first_elem_next_window <= 0; + writing_done <= 0; + fetching_done <= 0; + end + + newest_buffered_elem <= newest_buffered_elem+1; + end + + if (fetch_cmd) begin + //count up to track which element index is about to be read from the buffer, and where it 
is located within the buffer + //use increment value calculated by controller + + //keep track where we are within a window + if (k == ELEM_PER_WINDOW-1) + k <= 0; + else + k <= k+1; + + //absolute buffer address always wraps around (in both directions for depthwise support) + if ($signed(window_buffer_read_addr_reg + addr_incr) > BUF_ELEM_TOTAL-1) + window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr - BUF_ELEM_TOTAL; + else if ($signed(window_buffer_read_addr_reg + addr_incr) < 0) + window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr + BUF_ELEM_TOTAL; + else + window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr; + + //check if this is the last write cycle (writing_done will be true afterwards) + if (current_elem == LAST_WRITE_ELEM) begin + fetching_done <= 1; + end else begin + //current element index wraps around only at window boundary + //if (((current_elem + addr_incr) > BUF_ELEM_TOTAL-1) && (k == ELEM_PER_WINDOW-1)) + + //if (k == ELEM_PER_WINDOW-1) + // current_elem <= current_elem + addr_incr - BUF_ELEM_TOTAL; + //else + current_elem <= current_elem + addr_incr; + end + + if (k == 0) + first_elem_next_window <= first_elem_next_window + tail_incr; + + // determine if prefetched data will be outstanding in the next cycle + // if we fetch in this cycle -> yes + // if we do not fetch nor write successfully -> do not change + // if we do not fetch but write -> clear outstanding data + //write_prefetch_available <= fetch_cmd; + write_cmd <= fetch_cmd; + end + + if (write_ok) + // determine if prefetched data will be outstanding in the next cycle + // if we fetch in this cycle -> yes + // if we do not fetch nor write successfully -> do not change + // if we do not fetch but write -> clear outstanding data + //write_prefetch_available <= fetch_cmd; + write_cmd <= fetch_cmd; + + if (write_ok && fetching_done) begin + //check if this is the last write cycle (writing_done will be true afterwards) + if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM-1))) begin + //start processing of next FM if reading is done already, or completes in the same cycle + newest_buffered_elem <= -1; + current_elem <= 0; + first_elem_next_window <= 0; + fetching_done <= 0; + end else + writing_done <= 1; + end + + //if (advance) + // write_done <= 1'b0; //reset flag + //else if (write_ok) // successful W in this cycle, but R still outstanding + // write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
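The wrap-around of window_buffer_read_addr_reg above can be modelled in a few lines of Python, which may help when checking a controller address schedule offline. This is an illustration only, not part of the generated RTL; the buffer depth is a hypothetical toy value.

def next_read_addr(addr, incr, depth):
    # mirrors the $signed(...) comparisons in the always block above:
    # the absolute read address wraps in both directions so that the
    # negative increments used for depthwise mode stay inside the buffer
    nxt = addr + incr
    if nxt > depth - 1:
        return nxt - depth
    if nxt < 0:
        return nxt + depth
    return nxt

assert next_read_addr(8, 3, 10) == 1    # forward wrap
assert next_read_addr(1, -4, 10) == 7   # backward wrap (negative increment)
assert next_read_addr(4, 2, 10) == 6    # no wrap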
+ end +end + +endmodule //TOP_MODULE_NAME_impl diff --git a/finn-rtllib/swg/swg_hdl_template_wrapper.v b/finn-rtllib/swg/swg_hdl_template_wrapper.v new file mode 100644 index 0000000000..db0556d940 --- /dev/null +++ b/finn-rtllib/swg/swg_hdl_template_wrapper.v @@ -0,0 +1,46 @@ +`timescale 1 ns / 1 ps + +module $TOP_MODULE_NAME$ ( + ap_clk, + ap_rst_n, + in0_V_V_TDATA, + in0_V_V_TVALID, + in0_V_V_TREADY, + out_V_V_TDATA, + out_V_V_TVALID, + out_V_V_TREADY +); + +parameter BIT_WIDTH = $BIT_WIDTH$; +parameter SIMD = $SIMD$; +parameter MMV_IN = $MMV_IN$; +parameter MMV_OUT = $MMV_OUT$; +parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; +parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; + +input ap_clk; +input ap_rst_n; +(* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) //todo: make configurable or set later +input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; +input in0_V_V_TVALID; +output in0_V_V_TREADY; +(* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) +output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; +output out_V_V_TVALID; +input out_V_V_TREADY; + +$TOP_MODULE_NAME$_impl +#() +impl +( + .ap_clk(ap_clk), + .ap_rst_n(ap_rst_n), + .in0_V_V_TDATA(in0_V_V_TDATA), + .in0_V_V_TVALID(in0_V_V_TVALID), + .in0_V_V_TREADY(in0_V_V_TREADY), + .out_V_V_TDATA(out_V_V_TDATA), + .out_V_V_TVALID(out_V_V_TVALID), + .out_V_V_TREADY(out_V_V_TREADY) +); + +endmodule //TOP_MODULE_NAME diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index cfd6572a8d..4b31b7c973 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -27,6 +27,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math +from math import copysign import numpy as np import os @@ -46,15 +47,6 @@ except ModuleNotFoundError: PyVerilator = None -# This operation should only be used for 1D convolutions. Either the -# IFMDim_H or IFMDim_W should be '1', which represents the so-called -# dummy-dimension - -# ONNX i/o tensor shape assumptions for ConvolutionInputGenerator1D: -# input 0 is the input tensor, shape NHWC = (1, IFMDim_H, IFMDim_W, IFMChannels) -# output 0 is the output tensor, shape NHWC: -# = (1, OFMDim_H, OFMDim_W, (ConvKernelDim_H*ConvKernelDim_W)*IFMChannels) - # note: the actual data layout produced by the hlslib kernels is different # for depthwise and non-depthwise ops. # * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) @@ -62,12 +54,9 @@ # see test_fpgadataflow_slidingwindow.py for an example of how to transform # between the two layouts - class ConvolutionInputGenerator_rtl(HLSCustomOp): - """Class that corresponds to one of the 1D finn-hlslib ConvolutionInputGenerator - (sliding window) function variants. Depending on the combination of - attributes (e.g. depthwise or not, whether dilation is 0) a different - variant will be picked for the actual HLS implementation.""" + """Class that does not correspond to one of the finn-hlslib ConvolutionInputGenerator + (sliding window) function variants! ... 
""" def __init__(self, onnx_node): super().__init__(onnx_node) @@ -80,6 +69,7 @@ def get_nodeattr_types(self): "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), "M": ("i", False, 1), + "parallel_window": ("i", False, 0, {0, 1}), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs @@ -87,14 +77,14 @@ def get_nodeattr_types(self): "outputDataType": ("s", True, ""), "depthwise": ("i", False, 0, {0, 1}), # FPGA resource type for ConvolutionInputGenerator input buffer - # auto -- let Vivado HLS decide + # auto -- let Vivado decide # block -- use BRAM # distributed -- use LUTRAM # ultra -- use URAM "ram_style": ( "s", False, - "distributed", + "auto", {"auto", "block", "distributed", "ultra"}, ), "gen_top_module": ("s", False, ""), @@ -147,7 +137,7 @@ def get_folded_output_shape(self): ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" - if self.use_parallel_window_output(): + if (self.get_nodeattr("parallel_window")): wf = int((ifm_ch) // simd) #folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) if ofm_dim_w == 1: @@ -193,7 +183,7 @@ def get_instream_width(self): return in_width def get_outstream_width(self): - if self.use_parallel_window_output(): + if (self.get_nodeattr("parallel_window")): # feed all window pixels in parallel k_h, k_w = self.get_nodeattr("ConvKernelDim") return self.get_instream_width() * k_h * k_w @@ -201,6 +191,11 @@ def get_outstream_width(self): # if parallel variant not in use: same width for output and input stream return self.get_instream_width() + def get_number_input_values(self): + folded_ishape = self.get_folded_input_shape() + num_input_elems = np.prod(folded_ishape[:-1]) + return num_input_elems + def get_number_output_values(self): folded_oshape = self.get_folded_output_shape() num_output_elems = np.prod(folded_oshape[:-1]) @@ -235,20 +230,6 @@ def get_1d_conv_attrs_normalized(self): return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) - def use_parallel_window_output(self): - # Check if simple "ConvolutionInputGenerator_1D_parallel" variant can be used to - # feed window in parallel to the following layer, enabling full SIMD unfolding. 
- dilation = self.get_nodeattr("Dilation") - dilation_h, dilation_w = dilation - - #todo: make this configurable via mmv_out instead of an automatic selection - - if self.get_nodeattr("SIMD") == self.get_nodeattr("IFMChannels"): - if self.get_nodeattr("depthwise") == 0: - return True - - return False - def get_exp_cycles(self): simd = self.get_nodeattr("SIMD") ( @@ -268,7 +249,7 @@ def get_exp_cycles(self): # since mmv != 1 is not supported yet, we set mmv for now to 1 mmv = 1 # see https://github.com/Xilinx/finn-hlslib/blob/master/slidingwindow.h - if self.use_parallel_window_output(): + if (self.get_nodeattr("parallel_window")): exp_cycles = ifm_dim_w + 1 else: cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv @@ -467,8 +448,6 @@ def pragmas(self): def generate_hdl(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") f_debug = open(os.path.join(code_gen_dir, "swg_hdl_debuginfo.log"), "w") - #debug: - #f_debug = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_debuginfo.log"), "w") code_gen_dict = {} #-------------------- @@ -480,26 +459,38 @@ def generate_hdl(self): ofm_dim = self.get_nodeattr("OFMDim") stride = self.get_nodeattr("Stride") dilation = self.get_nodeattr("Dilation") + depthwise = self.get_nodeattr("depthwise") n = 1 h, w = ifm_dim c = 1 # ifm_ch not considered atm (always parallelize across c) k_h, k_w = k - pad = [0,0,0,0] # padding happens in separate padding node + pad = [0,0,0,0] # padding happens in separate padding node for now pad_val = 0 stride_h, stride_w = stride dilation_h, dilation_w = dilation conv_c = 99 # init folding config - M = self.get_nodeattr("M") simd = self.get_nodeattr("SIMD") - mmv_in = 1*M - mmv_out = k_h*k_w*M + M = self.get_nodeattr("M") + if (self.get_nodeattr("parallel_window")): + mmv_in = M*1 + mmv_out = M*k_h*k_w + assert ifm_ch==simd, "Constraint violated: SIMD must be equal to C" + else: + mmv_in = 1 + mmv_out = 1 + assert ifm_ch%simd==0, "Constraint violated: SIMD must divide C" - assert simd==ifm_ch, "Constraint violated: SIMD = C" - assert mmv_in==1*M, "Constraint violated: MMV_IN = 1" # *M - assert mmv_out==k_h*k_w*M, "Constraint violated: mmv_out = K" # *M + # todo: check allowed hyperparams + # ToDo: move/duplicate these checks in corresponding convert_to_hls transformation + + # choose implementation style + if (mmv_out > 1 or (k_h==1 and k_w==1)): + impl_style = "parallel" + else: + impl_style = "default" # how many "unused" registers are allowed between buffer positions that will be accessed in parallel # example: @@ -579,218 +570,245 @@ def generate_hdl(self): f_debug.write("\n"+"sequential pixel indices (shape %s" % str(idx_px.shape)) f_debug.write("\n"+str(idx_px)) - output_elem, output_cycles = idx_px.shape + k, cycles = idx_px.shape + + output_elements = mmv_out + output_cycles = int(cycles/(mmv_out/k)) + # ToDo: what happens when output_cycles=OFMdim % M != 0 # ...try to support IFMdim % M != 0 first, so we can work with the usual k=3 where OFMdim = IFMdim - -2 # the additional garbage input elements that are read in the last cycle are not read by any window anyway idx_px = idx_px.transpose() - idx_px = idx_px.reshape((int(output_cycles/M), int(output_elem*M))) + idx_px = idx_px.reshape(output_cycles, output_elements) idx_px = idx_px.transpose() + # result: first dim is number of parallel output elements, second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle f_debug.write("\n"+"sequential pixel indices, MMV_out grouping (shape %s" 
% str(idx_px.shape)) f_debug.write("\n"+str(idx_px)) + #f_debug.close() buffer = [] buffer_max_size = 0 # buffer schedule (write from input, read to output) schedule_write = [] schedule_read = [] + schedule_shift = [] + schedule = [] schedule_prev = '' next_in_px = 0 + oldest_px = 0 + buffer_space_freed = False idx_px_relative = idx_px.copy() + idx_px_addr = idx_px.copy() + idx_px_addr_incr = idx_px.copy() + idx_px_addr_rel = idx_px.copy() - # compute schedule and buffer read pattern + # compute schedule and buffer read pattern (output driven) output_elem, output_cycles = idx_px_relative.shape - for x in range(output_cycles): - # load missing inputs into buffer - for y in range(output_elem): - while int(idx_px_relative[y,x]) not in buffer: - # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) + + if (impl_style == "parallel"): + for x in range(output_cycles): + # load missing inputs into buffer + for y in range(output_elem): + while int(idx_px_relative[y,x]) not in buffer: + # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) + for m in range(M): + buffer.append(next_in_px) + next_in_px += 1 + schedule_write.append(1) + schedule_read.append(0) + if schedule_prev == 'w': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'w')) + schedule_prev = 'w' + + # discard unused buffer elements + oldest_px = np.min(idx_px_relative[:,x:]) + #check whether M elements can be shifted out, not just the single oldest one + # must this be "while" for MMV to work?!? breaks mmvout = 1 case + #while all([buffer[i] < oldest_px for i in range(M)]): + if all([buffer[i] < oldest_px for i in range(M)]): + # M buffer elements are shifted out at once + for m in range(M): + buffer.pop(0) + + # adjust relative buffer index of current x (according to last discarded buffer elements) + for y in range(output_elem): + idx_px_relative[y,x] -= oldest_px + + + # read from buffer + # + simultaneously load next pixel(s) into buffer if there are any left + if (next_in_px > (h_padded*w_padded-1)): + # read only (append above) + schedule_read.append(1) + schedule_write.append(0) + if schedule_prev == 'r': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'r')) + schedule_prev = 'r' + else: + # load M inputs at once for m in range(M): buffer.append(next_in_px) next_in_px += 1 + schedule_read.append(1) schedule_write.append(1) - schedule_read.append(0) - if schedule_prev == 'w': + if schedule_prev == 'wr': count, cmd = schedule[-1] schedule[-1] = (count+1, cmd) else: - schedule.append((1, 'w')) - schedule_prev = 'w' - - # discard unused buffer elements (assumes in-order access) - oldest_px = min(idx_px_relative[:,x]) - #while buffer[0] < oldest_px: - #check whether M elements can be shifted out, not just the single oldest one - while all([buffer[i] < oldest_px for i in range(M)]): - # M buffer elements are shifted out at once - for m in range(M): - buffer.pop(0) - - # adjust relative buffer index - for y in range(output_elem): - idx_px_relative[y,x] -= oldest_px - - # record max needed buffer depth - if len(buffer) > buffer_max_size: - buffer_max_size = len(buffer) - - # read from buffer - schedule_read.append(1) - - # simultaneously load next pixel(s) into buffer if there are any left - if next_in_px > (h_padded*w_padded-1): - schedule_write.append(0) - if schedule_prev == 'r': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - 
schedule.append((1, 'r')) - schedule_prev = 'r' - else: - # load M inputs at once - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 + schedule.append((1, 'wr')) + schedule_prev = 'wr' + + # record max needed buffer depth + #f_debug.write("\n"+str(buffer)) + if len(buffer) > buffer_max_size: + buffer_max_size = len(buffer) + + # insert dummy write operations for data at the input FM tail-end that is never read (e.g. in case of stride > 1) + while next_in_px <= (h_padded*w_padded-1): + next_in_px += 1 schedule_write.append(1) - if schedule_prev == 'wr': + schedule_read.append(0) + if schedule_prev == 'w': count, cmd = schedule[-1] schedule[-1] = (count+1, cmd) else: - schedule.append((1, 'wr')) - schedule_prev = 'wr' + schedule.append((1, 'w')) + schedule_prev = 'w' + # find buffer access patterns + buffer_access_patterns = [] + for x in range(output_cycles): + if idx_px_relative[:,x].tolist() not in buffer_access_patterns: + buffer_access_patterns.append(idx_px_relative[:,x].tolist()) - # find buffer access patterns - buffer_access_patterns = [] - for x in range(output_cycles): - if idx_px_relative[:,x].tolist() not in buffer_access_patterns: - buffer_access_patterns.append(idx_px_relative[:,x].tolist()) - - # from itertools import groupby - # schedule_write_compressed = ''.join('(' + str(k) + ',' + str(sum(1 for x in g)) + '),' for k, g in groupby(schedule_write)) - # schedule_read_compressed = ''.join('(' + str(k) + ',' + str(sum(1 for x in g)) + '),' for k, g in groupby(schedule_read)) - - # analyse schedule - # class sched_gen: - # start_counter = 0 - # start_val = 0 - - # end_last_sequence_counter = 0 - # end_sequence = [] - - # outer_counter = 0 - # outer_sequence_counter = 0 - # outer_sequence_val = 0 - - # inner_counter = 0 - # inner_sequence = [] - - # def __str__(self): - # return "\nstart: %d x %d\n %d x\n %d x %s + %d x %d\nend: %d x %s + %s\n" % ( - # self.start_counter, - # self.start_val, - # self.outer_counter, - # self.inner_counter, - # str(self.inner_sequence), - # self.outer_sequence_counter, - # self.outer_sequence_val, - # self.end_last_sequence_counter, - # str(self.inner_sequence), - # self.end_sequence - # ) - - # def analyse_schedule(schedule): - # generator = sched_gen() - - # #determine start sequence - # for i, v in enumerate(schedule): - # if i > 0 and v != schedule[i-1]: - # generator.start_counter = i - # generator.start_val = schedule[i-1] - # break - - # #determine inner loop/sequence - # sequence_MAX = 10 - # schedule = schedule[generator.start_counter:] # cut off processed entries - # sequence = [] - # repititions = 0 - # i = 0 - # while i < len(schedule): - # if not sequence: - # sequence.append(schedule[i]) - # i = i+1 - # else: - # # is this a beginning of a repitition of the current sequence? - # if i + len(sequence) < len(schedule) and all([schedule[i+offset] == sequence[offset] for offset in range(len(sequence))]): - # repititions = repititions + 1 - # i = i+len(sequence) - # else: - # # did we already count repitions of the sequence? 
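The (count, command) run-length encoding built by the scheduling loops above can be reduced to a small helper; the sketch below (illustration only, not part of the patch) shows how repeated 'w', 'r' and 'wr' cycles collapse into tuples, using a hypothetical toy cycle trace.

def append_cmd(schedule, prev, cmd):
    # append one cycle with the given command, merging runs of equal commands
    if prev == cmd and schedule:
        count, _ = schedule[-1]
        schedule[-1] = (count + 1, cmd)
    else:
        schedule.append((1, cmd))
    return cmd  # becomes schedule_prev for the next cycle

schedule = []
prev = ''
for cmd in ['w', 'w', 'w', 'wr', 'r', 'r', 'wr', 'r', 'r']:  # toy trace
    prev = append_cmd(schedule, prev, cmd)

print(schedule)  # [(3, 'w'), (1, 'wr'), (2, 'r'), (1, 'wr'), (2, 'r')]

compact_schedule() further below then splits such a list into a start sequence, a repeated loop body and an optional end sequence.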
- # sequence_candidate = sequence + sequence * repititions - # sequence_candidate.append(schedule[i]) - # if len(sequence_candidate) < sequence_MAX: - # sequence = sequence_candidate.copy() - # repititions = 0 - # i = i+1 - # else: - # schedule = schedule[i:] # cut off processed entries - # break - # generator.inner_counter = repititions + 1 - # generator.inner_sequence = sequence + else: + + #simulate cyclic buffer, which is advanced on every write (as opposed to on overy sheduled cycle) + #buffer_tail = 0 + buffer_head = 0 #buffer_tail+1 + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_len = (k_h-1) * dilation_h * w + (k_w-1) * dilation_w + 1 + buffer = [-1] * buffer_len - # #determine outer sequence - # for i, v in enumerate(schedule): - # if i > 0 and v != schedule[i-1]: - # generator.outer_sequence_counter = i - # generator.outer_sequence_val = schedule[i-1] - # break - - # schedule = schedule[generator.outer_sequence_counter:] # cut off processed entries - - # sequence_to_compare = generator.inner_sequence * generator.inner_counter + [generator.outer_sequence_val] * generator.outer_sequence_counter - - # generator.outer_counter = 1 - # i = 0 - # while i < len(schedule): - # # is this a beginning of a repitition of the current sequence? - # if i + len(sequence_to_compare) < len(schedule) and all([schedule[i+offset] == sequence_to_compare[offset] for offset in range(len(sequence_to_compare))]): - # generator.outer_counter = generator.outer_counter + 1 - # i = i+len(sequence_to_compare) - # else: - # schedule = schedule[i:] # cut off processed entries - # break - - # #determine end sequence - # #for i, v in enumerate(schedule): - # # if i > 0 and v != schedule[i-1]: - # # generator.end_counter = i - # # generator.end_val = schedule[i-1] - # # break - - # sequence = generator.inner_sequence - # repititions = 0 - # i = 0 - # while i < len(schedule): - # # is this a beginning of a repitition of the current sequence? 
- # if i + len(sequence) < len(schedule) and all([schedule[i+offset] == sequence[offset] for offset in range(len(sequence))]): - # repititions = repititions + 1 - # i = i+len(sequence) - # else: - # schedule = schedule[i:] # cut off processed entries - # break - # generator.end_last_sequence_counter = repititions - - # #remainder - # generator.end_sequence = schedule - - # return generator + # todo: remove this simulation, not needed and doesnt accout for SIMD anyways + for x in range(output_cycles): + # load missing inputs into buffer + while int(idx_px_relative[0,x]) not in buffer: + # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) + for m in range(M): + #buffer.append(next_in_px) + buffer[buffer_head] = next_in_px + next_in_px += 1 + schedule_write.append(1) + schedule_read.append(0) + if schedule_prev == 'w': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'w')) + schedule_prev = 'w' + + #try to advance/shift the buffer by one, discarding the oldest element + #discard_oldest_elem = buffer[0] < np.min(idx_px_relative[0,x:]) + #if discard_oldest_elem: + # buffer.pop(0) + # schedule_shift.append(1) + #else: + # schedule_shift.append(0) + buffer_head += 1 + if buffer_head > buffer_len-1: + buffer_head = 0 + + ### perform read ### + + #try to advance/shift the buffer by one, discarding the oldest element + #discard_oldest_elem = buffer[0] < np.min(idx_px_relative[0,x:]) + #if discard_oldest_elem: + # buffer.pop(0) + # schedule_shift.append(1) + #else: + # schedule_shift.append(0) + + # note current relative addr in buffer + idx_px_addr[0,x] = buffer.index(idx_px_relative[0,x]) + if x > 0: + idx_px_addr_incr[0,x] = idx_px_addr[0,x] - idx_px_addr[0,x-1] + if idx_px_addr_incr[0,x] < 0: + idx_px_addr_incr[0,x] += buffer_len + else: + idx_px_addr_incr[0,x] = idx_px_addr[0,x] + + idx_px_addr_rel [0,x] = buffer.index(idx_px_relative[0,x]) - buffer_head + if idx_px_addr_rel [0,x] < 0: + idx_px_addr_rel [0,x] += buffer_len + + + #try to write a new input into the buffer simultaneously (during this read as opposed to before the next read) + # assume in-order write into the buffer (oldest element is always at head+1) + discard_oldest_elem = np.min(buffer) < np.min(idx_px_relative[0,x:]) + read_only = True + if not (next_in_px > (h_padded*w_padded-1)): + # input data available + #if (x < k_h*k_w) or discard_oldest_elem: + if discard_oldest_elem: + # buffer is not complete, as the first window has not been fully output + # or we can discard one element from the buffer after this read, so there is space for a new one + read_only = False + + + # read from buffer + # + simultaneously load next pixel(s) into buffer if there are any left + # if mmv_out = 1: addressable BRAM implementation style -> do not shift in while outputting K kernel elements to keep addressing consistent + #if (next_in_px > (h_padded*w_padded-1)) or ((x+1) % (k_h*k_w) != 0): + #if (next_in_px > (h_padded*w_padded-1)) or (x > 1 and (not buffer_space_freed)): + if read_only: + # read only + schedule_read.append(1) + schedule_write.append(0) + if schedule_prev == 'r': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + schedule.append((1, 'r')) + schedule_prev = 'r' + else: + # read + write + #buffer.append(next_in_px) + buffer[buffer_head] = next_in_px + next_in_px += 1 + schedule_read.append(1) + schedule_write.append(1) + if schedule_prev == 'wr': + count, cmd = schedule[-1] + schedule[-1] = (count+1, cmd) + else: + 
schedule.append((1, 'wr')) + schedule_prev = 'wr' + + # advance buffer + buffer_head += 1 + if buffer_head > buffer_len-1: + buffer_head = 0 + + # record max needed buffer depth + #f_debug.write("\n"+str(buffer)) + if len(buffer) > buffer_max_size: + buffer_max_size = len(buffer) + + # ToDo: maybe replace with directly-computed schedule (similar to addr. buffer impl. style) def compact_schedule(schedule): # leave first sequence (pre-load) as is @@ -852,6 +870,16 @@ def compact_schedule(schedule): end_sequence = schedule[i] i += 1 + if i < len(schedule): + end_sequence = end_sequence + schedule[i] + i += 1 + + assert len(start_sequence) == 1*2, "ERROR: invalid start sequence" + assert len(loop_sequence_1) == 2*2, "ERROR: invalid loop 1 sequence" + if loop_sequence_2: + assert len(loop_sequence_2) <= 2*2, "ERROR: invalid loop 2 sequence" + if end_sequence: + assert len(end_sequence) <= 2*2, "ERROR: invalid end sequence" assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" %(i, len(schedule)) return ( @@ -866,9 +894,12 @@ def compact_schedule(schedule): f_debug.write("\n"+"max buffer size observed: %d" %(buffer_max_size)) f_debug.write("\n"+"output vector elements: relative buffer indices") f_debug.write("\n"+str(idx_px_relative)) - f_debug.write("\n"+"found %d buffer access patterns:" % len(buffer_access_patterns)) - f_debug.write("\n"+str(buffer_access_patterns)) - f_debug.write("\n"+"required parallel-access registers for mmv_out=k: %d" % len(sum(buffer_access_patterns,[]))) + f_debug.write("\n"+"output vector elements: absolute buffer address") + f_debug.write("\n"+str(idx_px_addr)) + f_debug.write("\n"+"output vector elements: absolute buffer address increment from last") + f_debug.write("\n"+str(idx_px_addr_incr)) + f_debug.write("\n"+"output vector elements: relative buffer address (from head)") + f_debug.write("\n"+str(idx_px_addr_rel)) f_debug.write("\n"+"buffer write schedule (%d cycles)" % len(schedule_write)) f_debug.write("\n"+str(schedule_write)) f_debug.write("\n"+"writing buffer in %d cycles" % schedule_write.count(1)) @@ -879,19 +910,112 @@ def compact_schedule(schedule): f_debug.write("\n"+"buffer read schedule (%d cycles)" % len(schedule_read)) f_debug.write("\n"+str(schedule_read)) f_debug.write("\n"+"reading buffer in %d cycles" % schedule_read.count(1)) + + #f_debug.write("\n"+"buffer shift schedule (%d cycles)" % len(schedule_shift)) + #f_debug.write("\n"+str(schedule_shift)) + #f_debug.write("\n"+"shifting buffer in %d cycles" % schedule_shift.count(1)) #f_debug.write("\n"+"buffer read schedule COMPRESSED") #f_debug.write("\n"+str(schedule_read_compressed)) #f_debug.write("\n"+"buffer read schedule ANALYZED") #f_debug.write("\n"+str(analyse_schedule(schedule_read))) - f_debug.write("\n"+"buffer rw schedule NEW") - f_debug.write("\n"+str(schedule)) - f_debug.write("\n"+"buffer rw schedule NEW compacted") - f_debug.write("\n"+"\nstart_sequence: %s\nloop_counter: %s\nloop_sequence_1_counter: %s\nloop_sequence_1: %s\nloop_sequence_2: %s\nend_sequence: %s\n" % compact_schedule(schedule)) - assert len(schedule_write) == len(schedule_read), "ERROR: Schedules have different lenghts" - cycles_total = len(schedule_write) + addr_incr_end_window_elem = 0 + addr_incr_end_window_row = 0 + addr_incr_end_window = 0 + addr_incr_end_row = 0 + + if (impl_style == "default"): + f_debug.write("\n"+"mmv_out = 1: computing incremental addressing scheme directly:") + addressing_scheme = [[0]] + + # compute index/address increments for each nested loop + channel_factor = 
int(ifm_ch/simd) + + #todo: rename to (min) buffer len + buffer_max_size = buffer_max_size * channel_factor + + kernel_width = (k_w-1)*dilation_w+1 # incl. dilation + addr_incr_end_simd = 1 + addr_incr_end_window_elem = (dilation_w-1) * channel_factor + 1 + + remaining_line = (w - kernel_width) * channel_factor + skip_lines = (dilation_h-1) * w * channel_factor + addr_incr_end_window_row = remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer + + #addr_incr_end_window = stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer + addr_incr_end_window = -buffer_max_size + stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer + + # rows that are skipped due to imperfect stride<->W combination + skip_columns = w%(kernel_width + (out_dim_w-1)*stride_w) + remaining_line = (skip_columns + kernel_width) * channel_factor # increment from oldest buffer position (top left) to end of line + skip_lines = (stride_h-1) * w * channel_factor + #addr_incr_end_row = remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer + addr_incr_end_row = -buffer_max_size + remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer + + + + if (depthwise): + addr_incr_end_window_elem = dilation_w * channel_factor + addr_incr_end_window_row = (channel_factor + + (w - kernel_width) * channel_factor + + (dilation_h-1) * w * channel_factor + ) + addr_incr_end_simd = -buffer_max_size + (channel_factor + 1) + #addr_incr_end_simd = channel_factor + 1 + + # just for testing: + for i_windows_per_h in range(out_dim_h): # LOOP_H + for i_windows_per_w in range(out_dim_w): # LOOP_W + for i_simd_per_px in range(channel_factor): # LOOP_SIMD + for i_px_per_window_h in range(k_h): # LOOP_KH + for i_px_per_window_w in range(k_w-1): # LOOP_KW + addressing_scheme[0].append(addr_incr_end_window_elem) + if i_px_per_window_h != k_h-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_window_row) + if i_simd_per_px != channel_factor-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_simd) + if i_windows_per_w != out_dim_w-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_window) + if i_windows_per_h != out_dim_h-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_row) + else: + # just for testing: + for i_windows_per_h in range(out_dim_h): # LOOP_H + for i_windows_per_w in range(out_dim_w): # LOOP_W + for i_px_per_window_h in range(k_h): # LOOP_KH + for i_px_per_window_w in range(k_w): # LOOP_KW + for i_simd_per_px in range(channel_factor-1): # LOOP_SIMD + addressing_scheme[0].append(addr_incr_end_simd) + if i_px_per_window_w != k_w-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_window_elem) + if i_px_per_window_h != k_h-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_window_row) + if i_windows_per_w != out_dim_w-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_window) + if i_windows_per_h != out_dim_h-1: # skip on last iteration + addressing_scheme[0].append(addr_incr_end_row) + + f_debug.write("\n"+str(np.array(addressing_scheme))) + if simd == ifm_ch: + # simd < c currently not simulated + if (np.array(addressing_scheme) == idx_px_addr_incr).all: + f_debug.write("\n"+"computed addressing matches simulated addressing") + else: + f_debug.write("\n"+"ERROR") + else: + f_debug.write("\n"+"found %d buffer access patterns:" % len(buffer_access_patterns)) + f_debug.write("\n"+str(buffer_access_patterns)) 
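As a concrete check of the increment formulas above, the sketch below (not part of the patch) evaluates them for a hypothetical non-depthwise configuration: 8x8 input, 3x3 kernel, stride 1, dilation 1 and SIMD = C (channel_factor = 1). All names mirror the variables above; the numeric results in the comments follow directly from these formulas.

h = w = 8
k_h = k_w = 3
stride_h = stride_w = 1
dilation_h = dilation_w = 1
channel_factor = 1                      # ifm_ch // simd
out_dim_w = (w - k_w) // stride_w + 1   # 6, no padding in this toy case

# minimal buffer: one full (dilated) window footprint
buffer_min_size = ((k_h-1)*dilation_h*w + (k_w-1)*dilation_w + 1) * channel_factor

kernel_width = (k_w-1)*dilation_w + 1
addr_incr_end_simd = 1
addr_incr_end_window_elem = (dilation_w-1)*channel_factor + 1
addr_incr_end_window_row = ((w - kernel_width)*channel_factor
                            + (dilation_h-1)*w*channel_factor + 1)
addr_incr_end_window = -buffer_min_size + stride_w*channel_factor + 1
skip_columns = w % (kernel_width + (out_dim_w-1)*stride_w)
addr_incr_end_row = (-buffer_min_size
                     + (skip_columns + kernel_width)*channel_factor
                     + (stride_h-1)*w*channel_factor + 1)

print(buffer_min_size)            # 19
print(addr_incr_end_window_elem)  # 1   (applied within a kernel row, LOOP_KW)
print(addr_incr_end_window_row)   # 6   (applied at the end of a kernel row, LOOP_KH)
print(addr_incr_end_window)       # -17 (applied after each window, LOOP_W)
print(addr_incr_end_row)          # -15 (applied after each output row, LOOP_H)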
+ f_debug.write("\n"+"required parallel-access registers for mmv_out=k: %d" % len(sum(buffer_access_patterns,[]))) + f_debug.write("\n"+"buffer rw schedule NEW") + f_debug.write("\n"+str(schedule)) + f_debug.write("\n"+"buffer rw schedule NEW compacted") + f_debug.write("\n"+"\nstart_sequence: %s\nloop_counter: %s\nloop_sequence_1_counter: %s\nloop_sequence_1: %s\nloop_sequence_2: %s\nend_sequence: %s\n" % compact_schedule(schedule)) + assert len(schedule_write) == len(schedule_read), "ERROR: Schedules have different lenghts" + assert schedule_write.count(1) == self.get_number_input_values(), "ERROR: Writing buffer in fewer cycles than expected" + assert schedule_read.count(1) == self.get_number_output_values(), "ERROR: Reading buffer in fewer cycles than expected" + cycles_total = len(schedule_write) + - assert schedule_read.count(1) == self.get_number_output_values(), "ERROR: Reading buffer in fewer cycles than expected" code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] #save top module name so we can refer to it even after this node has been renamed (e.g. by GiveUniqueNodeNames(prefix) during MakeZynqProject) @@ -900,216 +1024,297 @@ def compact_schedule(schedule): code_gen_dict["$SIMD$"] = [str(simd)] code_gen_dict["$MMV_IN$"] = [str(mmv_in)] code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] - code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] - # determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) - # ToDo: this part doesn't fully account for M (2D buffer) yet - assert len(buffer_access_patterns) == 1, "ERROR: Buffer access pattern is not static" - buf_static_access_pattern = buffer_access_patterns[0] - reg_fifos = [] - reg_fifos_depth = [] - bram_fifos = [] - bram_fifos_depth = [] - current = [] - for i in range(len(buf_static_access_pattern)): - access_idx = buf_static_access_pattern[i] - if len(current) == 0: - current.append(access_idx) + + ram_style = self.get_nodeattr("ram_style") + if ram_style == "auto": + code_gen_dict["$RAM_STYLE$"]=[""] + else: + code_gen_dict["$RAM_STYLE$"]=["(* ram_style = \"{}\" *)".format(ram_style)] + + if (impl_style == "default"): + ### MMVout = 1: addressable buffer implementation style + f_debug.write("\n"+"Choosing implementation style: Addressable buffer due to mmv_out=1") + + # add additional buffer space in case of stride > 1 + # this minimizes cycle count, as it allows an earlier pre-load of skipped input elements + buffer_actual_size = (buffer_max_size + max(0,((stride_w-1) - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor) + + max(0,((stride_h-1)*w - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor)) + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] + + assert not(abs(addr_incr_end_window) > buffer_actual_size), "ERROR: W increment > buffer size, wrap logic doesn't account for this" + assert not(abs(addr_incr_end_row) > buffer_actual_size), "ERROR: H increment > buffer size, wrap logic doesn't account for this" + + kernel_width = (k_w-1)*dilation_w+1 # incl. dilation + kernel_height = (k_h-1)*dilation_h+1 # incl. 
dilation + skip_columns = w%(kernel_width + (out_dim_w-1)*stride_w) + skip_rows = h%(kernel_height + (out_dim_h-1)*stride_h) + code_gen_dict["$LAST_READ_ELEM$"] = [str(h*w*channel_factor-1)] + code_gen_dict["$LAST_WRITE_ELEM$"] = [str(((h - skip_rows - 1) * w + (w - skip_columns))*channel_factor -1)] + + loop_h_iterations = out_dim_h + loop_w_iterations = out_dim_w + loop_kh_iterations = k_h + loop_kw_iterations = k_w + loop_simd_iterations = channel_factor + + if (depthwise and channel_factor > 1): + # re-arrange existing controller loop structure for depthwise convolutions + loop_kh_iterations = channel_factor + loop_kw_iterations = k_h + loop_simd_iterations = k_w + addr_incr_end_simd_ = addr_incr_end_simd + addr_incr_end_simd = addr_incr_end_window_elem + addr_incr_end_window_elem = addr_incr_end_window_row + addr_incr_end_window_row = addr_incr_end_simd_ + elem_per_window = k_h*k_w + + code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" + always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin + if (counter_loop_kh != 0) + tail_incr = 1; + else if (counter_loop_w != 0) + tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_W]-{channel_factor}+{buffer_min_size}; + else // do not check for counter_loop_h to increment past LAST_WRITE_ELEM during last window + tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_H]-{channel_factor}+{buffer_min_size}; + end + """.format(channel_factor=channel_factor, buffer_min_size=buffer_max_size)] else: - # assume non-decreasing index order in access pattern - # ToDo: this assumption does not hold for M>1 case (2D buffer) - distance = access_idx - max(current) - if not (distance-1 > REG_BRAM_THRESHOLD): - for i in range(distance-1): - # insert dummy into REG FIFO (not read as part of window) - current.append(-1) - # assign this access to same REG FIFO as previous one - current.append(access_idx) + # depthwise output format is equivalent to non-depthwise if SIMD=C + elem_per_window = k_h*k_w*channel_factor + + code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" + always @ (counter_loop_w, counter_loop_h) begin + if (counter_loop_w != 0) + tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_W]-1+{buffer_min_size}; + else // do not check for counter_loop_h to increment past LAST_WRITE_ELEM during last window + tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_H]-1+{buffer_min_size}; + end + """.format(buffer_min_size=buffer_max_size)] + + # support SIMD = C and k_w = 1 cases + # for k = [k_h, k_w] = [1, k_w], no adjustment is needed + # for k = [k_h, k_w] = [1, 1], do not use this impl. style (mmv_out=K=1) + # innermost loop is executed at least once -> adjust if needed + if (loop_simd_iterations == 1): + # skip innermost SIMD loop completely + if (loop_kw_iterations == 1): + # skip innermost KW loop completely + code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_KH"] + loop_kh_iterations -= 1 # -1 because state is initial state else: - # assign skipped accesses to new BRAM FIFO - bram_fifos.append([-1]*(distance-1)) - bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? 
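The REG/BRAM partitioning above can be exercised on its own; the sketch below (illustration only) applies the same threshold rule to a hypothetical 3x3 access pattern on an 8-pixel-wide line, with M = 1 and a toy REG_BRAM_THRESHOLD of 4. Consecutive accesses stay in one REG FIFO (with -1 dummies filling small gaps), while large gaps become BRAM/LUTRAM line buffers.

import math

REG_BRAM_THRESHOLD = 4   # toy value for illustration
M = 1
access_pattern = [0, 1, 2, 8, 9, 10, 16, 17, 18]  # hypothetical 3x3 window, W=8

reg_fifos, reg_fifos_depth = [], []
bram_fifos, bram_fifos_depth = [], []
current = []
for access_idx in access_pattern:
    if not current:
        current.append(access_idx)
        continue
    distance = access_idx - max(current)
    if distance - 1 <= REG_BRAM_THRESHOLD:
        # small gap: pad with dummy registers, keep the same REG FIFO
        current.extend([-1] * (distance - 1))
        current.append(access_idx)
    else:
        # large gap: spend a line buffer, start a new REG FIFO
        bram_fifos.append([-1] * (distance - 1))
        bram_fifos_depth.append(math.ceil((distance - 1) / M))
        reg_fifos.append(current)
        reg_fifos_depth.append(len(current))
        current = [access_idx]
reg_fifos.append(current)
reg_fifos_depth.append(len(current))

print(reg_fifos)         # [[0, 1, 2], [8, 9, 10], [16, 17, 18]]
print(bram_fifos_depth)  # [5, 5]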
- # start with new REG FIFO - reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) ToDo: fix for M again - reg_fifos_depth.append(len(current)) - current = [] + code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_KW"] + loop_kw_iterations -= 1 # -1 because state is initial state + else: + code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_SIMD"] + loop_simd_iterations -= 1 # -1 because state is initial state + + code_gen_dict["$LOOP_H_ITERATIONS$"]=[str(loop_h_iterations-1)] + code_gen_dict["$LOOP_W_ITERATIONS$"]=[str(loop_w_iterations-1)] + code_gen_dict["$LOOP_KH_ITERATIONS$"]=[str(loop_kh_iterations-1)] + code_gen_dict["$LOOP_KW_ITERATIONS$"]=[str(loop_kw_iterations-1)] + code_gen_dict["$LOOP_SIMD_ITERATIONS$"]=[str(loop_simd_iterations-1)] + + w = 32 #ToDo: minimize + code_gen_dict["$ADDR_INCREMENT_MAP$"]=["'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format(w, + int(copysign(w,addr_incr_end_simd)),abs(addr_incr_end_simd), + int(copysign(w,addr_incr_end_window_elem)),abs(addr_incr_end_window_elem), + int(copysign(w,addr_incr_end_window_row)),abs(addr_incr_end_window_row), + int(copysign(w,addr_incr_end_window)),abs(addr_incr_end_window), + int(copysign(w,addr_incr_end_row)),abs(addr_incr_end_row))] + + code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] + + with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template_mmv_1.v", "r") as f: + template = f.read() + else: + f_debug.write("\n"+"Choosing implementation style: Parallel Registers (+ line buffers) due to mmv_out>1") + ### determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) + # ToDo: this part doesn't fully account for M (2D buffer) yet + + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] + + assert len(buffer_access_patterns) == 1, "ERROR: Buffer access pattern is not static" + buf_static_access_pattern = buffer_access_patterns[0] + reg_fifos = [] + reg_fifos_depth = [] + bram_fifos = [] + bram_fifos_depth = [] + current = [] + for i in range(len(buf_static_access_pattern)): + access_idx = buf_static_access_pattern[i] + if len(current) == 0: current.append(access_idx) - reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) ToDo fix for M again - reg_fifos_depth.append(len(current)) - - f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) - f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) - f_debug.write("\n"+str(reg_fifos)) - f_debug.write("\n"+"%d BRAM FIFOs (line buffers):" % len(bram_fifos)) - f_debug.write("\n"+str(bram_fifos)) - - code_gen_dict["$GENERATE_REG_FIFOS$"] = [] - for i in range(len(reg_fifos)): - code_gen_dict["$GENERATE_REG_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] reg_fifo_{id}_in; - wire [IN_WIDTH-1:0] reg_fifo_{id}_out; - wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; - {name}_reg_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) - ) - reg_buffer_inst_{id} - ( - .CLK(CLK), - .shift_enable(shift_enable), - .shift_in(reg_fifo_{id}_in), - .shift_out(reg_fifo_{id}_out), - .data_out(reg_fifo_{id}) - );""".format(name=self.get_verilog_top_module_name(), id=i, len=reg_fifos_depth[i])) - - code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] - for i in range(len(bram_fifos)): - code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] bram_fifo_{id}_in; - wire [IN_WIDTH-1:0] bram_fifo_{id}_out; - {name}_ram_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) - ) - ram_buffer_inst_{id} - ( - .CLK(CLK), - .RST(RST), - 
.shift_enable(shift_enable), - .shift_in(bram_fifo_{id}_in), - .shift_out(bram_fifo_{id}_out) - );""".format(name=self.get_verilog_top_module_name(), id=i, len=bram_fifos_depth[i])) - - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] - out_idx = mmv_out-1 - for fifo_id, reg_fifo in enumerate(reg_fifos): - for fifo_idx, access_idx in enumerate(reg_fifo): - if(access_idx != -1): - #code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - # "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( - # out_idx=out_idx, fifo_id=fifo_id, fifo_idx=fifo_idx, access_idx=access_idx - # ) - #) - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( - out_idx=out_idx, fifo_id=fifo_id, - access_idx=reg_fifos_depth[fifo_id]-1-int((max(reg_fifo)-access_idx)/M), - mmv_idx=(max(reg_fifo)-access_idx)%M, - mmv = M - ) + else: + # assume non-decreasing index order in access pattern + # ToDo: this assumption does not hold for M>1 case (2D buffer) + distance = access_idx - max(current) + if not (distance-1 > REG_BRAM_THRESHOLD): + for i in range(distance-1): + # insert dummy into REG FIFO (not read as part of window) + current.append(-1) + # assign this access to same REG FIFO as previous one + current.append(access_idx) + else: + # assign skipped accesses to new BRAM FIFO + bram_fifos.append([-1]*(distance-1)) + bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? + # start with new REG FIFO + reg_fifos.append(current) + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) #ToDo: fix for M again + reg_fifos_depth.append(len(current)) + current = [] + current.append(access_idx) + reg_fifos.append(current) + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) #ToDo fix for M again + reg_fifos_depth.append(len(current)) + + f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) + f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) + f_debug.write("\n"+str(reg_fifos)) + f_debug.write("\n"+"%d BRAM FIFOs (line buffers):" % len(bram_fifos)) + f_debug.write("\n"+str(bram_fifos)) + + code_gen_dict["$GENERATE_REG_FIFOS$"] = [] + for i in range(len(reg_fifos)): + code_gen_dict["$GENERATE_REG_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] reg_fifo_{id}_in; + wire [IN_WIDTH-1:0] reg_fifo_{id}_out; + wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; + {name}_reg_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) ) - # reversal: out_idx=0 -> oldest buffer element -> highest access_idx - out_idx = out_idx-1 - assert out_idx==-1, "ERROR: Not all output vector elements connected" - - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] - for i in range(len(reg_fifos)): - if i == 0: - # first FIFO containing newest elements -> input comes from input reg - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = reg_input;""".format(fifo_id=i,)) - else: - # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) - input_fifo_id = i-1 + reg_buffer_inst_{id} + ( + .CLK(CLK), + .shift_enable(shift_enable), + .shift_in(reg_fifo_{id}_in), + .shift_out(reg_fifo_{id}_out), + .data_out(reg_fifo_{id}) + );""".format(name=self.get_verilog_top_module_name(), id=i, len=reg_fifos_depth[i])) + + code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] + for i in range(len(bram_fifos)): + 
code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] bram_fifo_{id}_in; + wire [IN_WIDTH-1:0] bram_fifo_{id}_out; + {name}_ram_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + ram_buffer_inst_{id} + ( + .CLK(CLK), + .RST(RST), + .shift_enable(shift_enable), + .shift_in(bram_fifo_{id}_in), + .shift_out(bram_fifo_{id}_out) + );""".format(name=self.get_verilog_top_module_name(), id=i, len=bram_fifos_depth[i])) + + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] + out_idx = mmv_out-1 + for fifo_id, reg_fifo in enumerate(reg_fifos): + for fifo_idx, access_idx in enumerate(reg_fifo): + if(access_idx != -1): + #code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + # "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( + # out_idx=out_idx, fifo_id=fifo_id, fifo_idx=fifo_idx, access_idx=access_idx + # ) + #) + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( + out_idx=out_idx, fifo_id=fifo_id, + access_idx=reg_fifos_depth[fifo_id]-1-int((max(reg_fifo)-access_idx)/M), + mmv_idx=(max(reg_fifo)-access_idx)%M, + mmv = M + ) + ) + # reversal: out_idx=0 -> oldest buffer element -> highest access_idx + out_idx = out_idx-1 + assert out_idx==-1, "ERROR: Not all output vector elements connected" + + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] + for i in range(len(reg_fifos)): + if i == 0: + # first FIFO containing newest elements -> input comes from input reg + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = reg_input;""".format(fifo_id=i,)) + else: + # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) + input_fifo_id = i-1 + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) + for i in range(len(bram_fifos)): + input_fifo_id = i code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) - for i in range(len(bram_fifos)): - input_fifo_id = i - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) - - # Generate read schedule (when data is read from input, written to buffer) - # code_gen_dict["$GENERATE_READ_SCHEDULE$"] = [] - # schedule_as_string = "" - # #todo: change naming to swap write/read - # for i in schedule_write: - # if i == 1: - # schedule_as_string += "1'b1," - # else: - # schedule_as_string += "1'b0," - # schedule_as_string = schedule_as_string[:-1] # remove trailing ',' - # code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( - # "localparam [0:{len}-1] READ_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) - # ) - # code_gen_dict["$GENERATE_READ_SCHEDULE$"].append( - # "assign read_state = READ_SCHEDULE[cycle];" - # ) - - # # Generate write schedule (when data is written to output, read from buffer) - # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"] = [] - # schedule_as_string = "" - # #todo: change naming to swap write/read - # for i in schedule_read: - # if i == 1: - # schedule_as_string += "1'b1," - # else: - # schedule_as_string += "1'b0," - # schedule_as_string = schedule_as_string[:-1] # remove trailing 
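
# Sketch only, not from the patch itself: the output mapping generated above, reduced to
# the M = 1 case. As the inline comment notes, out_idx = 0 is connected to the oldest
# buffer element (highest access index), hence the downward-counting out_idx.
def output_mapping(reg_fifos, mmv_out):
    mapping = {}                      # out_idx -> (fifo_id, position within that FIFO)
    out_idx = mmv_out - 1
    for fifo_id, reg_fifo in enumerate(reg_fifos):
        depth = len(reg_fifo)
        for access_idx in reg_fifo:
            if access_idx == -1:      # dummy slot, not read as part of any window
                continue
            mapping[out_idx] = (fifo_id, depth - 1 - (max(reg_fifo) - access_idx))
            out_idx -= 1
    assert out_idx == -1, "not all output vector elements connected"
    return mapping

# For the 3x3 example above (mmv_out = 9):
# output_mapping([[0, 1, 2], [10, 11, 12], [20, 21, 22]], 9)
#   -> {8: (0, 0), 7: (0, 1), 6: (0, 2), 5: (1, 0), ..., 0: (2, 2)}
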
',' - # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - # "localparam [0:{len}-1] WRITE_SCHEDULE = {{{str}}};".format(len=cycles_total, str=schedule_as_string) - # ) - # code_gen_dict["$GENERATE_WRITE_SCHEDULE$"].append( - # "assign write_state = WRITE_SCHEDULE[cycle_last];" - # ) - - def convert_tuple(seq): - mapping = {'w': ("1'b1", "1'b0"), - 'r': ("1'b0", "1'b1"), - 'wr':("1'b1", "1'b1"), - 'n': ("1'b0", "1'b0")} - if seq: - if len(seq) == 2: - return (seq[0], mapping[seq[1]], 0, mapping['n']) - if len(seq) == 4: - return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) - else: - return (0, mapping['n'], 0, mapping['n']) + """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) + + def convert_tuple(seq): + mapping = {'w': ("1'b1", "1'b0"), + 'r': ("1'b0", "1'b1"), + 'wr':("1'b1", "1'b1"), + 'n': ("1'b0", "1'b0")} + if seq: + if len(seq) == 2: + return (seq[0], mapping[seq[1]], 0, mapping['n']) + if len(seq) == 4: + return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) + else: + return (0, mapping['n'], 0, mapping['n']) - start_sequence,loop_counter,loop_sequence_1_counter,loop_sequence_1,loop_sequence_2,end_sequence = compact_schedule(schedule) + start_sequence,loop_counter,loop_sequence_1_counter,loop_sequence_1,loop_sequence_2,end_sequence = compact_schedule(schedule) - start_sequence = convert_tuple(start_sequence) - loop_sequence_1 = convert_tuple(loop_sequence_1) - loop_sequence_2 = convert_tuple(loop_sequence_2) - end_sequence = convert_tuple(end_sequence) + start_sequence = convert_tuple(start_sequence) + loop_sequence_1 = convert_tuple(loop_sequence_1) + loop_sequence_2 = convert_tuple(loop_sequence_2) + end_sequence = convert_tuple(end_sequence) - code_gen_dict["$START_COUNTER$"]=[str(start_sequence[0])] - code_gen_dict["$LOOP_MAIN_COUNTER$"]=[str(loop_sequence_1_counter)] - code_gen_dict["$LOOP_INTER_COUNTER$"]=[str(loop_counter)] + code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] - code_gen_dict["$LOOP_MAIN_1_COUNTER$"]=[str(loop_sequence_1[0])] - code_gen_dict["$LOOP_MAIN_2_COUNTER$"]=[str(loop_sequence_1[2])] + code_gen_dict["$START_COUNTER$"]=[str(start_sequence[0])] + code_gen_dict["$LOOP_MAIN_COUNTER$"]=[str(loop_sequence_1_counter)] + code_gen_dict["$LOOP_INTER_COUNTER$"]=[str(loop_counter)] - code_gen_dict["$LOOP_INTER_1_COUNTER$"]=[str(loop_sequence_2[0])] - code_gen_dict["$LOOP_INTER_2_COUNTER$"]=[str(loop_sequence_2[2])] + code_gen_dict["$LOOP_MAIN_1_COUNTER$"]=[str(loop_sequence_1[0])] + code_gen_dict["$LOOP_MAIN_2_COUNTER$"]=[str(loop_sequence_1[2])] - code_gen_dict["$LOOP_END_1_COUNTER$"]=[str(end_sequence[0])] - code_gen_dict["$LOOP_END_2_COUNTER$"]=[str(end_sequence[2])] + code_gen_dict["$LOOP_INTER_1_COUNTER$"]=[str(loop_sequence_2[0])] + code_gen_dict["$LOOP_INTER_2_COUNTER$"]=[str(loop_sequence_2[2])] - code_gen_dict["$READ_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][0],loop_sequence_1[1][0],loop_sequence_1[3][0],loop_sequence_2[1][0],loop_sequence_2[3][0],end_sequence[1][0],end_sequence[3][0])] - code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] + code_gen_dict["$LOOP_END_1_COUNTER$"]=[str(end_sequence[0])] + code_gen_dict["$LOOP_END_2_COUNTER$"]=[str(end_sequence[2])] - with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: - template = f.read() + 
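
# Usage sketch only, not from the patch itself: convert_tuple() turns one compacted
# schedule entry (count, phase[, count2, phase2]) into counter values plus the command
# bit pairs that later fill $READ_CMD_MAP$ / $WRITE_CMD_MAP$. Here 'w', 'r', 'wr' and
# 'n' denote write-only, read-only, simultaneous and idle buffer phases.
example_entries = [
    (3, 'w'),            # 3 cycles of buffer writes only
    (8, 'wr', 2, 'r'),   # 8 cycles write+read, then 2 cycles read only
    None,                # missing entry (e.g. no end sequence)
]
# convert_tuple((3, 'w'))          -> (3, ("1'b1", "1'b0"), 0, ("1'b0", "1'b0"))
# convert_tuple((8, 'wr', 2, 'r')) -> (8, ("1'b1", "1'b1"), 2, ("1'b0", "1'b1"))
# convert_tuple(None)              -> (0, ("1'b0", "1'b0"), 0, ("1'b0", "1'b0"))
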
code_gen_dict["$READ_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][0],loop_sequence_1[1][0],loop_sequence_1[3][0],loop_sequence_2[1][0],loop_sequence_2[3][0],end_sequence[1][0],end_sequence[3][0])] + code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] + + with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: + template = f.read() + + with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template_wrapper.v", "r") as f: + template_wrapper = f.read() + for key in code_gen_dict: # transform list into long string separated by '\n' code_gen_line = "\n".join(code_gen_dict[key]) template = template.replace(key, code_gen_line) + template_wrapper = template_wrapper.replace(key, code_gen_line) - f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_hdl_gen.v"), "w") - #debug: - #f = open(os.path.join("/workspace/finn/finn-rtllib/swg/", "swg_hdl_generated.v"), "w") + f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv"), "w") f.write(template) f.close() + + f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v"), "w") + f.write(template_wrapper) + f.close() + f_debug.close() #set ipgen_path and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain @@ -1127,10 +1332,9 @@ def prepare_rtlsim(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") verilog_paths = [code_gen_dir] - verilog_files = [self.get_nodeattr("gen_top_module") + "_hdl_gen.v"] - #debug: - #verilog_paths = ["/workspace/finn/finn-rtllib/swg/"] - #verilog_files = ["swg_hdl_generated.v"] + verilog_files = [self.get_nodeattr("gen_top_module") + "_wrapper.v", + self.get_nodeattr("gen_top_module") + "_impl.sv"] + # build the Verilator emu library sim = PyVerilator.build( verilog_files, @@ -1149,22 +1353,24 @@ def code_generation_ipi(self): vlnv = self.get_nodeattr("ip_vlnv") code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - cmd = ["add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_hdl_gen.v")), + cmd = ["add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v")), + "add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv")), "create_bd_cell -type module -reference %s %s" % (self.get_nodeattr("gen_top_module"), self.onnx_node.name)] return cmd def code_generation_ipgen(self, model, fpgapart, clk): - """Generates c++ code and tcl script for ip generation.""" + """Normally: Generates c++ code and tcl script for ip generation. 
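
# Sketch only, not from the patch itself: how the $PLACEHOLDER$ keys collected in
# code_gen_dict are spliced into both the implementation template and the wrapper
# template before the two generated files (*_impl.sv, *_wrapper.v) are written out.
def fill_template(template_text, code_gen_dict):
    for key, lines in code_gen_dict.items():
        template_text = template_text.replace(key, "\n".join(lines))
    return template_text

# e.g. fill_template("localparam SIMD = $SIMD$;", {"$SIMD$": ["8"]})
#      -> "localparam SIMD = 8;"
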
+ Here: Generates (System-)Verilog code for ip generation.""" self.generate_hdl() def ipgen_singlenode_code(self): - """Builds the bash script for ip generation using the CallHLS from + """Normally: Builds the bash script for ip generation using the CallHLS from finn.util.hls.""" pass def code_generation_cppsim(self, model): - """Generates c++ code for simulation (cppsim).""" + """Normally: Generates c++ code for simulation (cppsim).""" pass def compile_singlenode_code(self): diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 113ccb93b8..cd08bb4603 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -48,6 +48,11 @@ class InferConvInpGen(Transformation): """Convert Im2Col layers to ConvolutionInputGenerator layers.""" + def __init__(self, use_rtl_variant=False): + super().__init__() + self.use_rtl_variant = use_rtl_variant + self.use_rtl_variant = True #testing + def apply(self, model): graph = model.graph node_ind = 0 @@ -128,105 +133,128 @@ def apply(self, model): ) graph.node.insert(node_ind, padding_node) - # Ensure that only supported HLS nodes are inserted - is_square_image = ConvInpGen_idim_h == ConvInpGen_idim_w - is_square_kernel = k_h == k_w - is_kernel_pointwise = k_h == 1 and k_w == 1 - is_equal_stride = stride_h == stride_w - is_1d_convolution = (k_h == 1 and k_w > 1 and ifm_dim_h == 1) or ( - k_h > 1 and k_w == 1 and ifm_dim_w == 1 - ) - - if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: - assert is_square_image, ( - "%s : DownSampler currently only supports square input images." - % n.name - ) - assert is_equal_stride, ( - """%s : DownSampler currently only supports equal stride value - along different axes.""" - % n.name - ) - ConvInpGen_idim = ConvInpGen_idim_h - stride = stride_h - # create DownSampler node + if (self.use_rtl_variant): ConvInpGen_node = helper.make_node( - "DownSampler", + "ConvolutionInputGenerator_rtl", [ConvInpGen_input], [i2c_output], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - ImgDim=ConvInpGen_idim, - NumChannels=ifm_ch, + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], SIMD=ifm_ch, - Stride=stride, + M=1, + parallel_window=0, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], inputDataType=dt.name, - name="DownSampler_" + n.name, + outputDataType=dt.name, + depthwise=depthwise, + name="ConvolutionInputGenerator_rtl" + n.name, ) graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) else: - # create equivalent ConvolutionInputGenerator node - if ( - is_square_image and is_square_kernel - ): # square images and square kernels - assert is_equal_stride, ( - """%s: Non-equal strides along different axes is not supported - for (non-)square convolutions""" + # Ensure that only supported HLS nodes are inserted + is_square_image = ConvInpGen_idim_h == ConvInpGen_idim_w + is_square_kernel = k_h == k_w + is_kernel_pointwise = k_h == 1 and k_w == 1 + is_equal_stride = stride_h == stride_w + is_1d_convolution = (k_h == 1 and k_w > 1 and ifm_dim_h == 1) or ( + k_h > 1 and k_w == 1 and ifm_dim_w == 1 + ) + + if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: + assert is_square_image, ( + "%s : DownSampler currently only supports square input images." 
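
# Usage sketch only, not from the patch itself: with the new constructor flag, the RTL
# variant is selected when lowering Im2Col nodes (note that the hard-coded
# "self.use_rtl_variant = True  #testing" line above still overrides the argument).
from finn.transformation.fpgadataflow.convert_to_hls_layers import InferConvInpGen

def lower_im2col_to_rtl_swg(model):
    """model: a FINN ModelWrapper containing Im2Col nodes (assumed)."""
    # inserts ConvolutionInputGenerator_rtl nodes instead of the HLS
    # ConvolutionInputGenerator / ConvolutionInputGenerator1D / DownSampler nodes
    return model.transform(InferConvInpGen(use_rtl_variant=True))
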
% n.name ) - assert dilation_h == 1 and dilation_w == 1, ( - """%s: Dilation value != 1 is not supported - for square convolutions""" + assert is_equal_stride, ( + """%s : DownSampler currently only supports equal stride value + along different axes.""" % n.name ) + ConvInpGen_idim = ConvInpGen_idim_h + stride = stride_h + # create DownSampler node ConvInpGen_node = helper.make_node( - "ConvolutionInputGenerator", + "DownSampler", [ConvInpGen_input], [i2c_output], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - ConvKernelDim=[k_h, k_w], - IFMChannels=ifm_ch, - IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], - OFMDim=[ofm_dim_h, ofm_dim_w], + ImgDim=ConvInpGen_idim, + NumChannels=ifm_ch, SIMD=ifm_ch, - Stride=[stride_h, stride_w], - Dilation=[dilation_h, dilation_w], + Stride=stride, inputDataType=dt.name, - outputDataType=dt.name, - depthwise=depthwise, - name="ConvolutionInputGenerator_" + n.name, - ) - else: # non-square images and/or kernels - assert is_1d_convolution, ( - "%s: ConvolutionInputGenerator1D works only for 1D convs" - % n.name + name="DownSampler_" + n.name, ) - if dilation_h > 1 or dilation_w > 1: - assert stride_h == 1 and stride_w == 1, ( - """%s: Stride value of greater than 1 is not supported for convolutions - with dilation value greater than 1""" + graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) + else: + # create equivalent ConvolutionInputGenerator node + if ( + is_square_image and is_square_kernel + ): # square images and square kernels + assert is_equal_stride, ( + """%s: Non-equal strides along different axes is not supported + for (non-)square convolutions""" % n.name ) - ConvInpGen_node = helper.make_node( - "ConvolutionInputGenerator1D", - [ConvInpGen_input], - [i2c_output], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - ConvKernelDim=[k_h, k_w], - IFMChannels=ifm_ch, - IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], - OFMDim=[ofm_dim_h, ofm_dim_w], - SIMD=ifm_ch, - Stride=[stride_h, stride_w], - Dilation=[dilation_h, dilation_w], - inputDataType=dt.name, - outputDataType=dt.name, - depthwise=depthwise, - name="ConvolutionInputGenerator1D_" + n.name, - ) - graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) + assert dilation_h == 1 and dilation_w == 1, ( + """%s: Dilation value != 1 is not supported + for square convolutions""" + % n.name + ) + ConvInpGen_node = helper.make_node( + "ConvolutionInputGenerator", + [ConvInpGen_input], + [i2c_output], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=ifm_ch, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=dt.name, + outputDataType=dt.name, + depthwise=depthwise, + name="ConvolutionInputGenerator_" + n.name, + ) + else: # non-square images and/or kernels + assert is_1d_convolution, ( + "%s: ConvolutionInputGenerator1D works only for 1D convs" + % n.name + ) + if dilation_h > 1 or dilation_w > 1: + assert stride_h == 1 and stride_w == 1, ( + """%s: Stride value of greater than 1 is not supported for convolutions + with dilation value greater than 1""" + % n.name + ) + ConvInpGen_node = helper.make_node( + "ConvolutionInputGenerator1D", + [ConvInpGen_input], + [i2c_output], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ConvInpGen_idim_h, ConvInpGen_idim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + 
SIMD=ifm_ch, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=dt.name, + outputDataType=dt.name, + depthwise=depthwise, + name="ConvolutionInputGenerator1D_" + n.name, + ) + graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) # remove old nodes graph.node.remove(n) graph_modified = True diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index ef1fda8e31..870f5593bf 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020, Xilinx +# Copyright (c) 2022, Xilinx # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -37,18 +37,14 @@ from finn.core.modelwrapper import ModelWrapper from finn.custom_op.general.im2col import compute_conv_output_dim from finn.custom_op.registry import getCustomOp -from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim -from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP -from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode from finn.transformation.general import GiveUniqueNodeNames from finn.util.basic import gen_finn_dt_tensor - def make_single_im2col_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt + k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt ): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim @@ -90,7 +86,7 @@ def make_single_im2col_modelwrapper( def make_single_slidingwindow_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, m, stride, dilation, idt, dw=0 + k, ifm_ch, ifm_dim, ofm_dim, simd, m, parallel_window, stride, dilation, idt, dw=0 ): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim @@ -118,6 +114,7 @@ def make_single_slidingwindow_modelwrapper( OFMDim=[ofm_dim_h, ofm_dim_w], SIMD=simd, M=m, + parallel_window=parallel_window, Stride=[stride_h, stride_w], Dilation=[dilation_h, dilation_w], inputDataType=idt.name, @@ -150,31 +147,33 @@ def prepare_inputs(input_tensor): # input datatype -@pytest.mark.parametrize("idt", [DataType["INT4"]]) +@pytest.mark.parametrize("idt", [DataType["UINT4"]]) # kernel size -@pytest.mark.parametrize("k", [[3, 1]]) +@pytest.mark.parametrize("k", [[3,3]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[10, 1]]) +@pytest.mark.parametrize("ifm_dim", [[24,24]]) # input channels -@pytest.mark.parametrize("ifm_ch", [2]) +@pytest.mark.parametrize("ifm_ch", [8]) # Stride -@pytest.mark.parametrize("stride", [[1, 1]]) +@pytest.mark.parametrize("stride", [[3,3],[6,6]]) # Dilation -@pytest.mark.parametrize("dilation", [[1, 1]]) -# execution mode -@pytest.mark.parametrize("exec_mode", ["rtlsim"]) +@pytest.mark.parametrize("dilation", [[1,1],[2,2]]) +# depthwise +@pytest.mark.parametrize("dw", [0,1]) + # input channel parallelism ("SIMD") -@pytest.mark.parametrize("simd", [2]) +@pytest.mark.parametrize("simd", [1,2,8]) # in/out MMV ("M") -@pytest.mark.parametrize("m", [1, 2, 4]) -# depthwise -@pytest.mark.parametrize("dw", [0]) +@pytest.mark.parametrize("m", [1]) +# paralle_window enable (MMV_out = M*K) +@pytest.mark.parametrize("parallel_window", [0]) + # Flip dimensions -@pytest.mark.parametrize("flip", [False]) +@pytest.mark.parametrize("flip", 
[False,True]) @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_slidingwindow_rtl( - idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, m, dw, flip + idt, k, ifm_dim, ifm_ch, stride, dilation, dw, simd, m, parallel_window, flip ): if flip: k = k[::-1] @@ -187,11 +186,6 @@ def test_fpgadataflow_slidingwindow_rtl( stride_h, stride_w = stride dilation_h, dilation_w = dilation - #if (dilation_h > 1 or dilation_w > 1) and (stride_h > 1 or stride_w > 1): - # pytest.skip( - # """Dilation value greater than 1 and stride greater than 1 - # currently not supported for 1D convolutions""" - # ) if simd > ifm_ch: pytest.skip("SIMD cannot be larger than number of input channels") @@ -207,21 +201,17 @@ def test_fpgadataflow_slidingwindow_rtl( ofm_dim=ofm_dim, simd=simd, m=m, + parallel_window=parallel_window, stride=stride, dilation=dilation, idt=idt, dw=dw, ) - if exec_mode == "cppsim": - raise Exception("cppsim not supported in test_fpgadataflow_slidingwindow_rtl") - elif exec_mode == "rtlsim": - model = model.transform(SetExecMode("rtlsim")) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP("xc7z020clg400-1", 4)) - model = model.transform(PrepareRTLSim()) - else: - raise Exception("Unknown exec_mode in test_fpgadataflow_slidingwindow_rtl") + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(PrepareRTLSim()) # prepare input data input_dict = prepare_inputs(x) @@ -232,7 +222,6 @@ def test_fpgadataflow_slidingwindow_rtl( ifm_ch=ifm_ch, ifm_dim=ifm_dim, ofm_dim=ofm_dim, - simd=simd, stride=stride, dilation=dilation, idt=idt, @@ -245,6 +234,11 @@ def test_fpgadataflow_slidingwindow_rtl( print("--------produced:") print(y_produced) + node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + print("RTLSIM cycles: %d"%cycles_rtlsim) + if dw == 0: assert (y_produced == y_expected).all() else: @@ -255,12 +249,7 @@ def test_fpgadataflow_slidingwindow_rtl( y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) assert (y_produced == y_expected).all() - - # if exec_mode == "rtlsim": - # node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] - # inst = getCustomOp(node) - # cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") - # exp_cycles_dict = model.analysis(exp_cycles_per_layer) - # exp_cycles = exp_cycles_dict[node.name] - # assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) - # assert exp_cycles != 0 +# exp_cycles_dict = model.analysis(exp_cycles_per_layer) +# exp_cycles = exp_cycles_dict[node.name] +# assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) +# assert exp_cycles != 0 From 34f8211247e391cf1b7e3d6b384b6be2308d8d4b Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Mon, 13 Jun 2022 21:40:13 +0200 Subject: [PATCH 008/628] Cleanup and fixes --- ...mplate_mmv_1.v => swg_template_default.sv} | 230 ++--- ...dl_template.v => swg_template_parallel.sv} | 16 +- ...plate_wrapper.v => swg_template_wrapper.v} | 0 .../convolutioninputgenerator_rtl.py | 902 ++++++------------ ...est_fpgadataflow_convinputgenerator_rtl.py | 42 +- 5 files changed, 436 insertions(+), 754 deletions(-) rename finn-rtllib/swg/{swg_hdl_template_mmv_1.v => swg_template_default.sv} (57%) rename finn-rtllib/swg/{swg_hdl_template.v => swg_template_parallel.sv} (96%) rename finn-rtllib/swg/{swg_hdl_template_wrapper.v => 
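
# Sketch only, not from the patch itself: why the depthwise branch of the test has to
# reorder the im2col reference before comparing. The SWG emits
#   non-depthwise: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD)
#   depthwise:     (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD)
# (see the layout note in convolutioninputgenerator_rtl.py). With numpy this amounts to
# a reshape into the grouped view, a swap of the kernel and channel-group axes, and a
# reshape back to the flat (1, H, W, ifm_ch*k_h*k_w) tensor.
import numpy as np

def im2col_to_depthwise_order(y, ofm_dim_h, ofm_dim_w, k_h, k_w, ifm_ch, simd):
    y = y.reshape(1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd)
    y = y.transpose(0, 1, 2, 4, 3, 5)   # move channel groups in front of the k_h*k_w axis
    return y.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w)
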
swg_template_wrapper.v} (100%) diff --git a/finn-rtllib/swg/swg_hdl_template_mmv_1.v b/finn-rtllib/swg/swg_template_default.sv similarity index 57% rename from finn-rtllib/swg/swg_hdl_template_mmv_1.v rename to finn-rtllib/swg/swg_template_default.sv index 670598d9a0..12cc656928 100644 --- a/finn-rtllib/swg/swg_hdl_template_mmv_1.v +++ b/finn-rtllib/swg/swg_template_default.sv @@ -9,38 +9,39 @@ module $TOP_MODULE_NAME$_controller tail_incr ); -input CLK; -input RST; -input advance; -output [31:0] addr_incr; //todo: minimize width -output [31:0] tail_incr; //todo: minimize width - -////code generation part: localparam LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$; localparam LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$; localparam LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$; localparam LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$; localparam LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$; -localparam [31:0] ADDR_INCREMENT_MAP [0:5] = $ADDR_INCREMENT_MAP$; //todo: minimize width -//// +localparam INCR_BITWIDTH = $INCR_BITWIDTH$; +localparam [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP [0:5] = $ADDR_INCREMENT_MAP$; + +input CLK; +input RST; +input advance; +output [INCR_BITWIDTH-1:0] addr_incr; +output [INCR_BITWIDTH-1:0] tail_incr; //state and counters reg [2:0] state, state_next; parameter STATE_START = 0, STATE_LOOP_SIMD = 1, STATE_LOOP_KW = 2, STATE_LOOP_KH = 3, STATE_LOOP_W = 4, STATE_LOOP_H = 5; -integer counter_loop_h; //todo: minimize width -integer counter_loop_w; -integer counter_loop_kh; -integer counter_loop_kw; -integer counter_loop_simd; +reg [$clog2(LOOP_H_ITERATIONS+2)-1:0] counter_loop_h; //could add check if ITERATIONS > 0, then replace +2 with +1 +reg [$clog2(LOOP_W_ITERATIONS+2)-1:0] counter_loop_w; +reg [$clog2(LOOP_KH_ITERATIONS+2)-1:0] counter_loop_kh; +reg [$clog2(LOOP_KW_ITERATIONS+2)-1:0] counter_loop_kw; +reg [$clog2(LOOP_SIMD_ITERATIONS+2)-1:0] counter_loop_simd; +reg [INCR_BITWIDTH-1:0] tail_incr_reg; assign addr_incr = ADDR_INCREMENT_MAP[state]; +assign tail_incr = tail_incr_reg; //combinational logic for tail_incr generation $TAIL_INCR_GENERATION$ //combinational next state logic always @ (state, counter_loop_simd, counter_loop_kw, counter_loop_kh, counter_loop_w, counter_loop_h) begin - state_next = state; //default + state_next = state; if (state == $INNERMOST_STATE$) begin if (counter_loop_simd == 0) if (counter_loop_kw != 0) @@ -68,11 +69,10 @@ always @ (posedge CLK) begin counter_loop_kh <= LOOP_KH_ITERATIONS; counter_loop_kw <= LOOP_KW_ITERATIONS; counter_loop_simd <= LOOP_SIMD_ITERATIONS; - state <= $INNERMOST_STATE$; //STATE_START; //debug: omit start state to fix timing, maybe omit during FM transition as well + state <= $INNERMOST_STATE$; end else begin if (advance) begin state <= state_next; - if (state == $INNERMOST_STATE$) begin if (counter_loop_simd == 0) begin counter_loop_simd <= LOOP_SIMD_ITERATIONS; @@ -120,7 +120,7 @@ input [$clog2(DEPTH)-1:0] read_addr; // absolute (!) 
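
# Sketch only, not from the patch itself: the loop counters above are sized with
# $clog2(ITERATIONS + 2) bits; the "+2" (rather than "+1") keeps the width non-zero when
# ITERATIONS happens to be 0, as the inline note about the optional check suggests.
import math

def counter_width(loop_iterations):
    return math.ceil(math.log2(loop_iterations + 2))

# counter_width(2) -> 2 bits   (e.g. LOOP_KW_ITERATIONS for a 3x3 kernel)
# counter_width(0) -> 1 bit    (degenerate loop still needs one register bit)
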
read address of cyclic buff input [WIDTH-1:0] data_in; output [WIDTH-1:0] data_out; -integer addr_w; //todo: minimize width (as reg) +reg [$clog2(DEPTH)-1:0] write_addr; $RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; @@ -129,18 +129,18 @@ assign data_out = out_reg; always @(posedge CLK) begin if (RST == 1'b0) begin - addr_w <= 0; + write_addr <= 0; end else begin if (read_enable) out_reg <= ram[read_addr]; if (write_enable) begin - ram[addr_w] <= data_in; + ram[write_addr] <= data_in; - if (addr_w == DEPTH-1) - addr_w <= 0; + if (write_addr == DEPTH-1) + write_addr <= 0; else - addr_w <= addr_w + 1; + write_addr <= write_addr + 1; end end end @@ -156,27 +156,29 @@ module $TOP_MODULE_NAME$_impl ( out_V_V_TVALID, out_V_V_TREADY ); - -parameter BIT_WIDTH = $BIT_WIDTH$; -parameter SIMD = $SIMD$; -parameter MMV_IN = $MMV_IN$; -parameter MMV_OUT = $MMV_OUT$; -parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; -parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; -parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; -parameter LAST_READ_ELEM = $LAST_READ_ELEM$; -parameter LAST_WRITE_ELEM = $LAST_WRITE_ELEM$; -parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; -parameter ELEM_PER_WINDOW = $ELEM_PER_WINDOW$; +//generated constants +localparam BIT_WIDTH = $BIT_WIDTH$; +localparam SIMD = $SIMD$; +localparam MMV_IN = $MMV_IN$; +localparam MMV_OUT = $MMV_OUT$; +localparam BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; +localparam BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; +localparam BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; +localparam LAST_READ_ELEM = $LAST_READ_ELEM$; +localparam LAST_WRITE_ELEM = $LAST_WRITE_ELEM$; +//localparam [$clog2($BUF_ELEM_TOTAL$+1)-1:0] BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; +localparam BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; +localparam ELEM_PER_WINDOW = $ELEM_PER_WINDOW$; +localparam INCR_BITWIDTH = $INCR_BITWIDTH$; //IO ports input ap_clk; input ap_rst_n; -input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; +input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; input in0_V_V_TVALID; -output in0_V_V_TREADY; +output in0_V_V_TREADY; output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; -output out_V_V_TVALID; +output out_V_V_TVALID; input out_V_V_TREADY; //main buffer instantiation @@ -201,22 +203,10 @@ window_buffer_inst .data_out(window_buffer_out) ); -//counters to keep track when to read/write -integer newest_buffered_elem; //todo: minimize width -integer newest_buffered_elem_available; //todo: minimize width -integer current_elem; -integer current_elem_available; -integer first_elem_next_window; -integer k; - -reg [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr_reg; -assign window_buffer_read_addr = window_buffer_read_addr_reg; - -//reg write_done; //keep track if W of current cycle was already completed, but we still wait on a R in the same cycle - +//controller instantiation wire advance_controller; -wire [31:0] addr_incr; -wire [31:0] tail_incr; +wire signed [INCR_BITWIDTH-1:0] addr_incr; +wire [INCR_BITWIDTH-1:0] tail_incr; $TOP_MODULE_NAME$_controller controller_inst @@ -228,88 +218,67 @@ controller_inst .tail_incr(tail_incr) ); +// Counters/address registers +// Add a sign bit even to (most) unsigned counters and window_buffer_read_addr_reg, +// so we can use automatic sign extension and simplify calculations w/ signed increment. +// Alternatively, we could manually sign-extend and shave off a bit here or there. 
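
# Sketch only, not from the patch itself: why the extra sign bit mentioned in the comment
# above is needed. newest_buffered_elem is reset to -1 ("nothing buffered yet") and is
# compared against signed quantities, so it needs $clog2(LAST_READ_ELEM+1) value bits
# plus one sign bit. A quick width check in Python:
import math

def elem_counter_width(last_read_elem):
    value_bits = math.ceil(math.log2(last_read_elem + 1))   # enough for 0..LAST_READ_ELEM
    return value_bits + 1                                    # plus sign bit for the -1 reset

# 24x24 input, SIMD = C: LAST_READ_ELEM = 575 -> 10 value bits + 1 sign bit = 11 bits
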
+reg signed [$clog2(LAST_READ_ELEM+1)+1-1:0] newest_buffered_elem; +reg [$clog2(LAST_READ_ELEM+1)+1-1:0] current_elem; +reg [$clog2(LAST_READ_ELEM+1)+1-1:0] first_elem_next_window; +reg [$clog2(ELEM_PER_WINDOW)-1:0] k; +reg [$clog2(BUF_ELEM_TOTAL)+1-1:0] window_buffer_read_addr_reg; + +// Control signals/registers +wire read_cmd; +wire read_ok; wire reading_done; -assign reading_done = newest_buffered_elem == LAST_READ_ELEM; +wire fetch_cmd; reg fetching_done; -reg writing_done; //instead of a separate write cycle/element counter, trigger this flag once current_element reaches LAST_WRITE_ELEM -//assign writing_done = current_elem == LAST_WRITE_ELEM; - +reg write_cmd; +wire write_ok; wire write_blocked; +reg writing_done; -//reg write_prefetch_available; // stores if the write of prefetched data is still outstanding - -wire fetch_cmd; -assign fetch_cmd = !(current_elem > newest_buffered_elem) && !write_blocked && !fetching_done; - - -//determine whether to read/write in this cycle -//wire write_cmd; -//assign write_cmd = write_prefetch_available && !writing_done; -reg write_cmd; - - - -wire read_cmd; assign read_cmd = - ( - ( - (newest_buffered_elem - BUF_ELEM_TOTAL+1) < first_elem_next_window - &&(newest_buffered_elem - BUF_ELEM_TOTAL+1) < current_elem - ) // (over-)write to buffer if oldest buffered element is no longer needed - || fetching_done - ) //or if fetching is done (e.g. for skipped rows at FM end due to stride) - && !reading_done; //and if there is still an input element left to read - -//todo: optmize (e.g. is < or != more efficient?) -// ToDo: ideally this should point to the oldest elem of the next window, -// to allow reading while still writing the remainder of the current window - - - -assign write_blocked = write_cmd && !out_V_V_TREADY; //&& !write_done; + (( + $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(first_elem_next_window) + && $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(current_elem) + ) // (over-)write to buffer if oldest buffered element will no longer be needed + || fetching_done //or if fetching is done (e.g. 
for skipped rows at FM end due to stride) + ) + && !reading_done; //and if there is still an input element left to read +assign read_ok = read_cmd && in0_V_V_TVALID; +assign reading_done = newest_buffered_elem == LAST_READ_ELEM; -wire read_ok; -// with transition to next cycle: -// want to read can read source is ready (waiting on VALID allowed) -assign read_ok = read_cmd && !write_blocked && in0_V_V_TVALID; +assign fetch_cmd = !($signed(current_elem) > newest_buffered_elem) && !write_blocked && !fetching_done; -wire write_ok; -// with transition to next cycle: -// output is VALID sink is ready sink has already read (we are waiting on source) -//assign write_ok = write_cmd && (out_V_V_TREADY || write_done); assign write_ok = write_cmd && out_V_V_TREADY; - -//wire advance; -// includes waiting on W if W-only cycle: wait only on W no R/W to wait for -//assign advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); -//todo: optimize/simplify advance logic for write_done generation +assign write_blocked = write_cmd && !out_V_V_TREADY; //assign buffer control +assign window_buffer_read_addr = window_buffer_read_addr_reg; assign window_buffer_write_enable = read_ok; assign window_buffer_read_enable = fetch_cmd; -assign advance_controller = fetch_cmd; //write_ok +assign advance_controller = fetch_cmd; //assign I/O ports assign window_buffer_in = in0_V_V_TDATA; assign out_V_V_TDATA = window_buffer_out; assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) -assign out_V_V_TVALID = ap_rst_n && write_cmd; //&& !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) +assign out_V_V_TVALID = ap_rst_n && write_cmd; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) //main process for advancing counters always @ (posedge ap_clk) begin if (ap_rst_n == 1'b0) begin newest_buffered_elem <= -1; - //newest_buffered_elem_available <= -1; current_elem <= 0; - //current_elem_available <= 0; first_elem_next_window <= 0; k <= 0; window_buffer_read_addr_reg <= 0; fetching_done <= 0; writing_done <= 0; - //write_prefetch_available <= 0; write_cmd <= 0; end else begin if (read_ok) begin @@ -332,54 +301,42 @@ always @ (posedge ap_clk) begin //use increment value calculated by controller //keep track where we are within a window - if (k == ELEM_PER_WINDOW-1) + if (k == ELEM_PER_WINDOW - 1) k <= 0; else k <= k+1; - //absolute buffer address always wraps around (in both directions for depthwise support) - if ($signed(window_buffer_read_addr_reg + addr_incr) > BUF_ELEM_TOTAL-1) - window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr - BUF_ELEM_TOTAL; - else if ($signed(window_buffer_read_addr_reg + addr_incr) < 0) - window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr + BUF_ELEM_TOTAL; + //update first element of next window to allow buffer overwrite up until that point + if (k == 0) + first_elem_next_window <= first_elem_next_window + tail_incr; + + //absolute buffer address wrap-around + if ($signed(window_buffer_read_addr_reg) + addr_incr > BUF_ELEM_TOTAL - 1) + window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr - BUF_ELEM_TOTAL; + else if ($signed(window_buffer_read_addr_reg) + addr_incr < 0) + window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr + BUF_ELEM_TOTAL; else - window_buffer_read_addr_reg <= window_buffer_read_addr_reg + addr_incr; 
+ window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr; //check if this is the last write cycle (writing_done will be true afterwards) - if (current_elem == LAST_WRITE_ELEM) begin + if (current_elem == LAST_WRITE_ELEM) fetching_done <= 1; - end else begin - //current element index wraps around only at window boundary - //if (((current_elem + addr_incr) > BUF_ELEM_TOTAL-1) && (k == ELEM_PER_WINDOW-1)) - - //if (k == ELEM_PER_WINDOW-1) - // current_elem <= current_elem + addr_incr - BUF_ELEM_TOTAL; - //else - current_elem <= current_elem + addr_incr; - end - - if (k == 0) - first_elem_next_window <= first_elem_next_window + tail_incr; + else + current_elem <= $signed(current_elem) + addr_incr; // determine if prefetched data will be outstanding in the next cycle // if we fetch in this cycle -> yes - // if we do not fetch nor write successfully -> do not change - // if we do not fetch but write -> clear outstanding data - //write_prefetch_available <= fetch_cmd; + // if we do not fetch nor write -> do not change + // if we do not fetch but write successfully-> clear outstanding data write_cmd <= fetch_cmd; end if (write_ok) - // determine if prefetched data will be outstanding in the next cycle - // if we fetch in this cycle -> yes - // if we do not fetch nor write successfully -> do not change - // if we do not fetch but write -> clear outstanding data - //write_prefetch_available <= fetch_cmd; write_cmd <= fetch_cmd; if (write_ok && fetching_done) begin //check if this is the last write cycle (writing_done will be true afterwards) - if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM-1))) begin + if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM - 1))) begin //start processing of next FM if reading is done already, or completes in the same cycle newest_buffered_elem <= -1; current_elem <= 0; @@ -388,11 +345,6 @@ always @ (posedge ap_clk) begin end else writing_done <= 1; end - - //if (advance) - // write_done <= 1'b0; //reset flag - //else if (write_ok) // successful W in this cycle, but R still outstanding - // write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
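
# Behavioral sketch only, not from the patch itself: the read-address update above in
# plain Python. addr_incr may be negative (depthwise loop reordering), so the pointer
# wraps in both directions within the cyclic buffer of BUF_ELEM_TOTAL slots.
def next_read_addr(read_addr, addr_incr, buf_elem_total):
    nxt = read_addr + addr_incr
    if nxt > buf_elem_total - 1:
        return nxt - buf_elem_total
    if nxt < 0:
        return nxt + buf_elem_total
    return nxt

# next_read_addr(6, 3, 8)  -> 1   (forward wrap-around)
# next_read_addr(1, -3, 8) -> 6   (backward wrap-around, e.g. depthwise access pattern)
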
end end diff --git a/finn-rtllib/swg/swg_hdl_template.v b/finn-rtllib/swg/swg_template_parallel.sv similarity index 96% rename from finn-rtllib/swg/swg_hdl_template.v rename to finn-rtllib/swg/swg_template_parallel.sv index 89ebb8da51..7c1e042227 100755 --- a/finn-rtllib/swg/swg_hdl_template.v +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -9,7 +9,7 @@ module $TOP_MODULE_NAME$_controller ); input CLK; -input [31:0] cycle; //todo: minimize width or switch to single bit flag/advance wire +input [31:0] cycle; //todo: minimize width or switch to single bit flag output cmd_read; output cmd_write; @@ -159,6 +159,8 @@ input [WIDTH-1:0] shift_in; output [WIDTH-1:0] shift_out; output [WIDTH*DEPTH-1:0] data_out; +// ToDo: experiment with SRL instead of FF-based shift register +// by force or by achieving automatic SRL inference //UG901 template for SRL inference: // 32-bit Shift Register // Rising edge clock @@ -303,12 +305,12 @@ module $TOP_MODULE_NAME$_impl ( ); parameter BIT_WIDTH = $BIT_WIDTH$; -parameter SIMD = $SIMD$; //assuming SIMD=C for now -parameter MMV_IN = $MMV_IN$; //assuming MMV_IN=1*M for now -parameter MMV_OUT = $MMV_OUT$; //assuming MMV_OUT=K*M for now -parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; //bit-width*C*MMV_in -parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; //bit-width*C -parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; //bit-width*C*MMV_out +parameter SIMD = $SIMD$; //assuming SIMD = C for this implementation style +parameter MMV_IN = $MMV_IN$; +parameter MMV_OUT = $MMV_OUT$; +parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; +parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; +parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; parameter CYCLES_TOTAL = $CYCLES_TOTAL$; parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; diff --git a/finn-rtllib/swg/swg_hdl_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v similarity index 100% rename from finn-rtllib/swg/swg_hdl_template_wrapper.v rename to finn-rtllib/swg/swg_template_wrapper.v diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 4b31b7c973..1aeeb9a1ee 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -47,12 +47,15 @@ except ModuleNotFoundError: PyVerilator = None -# note: the actual data layout produced by the hlslib kernels is different -# for depthwise and non-depthwise ops. +# RTL Convolution Input Generator / Sliding Window Generator (SWG) +# Matches and extends the functionality of all ConvolutionInputGenerator_* functions +# in finn-hlslib by generating HDL code for two different implementation styles: +# - Addressable cyclic buffer: to be used when out_width <= in_width +# - Parallel registers + line buffers: to be used when out_width > in_width +# Supports non-square, 1D, strided, dilated, and depthwise convolutions. 
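
# Sketch only, not from the patch itself: the style decision expressed in terms of the
# stream widths mentioned in the comment above. With MMV_IN = M and MMV_OUT = M*K (set
# when parallel_window is enabled), out_width > in_width forces the register-based style.
def choose_impl_style(bit_width, simd, mmv_in, mmv_out):
    in_width = bit_width * simd * mmv_in
    out_width = bit_width * simd * mmv_out
    return "parallel" if out_width > in_width else "default"

# choose_impl_style(4, 8, 1, 1) -> "default"   (addressable cyclic buffer)
# choose_impl_style(4, 8, 1, 9) -> "parallel"  (full 3x3 window emitted per cycle)
# Note: generate_hdl() additionally routes 1x1 kernels to the parallel style.
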
+# Note: the actual data layout produced is different for depthwise and non-depthwise ops: # * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) # * depthwise SWG: (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD) -# see test_fpgadataflow_slidingwindow.py for an example of how to transform -# between the two layouts class ConvolutionInputGenerator_rtl(HLSCustomOp): """Class that does not correspond to one of the finn-hlslib ConvolutionInputGenerator @@ -201,54 +204,22 @@ def get_number_output_values(self): num_output_elems = np.prod(folded_oshape[:-1]) return num_output_elems - def get_1d_conv_attrs_normalized(self): - # support both (1, D) and (D, 1) cases transparently: - # For the kernel, presenting the input data of size D as - # [H, W] = [Y, X] = [1, D] or [D, 1] - # effectively gives the same result. Because the - # ConvolutionInputGenerator_NonSquare_Dilated(_dws) kernel currently only - # supports dilation>1 along the X-axis and the - # ConvolutionInputGenerator_NonSquare only works for stride>1 along the - # X-axis, we are working with the following assumption: - # the dummy ('1') dimension is the Y-dimension, i.e. - # images and kernels (and their attributes) of dimension - # [H, W] = [Y, X] = [D, 1] or [1, D] are always mapped to [1, D] + def get_exp_cycles(self): + # TODO: update + simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") ofm_dim = self.get_nodeattr("OFMDim") stride = self.get_nodeattr("Stride") dilation = self.get_nodeattr("Dilation") - - # see defines() for an explanation - if ifm_dim[1] == 1: - ifm_dim = ifm_dim[::-1] - ofm_dim = ofm_dim[::-1] - k = k[::-1] - stride = stride[::-1] - dilation = dilation[::-1] - - return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) - - def get_exp_cycles(self): - simd = self.get_nodeattr("SIMD") - ( - ifm_ch, - ifm_dim, - ofm_dim, - k, - stride, - dilation, - ) = self.get_1d_conv_attrs_normalized() ifm_dim_h, ifm_dim_w = ifm_dim ofm_dim_h, ofm_dim_w = ofm_dim k_h, k_w = k stride_h, stride_w = stride dilation_h, dilation_w = dilation - - # since mmv != 1 is not supported yet, we set mmv for now to 1 mmv = 1 - # see https://github.com/Xilinx/finn-hlslib/blob/master/slidingwindow.h + if (self.get_nodeattr("parallel_window")): exp_cycles = ifm_dim_w + 1 else: @@ -262,7 +233,7 @@ def get_exp_cycles(self): return int(exp_cycles) def bram_estimation(self): - # NOTE: not tested for correctness + # TODO: update simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") ifm_dim = np.prod(self.get_nodeattr("IFMDim")) @@ -294,6 +265,7 @@ def bram_estimation(self): return 0 def lut_estimation(self): + # TODO: update # NOTE: not tested for correctness simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") @@ -315,6 +287,7 @@ def lut_estimation(self): return 300 + ram_luts def uram_estimation(self): + # TODO: update # NOTE: not tested for correctness simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") @@ -375,7 +348,7 @@ def execute_node(self, context, graph): # pad test input stream to work when IFMdim % M != 0 # during normal operation, the AXI Stream should not care, in the last cycle garbage elements are read but not used - # ToDo: only works for 1D case + # TODO: only works for 1D case mmv_stream_padding_px = int((np.prod(folded_ishape) - np.prod(inp.shape)) / exp_ishape[-1]) if exp_ishape [2] == 1: inp = np.pad(inp, 
((0,0),(0,mmv_stream_padding_px),(0,0),(0,0)), 'constant') @@ -447,12 +420,10 @@ def pragmas(self): def generate_hdl(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - f_debug = open(os.path.join(code_gen_dir, "swg_hdl_debuginfo.log"), "w") + #f_debug = open(os.path.join(code_gen_dir, "swg_hdl_debuginfo.log"), "w") code_gen_dict = {} - #-------------------- - # init hyperparameters - # for 1D case: it does not matter if dummy dim is x or y + ##### BEGIN INITIALIZE/CHECK CONFIGURATION ##### ifm_ch = self.get_nodeattr("IFMChannels") k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") @@ -463,51 +434,16 @@ def generate_hdl(self): n = 1 h, w = ifm_dim - c = 1 # ifm_ch not considered atm (always parallelize across c) + c = 1 # assume SIMD=C (parallelize across all channels) k_h, k_w = k pad = [0,0,0,0] # padding happens in separate padding node for now pad_val = 0 stride_h, stride_w = stride dilation_h, dilation_w = dilation - conv_c = 99 - - # init folding config - simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") - if (self.get_nodeattr("parallel_window")): - mmv_in = M*1 - mmv_out = M*k_h*k_w - assert ifm_ch==simd, "Constraint violated: SIMD must be equal to C" - else: - mmv_in = 1 - mmv_out = 1 - assert ifm_ch%simd==0, "Constraint violated: SIMD must divide C" - - # todo: check allowed hyperparams - # ToDo: move/duplicate these checks in corresponding convert_to_hls transformation - - # choose implementation style - if (mmv_out > 1 or (k_h==1 and k_w==1)): - impl_style = "parallel" - else: - impl_style = "default" - - # how many "unused" registers are allowed between buffer positions that will be accessed in parallel - # example: - # 0: only consecutive access patterns will be implemented in regs, rest in BRAM line buffers - # 2: [0, 3, 6] access pattern is still allowed and will be implemented with 1 7-position shift reg - REG_BRAM_THRESHOLD = 8 - #-------------------- in_shape = (n,c,h,w) #NCHW in_image = np.empty(in_shape, dtype=int) - - for index, x in np.ndenumerate(in_image): - # "HWC" dummy values - val = int((index[2]+1)*100+(index[3]+1)*10+(index[1]+1)*1) - in_image[index] = val - in_image_padded = np.pad( in_image, ((0, 0), (0, 0), (pad[0], pad[2]), (pad[1], pad[3])), @@ -523,416 +459,40 @@ def generate_hdl(self): out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) - f_debug.write("\n"+"in shape " + str(in_shape)) - f_debug.write("\n"+"in shape padded " + str(in_shape_padded)) - f_debug.write("\n"+"conv out shape " + str((n,conv_c,out_dim_h,out_dim_w))) - f_debug.write("\n"+"im2col out shape " + str((n,out_dim_h,out_dim_w,k_h*k_w*c))) - - idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( - in_shape, - k_h, - k_w, - pad, - stride_h, - stride_w, - dilation_h, - dilation_w - ) - - f_debug.write("\n"+"c indices") - f_debug.write("\n"+str(idx_c)) - f_debug.write("\n"+"h indices") - f_debug.write("\n"+str(idx_h)) - f_debug.write("\n"+"w indices") - f_debug.write("\n"+str(idx_w)) - - cols = in_image_padded[:, idx_c, idx_h, idx_w] - cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) - - f_debug.write("\n"+"cols (shape %s)" % str(cols.shape)) - f_debug.write("\n"+str(cols)) - - # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW - out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) - # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) - out_image = out_image.transpose(0, 4, 5, 2, 3, 1) - 
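
# Sketch only, not from the patch itself: the output dimension obtained via
# im2col.compute_conv_output_dim above follows the usual convolution arithmetic, shown
# here inline for the dilated test configuration (ifm 24, k 3, stride 3, dilation 2, no pad).
def conv_output_dim(ifm_dim, k, stride, total_pad=0, dilation=1):
    k_eff = dilation * (k - 1) + 1          # effective (dilated) kernel size
    return (ifm_dim + total_pad - k_eff) // stride + 1

# conv_output_dim(24, 3, 3, 0, 2) -> (24 - 5) // 3 + 1 = 7
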
out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) - - f_debug.write("\n"+"output (shape %s)" % str(out_image.shape)) - f_debug.write("\n"+str(out_image)) - - f_debug.write("\n"+"h indices") - f_debug.write("\n"+str(idx_h)) - f_debug.write("\n"+"w indices") - f_debug.write("\n"+str(idx_w)) - - idx_px = idx_h*w+idx_w - f_debug.write("\n"+"sequential pixel indices (shape %s" % str(idx_px.shape)) - f_debug.write("\n"+str(idx_px)) - - k, cycles = idx_px.shape - - output_elements = mmv_out - output_cycles = int(cycles/(mmv_out/k)) - - # ToDo: what happens when output_cycles=OFMdim % M != 0 - # ...try to support IFMdim % M != 0 first, so we can work with the usual k=3 where OFMdim = IFMdim - -2 - # the additional garbage input elements that are read in the last cycle are not read by any window anyway - idx_px = idx_px.transpose() - idx_px = idx_px.reshape(output_cycles, output_elements) - idx_px = idx_px.transpose() - - # result: first dim is number of parallel output elements, second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle - f_debug.write("\n"+"sequential pixel indices, MMV_out grouping (shape %s" % str(idx_px.shape)) - f_debug.write("\n"+str(idx_px)) - #f_debug.close() - - buffer = [] - buffer_max_size = 0 - # buffer schedule (write from input, read to output) - schedule_write = [] - schedule_read = [] - schedule_shift = [] - - - schedule = [] - schedule_prev = '' - - next_in_px = 0 - oldest_px = 0 - buffer_space_freed = False - - idx_px_relative = idx_px.copy() - idx_px_addr = idx_px.copy() - idx_px_addr_incr = idx_px.copy() - idx_px_addr_rel = idx_px.copy() - - # compute schedule and buffer read pattern (output driven) - output_elem, output_cycles = idx_px_relative.shape - - if (impl_style == "parallel"): - for x in range(output_cycles): - # load missing inputs into buffer - for y in range(output_elem): - while int(idx_px_relative[y,x]) not in buffer: - # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 - schedule_write.append(1) - schedule_read.append(0) - if schedule_prev == 'w': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'w')) - schedule_prev = 'w' - - # discard unused buffer elements - oldest_px = np.min(idx_px_relative[:,x:]) - #check whether M elements can be shifted out, not just the single oldest one - # must this be "while" for MMV to work?!? 
breaks mmvout = 1 case - #while all([buffer[i] < oldest_px for i in range(M)]): - if all([buffer[i] < oldest_px for i in range(M)]): - # M buffer elements are shifted out at once - for m in range(M): - buffer.pop(0) - - # adjust relative buffer index of current x (according to last discarded buffer elements) - for y in range(output_elem): - idx_px_relative[y,x] -= oldest_px - - - # read from buffer - # + simultaneously load next pixel(s) into buffer if there are any left - if (next_in_px > (h_padded*w_padded-1)): - # read only (append above) - schedule_read.append(1) - schedule_write.append(0) - if schedule_prev == 'r': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'r')) - schedule_prev = 'r' - else: - # load M inputs at once - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 - schedule_read.append(1) - schedule_write.append(1) - if schedule_prev == 'wr': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'wr')) - schedule_prev = 'wr' - - # record max needed buffer depth - #f_debug.write("\n"+str(buffer)) - if len(buffer) > buffer_max_size: - buffer_max_size = len(buffer) - - # insert dummy write operations for data at the input FM tail-end that is never read (e.g. in case of stride > 1) - while next_in_px <= (h_padded*w_padded-1): - next_in_px += 1 - schedule_write.append(1) - schedule_read.append(0) - if schedule_prev == 'w': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'w')) - schedule_prev = 'w' - - # find buffer access patterns - buffer_access_patterns = [] - for x in range(output_cycles): - if idx_px_relative[:,x].tolist() not in buffer_access_patterns: - buffer_access_patterns.append(idx_px_relative[:,x].tolist()) - - + # init folding config + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") + if (self.get_nodeattr("parallel_window")): + mmv_in = M*1 + mmv_out = M*k_h*k_w + assert ifm_ch==simd, "Constraint violated: SIMD must be equal to C" else: + mmv_in = 1 + mmv_out = 1 + assert ifm_ch%simd==0, "Constraint violated: SIMD must divide C" - #simulate cyclic buffer, which is advanced on every write (as opposed to on overy sheduled cycle) - #buffer_tail = 0 - buffer_head = 0 #buffer_tail+1 - # compute minimal buffer length (assuming it holds 1 complete window) - buffer_len = (k_h-1) * dilation_h * w + (k_w-1) * dilation_w + 1 - buffer = [-1] * buffer_len - - # todo: remove this simulation, not needed and doesnt accout for SIMD anyways - for x in range(output_cycles): - - # load missing inputs into buffer - while int(idx_px_relative[0,x]) not in buffer: - # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) - for m in range(M): - #buffer.append(next_in_px) - buffer[buffer_head] = next_in_px - next_in_px += 1 - schedule_write.append(1) - schedule_read.append(0) - if schedule_prev == 'w': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'w')) - schedule_prev = 'w' - - #try to advance/shift the buffer by one, discarding the oldest element - #discard_oldest_elem = buffer[0] < np.min(idx_px_relative[0,x:]) - #if discard_oldest_elem: - # buffer.pop(0) - # schedule_shift.append(1) - #else: - # schedule_shift.append(0) - buffer_head += 1 - if buffer_head > buffer_len-1: - buffer_head = 0 - - ### perform read ### - - #try to advance/shift the buffer by one, discarding the oldest element - #discard_oldest_elem = buffer[0] < np.min(idx_px_relative[0,x:]) - 
#if discard_oldest_elem: - # buffer.pop(0) - # schedule_shift.append(1) - #else: - # schedule_shift.append(0) - - # note current relative addr in buffer - idx_px_addr[0,x] = buffer.index(idx_px_relative[0,x]) - if x > 0: - idx_px_addr_incr[0,x] = idx_px_addr[0,x] - idx_px_addr[0,x-1] - if idx_px_addr_incr[0,x] < 0: - idx_px_addr_incr[0,x] += buffer_len - else: - idx_px_addr_incr[0,x] = idx_px_addr[0,x] - - idx_px_addr_rel [0,x] = buffer.index(idx_px_relative[0,x]) - buffer_head - if idx_px_addr_rel [0,x] < 0: - idx_px_addr_rel [0,x] += buffer_len - - - #try to write a new input into the buffer simultaneously (during this read as opposed to before the next read) - # assume in-order write into the buffer (oldest element is always at head+1) - discard_oldest_elem = np.min(buffer) < np.min(idx_px_relative[0,x:]) - read_only = True - if not (next_in_px > (h_padded*w_padded-1)): - # input data available - #if (x < k_h*k_w) or discard_oldest_elem: - if discard_oldest_elem: - # buffer is not complete, as the first window has not been fully output - # or we can discard one element from the buffer after this read, so there is space for a new one - read_only = False - - - # read from buffer - # + simultaneously load next pixel(s) into buffer if there are any left - # if mmv_out = 1: addressable BRAM implementation style -> do not shift in while outputting K kernel elements to keep addressing consistent - #if (next_in_px > (h_padded*w_padded-1)) or ((x+1) % (k_h*k_w) != 0): - #if (next_in_px > (h_padded*w_padded-1)) or (x > 1 and (not buffer_space_freed)): - if read_only: - # read only - schedule_read.append(1) - schedule_write.append(0) - if schedule_prev == 'r': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'r')) - schedule_prev = 'r' - else: - # read + write - #buffer.append(next_in_px) - buffer[buffer_head] = next_in_px - next_in_px += 1 - schedule_read.append(1) - schedule_write.append(1) - if schedule_prev == 'wr': - count, cmd = schedule[-1] - schedule[-1] = (count+1, cmd) - else: - schedule.append((1, 'wr')) - schedule_prev = 'wr' - - # advance buffer - buffer_head += 1 - if buffer_head > buffer_len-1: - buffer_head = 0 - - # record max needed buffer depth - #f_debug.write("\n"+str(buffer)) - if len(buffer) > buffer_max_size: - buffer_max_size = len(buffer) - - # ToDo: maybe replace with directly-computed schedule (similar to addr. buffer impl. 
style) - def compact_schedule(schedule): - - # leave first sequence (pre-load) as is - start_sequence = schedule[0] - - loop_sequence_1_counter = 1 - loop_sequence_1 = schedule[1] - - loop_counter = 0 - loop_sequence_2 = None - end_sequence = None - - i = 2 - if i < len(schedule): - loop_sequence_1 += schedule[i] - i += 1 - - while i+1 < len(schedule): - candidate = schedule[i] + schedule[i+1] - if candidate == loop_sequence_1: - loop_sequence_1_counter += 1 - i += 2 - else: - break - - if i < len(schedule): - loop_sequence_2 = schedule[i] - i += 1 - - if i+1 < len(schedule): - candidate = schedule[i] + schedule[i+1] - if candidate != loop_sequence_1: - loop_sequence_2 += schedule[i] - - i -= 1 - loop_sequence_total_len = (int(len(loop_sequence_2)/2)) + loop_sequence_1_counter*(int(len(loop_sequence_1)/2)) - loop_sequence_total = loop_sequence_2 + loop_sequence_1_counter*loop_sequence_1 - while i+loop_sequence_total_len < len(schedule): - candidate = schedule[i] - for x in range (i+1, i+loop_sequence_total_len): - candidate += schedule[x] - - if candidate == loop_sequence_total: - loop_counter += 1 - i += loop_sequence_total_len - else: - break + # TODO: check allowed hyperparams + # for 1D case: it does not matter if dummy dim is x or y + # TODO: move/duplicate these checks in corresponding convert_to_hls transformation (?) - else: - if i < len(schedule): - end_sequence = loop_sequence_2 + schedule[i] - i += 1 - loop_sequence_2 = None - else: - end_sequence = loop_sequence_2 - loop_sequence_2 = None - - if i < len(schedule): - end_sequence = schedule[i] - i += 1 - - if i < len(schedule): - end_sequence = end_sequence + schedule[i] - i += 1 - - assert len(start_sequence) == 1*2, "ERROR: invalid start sequence" - assert len(loop_sequence_1) == 2*2, "ERROR: invalid loop 1 sequence" - if loop_sequence_2: - assert len(loop_sequence_2) <= 2*2, "ERROR: invalid loop 2 sequence" - if end_sequence: - assert len(end_sequence) <= 2*2, "ERROR: invalid end sequence" - assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" %(i, len(schedule)) - - return ( - start_sequence, - loop_counter, - loop_sequence_1_counter, - loop_sequence_1, - loop_sequence_2, - end_sequence - ) + # choose implementation style + if (mmv_out > 1 or (k_h==1 and k_w==1)): + impl_style = "parallel" + else: + impl_style = "default" - f_debug.write("\n"+"max buffer size observed: %d" %(buffer_max_size)) - f_debug.write("\n"+"output vector elements: relative buffer indices") - f_debug.write("\n"+str(idx_px_relative)) - f_debug.write("\n"+"output vector elements: absolute buffer address") - f_debug.write("\n"+str(idx_px_addr)) - f_debug.write("\n"+"output vector elements: absolute buffer address increment from last") - f_debug.write("\n"+str(idx_px_addr_incr)) - f_debug.write("\n"+"output vector elements: relative buffer address (from head)") - f_debug.write("\n"+str(idx_px_addr_rel)) - f_debug.write("\n"+"buffer write schedule (%d cycles)" % len(schedule_write)) - f_debug.write("\n"+str(schedule_write)) - f_debug.write("\n"+"writing buffer in %d cycles" % schedule_write.count(1)) - #f_debug.write("\n"+"buffer write schedule COMPRESSED") - #f_debug.write("\n"+str(schedule_write_compressed)) - #f_debug.write("\n"+"buffer write schedule ANALYZED") - #f_debug.write("\n"+str(analyse_schedule(schedule_write))) - f_debug.write("\n"+"buffer read schedule (%d cycles)" % len(schedule_read)) - f_debug.write("\n"+str(schedule_read)) - f_debug.write("\n"+"reading buffer in %d cycles" % schedule_read.count(1)) - - 
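A minimal sketch of the buffer sizing that the default-style code generation below relies on, assuming the same variable meanings as the surrounding code (w is the padded input width, channel_factor is ifm_ch divided by simd); the concrete numbers are only an illustration:

    def min_buffer_words(k_h, k_w, dilation_h, dilation_w, w, ifm_ch, simd):
        # minimal cyclic buffer: one full window plus the partial rows between kernel rows,
        # counted in SIMD-wide words (channel_factor words per pixel)
        channel_factor = ifm_ch // simd
        return ((k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1) * channel_factor

    # e.g. k=3x3, dilation=1, w=13, ifm_ch=6, simd=3: (2*13 + 2 + 1) * 2 = 58 words
    assert min_buffer_words(3, 3, 1, 1, 13, 6, 3) == 58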
#f_debug.write("\n"+"buffer shift schedule (%d cycles)" % len(schedule_shift)) - #f_debug.write("\n"+str(schedule_shift)) - #f_debug.write("\n"+"shifting buffer in %d cycles" % schedule_shift.count(1)) - #f_debug.write("\n"+"buffer read schedule COMPRESSED") - #f_debug.write("\n"+str(schedule_read_compressed)) - #f_debug.write("\n"+"buffer read schedule ANALYZED") - #f_debug.write("\n"+str(analyse_schedule(schedule_read))) - - addr_incr_end_window_elem = 0 - addr_incr_end_window_row = 0 - addr_incr_end_window = 0 - addr_incr_end_row = 0 + ##### END INITIALIZE/CHECK CONFIGURATION ##### + ##### BEGIN CODE GEN FOR DEFAULT STYLE ##### if (impl_style == "default"): - f_debug.write("\n"+"mmv_out = 1: computing incremental addressing scheme directly:") - addressing_scheme = [[0]] + # Default implementation style for MMV_out = 1: addressable cyclic buffer + # Computing incremental addressing scheme directly.. # compute index/address increments for each nested loop channel_factor = int(ifm_ch/simd) - #todo: rename to (min) buffer len - buffer_max_size = buffer_max_size * channel_factor + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_min_size = ((k_h-1) * dilation_h * w + (k_w-1) * dilation_w + 1) * channel_factor kernel_width = (k_w-1)*dilation_w+1 # incl. dilation addr_incr_end_simd = 1 @@ -942,17 +502,13 @@ def compact_schedule(schedule): skip_lines = (dilation_h-1) * w * channel_factor addr_incr_end_window_row = remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer - #addr_incr_end_window = stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer - addr_incr_end_window = -buffer_max_size + stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer + addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer # rows that are skipped due to imperfect stride<->W combination skip_columns = w%(kernel_width + (out_dim_w-1)*stride_w) remaining_line = (skip_columns + kernel_width) * channel_factor # increment from oldest buffer position (top left) to end of line skip_lines = (stride_h-1) * w * channel_factor - #addr_incr_end_row = remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer - addr_incr_end_row = -buffer_max_size + remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer - - + addr_incr_end_row = -buffer_min_size + remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer if (depthwise): addr_incr_end_window_elem = dilation_w * channel_factor @@ -960,85 +516,11 @@ def compact_schedule(schedule): + (w - kernel_width) * channel_factor + (dilation_h-1) * w * channel_factor ) - addr_incr_end_simd = -buffer_max_size + (channel_factor + 1) - #addr_incr_end_simd = channel_factor + 1 - - # just for testing: - for i_windows_per_h in range(out_dim_h): # LOOP_H - for i_windows_per_w in range(out_dim_w): # LOOP_W - for i_simd_per_px in range(channel_factor): # LOOP_SIMD - for i_px_per_window_h in range(k_h): # LOOP_KH - for i_px_per_window_w in range(k_w-1): # LOOP_KW - addressing_scheme[0].append(addr_incr_end_window_elem) - if i_px_per_window_h != k_h-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_window_row) - if i_simd_per_px != channel_factor-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_simd) - if i_windows_per_w != out_dim_w-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_window) - if i_windows_per_h != out_dim_h-1: # skip on 
last iteration - addressing_scheme[0].append(addr_incr_end_row) - else: - # just for testing: - for i_windows_per_h in range(out_dim_h): # LOOP_H - for i_windows_per_w in range(out_dim_w): # LOOP_W - for i_px_per_window_h in range(k_h): # LOOP_KH - for i_px_per_window_w in range(k_w): # LOOP_KW - for i_simd_per_px in range(channel_factor-1): # LOOP_SIMD - addressing_scheme[0].append(addr_incr_end_simd) - if i_px_per_window_w != k_w-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_window_elem) - if i_px_per_window_h != k_h-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_window_row) - if i_windows_per_w != out_dim_w-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_window) - if i_windows_per_h != out_dim_h-1: # skip on last iteration - addressing_scheme[0].append(addr_incr_end_row) - - f_debug.write("\n"+str(np.array(addressing_scheme))) - if simd == ifm_ch: - # simd < c currently not simulated - if (np.array(addressing_scheme) == idx_px_addr_incr).all: - f_debug.write("\n"+"computed addressing matches simulated addressing") - else: - f_debug.write("\n"+"ERROR") - else: - f_debug.write("\n"+"found %d buffer access patterns:" % len(buffer_access_patterns)) - f_debug.write("\n"+str(buffer_access_patterns)) - f_debug.write("\n"+"required parallel-access registers for mmv_out=k: %d" % len(sum(buffer_access_patterns,[]))) - f_debug.write("\n"+"buffer rw schedule NEW") - f_debug.write("\n"+str(schedule)) - f_debug.write("\n"+"buffer rw schedule NEW compacted") - f_debug.write("\n"+"\nstart_sequence: %s\nloop_counter: %s\nloop_sequence_1_counter: %s\nloop_sequence_1: %s\nloop_sequence_2: %s\nend_sequence: %s\n" % compact_schedule(schedule)) - assert len(schedule_write) == len(schedule_read), "ERROR: Schedules have different lenghts" - assert schedule_write.count(1) == self.get_number_input_values(), "ERROR: Writing buffer in fewer cycles than expected" - assert schedule_read.count(1) == self.get_number_output_values(), "ERROR: Reading buffer in fewer cycles than expected" - cycles_total = len(schedule_write) - - - - code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] - #save top module name so we can refer to it even after this node has been renamed (e.g. 
by GiveUniqueNodeNames(prefix) during MakeZynqProject) - self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) - code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] - code_gen_dict["$SIMD$"] = [str(simd)] - code_gen_dict["$MMV_IN$"] = [str(mmv_in)] - code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - - - ram_style = self.get_nodeattr("ram_style") - if ram_style == "auto": - code_gen_dict["$RAM_STYLE$"]=[""] - else: - code_gen_dict["$RAM_STYLE$"]=["(* ram_style = \"{}\" *)".format(ram_style)] - - if (impl_style == "default"): - ### MMVout = 1: addressable buffer implementation style - f_debug.write("\n"+"Choosing implementation style: Addressable buffer due to mmv_out=1") + addr_incr_end_simd = -buffer_min_size + (channel_factor + 1) # add additional buffer space in case of stride > 1 # this minimizes cycle count, as it allows an earlier pre-load of skipped input elements - buffer_actual_size = (buffer_max_size + max(0,((stride_w-1) - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor) + buffer_actual_size = (buffer_min_size + max(0,((stride_w-1) - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor) + max(0,((stride_h-1)*w - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor)) code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] @@ -1068,29 +550,39 @@ def compact_schedule(schedule): addr_incr_end_window_elem = addr_incr_end_window_row addr_incr_end_window_row = addr_incr_end_simd_ elem_per_window = k_h*k_w - + + tail_incr_w = addr_incr_end_window + buffer_min_size - channel_factor + tail_incr_h = addr_incr_end_row + buffer_min_size - channel_factor + tail_incr_last_window = buffer_min_size-1 code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin if (counter_loop_kh != 0) - tail_incr = 1; + tail_incr_reg = 1; else if (counter_loop_w != 0) - tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_W]-{channel_factor}+{buffer_min_size}; - else // do not check for counter_loop_h to increment past LAST_WRITE_ELEM during last window - tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_H]-{channel_factor}+{buffer_min_size}; + tail_incr_reg = {}; + else if (counter_loop_h != 0) + tail_incr_reg = {}; + else + tail_incr_reg = {}; end - """.format(channel_factor=channel_factor, buffer_min_size=buffer_max_size)] + """.format(tail_incr_w, tail_incr_h, tail_incr_last_window)] else: # depthwise output format is equivalent to non-depthwise if SIMD=C elem_per_window = k_h*k_w*channel_factor + tail_incr_w = addr_incr_end_window + buffer_min_size - 1 + tail_incr_h = addr_incr_end_row + buffer_min_size - 1 + tail_incr_last_window = buffer_min_size-1 code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" always @ (counter_loop_w, counter_loop_h) begin if (counter_loop_w != 0) - tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_W]-1+{buffer_min_size}; - else // do not check for counter_loop_h to increment past LAST_WRITE_ELEM during last window - tail_incr = ADDR_INCREMENT_MAP[STATE_LOOP_H]-1+{buffer_min_size}; + tail_incr_reg = {}; + else if (counter_loop_h != 0) + tail_incr_reg = {}; + else + tail_incr_reg = {}; end - """.format(buffer_min_size=buffer_max_size)] + """.format(tail_incr_w, tail_incr_h, tail_incr_last_window)] # support SIMD = C and k_w = 1 cases # for k = [k_h, k_w] = [1, k_w], no adjustment is needed @@ -1115,22 +607,215 @@ def compact_schedule(schedule): code_gen_dict["$LOOP_KW_ITERATIONS$"]=[str(loop_kw_iterations-1)] code_gen_dict["$LOOP_SIMD_ITERATIONS$"]=[str(loop_simd_iterations-1)] - w = 32 #ToDo: minimize - 
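A minimal sketch of how the replacement hunk below sizes the increment signals, assuming the same increment values as the surrounding code: incr_bitwidth is one sign bit plus enough magnitude bits for the largest absolute increment. The helper name is illustrative only:

    import math

    def signed_incr_bitwidth(increments):
        # 1 sign bit + ceil(log2(max(|incr| + 1))) magnitude bits
        return 1 + math.ceil(math.log2(max(abs(i) + 1 for i in increments)))

    # e.g. increments {1, 3, 27, -58} fit into a 7-bit signed value (-64..63)
    assert signed_incr_bitwidth([1, 3, 27, -58]) == 7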
code_gen_dict["$ADDR_INCREMENT_MAP$"]=["'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format(w, - int(copysign(w,addr_incr_end_simd)),abs(addr_incr_end_simd), - int(copysign(w,addr_incr_end_window_elem)),abs(addr_incr_end_window_elem), - int(copysign(w,addr_incr_end_window_row)),abs(addr_incr_end_window_row), - int(copysign(w,addr_incr_end_window)),abs(addr_incr_end_window), - int(copysign(w,addr_incr_end_row)),abs(addr_incr_end_row))] + incr_bitwidth = 1 + math.ceil(math.log2(max(abs(addr_incr_end_simd)+1, + abs(addr_incr_end_window_elem)+1, + abs(addr_incr_end_window_row)+1, + abs(addr_incr_end_window)+1, + abs(addr_incr_end_row)+1, + abs(tail_incr_w)+1, + abs(tail_incr_h)+1, + abs(tail_incr_last_window)+1))) + code_gen_dict["$INCR_BITWIDTH$"] = [str(incr_bitwidth)] + code_gen_dict["$ADDR_INCREMENT_MAP$"]=["'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format(incr_bitwidth, + int(copysign(incr_bitwidth,addr_incr_end_simd)),abs(addr_incr_end_simd), + int(copysign(incr_bitwidth,addr_incr_end_window_elem)),abs(addr_incr_end_window_elem), + int(copysign(incr_bitwidth,addr_incr_end_window_row)),abs(addr_incr_end_window_row), + int(copysign(incr_bitwidth,addr_incr_end_window)),abs(addr_incr_end_window), + int(copysign(incr_bitwidth,addr_incr_end_row)),abs(addr_incr_end_row))] code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] - with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template_mmv_1.v", "r") as f: + with open("/workspace/finn/finn-rtllib/swg/swg_template_default.sv", "r") as f: template = f.read() - else: - f_debug.write("\n"+"Choosing implementation style: Parallel Registers (+ line buffers) due to mmv_out>1") + + ##### END CODE GEN FOR DEFAULT STYLE ##### + + ##### BEGIN CODE GEN FOR PARALLEL STYLE ##### + elif (impl_style == "parallel"): + # Out width > In width: Parallel implementation style using registers + line buffers + idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( + in_shape, + k_h, + k_w, + pad, + stride_h, + stride_w, + dilation_h, + dilation_w + ) + + cols = in_image_padded[:, idx_c, idx_h, idx_w] + cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) + + # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW + out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) + # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) + out_image = out_image.transpose(0, 4, 5, 2, 3, 1) + out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) + + idx_px = idx_h*w+idx_w # sequential pixel indices + + k, cycles = idx_px.shape + + output_elements = mmv_out + output_cycles = int(cycles/(mmv_out/k)) + + # TODO: what happens when output_cycles=OFMdim % M != 0 + # ...try to support IFMdim % M != 0 first, so we can work with the usual k=3 where OFMdim = IFMdim - -2 + # the additional garbage input elements that are read in the last cycle are not read by any window anyway + idx_px = idx_px.transpose() + idx_px = idx_px.reshape(output_cycles, output_elements) + idx_px = idx_px.transpose() + # result: first dim is number of parallel output elements, + # second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle + + buffer = [] + buffer_max_size = 0 + schedule = [] + next_in_px = 0 + oldest_px = 0 + + def schedule_append(schedule, op): + if len(schedule) > 0 and schedule[-1][1] == op: + count, op_ = schedule[-1] + schedule[-1] = (count+1, op_) + else: + schedule.append((1, op)) + return schedule + + # compute schedule and buffer read pattern (output driven) + idx_px_relative 
= idx_px.copy() + output_elem, output_cycles = idx_px_relative.shape + + for x in range(output_cycles): + # load missing inputs into buffer + for y in range(output_elem): + while int(idx_px_relative[y,x]) not in buffer: + # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) + for m in range(M): + buffer.append(next_in_px) + next_in_px += 1 + schedule = schedule_append(schedule,'w') + + # discard unused buffer elements + oldest_px = np.min(idx_px_relative[:,x:]) + #check whether M elements can be shifted out, not just the single oldest one + #while all([buffer[i] < oldest_px for i in range(M)]): + if all([buffer[i] < oldest_px for i in range(M)]): + # M buffer elements are shifted out at once + for m in range(M): + buffer.pop(0) + + # adjust relative buffer index of current x (according to last discarded buffer elements) + for y in range(output_elem): + idx_px_relative[y,x] -= oldest_px + + # read from buffer + # + simultaneously load next pixel(s) into buffer if there are any left + if (next_in_px > (h_padded*w_padded-1)): + # read only (append above) + schedule = schedule_append(schedule,'r') + else: + # load M inputs at once + for m in range(M): + buffer.append(next_in_px) + next_in_px += 1 + schedule = schedule_append(schedule,'wr') + + # record max needed buffer depth + if len(buffer) > buffer_max_size: + buffer_max_size = len(buffer) + + # insert dummy write operations for data at the input FM tail-end that is never read (e.g. in case of stride > 1) + while next_in_px <= (h_padded*w_padded-1): + next_in_px += 1 + schedule = schedule_append(schedule,'w') + + # find buffer access patterns + buffer_access_patterns = [] + for x in range(output_cycles): + if idx_px_relative[:,x].tolist() not in buffer_access_patterns: + buffer_access_patterns.append(idx_px_relative[:,x].tolist()) + + # Experimental implementation to map fixed controller loop structure to R/W schedule by analyzing + # the access pattern given by Im2Col, rather than direct computation. + # TODO: Probably replace this with a directly-computed schedule, similar to the default implementation style. 
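A minimal, stand-alone sketch of the run-length encoding that schedule_append builds up above: the schedule is a list of (repetition count, command) tuples, which compact_schedule below then tries to fold into a fixed start/loop/end structure for the controller. The function name rle is illustrative only:

    def rle(ops):
        # run-length encode a per-cycle command stream such as ['w', 'w', 'w', 'wr', 'r', 'r']
        out = []
        for op in ops:
            if out and out[-1][1] == op:
                out[-1] = (out[-1][0] + 1, op)
            else:
                out.append((1, op))
        return out

    assert rle(["w", "w", "w", "wr", "r", "r"]) == [(3, "w"), (1, "wr"), (2, "r")]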
+ def compact_schedule(schedule): + # leave first sequence (pre-load) as is + start_sequence = schedule[0] + loop_sequence_1_counter = 1 + loop_sequence_1 = schedule[1] + loop_counter = 0 + loop_sequence_2 = None + end_sequence = None + + i = 2 + if i < len(schedule): + loop_sequence_1 += schedule[i] + i += 1 + while i+1 < len(schedule): + candidate = schedule[i] + schedule[i+1] + if candidate == loop_sequence_1: + loop_sequence_1_counter += 1 + i += 2 + else: + break + + if i < len(schedule): + loop_sequence_2 = schedule[i] + i += 1 + if i+1 < len(schedule): + candidate = schedule[i] + schedule[i+1] + if candidate != loop_sequence_1: + loop_sequence_2 += schedule[i] + i -= 1 + loop_sequence_total_len = (int(len(loop_sequence_2)/2)) + loop_sequence_1_counter*(int(len(loop_sequence_1)/2)) + loop_sequence_total = loop_sequence_2 + loop_sequence_1_counter*loop_sequence_1 + while i+loop_sequence_total_len < len(schedule): + candidate = schedule[i] + for x in range (i+1, i+loop_sequence_total_len): + candidate += schedule[x] + + if candidate == loop_sequence_total: + loop_counter += 1 + i += loop_sequence_total_len + else: + break + else: + if i < len(schedule): + end_sequence = loop_sequence_2 + schedule[i] + i += 1 + loop_sequence_2 = None + else: + end_sequence = loop_sequence_2 + loop_sequence_2 = None + + if i < len(schedule): + end_sequence = schedule[i] + i += 1 + if i < len(schedule): + end_sequence = end_sequence + schedule[i] + i += 1 + + assert len(start_sequence) == 1*2, "ERROR: invalid start sequence" + assert len(loop_sequence_1) == 2*2, "ERROR: invalid loop 1 sequence" + if loop_sequence_2: + assert len(loop_sequence_2) <= 2*2, "ERROR: invalid loop 2 sequence" + if end_sequence: + assert len(end_sequence) <= 2*2, "ERROR: invalid end sequence" + assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" %(i, len(schedule)) + + return (start_sequence, loop_counter, loop_sequence_1_counter, + loop_sequence_1, loop_sequence_2, end_sequence) + ### determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) - # ToDo: this part doesn't fully account for M (2D buffer) yet + # TODO: this part doesn't fully account for M for 2D buffers yet + + # how many "unused" registers are allowed between buffer positions that will be accessed in parallel + # example: + # 0: only consecutive access patterns will be implemented in regs, rest in (LUTRAM/BRAM) line buffers + # 2: [0, 3, 6] access pattern is still allowed and will be implemented with one 7-position shift reg + REG_BRAM_THRESHOLD = 8 code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] @@ -1147,7 +832,7 @@ def compact_schedule(schedule): current.append(access_idx) else: # assume non-decreasing index order in access pattern - # ToDo: this assumption does not hold for M>1 case (2D buffer) + # TODO: this assumption does not hold for M>1 case (2D buffer) distance = access_idx - max(current) if not (distance-1 > REG_BRAM_THRESHOLD): for i in range(distance-1): @@ -1161,20 +846,14 @@ def compact_schedule(schedule): bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? 
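                    # Worked example (assuming SIMD=C, M=1, k=3x3, dilation=1, input width 13):
                    # each window then reads relative buffer positions [0,1,2, 13,14,15, 26,27,28].
                    # The two gaps of 10 unused positions exceed REG_BRAM_THRESHOLD=8, so this branch
                    # records a line-buffer FIFO of depth ceil(10/M)=10 for each gap, closes the
                    # current REG FIFO and starts a new one, yielding three 3-deep register groups
                    # separated by two line buffers.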
# start with new REG FIFO reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) #ToDo: fix for M again + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # fix for M again reg_fifos_depth.append(len(current)) current = [] current.append(access_idx) reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) #ToDo fix for M again + #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # fix for M again reg_fifos_depth.append(len(current)) - f_debug.write("\n"+"Buffer partitioning using REG_BRAM_THRESHOLD=%d" % REG_BRAM_THRESHOLD) - f_debug.write("\n"+"%d REG FIFOs (parallel read access):" % len(reg_fifos)) - f_debug.write("\n"+str(reg_fifos)) - f_debug.write("\n"+"%d BRAM FIFOs (line buffers):" % len(bram_fifos)) - f_debug.write("\n"+str(bram_fifos)) - code_gen_dict["$GENERATE_REG_FIFOS$"] = [] for i in range(len(reg_fifos)): code_gen_dict["$GENERATE_REG_FIFOS$"].append( @@ -1274,6 +953,9 @@ def convert_tuple(seq): loop_sequence_2 = convert_tuple(loop_sequence_2) end_sequence = convert_tuple(end_sequence) + cycles_total = 0 + for t in schedule: + cycles_total += t[0] code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] code_gen_dict["$START_COUNTER$"]=[str(start_sequence[0])] @@ -1294,11 +976,28 @@ def convert_tuple(seq): code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] - with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template.v", "r") as f: + with open("/workspace/finn/finn-rtllib/swg/swg_template_parallel.sv", "r") as f: template = f.read() + + ##### END CODE GEN FOR PARALLEL STYLE ##### + + ##### BEGIN GENERAL CODE GEN ##### + code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] + # save top module name so we can refer to it even after this node has been renamed + # (e.g. 
by GiveUniqueNodeNames(prefix) during MakeZynqProject) + self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) + code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] + code_gen_dict["$SIMD$"] = [str(simd)] + code_gen_dict["$MMV_IN$"] = [str(mmv_in)] + code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - - with open("/workspace/finn/finn-rtllib/swg/swg_hdl_template_wrapper.v", "r") as f: + ram_style = self.get_nodeattr("ram_style") + if ram_style == "auto": + code_gen_dict["$RAM_STYLE$"]=[""] + else: + code_gen_dict["$RAM_STYLE$"]=["(* ram_style = \"{}\" *)".format(ram_style)] + + with open("/workspace/finn/finn-rtllib/swg/swg_template_wrapper.v", "r") as f: template_wrapper = f.read() for key in code_gen_dict: @@ -1310,22 +1009,21 @@ def convert_tuple(seq): f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv"), "w") f.write(template) f.close() - f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v"), "w") f.write(template_wrapper) f.close() - - f_debug.close() + #f_debug.close() #set ipgen_path and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain self.set_nodeattr("ipgen_path", code_gen_dir) self.set_nodeattr("ip_path", code_gen_dir) + ##### END GENERAL CODE GEN ##### def prepare_rtlsim(self): """Creates a Verilator emulation library for the RTL code generated for this node, sets the rtlsim_so attribute to its path and returns a PyVerilator wrapper around it.""" - #modified to use generated verilog instead of HLS output products + # Modified to use generated (System-)Verilog instead of HLS output products if PyVerilator is None: raise ImportError("Installation of PyVerilator is required.") @@ -1374,4 +1072,4 @@ def code_generation_cppsim(self, model): pass def compile_singlenode_code(self): - pass \ No newline at end of file + pass diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 870f5593bf..01133dc5f5 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -148,21 +148,31 @@ def prepare_inputs(input_tensor): # input datatype @pytest.mark.parametrize("idt", [DataType["UINT4"]]) + +# @pytest.mark.parametrize( +# "conv_config", +# [ +# [[12,12], [3, 3], [1, 1], [1, 1]], +# [[13,13], [3, 3], [1, 1], [1, 1]], +# [[12,12], [3, 3], [2, 2], [1, 1]], +# [[13,13], [3, 3], [2, 2], [1, 1]], +# ], +# ) # kernel size -@pytest.mark.parametrize("k", [[3,3]]) +@pytest.mark.parametrize("k", [[1,1],[2,2],[3,3],[4,5],[1,3]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[24,24]]) +@pytest.mark.parametrize("ifm_dim", [[8,8],[13,13],[1,12]]) # input channels -@pytest.mark.parametrize("ifm_ch", [8]) +@pytest.mark.parametrize("ifm_ch", [6]) # Stride -@pytest.mark.parametrize("stride", [[3,3],[6,6]]) +@pytest.mark.parametrize("stride", [[1,1],[2,2],[3,4]]) # Dilation -@pytest.mark.parametrize("dilation", [[1,1],[2,2]]) +@pytest.mark.parametrize("dilation", [[1,1],[2,2],[4,3]]) # depthwise @pytest.mark.parametrize("dw", [0,1]) # input channel parallelism ("SIMD") -@pytest.mark.parametrize("simd", [1,2,8]) +@pytest.mark.parametrize("simd", [1,2,3,6]) # in/out MMV ("M") @pytest.mark.parametrize("m", [1]) # paralle_window enable (MMV_out = M*K) @@ -175,7 +185,14 @@ def prepare_inputs(input_tensor): def test_fpgadataflow_slidingwindow_rtl( idt, k, ifm_dim, ifm_ch, stride, dilation, dw, 
simd, m, parallel_window, flip ): + #ifm_dim = conv_config[0] + #k = conv_config[1] + #stride = conv_config[2] + #dilation= conv_config[3] + if flip: + if (ifm_dim[0]==ifm_dim[1] and k[0]==k[1] and stride[0]==stride[1] and dilation[0] == dilation[1]): + pytest.skip("Dimension flip would have no effect") k = k[::-1] ifm_dim = ifm_dim[::-1] stride = stride[::-1] @@ -186,8 +203,21 @@ def test_fpgadataflow_slidingwindow_rtl( stride_h, stride_w = stride dilation_h, dilation_w = dilation + kernel_width = (k_w-1)*dilation_w+1 # incl. dilation + kernel_height = (k_h-1)*dilation_h+1 # incl. dilation + if simd > ifm_ch: pytest.skip("SIMD cannot be larger than number of input channels") + if ifm_ch % simd != 0: + pytest.skip("SIMD must divide number of input channels") + if kernel_width > ifm_dim_h or stride_h > ifm_dim_h: + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") + if kernel_height > ifm_dim_w or stride_w > ifm_dim_w: + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") + if (k_h==1 and (stride_h!=1 or dilation_h!=1)) or (k_w==1 and (stride_w!=1 or dilation_w!=1)): + pytest.skip("Illegal convolution configuration: stride or dilation defined for unitary kernel dim") + if k_h==1 and k_w==1 and simd != ifm_ch: + pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) From 05d0ccb1e24860148e34e6ff502a9d9714a80918 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Mon, 13 Jun 2022 21:57:53 +0200 Subject: [PATCH 009/628] Fix merge --- .../fpgadataflow/convert_to_hls_layers.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index fa787c174b..e3faa03ace 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -51,7 +51,6 @@ class InferConvInpGen(Transformation): def __init__(self, use_rtl_variant=False): super().__init__() self.use_rtl_variant = use_rtl_variant - self.use_rtl_variant = True #testing def apply(self, model): graph = model.graph @@ -225,15 +224,15 @@ def apply(self, model): depthwise=depthwise, name="ConvolutionInputGenerator_" + n.name, ) - else: # non-square images and/or kernels + else: # 1D images and/or kernels assert is_1d_convolution, ( "%s: ConvolutionInputGenerator1D works only for 1D convs" % n.name ) if dilation_h > 1 or dilation_w > 1: - assert stride_h == 1 and stride_w == 1, ( - """%s: Stride value of greater than 1 is not supported for convolutions - with dilation value greater than 1""" + assert depthwise == 1, ( + """%s: Dilation value > 1 is only supported for + 1D depthwise separable convolutions""" % n.name ) ConvInpGen_node = helper.make_node( @@ -1689,4 +1688,4 @@ def apply(self, model): if graph_modified: model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) - return (model, graph_modified) + return (model, graph_modified) \ No newline at end of file From 889ba2cef6517866311bcea6593e10b00d2b6598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 17 Jun 2022 14:29:31 +0100 Subject: [PATCH 010/628] Hotfix: Instance name for axi_info instantiation. 
--- finn-rtllib/axi_info/hdl/axi_info_top.sv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/finn-rtllib/axi_info/hdl/axi_info_top.sv b/finn-rtllib/axi_info/hdl/axi_info_top.sv index 2032e1105c..905523ebaa 100644 --- a/finn-rtllib/axi_info/hdl/axi_info_top.sv +++ b/finn-rtllib/axi_info/hdl/axi_info_top.sv @@ -78,7 +78,7 @@ module axi_info_top #( 32'h0, CHECKSUM_COUNT }) - )( + ) inst ( //- Global Control ------------------ .ap_clk, .ap_rst_n, From 3a2a85084bc84bfbd248a78bc7b670621a261ca2 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 5 Jul 2022 14:53:19 +0200 Subject: [PATCH 011/628] Improve Systemverilog, extend tests --- finn-rtllib/swg/swg_template_default.sv | 633 +++++++++--------- .../convolutioninputgenerator_rtl.py | 10 +- .../test_convert_to_hls_1d_conv_layer.py | 8 +- 3 files changed, 310 insertions(+), 341 deletions(-) diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 12cc656928..fc4c96d1c3 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -1,351 +1,316 @@ -`timescale 1 ns / 1 ps - -module $TOP_MODULE_NAME$_controller -( - CLK, - RST, - advance, - addr_incr, - tail_incr +module $TOP_MODULE_NAME$_controller #( + int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, + int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, + int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, + int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, + int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, + + int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, + bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = $ADDR_INCREMENT_MAP$ +)( + input logic clk, + input logic rst_n, + + input logic advance, + output logic [INCR_BITWIDTH-1:0] addr_incr, + output logic [INCR_BITWIDTH-1:0] tail_incr ); -localparam LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$; -localparam LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$; -localparam LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$; -localparam LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$; -localparam LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$; -localparam INCR_BITWIDTH = $INCR_BITWIDTH$; -localparam [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP [0:5] = $ADDR_INCREMENT_MAP$; - -input CLK; -input RST; -input advance; -output [INCR_BITWIDTH-1:0] addr_incr; -output [INCR_BITWIDTH-1:0] tail_incr; - -//state and counters -reg [2:0] state, state_next; -parameter STATE_START = 0, STATE_LOOP_SIMD = 1, STATE_LOOP_KW = 2, STATE_LOOP_KH = 3, STATE_LOOP_W = 4, STATE_LOOP_H = 5; -reg [$clog2(LOOP_H_ITERATIONS+2)-1:0] counter_loop_h; //could add check if ITERATIONS > 0, then replace +2 with +1 -reg [$clog2(LOOP_W_ITERATIONS+2)-1:0] counter_loop_w; -reg [$clog2(LOOP_KH_ITERATIONS+2)-1:0] counter_loop_kh; -reg [$clog2(LOOP_KW_ITERATIONS+2)-1:0] counter_loop_kw; -reg [$clog2(LOOP_SIMD_ITERATIONS+2)-1:0] counter_loop_simd; - -reg [INCR_BITWIDTH-1:0] tail_incr_reg; -assign addr_incr = ADDR_INCREMENT_MAP[state]; -assign tail_incr = tail_incr_reg; - -//combinational logic for tail_incr generation -$TAIL_INCR_GENERATION$ - -//combinational next state logic -always @ (state, counter_loop_simd, counter_loop_kw, counter_loop_kh, counter_loop_w, counter_loop_h) begin - state_next = state; - if (state == $INNERMOST_STATE$) begin - if (counter_loop_simd == 0) - if (counter_loop_kw != 0) - state_next = STATE_LOOP_KW; - else - if(counter_loop_kh != 0) - state_next = STATE_LOOP_KH; - else - if(counter_loop_w != 0) - state_next = STATE_LOOP_W; - else - if(counter_loop_h != 0) - state_next = 
STATE_LOOP_H; - else - state_next = STATE_START; - end else - state_next = $INNERMOST_STATE$; -end - -//sequential logic -always @ (posedge CLK) begin - if (RST == 1'b0) begin - counter_loop_h <= LOOP_H_ITERATIONS; - counter_loop_w <= LOOP_W_ITERATIONS; - counter_loop_kh <= LOOP_KH_ITERATIONS; - counter_loop_kw <= LOOP_KW_ITERATIONS; - counter_loop_simd <= LOOP_SIMD_ITERATIONS; - state <= $INNERMOST_STATE$; - end else begin - if (advance) begin - state <= state_next; - if (state == $INNERMOST_STATE$) begin - if (counter_loop_simd == 0) begin - counter_loop_simd <= LOOP_SIMD_ITERATIONS; - if (counter_loop_kw == 0) begin - counter_loop_kw <= LOOP_KW_ITERATIONS; - if (counter_loop_kh == 0) begin - counter_loop_kh <= LOOP_KH_ITERATIONS; - if (counter_loop_w == 0) begin - counter_loop_w <= LOOP_W_ITERATIONS; - if (counter_loop_h == 0) begin - counter_loop_h <= LOOP_H_ITERATIONS; - end else - counter_loop_h <= counter_loop_h-1; - end else - counter_loop_w <= counter_loop_w-1; - end else - counter_loop_kh <= counter_loop_kh-1; - end else - counter_loop_kw <= counter_loop_kw-1; - end else - counter_loop_simd <= counter_loop_simd-1; + //State and counters + typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H + } state_e; + state_e State = $INNERMOST_STATE$; + state_e state_next; + + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] counter_loop_h = LOOP_H_ITERATIONS-1; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] counter_loop_w = LOOP_W_ITERATIONS-1; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] counter_loop_kh = LOOP_KH_ITERATIONS-1; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] counter_loop_kw = LOOP_KW_ITERATIONS-1; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] counter_loop_simd = LOOP_SIMD_ITERATIONS-1; + + logic [INCR_BITWIDTH-1:0] tail_incr_reg = 'x; + assign addr_incr = ADDR_INCREMENT_MAP[State]; + assign tail_incr = tail_incr_reg; + + //combinational logic for tail_incr generation + $TAIL_INCR_GENERATION$ + + //combinational next state logic + always_comb begin : blkState + state_next = State; + if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; + else begin + if(counter_loop_simd < 0) begin + state_next = + (counter_loop_kw >= 0)? STATE_LOOP_KW : + (counter_loop_kh >= 0)? STATE_LOOP_KH : + (counter_loop_w >= 0)? STATE_LOOP_W : + (counter_loop_h >= 0)? 
STATE_LOOP_H : + /* else */ STATE_START; + end + end + end : blkState + + //sequential logic + always_ff @ (posedge clk) begin + if(!rst_n) begin + State <= $INNERMOST_STATE$; + counter_loop_h <= LOOP_H_ITERATIONS-1; + counter_loop_w <= LOOP_W_ITERATIONS-1; + counter_loop_kh <= LOOP_KH_ITERATIONS-1; + counter_loop_kw <= LOOP_KW_ITERATIONS-1; + counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + end + else if(advance) begin + State <= state_next; + if (State == $INNERMOST_STATE$) begin + if(counter_loop_simd >= 0) counter_loop_simd <= counter_loop_simd-1; + else begin + counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + if(counter_loop_kw >= 0) counter_loop_kw <= counter_loop_kw-1; + else begin + counter_loop_kw <= LOOP_KW_ITERATIONS-1; + if(counter_loop_kh >= 0) counter_loop_kh <= counter_loop_kh-1; + else begin + counter_loop_kh <= LOOP_KH_ITERATIONS-1; + if(counter_loop_w >= 0) counter_loop_w <= counter_loop_w-1; + else begin + counter_loop_w <= LOOP_W_ITERATIONS-1; + if(counter_loop_h >= 0) counter_loop_h <= counter_loop_h-1; + else counter_loop_h <= LOOP_H_ITERATIONS-1; + end + end + end + end end end end -end -endmodule //controller - -module $TOP_MODULE_NAME$_cyclic_buffer_addressable -#( - parameter WIDTH = 1, - parameter DEPTH = 1 -) -( - CLK, - RST, - read_addr, - read_enable, - write_enable, - data_in, - data_out -); - -input CLK, RST, read_enable, write_enable; -input [$clog2(DEPTH)-1:0] read_addr; // absolute (!) read address of cyclic buffer -input [WIDTH-1:0] data_in; -output [WIDTH-1:0] data_out; -reg [$clog2(DEPTH)-1:0] write_addr; +endmodule : $TOP_MODULE_NAME$_controller -$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; +module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + int unsigned WIDTH, + int unsigned DEPTH +)( + input logic clk, + input logic rst_n, -reg [WIDTH-1:0] out_reg; -assign data_out = out_reg; + input logic write_enable, + input logic [$clog2(DEPTH)-1:0] write_addr, + input logic [WIDTH-1:0] data_in, -always @(posedge CLK) begin - if (RST == 1'b0) begin - write_addr <= 0; - end else begin - if (read_enable) - out_reg <= ram[read_addr]; + input logic read_enable, + input logic [$clog2(DEPTH)-1:0] read_addr, // absolute (!) 
read address of cyclic buffer + output logic [WIDTH-1:0] data_out +); - if (write_enable) begin - ram[write_addr] <= data_in; - - if (write_addr == DEPTH-1) - write_addr <= 0; - else - write_addr <= write_addr + 1; + $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; + logic [WIDTH-1:0] Out = 'x; + always_ff @(posedge clk) begin + if (!rst_n) begin + Out <= 'x; + end + else begin + if (read_enable) Out <= Ram[read_addr]; + if (write_enable) Ram[write_addr] <= data_in; end end -end -endmodule //cyclic_buffer_addressable - -module $TOP_MODULE_NAME$_impl ( - ap_clk, - ap_rst_n, - in0_V_V_TDATA, - in0_V_V_TVALID, - in0_V_V_TREADY, - out_V_V_TDATA, - out_V_V_TVALID, - out_V_V_TREADY + assign data_out = Out; + +endmodule : $TOP_MODULE_NAME$_cyclic_buffer_addressable + +module $TOP_MODULE_NAME$_impl #( + int BIT_WIDTH = $BIT_WIDTH$, + int SIMD = $SIMD$, + int MMV_IN = $MMV_IN$, + int MMV_OUT = $MMV_OUT$, + int LAST_READ_ELEM = $LAST_READ_ELEM$, + int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, + int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, + int ELEM_PER_WINDOW = $ELEM_PER_WINDOW$, + int INCR_BITWIDTH = $INCR_BITWIDTH$ +)( + input logic ap_clk, + input logic ap_rst_n, + + input logic in0_V_V_TVALID, + output logic in0_V_V_TREADY, + input logic [BIT_WIDTH * SIMD * MMV_IN-1:0] in0_V_V_TDATA, + + output logic out_V_V_TVALID, + input logic out_V_V_TREADY, + output logic [BIT_WIDTH * SIMD * MMV_OUT-1:0] out_V_V_TDATA ); -//generated constants -localparam BIT_WIDTH = $BIT_WIDTH$; -localparam SIMD = $SIMD$; -localparam MMV_IN = $MMV_IN$; -localparam MMV_OUT = $MMV_OUT$; -localparam BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; -localparam BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; -localparam BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; -localparam LAST_READ_ELEM = $LAST_READ_ELEM$; -localparam LAST_WRITE_ELEM = $LAST_WRITE_ELEM$; -//localparam [$clog2($BUF_ELEM_TOTAL$+1)-1:0] BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; -localparam BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; -localparam ELEM_PER_WINDOW = $ELEM_PER_WINDOW$; -localparam INCR_BITWIDTH = $INCR_BITWIDTH$; - -//IO ports -input ap_clk; -input ap_rst_n; -input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; -input in0_V_V_TVALID; -output in0_V_V_TREADY; -output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; -output out_V_V_TVALID; -input out_V_V_TREADY; - -//main buffer instantiation -wire [BUF_IN_WIDTH-1:0] window_buffer_in; -wire [BUF_OUT_WIDTH-1:0] window_buffer_out; -wire window_buffer_write_enable; -wire window_buffer_read_enable; -wire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; -$TOP_MODULE_NAME$_cyclic_buffer_addressable -#( - .WIDTH(BUF_IN_WIDTH), - .DEPTH(BUF_ELEM_TOTAL) -) -window_buffer_inst -( - .CLK(ap_clk), - .RST(ap_rst_n), - .read_addr(window_buffer_read_addr), - .read_enable(window_buffer_read_enable), - .write_enable(window_buffer_write_enable), - .data_in(window_buffer_in), - .data_out(window_buffer_out) -); - -//controller instantiation -wire advance_controller; -wire signed [INCR_BITWIDTH-1:0] addr_incr; -wire [INCR_BITWIDTH-1:0] tail_incr; - -$TOP_MODULE_NAME$_controller -controller_inst -( - .CLK(ap_clk), - .RST(ap_rst_n), - .advance(advance_controller), - .addr_incr(addr_incr), - .tail_incr(tail_incr) -); - -// Counters/address registers -// Add a sign bit even to (most) unsigned counters and window_buffer_read_addr_reg, -// so we can use automatic sign extension and simplify calculations w/ signed increment. -// Alternatively, we could manually sign-extend and shave off a bit here or there. 
-reg signed [$clog2(LAST_READ_ELEM+1)+1-1:0] newest_buffered_elem; -reg [$clog2(LAST_READ_ELEM+1)+1-1:0] current_elem; -reg [$clog2(LAST_READ_ELEM+1)+1-1:0] first_elem_next_window; -reg [$clog2(ELEM_PER_WINDOW)-1:0] k; -reg [$clog2(BUF_ELEM_TOTAL)+1-1:0] window_buffer_read_addr_reg; - -// Control signals/registers -wire read_cmd; -wire read_ok; -wire reading_done; - -wire fetch_cmd; -reg fetching_done; - -reg write_cmd; -wire write_ok; -wire write_blocked; -reg writing_done; - -assign read_cmd = - (( - $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(first_elem_next_window) - && $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(current_elem) - ) // (over-)write to buffer if oldest buffered element will no longer be needed - || fetching_done //or if fetching is done (e.g. for skipped rows at FM end due to stride) - ) - && !reading_done; //and if there is still an input element left to read -assign read_ok = read_cmd && in0_V_V_TVALID; -assign reading_done = newest_buffered_elem == LAST_READ_ELEM; - -assign fetch_cmd = !($signed(current_elem) > newest_buffered_elem) && !write_blocked && !fetching_done; - -assign write_ok = write_cmd && out_V_V_TREADY; -assign write_blocked = write_cmd && !out_V_V_TREADY; - -//assign buffer control -assign window_buffer_read_addr = window_buffer_read_addr_reg; -assign window_buffer_write_enable = read_ok; -assign window_buffer_read_enable = fetch_cmd; -assign advance_controller = fetch_cmd; - -//assign I/O ports -assign window_buffer_in = in0_V_V_TDATA; -assign out_V_V_TDATA = window_buffer_out; -assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) -assign out_V_V_TVALID = ap_rst_n && write_cmd; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) - -//main process for advancing counters -always @ (posedge ap_clk) begin - if (ap_rst_n == 1'b0) begin - newest_buffered_elem <= -1; - current_elem <= 0; - first_elem_next_window <= 0; - k <= 0; - window_buffer_read_addr_reg <= 0; - fetching_done <= 0; - writing_done <= 0; - write_cmd <= 0; - end else begin - if (read_ok) begin - //check if this is the last read cycle (reading_done will be true afterwards) - if ((newest_buffered_elem == LAST_READ_ELEM-1) && writing_done) begin - //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) - //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) - newest_buffered_elem <= -1; - current_elem <= 0; - first_elem_next_window <= 0; - writing_done <= 0; - fetching_done <= 0; - end - - newest_buffered_elem <= newest_buffered_elem+1; + // Derived Constants + localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; + localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; + localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; + + //main buffer instantiation + uwire [BUF_IN_WIDTH -1:0] window_buffer_in; + uwire [BUF_OUT_WIDTH-1:0] window_buffer_out; + uwire window_buffer_write_enable; + uwire window_buffer_read_enable; + uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr; + uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; + $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + .WIDTH(BUF_IN_WIDTH), + .DEPTH(BUF_ELEM_TOTAL) + ) window_buffer_inst ( + .clk(ap_clk), + .rst_n(ap_rst_n), + + .write_enable(window_buffer_write_enable), + 
.write_addr(window_buffer_write_addr), + .data_in(window_buffer_in), + + .read_enable(window_buffer_read_enable), + .read_addr(window_buffer_read_addr), + .data_out(window_buffer_out) + ); + + //controller instantiation + uwire advance_controller; + uwire signed [INCR_BITWIDTH-1:0] addr_incr; + uwire [INCR_BITWIDTH-1:0] tail_incr; + $TOP_MODULE_NAME$_controller controller_inst ( + .clk(ap_clk), + .rst_n(ap_rst_n), + .advance(advance_controller), + .addr_incr(addr_incr), + .tail_incr(tail_incr) + ); + + // Counters/address registers + // Add a sign bit even to (most) unsigned counters and window_buffer_read_addr_reg, + // so we can use automatic sign extension and simplify calculations w/ signed increment. + // Alternatively, we could manually sign-extend and shave off a bit here or there. + logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] newest_buffered_elem = -1; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] current_elem = 0; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] first_elem_next_window = 0; + logic [$clog2(ELEM_PER_WINDOW) -1:0] k = 0; + logic [$clog2(BUF_ELEM_TOTAL)+1 -1:0] window_buffer_read_addr_reg = 0; + logic [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr_reg = 0; + + // Control signals/registers + uwire read_cmd = + !reading_done && ( // if there is still an input element left to read + fetching_done || ( // if fetching is done (e.g. for skipped rows at FM end due to stride) + $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(first_elem_next_window) && + $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(current_elem) + ) // (over-)write to buffer if oldest buffered element will no longer be needed + ); + uwire read_ok = read_cmd && in0_V_V_TVALID; + uwire reading_done = newest_buffered_elem == LAST_READ_ELEM; + + uwire fetch_cmd = !($signed(current_elem) > newest_buffered_elem) && !write_blocked && !fetching_done; + logic fetching_done = 0; + + logic write_cmd = 0; + logic writing_done = 0; + uwire write_ok = write_cmd && out_V_V_TREADY; + uwire write_blocked = write_cmd && !out_V_V_TREADY;; + + //assign buffer control + assign window_buffer_write_addr = window_buffer_write_addr_reg; + assign window_buffer_read_addr = window_buffer_read_addr_reg; + assign window_buffer_write_enable = read_ok; + assign window_buffer_read_enable = fetch_cmd; + assign advance_controller = fetch_cmd; + + //assign I/O ports + assign window_buffer_in = in0_V_V_TDATA; + assign out_V_V_TDATA = window_buffer_out; + assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) + assign out_V_V_TVALID = ap_rst_n && write_cmd; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) + + //main process for advancing counters + always_ff @(posedge ap_clk) begin + if(!ap_rst_n) begin + newest_buffered_elem <= -1; + current_elem <= 0; + first_elem_next_window <= 0; + k <= 0; + window_buffer_read_addr_reg <= 0; + window_buffer_write_addr_reg <= 0; + fetching_done <= 0; + write_cmd <= 0; + writing_done <= 0; end - - if (fetch_cmd) begin - //count up to track which element index is about to be read from the buffer, and where it is located within the buffer - //use increment value calculated by controller - - //keep track where we are within a window - if (k == ELEM_PER_WINDOW - 1) - k <= 0; - else - k <= k+1; - - //update first element of next window to allow buffer overwrite up until that point - if (k == 0) - first_elem_next_window <= first_elem_next_window + tail_incr; - - 
//absolute buffer address wrap-around - if ($signed(window_buffer_read_addr_reg) + addr_incr > BUF_ELEM_TOTAL - 1) - window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr - BUF_ELEM_TOTAL; - else if ($signed(window_buffer_read_addr_reg) + addr_incr < 0) - window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr + BUF_ELEM_TOTAL; - else - window_buffer_read_addr_reg <= $signed(window_buffer_read_addr_reg) + addr_incr; - - //check if this is the last write cycle (writing_done will be true afterwards) - if (current_elem == LAST_WRITE_ELEM) - fetching_done <= 1; - else - current_elem <= $signed(current_elem) + addr_incr; - - // determine if prefetched data will be outstanding in the next cycle - // if we fetch in this cycle -> yes - // if we do not fetch nor write -> do not change - // if we do not fetch but write successfully-> clear outstanding data - write_cmd <= fetch_cmd; - end - - if (write_ok) - write_cmd <= fetch_cmd; - - if (write_ok && fetching_done) begin - //check if this is the last write cycle (writing_done will be true afterwards) - if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM - 1))) begin - //start processing of next FM if reading is done already, or completes in the same cycle - newest_buffered_elem <= -1; - current_elem <= 0; - first_elem_next_window <= 0; - fetching_done <= 0; - end else - writing_done <= 1; + else begin + if (read_ok) begin + window_buffer_write_addr_reg <= (window_buffer_write_addr_reg == BUF_ELEM_TOTAL-1)? 0 : window_buffer_write_addr_reg + 1; + newest_buffered_elem <= newest_buffered_elem+1; + + if (newest_buffered_elem == LAST_READ_ELEM-1) begin + window_buffer_write_addr_reg <= 0; + end + //check if this is the last read cycle (reading_done will be true afterwards) + if ((newest_buffered_elem == LAST_READ_ELEM-1) && writing_done) begin + //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) + //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) + newest_buffered_elem <= -1; + current_elem <= 0; + window_buffer_read_addr_reg <= 0; + first_elem_next_window <= 0; + writing_done <= 0; + fetching_done <= 0; + end + end + + if (fetch_cmd) begin + //count up to track which element index is about to be read from the buffer, and where it is located within the buffer + //use increment value calculated by controller + + // absolute buffer address wrap-around + automatic logic signed [$clog2(BUF_ELEM_TOTAL)+1:0] ra = $signed(window_buffer_read_addr_reg) + $signed(addr_incr); + automatic logic signed [$clog2(BUF_ELEM_TOTAL+1):0] ra_correct = + (ra >= BUF_ELEM_TOTAL)? -BUF_ELEM_TOTAL : + (ra < 0)? BUF_ELEM_TOTAL : 0; + window_buffer_read_addr_reg <= ra + ra_correct; + + //keep track where we are within a window + k <= (k != ELEM_PER_WINDOW - 1)? 
k+1 : 0; + + //update first element of next window to allow buffer overwrite up until that point + if (k == 0) + first_elem_next_window <= first_elem_next_window + tail_incr; + + //check if this is the last write cycle (writing_done will be true afterwards) + if (current_elem == LAST_WRITE_ELEM) + fetching_done <= 1; + else + current_elem <= $signed(current_elem) + addr_incr; + + // determine if prefetched data will be outstanding in the next cycle + // if we fetch in this cycle -> yes + // if we do not fetch nor write -> do not change + // if we do not fetch but write successfully-> clear outstanding data + write_cmd <= fetch_cmd; + end + + if (write_ok) + write_cmd <= fetch_cmd; + + if (write_ok && fetching_done) begin + //check if this is the last write cycle (writing_done will be true afterwards) + if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM - 1))) begin + //start processing of next FM if reading is done already, or completes in the same cycle + newest_buffered_elem <= -1; + current_elem <= 0; + window_buffer_read_addr_reg <= 0; + first_elem_next_window <= 0; + fetching_done <= 0; + end else + writing_done <= 1; + end end end -end -endmodule //TOP_MODULE_NAME_impl +endmodule : $TOP_MODULE_NAME$_impl diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 1aeeb9a1ee..af1896092f 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -556,11 +556,11 @@ def generate_hdl(self): tail_incr_last_window = buffer_min_size-1 code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin - if (counter_loop_kh != 0) + if (counter_loop_kh >= 0) tail_incr_reg = 1; - else if (counter_loop_w != 0) + else if (counter_loop_w >= 0) tail_incr_reg = {}; - else if (counter_loop_h != 0) + else if (counter_loop_h >= 0) tail_incr_reg = {}; else tail_incr_reg = {}; @@ -575,9 +575,9 @@ def generate_hdl(self): tail_incr_last_window = buffer_min_size-1 code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" always @ (counter_loop_w, counter_loop_h) begin - if (counter_loop_w != 0) + if (counter_loop_w >= 0) tail_incr_reg = {}; - else if (counter_loop_h != 0) + else if (counter_loop_h >= 0) tail_incr_reg = {}; else tail_incr_reg = {}; diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py index 5cc5f8fa6c..c771792188 100644 --- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py @@ -66,10 +66,11 @@ ], ) @pytest.mark.parametrize("depthwise", [False, True]) +@pytest.mark.parametrize("use_rtl_swg", [False, True]) @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.slow @pytest.mark.vivado -def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): +def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mode): pad, kernel_size, stride, dilation = conv_config np.random.seed(0) idt = DataType["UINT4"] @@ -83,6 +84,9 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] + if use_rtl_swg and exec_mode == "cppsim": + pytest.skip("cppsim not supported for RTL SWG") + if depthwise is True: group = out_chn = in_chn conv_param_shape = [out_chn, 1, k_h, k_w] @@ -138,7 +142,7 @@ def 
test_convert_to_hls_1d_conv_layer(conv_config, depthwise, exec_mode): model = model.transform(InferDataTypes()) new_model = model.transform(LowerConvsToMatMul()) - new_model = new_model.transform(to_hls.InferConvInpGen()) + new_model = new_model.transform(to_hls.InferConvInpGen(use_rtl_variant=use_rtl_swg)) if depthwise is True: new_model = new_model.transform(to_hls.InferVVAU()) else: From 53ffa3438dc130e71c46f7db2730095daba04204 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 5 Jul 2022 16:15:35 +0200 Subject: [PATCH 012/628] Adapt to upcoming FINN release --- finn-rtllib/swg/swg_template_wrapper.v | 36 +++++++++---------- .../convolutioninputgenerator_rtl.py | 12 +++---- ...est_fpgadataflow_convinputgenerator_rtl.py | 12 +++---- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index db0556d940..510418453f 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -3,12 +3,12 @@ module $TOP_MODULE_NAME$ ( ap_clk, ap_rst_n, - in0_V_V_TDATA, - in0_V_V_TVALID, - in0_V_V_TREADY, - out_V_V_TDATA, - out_V_V_TVALID, - out_V_V_TREADY + in0_V_TDATA, + in0_V_TVALID, + in0_V_TREADY, + out_V_TDATA, + out_V_TVALID, + out_V_TREADY ); parameter BIT_WIDTH = $BIT_WIDTH$; @@ -21,13 +21,13 @@ parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; input ap_clk; input ap_rst_n; (* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) //todo: make configurable or set later -input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; -input in0_V_V_TVALID; -output in0_V_V_TREADY; +input [BUF_IN_WIDTH-1:0] in0_V_TDATA; +input in0_V_TVALID; +output in0_V_TREADY; (* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) -output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; -output out_V_V_TVALID; -input out_V_V_TREADY; +output [BUF_OUT_WIDTH-1:0] out_V_TDATA; +output out_V_TVALID; +input out_V_TREADY; $TOP_MODULE_NAME$_impl #() @@ -35,12 +35,12 @@ impl ( .ap_clk(ap_clk), .ap_rst_n(ap_rst_n), - .in0_V_V_TDATA(in0_V_V_TDATA), - .in0_V_V_TVALID(in0_V_V_TVALID), - .in0_V_V_TREADY(in0_V_V_TREADY), - .out_V_V_TDATA(out_V_V_TDATA), - .out_V_V_TVALID(out_V_V_TVALID), - .out_V_V_TREADY(out_V_V_TREADY) + .in0_V_V_TDATA(in0_V_TDATA), + .in0_V_V_TVALID(in0_V_TVALID), + .in0_V_V_TREADY(in0_V_TREADY), + .out_V_V_TDATA(out_V_TDATA), + .out_V_V_TVALID(out_V_TVALID), + .out_V_V_TREADY(out_V_TREADY) ); endmodule //TOP_MODULE_NAME diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index af1896092f..9369542582 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -31,11 +31,11 @@ import numpy as np import os -from finn.core.datatype import DataType +from qonnx.core.datatype import DataType +from qonnx.custom_op.general import im2col +from qonnx.custom_op.general.im2col import compute_conv_output_dim from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp -from finn.custom_op.general.im2col import compute_conv_output_dim from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy -from finn.custom_op.general import im2col from finn.util.basic import ( get_rtlsim_trace_depth, @@ -625,7 +625,7 @@ def generate_hdl(self): code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] - with open("/workspace/finn/finn-rtllib/swg/swg_template_default.sv", "r") as f: + with 
open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_default.sv", "r") as f: template = f.read() ##### END CODE GEN FOR DEFAULT STYLE ##### @@ -976,7 +976,7 @@ def convert_tuple(seq): code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] - with open("/workspace/finn/finn-rtllib/swg/swg_template_parallel.sv", "r") as f: + with open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_parallel.sv", "r") as f: template = f.read() ##### END CODE GEN FOR PARALLEL STYLE ##### @@ -997,7 +997,7 @@ def convert_tuple(seq): else: code_gen_dict["$RAM_STYLE$"]=["(* ram_style = \"{}\" *)".format(ram_style)] - with open("/workspace/finn/finn-rtllib/swg/swg_template_wrapper.v", "r") as f: + with open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_wrapper.v", "r") as f: template_wrapper = f.read() for key in code_gen_dict: diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 01133dc5f5..c0bf799fa8 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -33,15 +33,15 @@ import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer -from finn.core.datatype import DataType -from finn.core.modelwrapper import ModelWrapper -from finn.custom_op.general.im2col import compute_conv_output_dim -from finn.custom_op.registry import getCustomOp +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.general.im2col import compute_conv_output_dim +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.util.basic import gen_finn_dt_tensor from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -from finn.transformation.general import GiveUniqueNodeNames -from finn.util.basic import gen_finn_dt_tensor def make_single_im2col_modelwrapper( k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt From 91a64c2b232ce8639bb5eadc8db745771a359f19 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 6 Jul 2022 13:43:36 +0200 Subject: [PATCH 013/628] Fix stitched ip sim, add builder cfg --- src/finn/builder/build_dataflow_config.py | 4 ++++ src/finn/builder/build_dataflow_steps.py | 5 ++++- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 4 ++-- src/finn/util/pyverilator.py | 3 ++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 381dfe91a2..eec55e5022 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -258,6 +258,10 @@ class DataflowBuildConfig: #: Which memory mode will be used for compute layers default_mem_mode: Optional[ComputeEngineMemMode] = ComputeEngineMemMode.DECOUPLED + #: Force inference of RTL ConvolutionInputGenerator over HLS implementation + #: If set to False, falls back to the default behavior of InferConvInpGen() + force_rtl_conv_inp_gen: Optional[bool] = False + #: Which Vitis platform will be used. 
#: Only relevant when `shell_flow_type = ShellFlowType.VITIS_ALVEO` #: e.g. "xilinx_u250_xdma_201830_2" diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 59f77650da..e77f17d7c2 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -302,7 +302,10 @@ def step_convert_to_hls(model: ModelWrapper, cfg: DataflowBuildConfig): # needed for convolutions -- TODO always exec? need_conv = len(model.get_nodes_by_op_type("Im2Col")) > 0 if need_conv: - model = model.transform(to_hls.InferConvInpGen()) + if cfg.force_rtl_conv_inp_gen: + model = model.transform(to_hls.InferConvInpGen(use_rtl_variant=True)) + else: + model = model.transform(to_hls.InferConvInpGen()) model = model.transform(to_hls.InferStreamingMaxPool()) model = model.transform(RemoveCNVtoFCFlatten()) # get rid of Tranpose -> Tranpose identity seq diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 7c978cf61a..d52868f5f8 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -534,8 +534,8 @@ def apply(self, model): tcl.append("ipx::save_core [ipx::find_open_core %s]" % block_vlnv) # export list of used Verilog files (for rtlsim later on) tcl.append( - "set all_v_files [get_files -filter {FILE_TYPE == Verilog " - + "&& USED_IN_SYNTHESIS == 1} ]" + "set all_v_files [get_files -filter {USED_IN_SYNTHESIS == 1 " + + "&& (FILE_TYPE == Verilog || FILE_TYPE == SystemVerilog)}]" ) v_file_list = "%s/all_verilog_srcs.txt" % vivado_stitch_proj_dir tcl.append("set fp [open %s w]" % v_file_list) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 3396561e06..ee7df3ed5b 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -74,7 +74,8 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - all_verilog_files = list(set(filter(lambda x: x.endswith(".v"), all_verilog_srcs))) + all_verilog_files = list(set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), + all_verilog_srcs))) # remove all but one instances of regslice_core.v filtered_verilog_files = [] From 9e06b83d4c679fad939df74edb9b0735f250faf5 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 6 Jul 2022 17:46:28 +0100 Subject: [PATCH 014/628] [CustomOp] Change name of axi_info axilite interface --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 7c978cf61a..4524fbd1bd 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -275,7 +275,7 @@ def insert_signature(self, checksum_count): "make_bd_intf_pins_external [get_bd_intf_pins %s/s_axi]" % signature_name ) self.connect_cmds.append( - "set_property name s_axis_info [get_bd_intf_ports s_axi_0]" + "set_property name s_axilite_info [get_bd_intf_ports s_axi_0]" ) self.connect_cmds.append("assign_bd_address") From a8d2c0bd802b74225e9b9d6bcacf96f0ed29b4cc Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 6 Jul 2022 17:48:55 +0100 Subject: [PATCH 015/628] [CustomOp] Change name of checksum axilite interface --- src/finn/custom_op/fpgadataflow/checksum.py | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index bde285eb0d..7510e10133 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -329,5 +329,5 @@ def pragmas(self): def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() # expose axilite interface - intf_names["axilite"] = ["s_axi_checksum"] + intf_names["axilite"] = ["s_axilite_checksum"] return intf_names From 56196891d6816e02c30726313e649b0d08fc22dc Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Mon, 11 Jul 2022 16:03:30 +0000 Subject: [PATCH 016/628] functional --- src/finn/transformation/streamline/reorder.py | 57 ++++ .../streamline/test_scale_resize_nhwc.py | 274 ++++++++++++++++++ 2 files changed, 331 insertions(+) create mode 100644 tests/transformation/streamline/test_scale_resize_nhwc.py diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 9ff8a2173c..89dd2f5a61 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -722,6 +722,63 @@ def apply(self, model): graph_modified = True return (model, graph_modified) +class MakeScaleResizeNHWC(Transformation): + """ + Converts the inputs and outputs for all scales Resize and Upsample nodes + from NCHW to NHWC. + """ + def apply(self, model): + graph = model.graph + node_ind = 0 + for n in graph.node: + node_ind += 1 + if n.op_type == "Upsample" or n.op_type == "Resize": + consumer = model.find_consumer(n.output[0]) + producer = model.find_producer(n.input[0]) + if n.op_type == "Upsample": + scales_ind = 1 + else: + scales_ind = 2 + if producer is not None and producer.op_type == "Transpose": + perms = list(get_by_name(producer.attribute, "perm").ints) + if perms == [0, 3, 1, 2]: + old_value = model.get_initializer(n.input[scales_ind]) + new_value = np.array([old_value[idx] for idx in (0, 2, 3, 1)], dtype=np.dtype('float32')) + model.set_initializer(n.input[scales_ind], new_value) + start_name = producer.input[0] + mid_name = n.input[0] + end_name = n.output[0] + (b, hi, wi, c) = model.get_tensor_shape(start_name) + (b, c, ho, wo) = model.get_tensor_shape(end_name) + producer.input[0] = mid_name + producer.output[0] = end_name + n.input[0] = start_name + n.output[0] = mid_name + model.set_tensor_shape(mid_name, (b, ho, wo, c)) + model.set_tensor_shape(end_name, (b, c, ho, wo)) + graph.node.remove(producer) + graph.node.insert(node_ind, producer) + elif consumer is not None and consumer.op_type == "Transpose": + perms = list(get_by_name(consumer.attribute, "perm").ints) + if perms == [0, 2, 3, 1]: + old_value = model.get_initializer(n.input[scales_ind]) + new_value = np.array([old_value[idx] for idx in (0, 2, 3, 1)], dtype=np.dtype('float32')) + model.set_initializer(n.input[scales_ind], new_value) + start_name = n.input[0] + mid_name = consumer.input[0] + end_name = consumer.output[0] + (b, c, hi, wi) = model.get_tensor_shape(start_name) + (b, c, ho, wo) = model.get_tensor_shape(mid_name) + consumer.input[0] = start_name + consumer.output[0] = mid_name + n.input[0] = mid_name + n.output[0] = end_name + model.set_tensor_shape(mid_name, (b, hi, wi, c)) + model.set_tensor_shape(end_name, (b, ho, wo, c)) + graph.node.remove(consumer) + graph.node.insert(node_ind - 1, consumer) + return (model, False) + class MoveOpPastFork(Transformation): """Move node operations 
past graph forks. Used when a node before a fork diff --git a/tests/transformation/streamline/test_scale_resize_nhwc.py b/tests/transformation/streamline/test_scale_resize_nhwc.py new file mode 100644 index 0000000000..ba4df84569 --- /dev/null +++ b/tests/transformation/streamline/test_scale_resize_nhwc.py @@ -0,0 +1,274 @@ +import pytest +import numpy as np +import onnx +import onnx.version_converter as vc +import onnx.helper as oh +from onnx import TensorProto +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import gen_finn_dt_tensor + +import finn.core.onnx_exec as oxe +from finn.transformation.streamline.reorder import MakeScaleResizeNHWC + +def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): + ofm_dim_h = ifm_dim[0] * scales[2] + ofm_dim_w = ifm_dim[1] * scales[3] + inp = oh.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] + ) + + param = oh.make_tensor_value_info( + "scales", TensorProto.FLOAT, [4] + ) + + # Not actually used, only needed for compliance with the Resize node interface + roi = oh.make_tensor_value_info( + "roi", TensorProto.FLOAT, [4] + ) + + outp_up = oh.make_tensor_value_info( + "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] + ) + outp = oh.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] + ) + + resize_node = oh.make_node( + "Resize", + inputs=["inp", "roi", "scales"], + outputs=["outp_up"], + name = "Resize1", + mode=mode, + ) + + transpose_node = onnx.helper.make_node( + "Transpose", + inputs=["outp_up"], + outputs=["outp"], + name="Transpose1", + perm=[0, 2, 3, 1], + ) + + graph = oh.make_graph( + nodes=[resize_node, transpose_node], + name="resize_graph", + inputs=[inp], + outputs=[outp], + value_info=[outp_up, param, roi], + ) + + model = oh.make_model(graph, producer_name="resize_model1") + model = ModelWrapper(model) + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", idt) + + model = model.transform(InferShapes()) + + return model + +def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): + ofm_dim_h = ifm_dim[0] * scales[2] + ofm_dim_w = ifm_dim[1] * scales[3] + inp = oh.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] + ) + + param = oh.make_tensor_value_info( + "scales", TensorProto.FLOAT, [4] + ) + + # Not actually used, only needed for compliance with the Resize node interface + roi = oh.make_tensor_value_info( + "roi", TensorProto.FLOAT, [4] + ) + + outp= oh.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] + ) + outp_tr = oh.make_tensor_value_info( + "outp_tr", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] + ) + + transpose_node = onnx.helper.make_node( + "Transpose", + inputs=["inp"], + outputs=["outp_tr"], + name="Transpose1", + perm=[0, 3, 1, 2], + ) + + resize_node = oh.make_node( + "Resize", + inputs=["outp_tr", "roi", "scales"], + outputs=["outp"], + name = "Resize1", + mode=mode, + ) + + graph = oh.make_graph( + nodes=[transpose_node, resize_node], + name="resize_graph", + inputs=[inp], + outputs=[outp], + value_info=[outp_tr, param, roi], + ) + + model = oh.make_model(graph, producer_name="resize_model2") + model = ModelWrapper(model) + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", idt) + + model = model.transform(InferShapes()) + + return model + +def 
create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): + ofm_dim_h = ifm_dim[0] * scales[2] + ofm_dim_w = ifm_dim[1] * scales[3] + inp = oh.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] + ) + + param = oh.make_tensor_value_info( + "scales", TensorProto.FLOAT, scales + ) + + # Not actually used, only needed for compliance with the Resize node interface + roi = oh.make_tensor_value_info( + "roi", TensorProto.FLOAT, [4] + ) + + outp_tr = oh.make_tensor_value_info( + "outp_tr", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] + ) + + outp_up = oh.make_tensor_value_info( + "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] + ) + outp = oh.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] + ) + + transpose_node1 = onnx.helper.make_node( + "Transpose", + inputs=["inp"], + outputs=["outp_tr"], + name="Transpose1", + perm=[0, 3, 1, 2], + ) + + resize_node = oh.make_node( + "Resize", + inputs=["outp_tr", "roi", "scales"], + outputs=["outp_up"], + name = "Resize1", + mode=mode, + ) + + transpose_node2 = onnx.helper.make_node( + "Transpose", + inputs=["out_up"], + outputs=["outp"], + name="Transpose2", + perm=[0, 2, 3, 1], + ) + + graph = oh.make_graph( + nodes=[transpose_node1, resize_node, transpose_node2], + name="resize_graph", + inputs=[inp], + outputs=[outp], + value_info=[outp_up, outp_tr, param, roi], + ) + + model = oh.make_model(graph, producer_name="resize_model3") + model = ModelWrapper(model) + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", idt) + + model = model.transform(InferShapes()) + + return model + + +@pytest.mark.streamline +# input dimension +@pytest.mark.parametrize("ifm_dim", [[2**i, 2**i] for i in range(3,6)]) +# input channels +@pytest.mark.parametrize("ifm_ch", [3]) +# scales +@pytest.mark.parametrize("scales", [[1,1,i,j] for i in range(2,5) for j in range(2,5)]) +# mode +@pytest.mark.parametrize("mode", ["nearest"]) +# input datatype +@pytest.mark.parametrize("idt", [DataType["INT4"]]) +def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): + # create models + resize_model1 = create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) + resize_model2 = create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt) + resize_model3 = create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) + + #set initializers + resize_model1.set_initializer("scales", np.array(scales, dtype=np.float32)) + resize_model2.set_initializer("scales", np.array(scales, dtype=np.float32)) + resize_model3.set_initializer("scales", np.array(scales, dtype=np.float32)) + + # generate input tensor for testing + input_tensor_nchw = gen_finn_dt_tensor(idt, [1, ifm_ch, ifm_dim[0], ifm_dim[1]]) + input_tensor_nhwc = gen_finn_dt_tensor(idt, [1, ifm_dim[0], ifm_dim[1], ifm_ch]) + input_dict_nchw = {"inp": input_tensor_nchw} + input_dict_nhwc = {"inp": input_tensor_nhwc} + + + # execute first model + output_dict1 = oxe.execute_onnx(resize_model1, input_dict_nchw) + expected1 = output_dict1["outp"] + + # transform Resize into ResizeNHWC + resize_model1 = resize_model1.transform(MakeScaleResizeNHWC()) + + # execute transformed model + output_node_name1 = resize_model1.graph.output[0].name + output_dict1 = oxe.execute_onnx( + resize_model1, input_dict_nchw, return_full_exec_context=False + ) + output1 = output_dict1[output_node_name1] + + # compare outputs + assert (expected1 == output1).all() + + # execute second model + output_dict2 = 
oxe.execute_onnx(resize_model2, input_dict_nhwc) + expected2 = output_dict2["outp"] + + # transform Resize into ResizeNHWC + resize_model2 = resize_model2.transform(MakeScaleResizeNHWC()) + + # execute transformed model + output_node_name2 = resize_model2.graph.output[0].name + output_dict2 = oxe.execute_onnx( + resize_model2, input_dict_nhwc, return_full_exec_context=False + ) + output2 = output_dict2[output_node_name2] + + # compare outputs + assert (expected2 == output2).all() + + # execute third model + output_dict3 = oxe.execute_onnx(resize_model3, input_dict_nhwc) + expected3 = output_dict3["outp"] + + # transform Resize into ResizeNHWC + resize_model3 = resize_model3.transform(MakeScaleResizeNHWC()) + + # execute transformed model + output_node_name3 = resize_model3.graph.output[0].name + output_dict3 = oxe.execute_onnx( + resize_model3, input_dict_nhwc, return_full_exec_context=False + ) + output3 = output_dict3[output_node_name3] + + # compare outputs + assert (expected3 == output3).all() From 1a93bbdaf3fe3ffd0c1d52121b55b6d5a7a67a40 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Mon, 11 Jul 2022 17:29:58 +0100 Subject: [PATCH 017/628] pre-commit passed --- src/finn/transformation/streamline/reorder.py | 14 ++++- .../streamline/test_scale_resize_nhwc.py | 56 +++++++++---------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 89dd2f5a61..e36de2aa54 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -722,11 +722,13 @@ def apply(self, model): graph_modified = True return (model, graph_modified) + class MakeScaleResizeNHWC(Transformation): """ Converts the inputs and outputs for all scales Resize and Upsample nodes from NCHW to NHWC. 
""" + def apply(self, model): graph = model.graph node_ind = 0 @@ -738,12 +740,15 @@ def apply(self, model): if n.op_type == "Upsample": scales_ind = 1 else: - scales_ind = 2 + scales_ind = 2 if producer is not None and producer.op_type == "Transpose": perms = list(get_by_name(producer.attribute, "perm").ints) if perms == [0, 3, 1, 2]: old_value = model.get_initializer(n.input[scales_ind]) - new_value = np.array([old_value[idx] for idx in (0, 2, 3, 1)], dtype=np.dtype('float32')) + new_value = np.array( + [old_value[idx] for idx in (0, 2, 3, 1)], + dtype=np.dtype("float32"), + ) model.set_initializer(n.input[scales_ind], new_value) start_name = producer.input[0] mid_name = n.input[0] @@ -762,7 +767,10 @@ def apply(self, model): perms = list(get_by_name(consumer.attribute, "perm").ints) if perms == [0, 2, 3, 1]: old_value = model.get_initializer(n.input[scales_ind]) - new_value = np.array([old_value[idx] for idx in (0, 2, 3, 1)], dtype=np.dtype('float32')) + new_value = np.array( + [old_value[idx] for idx in (0, 2, 3, 1)], + dtype=np.dtype("float32"), + ) model.set_initializer(n.input[scales_ind], new_value) start_name = n.input[0] mid_name = consumer.input[0] diff --git a/tests/transformation/streamline/test_scale_resize_nhwc.py b/tests/transformation/streamline/test_scale_resize_nhwc.py index ba4df84569..06faa83719 100644 --- a/tests/transformation/streamline/test_scale_resize_nhwc.py +++ b/tests/transformation/streamline/test_scale_resize_nhwc.py @@ -1,7 +1,7 @@ import pytest + import numpy as np import onnx -import onnx.version_converter as vc import onnx.helper as oh from onnx import TensorProto from qonnx.core.datatype import DataType @@ -12,6 +12,7 @@ import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MakeScaleResizeNHWC + def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] @@ -19,14 +20,10 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] ) - param = oh.make_tensor_value_info( - "scales", TensorProto.FLOAT, [4] - ) + param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [4]) # Not actually used, only needed for compliance with the Resize node interface - roi = oh.make_tensor_value_info( - "roi", TensorProto.FLOAT, [4] - ) + roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4]) outp_up = oh.make_tensor_value_info( "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] @@ -39,7 +36,7 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): "Resize", inputs=["inp", "roi", "scales"], outputs=["outp_up"], - name = "Resize1", + name="Resize1", mode=mode, ) @@ -68,6 +65,7 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): return model + def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] @@ -75,16 +73,12 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] ) - param = oh.make_tensor_value_info( - "scales", TensorProto.FLOAT, [4] - ) + param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [4]) # Not actually used, only needed for compliance with the Resize node interface - roi = oh.make_tensor_value_info( - "roi", TensorProto.FLOAT, [4] - ) + roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4]) - outp= oh.make_tensor_value_info( + outp = oh.make_tensor_value_info( 
"outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] ) outp_tr = oh.make_tensor_value_info( @@ -103,7 +97,7 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): "Resize", inputs=["outp_tr", "roi", "scales"], outputs=["outp"], - name = "Resize1", + name="Resize1", mode=mode, ) @@ -114,7 +108,7 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): outputs=[outp], value_info=[outp_tr, param, roi], ) - + model = oh.make_model(graph, producer_name="resize_model2") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -124,6 +118,7 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): return model + def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] @@ -131,14 +126,10 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] ) - param = oh.make_tensor_value_info( - "scales", TensorProto.FLOAT, scales - ) + param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, scales) # Not actually used, only needed for compliance with the Resize node interface - roi = oh.make_tensor_value_info( - "roi", TensorProto.FLOAT, [4] - ) + roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4]) outp_tr = oh.make_tensor_value_info( "outp_tr", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] @@ -163,7 +154,7 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): "Resize", inputs=["outp_tr", "roi", "scales"], outputs=["outp_up"], - name = "Resize1", + name="Resize1", mode=mode, ) @@ -195,11 +186,13 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): @pytest.mark.streamline # input dimension -@pytest.mark.parametrize("ifm_dim", [[2**i, 2**i] for i in range(3,6)]) +@pytest.mark.parametrize("ifm_dim", [[2**i, 2**i] for i in range(3, 6)]) # input channels @pytest.mark.parametrize("ifm_ch", [3]) # scales -@pytest.mark.parametrize("scales", [[1,1,i,j] for i in range(2,5) for j in range(2,5)]) +@pytest.mark.parametrize( + "scales", [[1, 1, i, j] for i in range(2, 5) for j in range(2, 5)] +) # mode @pytest.mark.parametrize("mode", ["nearest"]) # input datatype @@ -208,9 +201,11 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # create models resize_model1 = create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) resize_model2 = create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt) - resize_model3 = create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) + resize_model3 = create_transpose_resize_transpose( + ifm_dim, ifm_ch, scales, mode, idt + ) - #set initializers + # set initializers resize_model1.set_initializer("scales", np.array(scales, dtype=np.float32)) resize_model2.set_initializer("scales", np.array(scales, dtype=np.float32)) resize_model3.set_initializer("scales", np.array(scales, dtype=np.float32)) @@ -221,7 +216,6 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): input_dict_nchw = {"inp": input_tensor_nchw} input_dict_nhwc = {"inp": input_tensor_nhwc} - # execute first model output_dict1 = oxe.execute_onnx(resize_model1, input_dict_nchw) expected1 = output_dict1["outp"] @@ -263,12 +257,12 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # transform Resize into ResizeNHWC resize_model3 = resize_model3.transform(MakeScaleResizeNHWC()) - # execute transformed model + # execute transformed model output_node_name3 = 
resize_model3.graph.output[0].name output_dict3 = oxe.execute_onnx( resize_model3, input_dict_nhwc, return_full_exec_context=False ) output3 = output_dict3[output_node_name3] - # compare outputs + # compare outputs assert (expected3 == output3).all() From 7fd6a640d4702f82e330effa27e37edd2b07c1bb Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 12 Jul 2022 20:44:31 +0200 Subject: [PATCH 018/628] Fixes for tests, ooc synth, clocking --- finn-rtllib/swg/swg_template_wrapper.v | 4 ++-- .../transformation/fpgadataflow/synth_ooc.py | 2 +- .../test_convert_to_hls_conv_layer.py | 17 +++++++++++++---- .../test_fpgadataflow_convinputgenerator_rtl.py | 8 ++++---- 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index 510418453f..be5a93b9e6 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -18,13 +18,13 @@ parameter MMV_OUT = $MMV_OUT$; parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) input ap_clk; +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) input ap_rst_n; -(* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) //todo: make configurable or set later input [BUF_IN_WIDTH-1:0] in0_V_TDATA; input in0_V_TVALID; output in0_V_TREADY; -(* X_INTERFACE_PARAMETER = "FREQ_HZ 100000000.000000" *) output [BUF_OUT_WIDTH-1:0] out_V_TDATA; output out_V_TVALID; input out_V_TREADY; diff --git a/src/finn/transformation/fpgadataflow/synth_ooc.py b/src/finn/transformation/fpgadataflow/synth_ooc.py index 8d4aec259c..6070cce636 100644 --- a/src/finn/transformation/fpgadataflow/synth_ooc.py +++ b/src/finn/transformation/fpgadataflow/synth_ooc.py @@ -52,7 +52,7 @@ def file_to_basename(x): top_module_name = model.get_metadata_prop("wrapper_filename") top_module_name = file_to_basename(top_module_name).strip(".v") build_dir = make_build_dir("synth_out_of_context_") - verilog_extensions = [".v", ".vh"] + verilog_extensions = [".v", ".sv", ".vh"] with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f: all_verilog_srcs = f.read().split() for file in all_verilog_srcs: diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 55dc77cafb..7dcae82afe 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -57,11 +57,12 @@ "conv_config", [(1, 2, 0), (1, 3, 0), (3, 2, 1), (3, 1, 0), (3, 1, 1), (5, 2, 1)] ) @pytest.mark.parametrize("depthwise", [False, True]) +@pytest.mark.parametrize("use_rtl_swg", [False, True]) @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): +def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mode): kernel_size, stride, pad = conv_config np.random.seed(0) idt = DataType["UINT4"] @@ -69,6 +70,9 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): in_feature_dim = 7 in_chn = 16 + if use_rtl_swg and exec_mode == "cppsim": + pytest.skip("cppsim not supported for RTL SWG") + if depthwise is True: group = out_chn = in_chn conv_param_shape = [out_chn, 1, kernel_size, kernel_size] @@ -122,7 +126,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): model = 
model.transform(InferDataTypes()) new_model = model.transform(LowerConvsToMatMul()) - new_model = new_model.transform(to_hls.InferConvInpGen()) + new_model = new_model.transform(to_hls.InferConvInpGen(use_rtl_variant=use_rtl_swg)) if depthwise is True: new_model = new_model.transform(to_hls.InferVectorVectorActivation()) else: @@ -156,10 +160,15 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, exec_mode): x = gen_finn_dt_tensor(idt, input_shape) inp_dict = {model.graph.input[0].name: x} assert oxe.compare_execution(model, new_model, inp_dict) + + if use_rtl_swg: + downsampler_op_type = "ConvolutionInputGenerator_rtl" + else: + downsampler_op_type = "DownSampler" if kernel_size == 1 and stride > 1 and pad == 0: - assert new_model.graph.node[1].op_type == "DownSampler" + assert new_model.graph.node[1].op_type == downsampler_op_type if exec_mode == "rtlsim": - node = new_model.get_nodes_by_op_type("DownSampler")[0] + node = new_model.get_nodes_by_op_type(downsampler_op_type)[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = new_model.analysis(exp_cycles_per_layer) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index c0bf799fa8..c94aa1eab6 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -173,10 +173,10 @@ def prepare_inputs(input_tensor): # input channel parallelism ("SIMD") @pytest.mark.parametrize("simd", [1,2,3,6]) +# parallel_window enable (MMV_out = M*K) +@pytest.mark.parametrize("parallel_window", [0,1]) # in/out MMV ("M") @pytest.mark.parametrize("m", [1]) -# paralle_window enable (MMV_out = M*K) -@pytest.mark.parametrize("parallel_window", [0]) # Flip dimensions @pytest.mark.parametrize("flip", [False,True]) @@ -210,9 +210,9 @@ def test_fpgadataflow_slidingwindow_rtl( pytest.skip("SIMD cannot be larger than number of input channels") if ifm_ch % simd != 0: pytest.skip("SIMD must divide number of input channels") - if kernel_width > ifm_dim_h or stride_h > ifm_dim_h: + if kernel_height > ifm_dim_h or stride_h > ifm_dim_h: pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") - if kernel_height > ifm_dim_w or stride_w > ifm_dim_w: + if kernel_width > ifm_dim_w or stride_w > ifm_dim_w: pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") if (k_h==1 and (stride_h!=1 or dilation_h!=1)) or (k_w==1 and (stride_w!=1 or dilation_w!=1)): pytest.skip("Illegal convolution configuration: stride or dilation defined for unitary kernel dim") From b5dccf0f6aceb84ea0b2f84725e88f6c0a161f9b Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 13 Jul 2022 11:35:52 +0200 Subject: [PATCH 019/628] Fix FIFO OOC synth --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index d52868f5f8..35ac736aab 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -535,7 +535,8 @@ def apply(self, model): # export list of used Verilog files (for rtlsim later on) tcl.append( "set all_v_files [get_files -filter {USED_IN_SYNTHESIS == 1 " - + "&& (FILE_TYPE == Verilog || FILE_TYPE == SystemVerilog)}]" + + "&& (FILE_TYPE == Verilog || 
FILE_TYPE == SystemVerilog " + + "|| FILE_TYPE ==\"Verilog Header\")}]" ) v_file_list = "%s/all_verilog_srcs.txt" % vivado_stitch_proj_dir tcl.append("set fp [open %s w]" % v_file_list) From 1f17010edf8d256512ba8b9350e0a6321df16955 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 13 Jul 2022 15:02:29 +0100 Subject: [PATCH 020/628] Exit Dockerfile.finn & run_docker.sh process if XRT_DEB_VERSION is not set Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 7 ++++++- run-docker.sh | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 41f10cb5c1..435f81df82 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -35,6 +35,12 @@ WORKDIR /workspace ENV TZ="Europe/Dublin" RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone +# Make sure the XRT_DEB_VERSION --build-arg was passed to this script +ARG XRT_DEB_VERSION +RUN if [ -z "$XRT_DEB_VERSION" ]; then \ + echo "XRT_DEB_VERSION is not set, exiting now" ; exit 1 ; \ + fi + RUN apt-get update && \ apt-get install -y \ build-essential \ @@ -60,7 +66,6 @@ RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config RUN locale-gen "en_US.UTF-8" # install XRT -ARG XRT_DEB_VERSION RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb RUN rm /tmp/$XRT_DEB_VERSION.deb diff --git a/run-docker.sh b/run-docker.sh index 03e59de4a6..3016b2787d 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -179,6 +179,10 @@ if [ "$FINN_DOCKER_PREBUILT" = "0" ]; then OLD_PWD=$(pwd) cd $SCRIPTPATH docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --tag=$FINN_DOCKER_TAG . + if [ "$?" -ne 0 ]; then + echo "Error occurred during docker build, exiting" + exit 1 + fi cd $OLD_PWD fi # Launch container with current directory mounted From e55cc05eeb0901d0cbcc8fde519739138201e007 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 13 Jul 2022 22:01:40 +0200 Subject: [PATCH 021/628] [QONNX] use conv output shape for conv layer conversion --- .../transformation/qonnx/fold_quant_weights.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/qonnx/fold_quant_weights.py b/src/finn/transformation/qonnx/fold_quant_weights.py index 80b6042d03..e8339ae244 100644 --- a/src/finn/transformation/qonnx/fold_quant_weights.py +++ b/src/finn/transformation/qonnx/fold_quant_weights.py @@ -126,10 +126,20 @@ def apply(self, model): model.set_tensor_datatype(node_out, new_dtype) # Reshape scale for Conv if required + target_output_shape = model.get_tensor_shape( + target_node.output[0] + ) if target_node.op_type == "Conv" and len(scale.shape) > 0: - bias_shape = [1] * len(scale.shape) - bias_shape[1] = -1 - scale = scale.reshape(bias_shape) + conv_out_shape = [1] * len(target_output_shape) + # only support per-output channel scaling + # (i.e. 
all scale shape elems besides 0th must be 1s) + if len(scale.shape) > 1: + assert ( + np.prod(scale.shape[1:]) == 1 + ), "Can't fold scale beyond per-out-channel granularity" + # collect all scaling in channels dim (since we constrain) + conv_out_shape[1] = -1 + scale = scale.reshape(conv_out_shape) if scale.shape == (1,): scale = scale[0] From 5dd85ebc91534334915bc55d41eb0c719329a59a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 14 Jul 2022 15:25:04 +0200 Subject: [PATCH 022/628] [Deps] update hlslib --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 1fb830e349..3c0496832c 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="e9946e5e56acd85837e8e79224d2bb60764bed69" +HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From 05f11518f765b3167566263eb062e9588366c922 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 14 Jul 2022 15:28:43 +0200 Subject: [PATCH 023/628] [Test] add 1D upsample testcase, currently fails --- tests/fpgadataflow/test_fpgadataflow_upsampler.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_upsampler.py b/tests/fpgadataflow/test_fpgadataflow_upsampler.py index d1ef0b890a..534e1ce508 100644 --- a/tests/fpgadataflow/test_fpgadataflow_upsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_upsampler.py @@ -117,7 +117,7 @@ def forward(self, x): # param datatype @pytest.mark.parametrize("dt", [DataType["INT8"]]) -# Width/height of square input feature map +# spatial dim input feature map @pytest.mark.parametrize("IFMDim", [3, 5]) # upscaling factor @pytest.mark.parametrize("scale", [2, 3]) @@ -125,14 +125,19 @@ def forward(self, x): @pytest.mark.parametrize("NumChannels", [4]) # execution mode @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +# whether to use 1D or 2D square testcases +@pytest.mark.parametrize("is_1d", [False, True]) @pytest.mark.fpgadataflow @pytest.mark.vivado @pytest.mark.slow -def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode): +def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode, is_1d): atol = 1e-3 # Create the test model and inputs for it torch_model = PyTorchTestModel(upscale_factor=scale) - input_shape = (1, NumChannels, IFMDim, IFMDim) + if is_1d: + input_shape = (1, NumChannels, IFMDim, 1) + else: + input_shape = (1, NumChannels, IFMDim, IFMDim) test_in = torch.arange(0, np.prod(np.asarray(input_shape))) # Limit the input to values valid for the given datatype test_in %= dt.max() - dt.min() + 1 From 362601eddb9a1f64639810893b565b1580ce5a57 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 14 Jul 2022 16:38:16 +0200 Subject: [PATCH 024/628] [HLSCustomOp] add optional target_dir arg for inp2npy --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 9978ab0c71..6692fa4b60 100644 --- 
a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -397,18 +397,19 @@ def compile_singlenode_code(self): builder.build(code_gen_dir) self.set_nodeattr("executable_path", builder.executable_path) - def dynamic_input_to_npy(self, context, count): + def dynamic_input_to_npy(self, context, count, target_dir=""): """Saves input (given context) into .npy files. Count indicates the number of inputs that have to be saved.""" node = self.onnx_node - code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") - if code_gen_dir == "": - raise Exception( + if target_dir == "": + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + if code_gen_dir == "": + raise Exception( + """ + Found no codegen dir for this node, did you run the prepare_cppsim transformation? """ -Found no codegen dir for this node, did you run the prepare_cppsim transformation? - """ - ) + ) # create a npy file for each input of the node (in_ind is input index) # assuming dynamic inputs start from 0 for in_ind in range(count): @@ -427,7 +428,7 @@ def dynamic_input_to_npy(self, context, count): # make copy before saving the array reshaped_input = reshaped_input.copy() np.save( - os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)), + os.path.join(target_dir, "input_{}.npy".format(in_ind)), reshaped_input, ) From 801c92da96754d5871d91539641ee0fe908e3f73 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 14 Jul 2022 16:38:34 +0200 Subject: [PATCH 025/628] [Test] make upsampler test suitable for parallel exec --- tests/fpgadataflow/test_fpgadataflow_upsampler.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_upsampler.py b/tests/fpgadataflow/test_fpgadataflow_upsampler.py index 534e1ce508..a08d31f7b0 100644 --- a/tests/fpgadataflow/test_fpgadataflow_upsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_upsampler.py @@ -30,6 +30,7 @@ import numpy as np import os +import shutil import torch from brevitas.export import FINNManager from qonnx.core.datatype import DataType @@ -51,6 +52,7 @@ from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.util.basic import make_build_dir tmpdir = os.environ["FINN_BUILD_DIR"] @@ -131,13 +133,16 @@ def forward(self, x): @pytest.mark.vivado @pytest.mark.slow def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode, is_1d): + tmpdir = make_build_dir("upsample_export_") atol = 1e-3 - # Create the test model and inputs for it - torch_model = PyTorchTestModel(upscale_factor=scale) if is_1d: input_shape = (1, NumChannels, IFMDim, 1) + upscale_factor = (scale, 1) else: input_shape = (1, NumChannels, IFMDim, IFMDim) + upscale_factor = (scale, scale) + # Create the test model and inputs for it + torch_model = PyTorchTestModel(upscale_factor=upscale_factor) test_in = torch.arange(0, np.prod(np.asarray(input_shape))) # Limit the input to values valid for the given datatype test_in %= dt.max() - dt.min() + 1 @@ -205,3 +210,4 @@ def test_fpgadataflow_upsampler(dt, IFMDim, scale, NumChannels, exec_mode, is_1d assert output_matches, "Cppsim output doesn't match ONNX/PyTorch." elif exec_mode == "rtlsim": assert output_matches, "Rtlsim output doesn't match ONNX/PyTorch." 
+ shutil.rmtree(tmpdir, ignore_errors=True) From c9e49ed022198b1aaf7a016d79c25fea4069f4c2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 14 Jul 2022 16:39:21 +0200 Subject: [PATCH 026/628] [Upsample] add support for 1D upsampling --- src/finn/custom_op/fpgadataflow/upsampler.py | 44 ++++++++++++++----- .../fpgadataflow/convert_to_hls_layers.py | 17 ++++--- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index b62e4f2f67..eb51fe39fc 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -27,7 +27,6 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np -import os import warnings from qonnx.core.datatype import DataType @@ -57,6 +56,8 @@ def get_nodeattr_types(self): "inputDataType": ("s", True, ""), # Batch size "numInputVectors": ("i", False, 1), + # Dimensionality mode: 0 = 2D square, 1 = 1D in H dim + "DimMode": ("i", False, 0), } my_attrs.update(super().get_nodeattr_types()) return my_attrs @@ -64,21 +65,34 @@ def get_nodeattr_types(self): def get_exp_cycles(self): OFMDim = self.get_nodeattr("OFMDim") batch_size = self.get_nodeattr("numInputVectors") - exp_cycles = OFMDim * OFMDim * batch_size + is_2d = self.get_nodeattr("DimMode") == 0 + reps = 1 + if is_2d: + OFMDim = OFMDim * OFMDim + reps = batch_size + exp_cycles = OFMDim * reps return int(exp_cycles) def get_normal_input_shape(self): IFMDim = self.get_nodeattr("IFMDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") - ishape = (batch, IFMDim, IFMDim, num_ch) + is_2d = self.get_nodeattr("DimMode") == 0 + if is_2d: + ishape = (batch, IFMDim, IFMDim, num_ch) + else: + ishape = (batch, IFMDim, 1, num_ch) return ishape def get_normal_output_shape(self): OFMDim = self.get_nodeattr("OFMDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") - oshape = (batch, OFMDim, OFMDim, num_ch) + is_2d = self.get_nodeattr("DimMode") == 0 + if is_2d: + oshape = (batch, OFMDim, OFMDim, num_ch) + else: + oshape = (batch, OFMDim, 1, num_ch) return oshape def get_folded_input_shape(self): @@ -187,10 +201,19 @@ def strm_decl(self): ) def docompute(self): - self.code_gen_dict["$DOCOMPUTE$"] = [ - """UpsampleNearestNeighbour_Batch > (in0, out, numReps);""" - ] + is_2d = self.get_nodeattr("DimMode") == 0 + batch = self.get_nodeattr("numInputVectors") + if is_2d: + self.code_gen_dict["$DOCOMPUTE$"] = [ + """UpsampleNearestNeighbour_Batch > (in0, out, numReps);""" + ] + else: + assert batch == 1, "1D upsampler currently needs numReps=1" + self.code_gen_dict["$DOCOMPUTE$"] = [ + """UpsampleNearestNeighbour_1D > (in0, out);""" + ] def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -246,7 +269,6 @@ def execute_node(self, context, graph): node = self.onnx_node exp_ishape = self.get_normal_input_shape() exp_oshape = self.get_normal_output_shape() - folded_ishape = self.get_folded_input_shape() folded_oshape = self.get_folded_output_shape() if mode == "cppsim": @@ -268,9 +290,7 @@ def execute_node(self, context, graph): ), """Input shape doesn't match expected shape (numInputVectors, ImgDim, ImgDim, NumChannels).""" export_idt = self.get_input_datatype() - - reshaped_input = inp.reshape(folded_ishape) - np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) + self.dynamic_input_to_npy(context, 1, target_dir=code_gen_dir) if mode == 
"cppsim": # execute the precompiled model diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index f0bd5fbd06..429bc34ffc 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -285,20 +285,25 @@ def apply(self, model): ) # Assumes nhwc layout for scales and input - assert scales[1] == scales[2], ( - "%s: Upsampling is only supported for quadratic scales." % n.name + is_scale_square_2d = scales[1] == scales[2] + is_scale_1d = scales[1] > 1 and scales[2] == 1 + assert is_scale_square_2d or is_scale_1d, ( + "%s: Upsampling only supported for 1D H, or 2D square scaling" + % n.name ) assert scales[0] == scales[3] == 1, ( n.name + ": Upsampling is only supported for scales with " - "the first and last dimensions being 1." + "the first and last dimensions being 1 in NHWC." ) spatial_scale = scales[1] assert spatial_scale == int(spatial_scale), ( "%s: Upsampling is only supported for integer scales." % n.name ) + is_shape_square_2d = in_shape[1] == in_shape[2] + is_shape_1d = in_shape[1] > 1 and in_shape[2] == 1 - assert in_shape[1] == in_shape[2], ( - "%s: Upsampling is only supported for quadratic input shapes." + assert is_shape_square_2d or is_shape_1d, ( + "%s: Upsampling is only supported for 1D H or 2D square inputs." % n.name ) @@ -308,6 +313,7 @@ def apply(self, model): NumChannels = in_shape[-1] numInputVectors = in_shape[0] inputDataType = dt.name + dim_mode = 0 if is_shape_square_2d else 1 # Insert the HLSCustomOp node Upsample_HLS_node = helper.make_node( @@ -321,6 +327,7 @@ def apply(self, model): NumChannels=NumChannels, inputDataType=inputDataType, numInputVectors=numInputVectors, + DimMode=dim_mode, name="UpsampleNearestNeighbour_Batch_" + n.name, ) From 2c4849975a8183e86417a8de8ab4930bce1fc4c5 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 15 Jul 2022 12:10:31 +0100 Subject: [PATCH 027/628] [actions] Update quicktest workflow to new docker setup --- .github/workflows/quicktest-dev-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index d188007465..ba1ed224cf 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -50,4 +50,4 @@ jobs: - name: DockerRunQuicktest run: | - docker run --init --hostname finn_gha -w $(pwd) -v $(pwd):$(pwd) -e FINN_BUILD_DIR=/tmp/finn_gha -e FINN_INST_NAME=finn_gha finn_gha quicktest.sh + docker run --init --hostname finn_gha -w $(pwd) -v $(pwd):$(pwd) -e FINN_ROOT=$(pwd) -e FINN_BUILD_DIR=/tmp/finn_gha -e FINN_INST_NAME=finn_gha finn_gha quicktest.sh From 8954df6e86c021b9b317ffe3d6d2cde95a0b3e0a Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 15 Jul 2022 13:10:42 +0100 Subject: [PATCH 028/628] [quicktest] update path --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index f625f2b1ef..b4ad37232f 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -2,7 +2,7 @@ : ${PYTEST_PARALLEL=auto} -cd $FINN_ROOT/finn +cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" From 81fefd999708220bd38b4021c6515115161b96ed Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 15 Jul 2022 16:14:46 +0100 Subject: [PATCH 029/628] FINN-13: Version 
control the installed XRT version. Docker installs XRT when building the container. This patch allows the maintainer to set the Vitis tools version and the hash of the supported XRT version verified to work with FINN. This is the way XRT versioning is managed in AWS instances. FINN-13 was caused by using 2022.1 Vitis and an old XRT version in usage during 2020.1. Version controlling the sets of tools will prevent bugs like this from recurring. Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 17 +++++++++++++++++ run-docker.sh | 2 +- xrt_supported_versions.txt | 1 + 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 xrt_supported_versions.txt diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 435f81df82..1a1ae4b4f4 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -69,6 +69,23 @@ RUN locale-gen "en_US.UTF-8" RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb RUN rm /tmp/$XRT_DEB_VERSION.deb +RUN if [ ! -f "/opt/xilinx/xrt/include/version.h" ]; then \ + echo "ERROR: XRT not installed. Please install XRT" ; exit 1 ; \ + fi ; + +# Inherit the xrt_supported_versions arg, a file, and copy it to docker +ARG xrt_supported_versions +ADD $xrt_supported_versions ./ +ARG XILINX_VERSION +RUN echo "XRT installed. proceeding to check version compatibility" ; \ + xrt_build_ver="$XILINX_VERSION:"$(grep 'xrt_build_version_hash\[\]' /opt/xilinx/xrt/include/version.h | sed 's/";//' | sed 's/^.*"//') ; \ + if grep -Fxq "$xrt_build_ver" $xrt_supported_versions ; then \ + echo "XRT version $xrt_build_ver is supported." ; \ + echo "XRT Runtime setup Done" ; \ + else \ + echo "ERROR: $xrt_build_ver does not match supported versions" ; \ + exit 1 ; \ + fi ; # versioned Python package requirements for FINN compiler # these are given in requirements.txt diff --git a/run-docker.sh b/run-docker.sh index 3016b2787d..95c023ee87 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -178,7 +178,7 @@ if [ "$FINN_DOCKER_PREBUILT" = "0" ]; then # Need to ensure this is done within the finn/ root folder: OLD_PWD=$(pwd) cd $SCRIPTPATH - docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --tag=$FINN_DOCKER_TAG . + docker build -f docker/Dockerfile.finn --build-arg XILINX_VERSION=$FINN_XILINX_VERSION --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --build-arg xrt_supported_versions=xrt_supported_versions.txt --tag=$FINN_DOCKER_TAG . if [ "$?" 
-ne 0 ]; then echo "Error occurred during docker build, exiting" exit 1 diff --git a/xrt_supported_versions.txt b/xrt_supported_versions.txt new file mode 100644 index 0000000000..3828906123 --- /dev/null +++ b/xrt_supported_versions.txt @@ -0,0 +1 @@ +2022.1:f5505e402c2ca1ffe45eb6d3a9399b23a0dc8776 From aadf497ae80df9b352c63d146ccf6726297f9cad Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Jul 2022 10:46:13 +0100 Subject: [PATCH 030/628] [actions] Add fetching the repos to quicktest workflow --- .github/workflows/quicktest-dev-pr.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index ba1ed224cf..7bef2ba3ab 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -50,4 +50,6 @@ jobs: - name: DockerRunQuicktest run: | + ./fetch-repos.sh + docker run --init --hostname finn_gha -w $(pwd) -v $(pwd):$(pwd) -e FINN_ROOT=$(pwd) -e FINN_BUILD_DIR=/tmp/finn_gha -e FINN_INST_NAME=finn_gha finn_gha quicktest.sh From 98e7a1c4cf51e3c0f6c12c0a448e3a496e7a581b Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Jul 2022 10:58:25 +0100 Subject: [PATCH 031/628] [actions] Change execution order --- .github/workflows/quicktest-dev-pr.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index 7bef2ba3ab..f3dcd176a3 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -28,6 +28,10 @@ jobs: restore-keys: | ${{ runner.os }}-buildx- + - name: fetch repos + run: | + ./fetch-repos.sh + - name: Build and push uses: docker/build-push-action@v2 with: @@ -50,6 +54,4 @@ jobs: - name: DockerRunQuicktest run: | - ./fetch-repos.sh - docker run --init --hostname finn_gha -w $(pwd) -v $(pwd):$(pwd) -e FINN_ROOT=$(pwd) -e FINN_BUILD_DIR=/tmp/finn_gha -e FINN_INST_NAME=finn_gha finn_gha quicktest.sh From 846696196ffffad909db423ebb067bfedab50b76 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 19 Jul 2022 19:23:07 +0200 Subject: [PATCH 032/628] [QONNX] remove no-fork restriction for QONNX->FINN-ONNX conversion --- .../qonnx/quant_act_to_multithreshold.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py index c52d69b0f0..df1fd089f9 100644 --- a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py +++ b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py @@ -110,11 +110,11 @@ def apply(self, model): predecessor_op_type = predecessor[0].op_type else: predecessor_op_type = predecessor - if model.is_fork_node(n): - raise ValueError( - "Forking Quant/BipolarQuant nodes are currently " - "not supported by FINN." - ) + # if model.is_fork_node(n): + # raise ValueError( + # "Forking Quant/BipolarQuant nodes are currently " + # "not supported by FINN." + # ) if n.op_type == "Quant" and not model.get_initializer(n.input[2]) == 0: raise ValueError( "Only Quant nodes with zero-point == 0 are currently supported." 
From b70659cbf60ca58341043dedf84f3b8eb1526e19 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 19 Jul 2022 19:38:05 +0200 Subject: [PATCH 033/628] [Build] New flag "verbose" to control build_dataflow verbosity --- src/finn/builder/build_dataflow.py | 14 ++++++++------ src/finn/builder/build_dataflow_config.py | 4 ++++ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/finn/builder/build_dataflow.py b/src/finn/builder/build_dataflow.py index 238083f653..d6864994a7 100644 --- a/src/finn/builder/build_dataflow.py +++ b/src/finn/builder/build_dataflow.py @@ -155,12 +155,14 @@ def build_dataflow_cfg(model_filename, cfg: DataflowBuildConfig): % (step_name, step_num, len(build_dataflow_steps)) ) # redirect output to logfile - sys.stdout = stdout_logger - sys.stderr = stderr_logger - print( - "Running step: %s [%d/%d]" - % (step_name, step_num, len(build_dataflow_steps)) - ) + if not cfg.verbose: + sys.stdout = stdout_logger + sys.stderr = stderr_logger + # also log current step name to logfile + print( + "Running step: %s [%d/%d]" + % (step_name, step_num, len(build_dataflow_steps)) + ) # run the step step_start = time.time() model = transform_step(model, cfg) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 09e9ec3a56..92263bd82c 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -285,6 +285,10 @@ class DataflowBuildConfig: #: Whether pdb postmortem debuggig will be launched when the build fails enable_build_pdb_debug: Optional[bool] = True + #: When True, all warnings and compiler output will be printed in stdout. + #: Otherwise, these will be suppressed and only appear in the build log. + verbose: Optional[bool] = False + #: If given, only run the steps in the list. If not, run default steps. #: See `default_build_dataflow_steps` for the default list of steps. 
#: When specified: From 4b79ea383f8e9d778667f6ceed9c47f30af05613 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 19 Jul 2022 21:34:59 +0200 Subject: [PATCH 034/628] [Refactor] use ONNX textual input for test_move_past_fork testcase --- .../streamline/test_move_past_fork.py | 108 +++++++++--------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/tests/transformation/streamline/test_move_past_fork.py b/tests/transformation/streamline/test_move_past_fork.py index 5064fa3fca..543a43d64d 100644 --- a/tests/transformation/streamline/test_move_past_fork.py +++ b/tests/transformation/streamline/test_move_past_fork.py @@ -28,9 +28,11 @@ import pytest import numpy as np -from onnx import TensorProto, helper +import onnx.parser as oprs from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import get_by_name import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveLinearPastFork @@ -41,67 +43,65 @@ # ifmdim @pytest.mark.parametrize("ifmdim", [-1, 7]) def test_move_past_fork(ch, ifmdim): - # generate test vectors of correct shape if ifmdim == -1: - input_shape = (1, ch) + shp = [1, ch] else: - input_shape = (1, ch, ifmdim, ifmdim) + shp = [1, ch, ifmdim, ifmdim] + shp_str = str(shp) + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{shp_str} in0) => (float{shp_str} out0) + < + float{shp_str} add0_param, + float{shp_str} mul_shared_param, + float{shp_str} add2_param, + float{shp_str} mul2_param, + float{shp_str} add3_param, + float{shp_str} add4_param, + float{shp_str} mul3_param, + float{shp_str} add6_param + > + {{ - top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape) - top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape) - - num_of_params = 8 - value_info = [] - for i in range(num_of_params): - value_info += [ - helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape) - ] - - add_1_to_move = helper.make_node("Add", ["top_in", "p0"], ["fork1"]) - mul_1_to_move = helper.make_node("Mul", ["t5", "p4"], ["fork2"]) - add_2_to_move = helper.make_node("Add", ["fork2", "p5"], ["t6"]) - mul_1_not_to_move = helper.make_node("Mul", ["t8", "p7"], ["fork3"]) - modelproto = helper.make_model( - helper.make_graph( - name="test", - inputs=[top_in], - outputs=[top_out], - value_info=value_info, - nodes=[ - # fork1 - add_1_to_move, - helper.make_node("Mul", ["fork1", "p1"], ["t2"]), - helper.make_node("Mul", ["fork1", "p2"], ["t3"]), - helper.make_node("Add", ["t2", "t3"], ["t4"]), - helper.make_node("Add", ["t4", "p3"], ["t5"]), - # fork2 - mul_1_to_move, - add_2_to_move, - helper.make_node("Add", ["fork2", "p6"], ["t7"]), - helper.make_node("Add", ["t6", "t7"], ["t8"]), - # empty branches: do nothing - mul_1_not_to_move, - helper.make_node("Add", ["fork3", "fork3"], ["top_out"]), - ], - ) - ) - model = ModelWrapper(modelproto) + add0_out = Add(in0, add0_param) + mul0_out = Mul(add0_out, mul_shared_param) + mul1_out = Mul(add0_out, mul_shared_param) + add1_out = Add(mul0_out, mul1_out) + add2_out = Add(add1_out, add2_param) + mul2_out = Mul(add2_out, mul2_param) + add3_out = Add(mul2_out, add3_param) + add4_out = Add(mul2_out, add4_param) + add5_out = Add(add3_out, add4_out) + mul3_out = Mul(add5_out, mul3_param) + out0 = Add(mul3_out, add6_param) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) 
model = model.transform(InferShapes()) np.random.seed(0) - for i in range(num_of_params): - model.set_initializer( - "p" + str(i), np.random.rand(*input_shape).astype(np.float32) - ) - + for tensor_name in model.get_all_tensor_names(): + if tensor_name.endswith("_param"): + pshape = model.get_tensor_shape(tensor_name) + model.set_initializer( + tensor_name, np.random.rand(*pshape).astype(np.float32) + ) + model = model.transform(GiveUniqueNodeNames()) # Transform new_model = model.transform(MoveLinearPastFork()) - inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)} - + new_model = new_model.transform(GiveUniqueNodeNames()) + inp_dict = {"top_in": np.random.rand(*shp).astype(np.float32)} # Test assert oxe.compare_execution(model, new_model, inp_dict) - assert not new_model.is_fork_node(add_1_to_move) - assert not new_model.is_fork_node(mul_1_to_move) - assert not new_model.is_fork_node(add_2_to_move) - assert new_model.is_fork_node(mul_1_not_to_move) + nodes = new_model.graph.node + assert len(new_model.get_nodes_by_op_type("Add")) == 9 + assert len(new_model.get_nodes_by_op_type("Mul")) == 5 + assert not new_model.is_fork_node(get_by_name(nodes, "Add_0")) + assert new_model.is_join_node(get_by_name(nodes, "Add_2")) + assert not new_model.is_fork_node(get_by_name(nodes, "Mul_2")) + assert not new_model.is_join_node(get_by_name(nodes, "Add_5")) assert len(new_model.graph.node) == 14 From cbdc575e7466cad8478378519c112bf4270f821b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 19 Jul 2022 22:15:01 +0200 Subject: [PATCH 035/628] [Streamline] allow MoveOpPastFork to copy attributes, impl for Transpose --- src/finn/transformation/streamline/reorder.py | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 9ff8a2173c..3bfa0d458f 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -728,9 +728,10 @@ class MoveOpPastFork(Transformation): can be merged with nodes in the branches """ - def __init__(self, op_name_list): + def __init__(self, op_name_list, get_attrs_fxn=lambda x: {}): super().__init__() self.ops_to_move = op_name_list + self.get_attrs_fxn = get_attrs_fxn def apply(self, model): graph = model.graph @@ -747,9 +748,10 @@ def apply(self, model): # Restrict this transform to operations with constant parameters # Assuming parameters is in input 1 - op_init_param = model.get_initializer(n.input[1]) - if op_init_param is None: - continue + if len(n.input) > 1: + op_init_param = model.get_initializer(n.input[1]) + else: + op_init_param = None # Check case when branches are empty and go # to the same node @@ -766,16 +768,20 @@ def apply(self, model): for consumer_node in consumers[1:]: # create new node - new_param_name = model.make_new_valueinfo_name() new_output_tensor_name = model.make_new_valueinfo_name() + if op_init_param is None: + new_inp_list = [n.input[0]] + else: + new_param_name = model.make_new_valueinfo_name() + new_inp_list = [n.input[0], new_param_name] + model.set_initializer(new_param_name, op_init_param) + attrs = self.get_attrs_fxn(n) + # TODO use copy of original node instead to get attrs? 
new_node = oh.make_node( - n.op_type, - [n.input[0], new_param_name], - [new_output_tensor_name], + n.op_type, new_inp_list, [new_output_tensor_name], **attrs ) graph.node.insert(node_ind, new_node) node_ind += 1 - model.set_initializer(new_param_name, op_init_param) # change consumer input tensor graph.node.remove(consumer_node) @@ -811,6 +817,13 @@ def __init__(self): super().__init__(["Add", "Mul"]) +class MoveTransposePastFork(MoveOpPastFork): + def __init__(self): + super().__init__( + ["Transpose"], lambda x: {"perm": get_by_name(x.attribute, "perm").ints} + ) + + class MoveMaxPoolPastMultiThreshold(Transformation): """Move MaxPool nodes past MultiThreshold nodes on linear segments of the graph.""" From cb5f0f36f6e35ef640b9ef7dd71774e18374c0e9 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 19 Jul 2022 22:15:25 +0200 Subject: [PATCH 036/628] [Test] add testcase for MoveTransposePastFork --- .../streamline/test_move_past_fork.py | 37 ++++++++++++++++++- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/tests/transformation/streamline/test_move_past_fork.py b/tests/transformation/streamline/test_move_past_fork.py index 543a43d64d..7e77d7f9b3 100644 --- a/tests/transformation/streamline/test_move_past_fork.py +++ b/tests/transformation/streamline/test_move_past_fork.py @@ -35,14 +35,47 @@ from qonnx.util.basic import get_by_name import finn.core.onnx_exec as oxe -from finn.transformation.streamline.reorder import MoveLinearPastFork +from finn.transformation.streamline.reorder import ( + MoveLinearPastFork, + MoveTransposePastFork, +) + + +@pytest.mark.streamline +def test_move_past_fork_transpose(): + shp = [1, 3, 32, 32] + shp_str = str(shp) + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{shp_str} in0) => (float{shp_str} out0) + {{ + t0_out = Transpose(in0) + t1_out = Transpose(t0_out) + t2_out = Transpose(t0_out) + out0 = Add(t1_out, t2_out) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) + model = model.transform(InferShapes()) + new_model = model.transform(MoveTransposePastFork()) + new_model = new_model.transform(GiveUniqueNodeNames()) + nodes = new_model.graph.node + assert oxe.compare_execution( + model, new_model, {"in0": np.random.rand(*shp).astype(np.float32)} + ) + assert len(nodes) == 5 + assert not new_model.is_fork_node(get_by_name(nodes, "Transpose_0")) @pytest.mark.streamline @pytest.mark.parametrize("ch", [64, 1]) # ifmdim @pytest.mark.parametrize("ifmdim", [-1, 7]) -def test_move_past_fork(ch, ifmdim): +def test_move_past_fork_linear(ch, ifmdim): if ifmdim == -1: shp = [1, ch] else: From 8f7a26b6def3e89db019d533c4e5be35795261c2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 11:05:21 +0200 Subject: [PATCH 037/628] [Refactor] use textual ONNX for test_absorb_opposite_transposes --- .../test_absorb_opposite_transposes.py | 59 +++++++++---------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/tests/transformation/streamline/test_absorb_opposite_transposes.py b/tests/transformation/streamline/test_absorb_opposite_transposes.py index 51ea5edfc4..d4d26f3cf8 100644 --- a/tests/transformation/streamline/test_absorb_opposite_transposes.py +++ b/tests/transformation/streamline/test_absorb_opposite_transposes.py @@ -29,8 +29,7 @@ import pytest import numpy as np -import onnx.helper as oh -from onnx import TensorProto +import onnx.parser as oprs from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -41,38 
+40,36 @@ @pytest.mark.streamline def test_absorb_opposite_transposes(): np.random.seed(0) - input_shape = [1, 3, 4, 2] - top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape) - top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape) - value_info = [oh.make_tensor_value_info("add_param_0", TensorProto.FLOAT, [1])] - value_info += [oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [1])] - value_info += [oh.make_tensor_value_info("mul_param_0", TensorProto.FLOAT, [1])] - modelproto = oh.make_model( - oh.make_graph( - name="test", - inputs=[top_in], - outputs=[top_out], - value_info=value_info, - nodes=[ - oh.make_node("Add", ["top_in", "add_param_0"], ["t0"]), - oh.make_node("Transpose", ["t0"], ["t1"], perm=[0, 2, 3, 1]), - oh.make_node("Transpose", ["t1"], ["t2"], perm=[0, 3, 1, 2]), - oh.make_node("Add", ["t2", "add_param_1"], ["t3"]), - oh.make_node("Transpose", ["t3"], ["t4"], perm=[0, 2, 3, 1]), - oh.make_node("Transpose", ["t4"], ["t5"], perm=[0, 3, 1, 2]), - oh.make_node("Add", ["t5", "t2"], ["t6"]), - oh.make_node("Mul", ["t6", "mul_param_0"], ["top_out"]), - ], - ) - ) - model = ModelWrapper(modelproto) + shp = [1, 3, 4, 2] + shp_str = str(shp) + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{shp_str} in0) => (float{shp_str} out0) + < + float[1] add0_param = {{1.0}}, + float[1] add1_param = {{3.0}}, + float[1] mul0_param = {{2.0}} + > + {{ + add0_out = Add(in0, add0_param) + t0_out = Transpose(add0_out) + t1_out = Transpose(t0_out) + add1_out = Add(t1_out, add1_param) + t2_out = Transpose(add1_out) + t3_out = Transpose(t2_out) + add2_out = Add(t1_out, t3_out) + out0 = Mul(add2_out, mul0_param) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) model = model.transform(InferShapes()) - model.set_initializer("add_param_0", np.asarray([1], dtype=np.float32)) - model.set_initializer("add_param_1", np.asarray([3], dtype=np.float32)) - model.set_initializer("mul_param_0", np.asarray([2], dtype=np.float32)) new_model = model.transform(AbsorbConsecutiveTransposes()) new_model = new_model.transform(InferShapes()) - inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)} + inp_dict = {"top_in": np.random.rand(*shp).astype(np.float32)} assert ox.compare_execution(model, model, inp_dict) assert len(new_model.graph.node) == 4 for n in new_model.graph.node: From a8bb97543e901bd6cc253e911168695c2976d7ef Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 11:05:53 +0200 Subject: [PATCH 038/628] [Streamline] simplify and improve AbsorbConsecutiveTransposes --- src/finn/transformation/streamline/absorb.py | 96 +++++++------------- 1 file changed, 32 insertions(+), 64 deletions(-) diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py index 0299c4f4d8..a983e67750 100644 --- a/src/finn/transformation/streamline/absorb.py +++ b/src/finn/transformation/streamline/absorb.py @@ -473,7 +473,7 @@ class AbsorbConsecutiveTransposes(Transformation): """Remove (Transpose -> Transpose) patterns when the input and output of the pattern have the same layout.""" - def Are_opposite_permutations(self, perms1, perms2): + def are_opposite_permutations(self, perms1, perms2): if len(perms1) != len(perms2): return False assert 0 <= max(perms2) < len(perms2), "invalid permutation" @@ -488,72 +488,40 @@ def Are_opposite_permutations(self, perms1, perms2): def apply(self, model): graph = model.graph graph_modified = False - for n in 
graph.node: - if n.op_type == "Transpose": - if model.is_fork_node(n): - next_nodes = model.find_direct_successors(n) - perms1 = list(get_by_name(n.attribute, "perm").ints) - - # check if all nodes after fork are opposite transposes - all_opposite_transposes = True - for next_node in next_nodes: - if next_node is not None and next_node.op_type == "Transpose": - perms2 = list(get_by_name(next_node.attribute, "perm").ints) - if not self.Are_opposite_permutations(perms1, perms2): - all_opposite_transposes = False - break - else: - all_opposite_transposes = False - break - - if not all_opposite_transposes: - continue - - prod = model.find_producer(n.input[0]) - for next_node in next_nodes: - # connect next_node's consumer input to n's producer output - # TODO implement this to allow for forks as producers and - # joins as consumers - cons = model.find_consumer(next_node.output[0]) - cons.input[0] = prod.output[0] - - # remove consumer transpose - graph.node.remove(next_node) - - # remove producer transpose - graph.node.remove(n) - graph_modified = True - - else: - next_node = model.find_consumer(n.output[0]) + for node in graph.node: + if node.op_type == "Transpose": + next_nodes = model.find_consumers(node.output[0]) + perms1 = list(get_by_name(node.attribute, "perm").ints) + # check if all nodes after fork are opposite transposes + all_opposite_transposes = True + for next_node in next_nodes: if next_node is not None and next_node.op_type == "Transpose": - perms1 = list(get_by_name(n.attribute, "perm").ints) perms2 = list(get_by_name(next_node.attribute, "perm").ints) - if self.Are_opposite_permutations(perms1, perms2): - - # connect next_node's consumer input to n's producer output - # TODO implement this to allow for forks as producers - consumers = model.find_direct_successors(next_node) - prod = model.find_producer(n.input[0]) - if prod is not None: - for cons in consumers: - for cons_in in cons.input: - if cons_in == next_node.output[0]: - prod.output[0] = cons_in - break - else: - # n.input[0] is top-level graph input - # wire consumers directly to that - for cons in consumers: - for i, iname in enumerate(cons.input): - if iname == next_node.output[0]: - cons.input[i] = n.input[0] - - # remove both transposes - graph.node.remove(n) - graph.node.remove(next_node) + if not self.are_opposite_permutations(perms1, perms2): + all_opposite_transposes = False + break + else: + all_opposite_transposes = False + break + if not all_opposite_transposes: + continue + source_tensor = node.input[0] + for next_node in next_nodes: + # connect next_node's consumers' appropriate input to n's input + # TODO how to handle top-level outputs if any? 
+ nextnode_out = next_node.output[0] + assert nextnode_out not in [x.name for x in model.graph.output] + consumers = model.find_consumers(nextnode_out) + for cons in consumers: + for i, iname in enumerate(cons.input): + if iname == nextnode_out: + cons.input[i] = source_tensor + # remove consumer transpose + graph.node.remove(next_node) + # remove producer transpose + graph.node.remove(node) + graph_modified = True - graph_modified = True if graph_modified: model = model.transform(InferDataTypes()) return (model, graph_modified) From f1aa0e3b5db76829ec07f8d482b5f25656fbb58a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 11:11:30 +0200 Subject: [PATCH 039/628] [Test] add forked tranpose testcase to test_absorb_opposite_transposes --- .../streamline/test_absorb_opposite_transposes.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/transformation/streamline/test_absorb_opposite_transposes.py b/tests/transformation/streamline/test_absorb_opposite_transposes.py index d4d26f3cf8..88cbd5657e 100644 --- a/tests/transformation/streamline/test_absorb_opposite_transposes.py +++ b/tests/transformation/streamline/test_absorb_opposite_transposes.py @@ -61,16 +61,23 @@ def test_absorb_opposite_transposes(): t2_out = Transpose(add1_out) t3_out = Transpose(t2_out) add2_out = Add(t1_out, t3_out) - out0 = Mul(add2_out, mul0_param) + t4_out = Transpose(add2_out) + t5_out = Transpose(t4_out) + t6_out = Transpose(t4_out) + m0_out = Mul(t5_out, mul0_param) + m1_out = Mul(t6_out, mul0_param) + out0 = Mul(m0_out, m1_out) }} """ model = oprs.parse_model(input) model = ModelWrapper(model) model = model.transform(InferShapes()) + model.save("dbg.onnx") new_model = model.transform(AbsorbConsecutiveTransposes()) new_model = new_model.transform(InferShapes()) + new_model.save("newdbg.onnx") inp_dict = {"top_in": np.random.rand(*shp).astype(np.float32)} assert ox.compare_execution(model, model, inp_dict) - assert len(new_model.graph.node) == 4 + assert len(new_model.graph.node) == 6 for n in new_model.graph.node: assert new_model.graph.node[0].op_type != "Transpose" From 7177ad4372411933c1456ffebb40b5f40acc3dd7 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 11:13:21 +0200 Subject: [PATCH 040/628] [Infra] bugfix for quicktest.sh --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index f625f2b1ef..b4ad37232f 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -2,7 +2,7 @@ : ${PYTEST_PARALLEL=auto} -cd $FINN_ROOT/finn +cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" From ec6b9b958592cc44b5a0f31280be634c1a065a78 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 12:20:34 +0200 Subject: [PATCH 041/628] [Test] add forking QONNX Quant -> FINN-ONNX testcase --- .../brevitas/test_brevitas_relu_act_export.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index b0c3d6088c..3dc46ec31e 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -41,6 +41,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.cleanup import cleanup as qonnx_cleanup +from torch import nn import 
finn.core.onnx_exec as oxe from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN @@ -179,3 +180,83 @@ def test_brevitas_act_export_relu_imagenet( assert np.isclose(produced, expected, atol=1e-3).all() os.remove(export_onnx_path) + + +class PyTorchTestModel(nn.Module): + def __init__(self, abits): + super(PyTorchTestModel, self).__init__() + out_channels = 32 + self.b_act = QuantReLU( + bit_width=abits, + quant_type=QuantType.INT, + scaling_impl_type=ScalingImplType.PARAMETER, + scaling_per_channel=True, + restrict_scaling_type=RestrictValueType.LOG_FP, + scaling_min_val=2e-16, + max_val=6.0, + return_quant_tensor=False, + per_channel_broadcastable_shape=(1, out_channels, 1, 1), + ) + + def forward(self, x): + act_out = self.b_act(x) + y0 = act_out * 2.0 + y1 = act_out * -1.0 + y = y0 + y1 + return y + + +@pytest.mark.brevitas_export +@pytest.mark.parametrize("abits", [2, 4, 8]) +@pytest.mark.parametrize("max_val", [1.0, 1.5, 1 - 2 ** (-7)]) +@pytest.mark.parametrize("scaling_per_channel", [True]) +@pytest.mark.parametrize("QONNX_export", [True]) +def test_brevitas_act_export_relu_forking( + abits, max_val, scaling_per_channel, QONNX_export +): + out_channels = 32 + ishape = (1, out_channels, 1, 1) + min_val = -1.0 + model_pyt = PyTorchTestModel(abits) + + rand_tensor = (2) * torch.rand((1, out_channels, 1, 1)) + + checkpoint = { + "b_act.act_quant_proxy.fused_activation_quant_proxy." + "tensor_quant.scaling_impl.learned_value": rand_tensor.type(torch.FloatTensor) + } + model_pyt.load_state_dict(checkpoint) + + if QONNX_export: + m_path = export_onnx_path + BrevitasONNXManager.export(model_pyt, ishape, m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) + model.save(m_path) + + model = ModelWrapper(export_onnx_path) + model = model.transform(InferShapes()) + inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( + np.float32 + ) + idict = {model.graph.input[0].name: inp_tensor} + odict = oxe.execute_onnx(model, idict, True) + produced = odict[model.graph.output[0].name] + inp_tensor = torch.from_numpy(inp_tensor).float() + model_pyt.eval() + expected = model_pyt.forward(inp_tensor).detach().numpy() + if not np.isclose(produced, expected, atol=1e-3).all(): + print(abits, max_val) + print("scale: ", model_pyt.quant_act_scale().type(torch.FloatTensor).detach()) + if abits < 5: + print( + "thres:", + ", ".join(["{:8.4f}".format(x) for x in model_pyt.export_thres[0]]), + ) + print("input:", ", ".join(["{:8.4f}".format(x) for x in inp_tensor[0]])) + print("prod :", ", ".join(["{:8.4f}".format(x) for x in produced[0]])) + print("expec:", ", ".join(["{:8.4f}".format(x) for x in expected[0]])) + + assert np.isclose(produced, expected, atol=1e-3).all() + os.remove(export_onnx_path) From cc6438666d3d3521fa54d4cedaa82a2870a60884 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 20 Jul 2022 12:20:56 +0200 Subject: [PATCH 042/628] [QONNX] Remove forking act. 
restriction for QONNX->FINN-ONNX, works fine --- src/finn/transformation/qonnx/quant_act_to_multithreshold.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py index df1fd089f9..77025ecdf5 100644 --- a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py +++ b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py @@ -110,11 +110,6 @@ def apply(self, model): predecessor_op_type = predecessor[0].op_type else: predecessor_op_type = predecessor - # if model.is_fork_node(n): - # raise ValueError( - # "Forking Quant/BipolarQuant nodes are currently " - # "not supported by FINN." - # ) if n.op_type == "Quant" and not model.get_initializer(n.input[2]) == 0: raise ValueError( "Only Quant nodes with zero-point == 0 are currently supported." From e056b638db3622ab0584efcf2b0988015514ab81 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 20 Jul 2022 16:58:55 +0100 Subject: [PATCH 043/628] [HLSCustomOp] set target_dir to code_gen_dir if empty --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 6692fa4b60..b202e95a28 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -410,6 +410,7 @@ def dynamic_input_to_npy(self, context, count, target_dir=""): Found no codegen dir for this node, did you run the prepare_cppsim transformation? """ ) + target_dir = code_gen_dir # create a npy file for each input of the node (in_ind is input index) # assuming dynamic inputs start from 0 for in_ind in range(count): From b76b79eb30030987430e559afc4a1c5f30f4826d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 00:51:00 +0200 Subject: [PATCH 044/628] [Streamline] bugfix in MoveLinearPastEltwiseAdd --- src/finn/transformation/streamline/reorder.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 3bfa0d458f..3e815c1537 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -553,6 +553,8 @@ def apply(self, model): # Other transform should handle that if prod0 is None or prod1 is None or (prod0 == prod1): continue + if len(prod0.input) < 2 or len(prod1.input) < 2: + continue init0 = model.get_initializer(prod0.input[1]) init1 = model.get_initializer(prod1.input[1]) # if either initializer is None, skip From 4a0bff5dad0909d7075087baf3734d55578dade1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 15:15:53 +0200 Subject: [PATCH 045/628] [Deps] update hlslib --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..34727cb88c 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" +HLSLIB_COMMIT="2c7caccb5ecd2af448acac7d150e3cabc1119433" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" 
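A note on the MoveLinearPastEltwiseAdd bugfix in patch 044 above: node.input is a plain ONNX repeated field, so prod.input[1] raises an IndexError on a single-input producer (e.g. a Relu feeding the eltwise Add) before get_initializer can ever return None; the added len(prod.input) < 2 guard skips such producers instead. The following is a small standalone illustration of the guarded lookup pattern, not code from the repository.

    from onnx import helper

    relu = helper.make_node("Relu", ["x"], ["y"])     # single-input producer, no parameter tensor
    add = helper.make_node("Add", ["y", "p"], ["z"])  # two-input producer, input[1] is the parameter

    for prod in (relu, add):
        # mirror the guard added in the patch: only touch input[1] if it exists
        if len(prod.input) < 2:
            print(prod.op_type, "-> skipped, no parameter input to look up")
        else:
            print(prod.op_type, "-> parameter tensor:", prod.input[1])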
From 3e27f4330379530b817d3898a973745c7fe1142e Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 21 Jul 2022 16:25:40 +0100 Subject: [PATCH 046/628] [actions] Change quicktest workflow to use run-docker.sh script --- .github/workflows/quicktest-dev-pr.yml | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index f3dcd176a3..2a0b2b7b07 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -28,21 +28,7 @@ jobs: restore-keys: | ${{ runner.os }}-buildx- - - name: fetch repos - run: | - ./fetch-repos.sh - - name: Build and push - uses: docker/build-push-action@v2 - with: - file: docker/Dockerfile.finn - context: . - push: false - load: true - tags: finn_gha - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - - # Temp fix # https://github.com/docker/build-push-action/issues/252 # https://github.com/moby/buildkit/issues/1896 @@ -54,4 +40,7 @@ jobs: - name: DockerRunQuicktest run: | - docker run --init --hostname finn_gha -w $(pwd) -v $(pwd):$(pwd) -e FINN_ROOT=$(pwd) -e FINN_BUILD_DIR=/tmp/finn_gha -e FINN_INST_NAME=finn_gha finn_gha quicktest.sh + export FINN_ROOT=$(pwd) + export FINN_BUILD_DIR=/tmp/finn_gha + export FINN_INST_NAME=finn_gha + ./run-docker.sh quicktest From 8b49ac6416d0ff32458aff282e91466f60234311 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 21 Jul 2022 16:28:45 +0100 Subject: [PATCH 047/628] [actions] Fix typo in quicktest workflow --- .github/workflows/quicktest-dev-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index 2a0b2b7b07..df1911486d 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -32,7 +32,7 @@ jobs: # Temp fix # https://github.com/docker/build-push-action/issues/252 # https://github.com/moby/buildkit/issues/1896 - name: Move cache + - name: Move cache run: | rm -rf /tmp/.buildx-cache mv /tmp/.buildx-cache-new /tmp/.buildx-cache From ba28342b17e8db9078ddca2b49ca3d73cee701eb Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 21 Jul 2022 16:31:56 +0100 Subject: [PATCH 048/628] [actions] remove move cache --- .github/workflows/quicktest-dev-pr.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index df1911486d..320ef21c81 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -28,16 +28,6 @@ jobs: restore-keys: | ${{ runner.os }}-buildx- - - # Temp fix - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache - - - name: DockerRunQuicktest run: | export FINN_ROOT=$(pwd) From 367a8f379188d2aff8a529069e79cf00194239ec Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 21 Jul 2022 17:02:10 +0100 Subject: [PATCH 049/628] [Tests] Mark checksum test with vivado marker --- tests/fpgadataflow/test_fpgadataflow_checksum.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_checksum.py b/tests/fpgadataflow/test_fpgadataflow_checksum.py index 5e79ea2dad..495fcd10b6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_checksum.py +++ b/tests/fpgadataflow/test_fpgadataflow_checksum.py @@ -133,6 
+133,7 @@ def create_two_fc_model(): return model +@pytest.mark.vivado @pytest.mark.fpgadataflow def test_fpgadataflow_checksum(): # use a graph consisting of two fc layers to test From 78a2654e177a04baf43f6ed868325f0c94015f43 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 21 Jul 2022 17:25:39 +0100 Subject: [PATCH 050/628] [actions] pre-commit fix on quicktest workflow --- .github/workflows/quicktest-dev-pr.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index 320ef21c81..7e610425ee 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -32,5 +32,5 @@ jobs: run: | export FINN_ROOT=$(pwd) export FINN_BUILD_DIR=/tmp/finn_gha - export FINN_INST_NAME=finn_gha - ./run-docker.sh quicktest + export FINN_INST_NAME=finn_gha + ./run-docker.sh quicktest From 341a8a3ef25383396edba6d28aaa7ee3e0c51761 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 20:11:30 +0200 Subject: [PATCH 051/628] [ToHLS] draft a InferStreamingEltwiseAbsDiff conversion --- .../fpgadataflow/convert_to_hls_layers.py | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 429bc34ffc..e8f6372ab2 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1671,3 +1671,95 @@ def apply(self, model): model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) return (model, graph_modified) + + +class InferStreamingEltwiseAbsDiff(Transformation): + """Convert eltwise Sub -> Abs to StreamingEltwise layer + with AbsDiffEltwise op.""" + + def apply(self, model): + graph = model.graph + node_ind = 0 + graph_modified = False + for node in graph.node: + node_ind += 1 + if node.op_type == "Sub": + in0 = node.input[0] + in1 = node.input[1] + result = node.output[0] + in0_shape = model.get_tensor_shape(in0) + in1_shape = model.get_tensor_shape(in1) + + # skip if different shapes on inputs + if in0_shape != in1_shape: + continue + + idt0 = model.get_tensor_datatype(in0) + idt1 = model.get_tensor_datatype(in1) + + # skip conversion for layers with float input + if not (idt0.is_integer() and idt1.is_integer()): + continue + + # look for a downstream Abs node + res_consumer = model.find_consumer(result) + if res_consumer is None: + continue + if res_consumer.op_type != "Abs": + continue + + result = res_consumer.output[0] + + # check layout and convert if necessary + in0_layout = model.get_tensor_layout(in0) + in1_layout = model.get_tensor_layout(in1) + result_layout = model.get_tensor_layout(result) + + if in0_layout == DataLayout.NCHW: + in0 = nchw_to_nhwc(in0, model, node_ind) + node_ind += 1 + in0_shape = model.get_tensor_shape(in0) + + if in1_layout == DataLayout.NCHW: + in1 = nchw_to_nhwc(in1, model, node_ind) + node_ind += 1 + in1_shape = model.get_tensor_shape(in1) + + # keep track of where we need to insert the HLS Op + # it has to be ahead of the output transform + insert_point = node_ind + + if result_layout == DataLayout.NCHW: + result = nchw_to_nhwc(result, model, node_ind, reverse=True) + node_ind += 1 + + # now safe to assume num_channels is size of last dimension + num_channels = int(in0_shape[-1]) + # create node with no parallelization first + pe = 1 + + # create and insert new Eltwise node + new_node = 
helper.make_node( + "StreamingEltwise", + [in0, in1], + [result], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + NumChannels=num_channels, + PE=pe, + inputDataType0=idt0.name, + inputDataType1=idt1.name, + eltwiseOp="AbsDiff", + numInputVectors=in0_shape[:-1], + name="StreamingEltwise_" + node.name, + ) + graph.node.insert(insert_point, new_node) + # remove old nodes + graph.node.remove(node) + graph.node.remove(res_consumer) + graph_modified = True + + # if graph_modified: + # model = model.transform(InferShapes()) + # model = model.transform(InferDataTypes()) + return (model, graph_modified) From d7846755817a271f945c6c22d761bf8bc8b95442 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 20:12:22 +0200 Subject: [PATCH 052/628] [Test] start adding eltwise op test --- .../fpgadataflow/test_fpgadataflow_eltwise.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 tests/fpgadataflow/test_fpgadataflow_eltwise.py diff --git a/tests/fpgadataflow/test_fpgadataflow_eltwise.py b/tests/fpgadataflow/test_fpgadataflow_eltwise.py new file mode 100644 index 0000000000..94d66457d1 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_eltwise.py @@ -0,0 +1,42 @@ +import numpy as np +import onnx.parser as oprs +import qonnx.core.data_layout as dl +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.infer_shapes import InferShapes + +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls + + +def build_model(dt0, dt1): + np.random.seed(0) + shp = [1, 3, 4, 2] + shp_str = str(shp) + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{shp_str} in0, float{shp_str} in1) => (float{shp_str} out0) + {{ + sub_out = Sub(in0, in1) + out0 = Abs(sub_out) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) + model.set_tensor_datatype("in0", dt0) + model.set_tensor_datatype("in1", dt1) + model.set_tensor_layout("in0", dl.NHWC) + model.set_tensor_layout("in1", dl.NHWC) + model = model.transform(InferShapes()) + return model + + +def test_fpgadataflow_eltwise(): + dt0 = DataType["UINT7"] + dt1 = DataType["UINT8"] + model = build_model(dt0, dt1) + model = model.transform(to_hls.InferStreamingEltwiseAbsDiff()) + assert len(model.graph.node) == 1 + assert model.graph.node[0].op_type == "StreamingEltwise" From 214612ff2ffe83107198b1eb29c4cfe6358b2feb Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 21:03:42 +0200 Subject: [PATCH 053/628] [CustomOp] first sketch for StreamingEltwise --- src/finn/custom_op/fpgadataflow/eltwise.py | 416 +++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 src/finn/custom_op/fpgadataflow/eltwise.py diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py new file mode 100644 index 0000000000..ce99154231 --- /dev/null +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -0,0 +1,416 @@ +# Copyright (c) 2022, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import numpy as np +import os +import warnings +from qonnx.core.datatype import DataType + +from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp +from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy + + +class StreamingEltwise(HLSCustomOp): + """Class that corresponds to finn-hlslib StreamingEltwise function.""" + + def __init__(self, onnx_node): + super().__init__(onnx_node) + + def get_nodeattr_types(self): + my_attrs = { + "NumChannels": ("i", True, ""), + "PE": ("i", True, ""), + # FINN DataTypes for inputs; output datatype inferred from input + "inputDataType0": ("s", True, ""), + "inputDataType1": ("s", True, ""), + # type of EltwiseFunction for the operation + "eltwiseOp": ("s", True, "", ["Add", "Sub", "AbsDiff"]), + # number of input vectors, examples: + # [1] is a single vector (like a FC layer with batch=1) + # [4] is four vectors (like a FC layer with batch=4) + # [1, 4, 4] is four * four vectors (like a conv layer with batch=1) + "numInputVectors": ("ints", False, [1]), + } + my_attrs.update(super().get_nodeattr_types()) + return my_attrs + + def get_normal_input_shape(self, ind=0): + ich = self.get_nodeattr("NumChannels") + vecs = list(self.get_nodeattr("numInputVectors")) + ishape = tuple(vecs + [ich]) + return ishape + + def get_folded_input_shape(self, ind=0): + ich = self.get_nodeattr("NumChannels") + pe = self.get_nodeattr("PE") + assert ich % pe == 0, "PE must divide NumChannels" + vecs = list(self.get_nodeattr("numInputVectors")) + ishape = tuple(vecs + [ich // pe, pe]) + return ishape + + def get_normal_output_shape(self): + return self.get_normal_input_shape() + + def get_folded_output_shape(self): + return self.get_folded_input_shape() + + def make_shape_compatible_op(self, model): + exp_ishape = self.get_normal_input_shape() + oshape = self.get_normal_output_shape() + ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) + assert ishape == exp_ishape, "Unexpected input1 shape." + ishape = tuple(model.get_tensor_shape(self.onnx_node.input[1])) + assert ishape == exp_ishape, "Unexpected input2 shape." 
+ return super().make_const_shape_op(oshape) + + def infer_node_datatype(self, model): + node = self.onnx_node + idt0 = model.get_tensor_datatype(node.input[0]) + if idt0 != self.get_input_datatype(0): + warn_str = "inputDataType0 changing for %s: %s -> %s " % ( + node.name, + str(self.get_input_datatype(0)), + str(idt0), + ) + warnings.warn(warn_str) + self.set_nodeattr("inputDataType0", idt0.name) + idt1 = model.get_tensor_datatype(node.input[1]) + if idt1 != self.get_input_datatype(1): + warn_str = "inputDataType1 changing for %s: %s -> %s " % ( + node.name, + str(self.get_input_datatype(1)), + str(idt1), + ) + warnings.warn(warn_str) + self.set_nodeattr("inputDataType1", idt1.name) + # enforce output data type (calculated based on idt) + odt = self.get_output_datatype() + model.set_tensor_datatype(self.onnx_node.output[0], odt) + + def verify_node(self): + info_messages = [] + # verify that "backend" is set to "fpgadataflow" + backend_value = self.get_nodeattr("backend") + if backend_value == "fpgadataflow": + info_messages.append("Attribute backend is set correctly") + else: + info_messages.append('Attribute backend should be set to "fpgadataflow"') + + # verify that all necessary attributes exist + try: + self.get_nodeattr("code_gen_dir_cppsim") + self.get_nodeattr("executable_path") + self.get_nodeattr("NumChannels") + self.get_nodeattr("PE") + self.get_nodeattr("inputDataType0") + self.get_nodeattr("inputDataType1") + self.get_nodeattr("eltwiseOp") + info_messages.append("All necessary attributes exist") + except Exception: + info_messages.append( + """The required LabelSelect_Batch attributes do not exist.""" + ) + + return info_messages + + def get_input_datatype(self, id=0): + """Returns FINN DataType of input.""" + return DataType[self.get_nodeattr("inputDataType" + str(id))] + + def get_output_datatype(self): + """Returns FINN DataType of output.""" + op = self.get_nodeattr("eltwiseOp") + idt0 = self.get_input_datatype(0) + idt1 = self.get_input_datatype(1) + assert idt0.signed() == idt1.signed(), ( + "%s: Inputs must have same signedness" % self.onnx_node.name + ) + idt0_min, idt0_max = idt0.min(), idt0.max() + idt1_min, idt1_max = idt1.min(), idt1.max() + cands = [ + idt0_min - idt1_min, + idt0_min - idt1_max, + idt0_max - idt1_min, + idt0_max - idt1_max, + ] + largest_magnitude = max(map(abs, cands)) + if op == "Add": + if idt0.signed(): + return DataType.get_smallest_possible(idt0.min() + idt1.min()) + else: + return DataType.get_smallest_possible(idt0.max() + idt1.max()) + elif op == "Sub": + return DataType.get_smallest_possible(-largest_magnitude) + elif op == "AbsDiff": + return DataType.get_smallest_possible(largest_magnitude) + else: + raise Exception("%s: Unknown eltWiseOp = %s" % (self.onnx_node.name, op)) + + def get_instream_width(self, ind=0): + """Returns input stream width.""" + ibits = self.get_input_datatype(ind).bitwidth() + pe = self.get_nodeattr("PE") + in_width = pe * ibits + return in_width + + def get_outstream_width(self): + """Returns output stream width.""" + obits = self.get_output_datatype().bitwidth() + pe = self.get_nodeattr("PE") + out_width = pe * obits + return out_width + + def get_number_output_values(self): + return np.prod(self.get_folded_output_shape()[:-1]) + + def get_exp_cycles(self): + # Channels/PE * batch size * fmdim * fmdim + return np.prod(self.get_folded_output_shape()[:-1]) + + def execute_node(self, context, graph): + mode = self.get_nodeattr("exec_mode") + node = self.onnx_node + exp_ishape = self.get_normal_input_shape() + 
exp_oshape = self.get_normal_output_shape() + folded_ishape = self.get_folded_input_shape() + + if mode == "cppsim": + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + elif mode == "rtlsim": + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + else: + raise Exception( + """Invalid value for attribute exec_mode! Is currently set to: {} + has to be set to one of the following value ("cppsim", "rtlsim")""".format( + mode + ) + ) + + inp = context[node.input[0]] + assert str(inp.dtype) == "float32", "Input datatype is not float32" + assert ( + inp.shape == exp_ishape + ), """Input0 shape doesn't match expected shape .""" + export_idt = self.get_input_datatype() + # reshape input into folded form + inp = inp.reshape(folded_ishape) + # make copy before saving array + reshaped_input = inp.copy() + np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) + + # exact same thing for input1 + inp = context[node.input[1]] + assert str(inp.dtype) == "float32", "Input datatype is not float32" + assert ( + inp.shape == exp_ishape + ), """Input1 shape doesn't match expected shape .""" + export_idt = self.get_input_datatype() + # reshape input into folded form + inp = inp.reshape(folded_ishape) + # make copy before saving array + reshaped_input = inp.copy() + np.save(os.path.join(code_gen_dir, "input_1.npy"), reshaped_input) + + if mode == "cppsim": + # execute the precompiled model + super().exec_precompiled_singlenode_model() + # load output npy file + super().npy_to_dynamic_output(context) + assert ( + context[node.output[0]].shape == exp_oshape + ), "cppsim did not produce expected output shape" + elif mode == "rtlsim": + sim = self.get_rtlsim() + nbits = self.get_instream_width() + rtlsim_inp0 = npy_to_rtlsim_input( + "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + ) + rtlsim_inp1 = npy_to_rtlsim_input( + "{}/input_1.npy".format(code_gen_dir), export_idt, nbits + ) + super().reset_rtlsim(sim) + super().toggle_clk(sim) + rtlsim_output = self.rtlsim(sim, rtlsim_inp0, rtlsim_inp1) + odt = self.get_output_datatype() + target_bits = odt.bitwidth() + packed_bits = self.get_outstream_width() + out_npy_path = "{}/output.npy".format(code_gen_dir) + out_shape = self.get_folded_output_shape() + rtlsim_output_to_npy( + rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits + ) + # load and reshape output + output = np.load(out_npy_path) + output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) + context[node.output[0]] = output + else: + raise Exception( + """Invalid value for attribute exec_mode! 
Is currently set to: {} + has to be set to one of the following value ("cppsim", "rtlsim")""".format( + mode + ) + ) + + assert ( + context[node.output[0]].shape == exp_oshape + ), """Output shape doesn't match expected shape.""" + + def global_includes(self): + self.code_gen_dict["$GLOBALS$"] = [ + '#include "eltwise.hpp"', + '#include "interpret.hpp', + ] + + def defines(self, var): + self.code_gen_dict["$DEFINES$"] = [] + + def read_npy_data(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + idt0 = self.get_input_datatype(0) + idt1 = self.get_input_datatype(1) + elem_bits_0 = idt0.bitwidth() + elem_bits_1 = idt1.bitwidth() + packed_bits_0 = self.get_instream_width(0) + packed_hls_type_0 = "ap_uint<%d>" % packed_bits_0 + packed_bits_1 = self.get_instream_width(1) + packed_hls_type_1 = "ap_uint<%d>" % packed_bits_1 + elem_hls_type_0 = idt0.get_hls_datatype_str() + elem_hls_type_1 = idt1.get_hls_datatype_str() + npy_type = "float" + self.code_gen_dict["$READNPYDATA$"] = [] + npy_in = "%s/input_0.npy" % code_gen_dir + self.code_gen_dict["$READNPYDATA$"].append( + 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' + % (packed_hls_type_0, elem_hls_type_0, elem_bits_0, npy_type, npy_in) + ) + npy_in = "%s/input_1.npy" % code_gen_dir + self.code_gen_dict["$READNPYDATA$"].append( + 'npy2apintstream<%s, %s, %d, %s>("%s", in1);' + % (packed_hls_type_1, elem_hls_type_1, elem_bits_1, npy_type, npy_in) + ) + + def strm_decl(self): + self.code_gen_dict["$STREAMDECLARATIONS$"] = [] + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> in0 ("in0");'.format(self.get_instream_width(0)) + ) + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> in1 ("in1");'.format(self.get_instream_width(1)) + ) + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> out ("out");'.format(self.get_outstream_width()) + ) + + def docompute(self): + op = self.get_nodeattr("eltwiseOp") + idt0 = self.get_input_datatype(0) + idt1 = self.get_input_datatype(1) + odt = self.get_output_datatype() + elem_hls_type_0 = idt0.get_hls_datatype_str() + elem_hls_type_1 = idt1.get_hls_datatype_str() + out_hls_type = odt.get_hls_datatype_str() + slice_in0 = "Slice<%s>" % elem_hls_type_0 + slice_in1 = "Slice<%s>" % elem_hls_type_1 + slice_out = "Slice<%s>" % out_hls_type + eltwise_op_str = "%sEltwiseFunction<%s, %s, %s>()" % ( + op, + elem_hls_type_0, + elem_hls_type_1, + out_hls_type, + ) + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{}<{}, {}, {}, {}, {}, {}, {}>(in0, in1, out);""".format( + "StreamingEltwise", + self.get_nodeattr("NumChannels"), + self.get_nodeattr("PE"), + self.get_number_output_values(), + slice_in0, + slice_in1, + slice_out, + eltwise_op_str, + ) + ] + + def dataoutstrm(self): + code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") + dtype = self.get_output_datatype() + elem_bits = dtype.bitwidth() + packed_bits = self.get_outstream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + elem_hls_type = dtype.get_hls_datatype_str() + npy_type = "float" + npy_out = "%s/output.npy" % code_gen_dir + oshape = self.get_folded_output_shape() + oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") + + self.code_gen_dict["$DATAOUTSTREAM$"] = [ + 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + oshape_cpp_str, + npy_out, + ) + ] + + def save_as_npy(self): + self.code_gen_dict["$SAVEASCNPY$"] = [] + + def blackboxfunction(self): + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void 
{}(hls::stream> &in0, hls::stream> &in1, + hls::stream> &out)""".format( + self.onnx_node.name, + self.get_nodeattr("PE") * self.get_input_datatype(0).bitwidth(), + self.get_nodeattr("PE") * self.get_input_datatype(1).bitwidth(), + self.get_nodeattr("PE") * self.get_output_datatype().bitwidth(), + ) + ] + + def pragmas(self): + self.code_gen_dict["$PRAGMAS$"] = [ + "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + ] + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE axis port=in1 name=in1_" + self.hls_sname() + ) + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + ) + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE ap_ctrl_none port=return" + ) + + def get_verilog_top_module_intf_names(self): + intf_names = super().get_verilog_top_module_intf_names() + sname = self.hls_sname() + swidth = self.get_instream_width_padded() + intf_names["s_axis"] = [(x + "_" + sname, swidth) for x in ["in0", "in1"]] + return intf_names From c52148795291ff07bc9e1a5f740f545698534a35 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 21:13:55 +0200 Subject: [PATCH 054/628] [Eltwise] register op and fix bugs --- src/finn/custom_op/fpgadataflow/__init__.py | 2 ++ src/finn/custom_op/fpgadataflow/eltwise.py | 15 ++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py index 2c7c86c64e..57cd56f30a 100644 --- a/src/finn/custom_op/fpgadataflow/__init__.py +++ b/src/finn/custom_op/fpgadataflow/__init__.py @@ -38,6 +38,7 @@ ) from finn.custom_op.fpgadataflow.downsampler import DownSampler from finn.custom_op.fpgadataflow.duplicatestreams_batch import DuplicateStreams_Batch +from finn.custom_op.fpgadataflow.eltwise import StreamingEltwise from finn.custom_op.fpgadataflow.fmpadding_batch import FMPadding_Batch from finn.custom_op.fpgadataflow.globalaccpool_batch import GlobalAccPool_Batch from finn.custom_op.fpgadataflow.iodma import IODMA @@ -85,3 +86,4 @@ custom_op["Lookup"] = Lookup custom_op["StreamingConcat"] = StreamingConcat custom_op["CheckSum"] = CheckSum +custom_op["StreamingEltwise"] = StreamingEltwise diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index ce99154231..f17eb6fdf3 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -216,7 +216,7 @@ def execute_node(self, context, graph): assert ( inp.shape == exp_ishape ), """Input0 shape doesn't match expected shape .""" - export_idt = self.get_input_datatype() + export_idt0 = self.get_input_datatype(0) # reshape input into folded form inp = inp.reshape(folded_ishape) # make copy before saving array @@ -229,7 +229,7 @@ def execute_node(self, context, graph): assert ( inp.shape == exp_ishape ), """Input1 shape doesn't match expected shape .""" - export_idt = self.get_input_datatype() + export_idt1 = self.get_input_datatype(1) # reshape input into folded form inp = inp.reshape(folded_ishape) # make copy before saving array @@ -246,12 +246,13 @@ def execute_node(self, context, graph): ), "cppsim did not produce expected output shape" elif mode == "rtlsim": sim = self.get_rtlsim() - nbits = self.get_instream_width() + nbits0 = self.get_instream_width(0) + nbits1 = self.get_instream_width(1) rtlsim_inp0 = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + "{}/input_0.npy".format(code_gen_dir), 
export_idt0, nbits0 ) rtlsim_inp1 = npy_to_rtlsim_input( - "{}/input_1.npy".format(code_gen_dir), export_idt, nbits + "{}/input_1.npy".format(code_gen_dir), export_idt1, nbits1 ) super().reset_rtlsim(sim) super().toggle_clk(sim) @@ -283,7 +284,7 @@ def execute_node(self, context, graph): def global_includes(self): self.code_gen_dict["$GLOBALS$"] = [ '#include "eltwise.hpp"', - '#include "interpret.hpp', + '#include "interpret.hpp"', ] def defines(self, var): @@ -344,7 +345,7 @@ def docompute(self): out_hls_type, ) self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, {}, {}, {}, {}, {}, {}>(in0, in1, out);""".format( + """{}<{}, {}, {}, {}, {}, {}>(in0, in1, out, {});""".format( "StreamingEltwise", self.get_nodeattr("NumChannels"), self.get_nodeattr("PE"), From 8660448d646373de18cb075f864e151dfb28384b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 21:32:34 +0200 Subject: [PATCH 055/628] [Test] add cppsim and rtlsim to eltwise test --- .../fpgadataflow/test_fpgadataflow_eltwise.py | 64 +++++++++++++++++-- 1 file changed, 59 insertions(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_eltwise.py b/tests/fpgadataflow/test_fpgadataflow_eltwise.py index 94d66457d1..76a68f3779 100644 --- a/tests/fpgadataflow/test_fpgadataflow_eltwise.py +++ b/tests/fpgadataflow/test_fpgadataflow_eltwise.py @@ -1,16 +1,28 @@ +import pytest + import numpy as np import onnx.parser as oprs import qonnx.core.data_layout as dl from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import gen_finn_dt_tensor import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer +from finn.core.onnx_exec import execute_onnx +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def build_model(dt0, dt1): +def build_model(shp, dt0, dt1): np.random.seed(0) - shp = [1, 3, 4, 2] shp_str = str(shp) input = f""" < @@ -33,10 +45,52 @@ def build_model(dt0, dt1): return model -def test_fpgadataflow_eltwise(): - dt0 = DataType["UINT7"] +# input datatype for one operand +@pytest.mark.parametrize("dt0", [DataType["UINT4"], DataType["UINT7"]]) +# channels +@pytest.mark.parametrize("ch", [1, 64]) +# folding +@pytest.mark.parametrize("fold", [-1, 2, 1]) +# execution mode +@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +@pytest.mark.fpgadataflow +@pytest.mark.vivado +def test_fpgadataflow_eltwise(dt0, ch, fold, exec_mode): + if fold == -1: + pe = 1 + else: + pe = max(1, ch // fold) + assert ch % pe == 0 dt1 = DataType["UINT8"] - model = build_model(dt0, dt1) + shp = [1, 4, 2, ch] + model = build_model(shp, dt0, dt1) + in0 = gen_finn_dt_tensor(dt0, shp) + in1 = gen_finn_dt_tensor(dt1, shp) + idict = {"in0": in0, "in1": in1} + y_expected = execute_onnx(model, idict)["out0"] model = model.transform(to_hls.InferStreamingEltwiseAbsDiff()) assert len(model.graph.node) == 1 assert model.graph.node[0].op_type == 
"StreamingEltwise" + getCustomOp(model.graph.node[0]).set_nodeattr("PE", pe) + if exec_mode == "cppsim": + model = model.transform(PrepareCppSim()) + model = model.transform(CompileCppSim()) + model = model.transform(SetExecMode("cppsim")) + elif exec_mode == "rtlsim": + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + else: + raise Exception("Unknown exec_mode") + y_produced = execute_onnx(model, idict)["out0"] + assert (y_produced == y_expected).all(), exec_mode + " failed" + if exec_mode == "rtlsim": + node = model.get_nodes_by_op_type("StreamingEltwise")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + assert exp_cycles != 0 From 2b516a95117d5bb282b121e4f0e63a96e67514b5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 21 Jul 2022 21:53:15 +0200 Subject: [PATCH 056/628] [Eltwise] also cover Sub inference and layer tests --- .../fpgadataflow/convert_to_hls_layers.py | 24 +++++++++---------- .../fpgadataflow/test_fpgadataflow_eltwise.py | 21 +++++++++++----- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index e8f6372ab2..bb7c045661 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1673,9 +1673,9 @@ def apply(self, model): return (model, graph_modified) -class InferStreamingEltwiseAbsDiff(Transformation): - """Convert eltwise Sub -> Abs to StreamingEltwise layer - with AbsDiffEltwise op.""" +class InferStreamingEltwise(Transformation): + """Convert eltwise Sub or Sub -> Abs to StreamingEltwise layer + with SubEltwise or AbsDiffEltwise op.""" def apply(self, model): graph = model.graph @@ -1701,14 +1701,14 @@ def apply(self, model): if not (idt0.is_integer() and idt1.is_integer()): continue + eltwiseOp = "Sub" + nodes_to_remove = [node] # look for a downstream Abs node res_consumer = model.find_consumer(result) - if res_consumer is None: - continue - if res_consumer.op_type != "Abs": - continue - - result = res_consumer.output[0] + if (res_consumer is not None) and (res_consumer.op_type == "Abs"): + eltwiseOp = "AbsDiff" + result = res_consumer.output[0] + nodes_to_remove.append(res_consumer) # check layout and convert if necessary in0_layout = model.get_tensor_layout(in0) @@ -1749,14 +1749,14 @@ def apply(self, model): PE=pe, inputDataType0=idt0.name, inputDataType1=idt1.name, - eltwiseOp="AbsDiff", + eltwiseOp=eltwiseOp, numInputVectors=in0_shape[:-1], name="StreamingEltwise_" + node.name, ) graph.node.insert(insert_point, new_node) # remove old nodes - graph.node.remove(node) - graph.node.remove(res_consumer) + for nd in nodes_to_remove: + graph.node.remove(nd) graph_modified = True # if graph_modified: diff --git a/tests/fpgadataflow/test_fpgadataflow_eltwise.py b/tests/fpgadataflow/test_fpgadataflow_eltwise.py index 76a68f3779..bfc007421e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_eltwise.py +++ b/tests/fpgadataflow/test_fpgadataflow_eltwise.py @@ -21,9 +21,17 @@ from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def build_model(shp, dt0, 
dt1): +def build_model(shp, dt0, dt1, do_abs): np.random.seed(0) shp_str = str(shp) + if do_abs: + graph = """ + sub_out = Sub(in0, in1) + out0 = Abs(sub_out) + """ + else: + graph = "out0 = Sub(in0, in1)" + input = f""" < ir_version: 7, @@ -31,8 +39,7 @@ def build_model(shp, dt0, dt1): > agraph (float{shp_str} in0, float{shp_str} in1) => (float{shp_str} out0) {{ - sub_out = Sub(in0, in1) - out0 = Abs(sub_out) + {graph} }} """ model = oprs.parse_model(input) @@ -51,11 +58,13 @@ def build_model(shp, dt0, dt1): @pytest.mark.parametrize("ch", [1, 64]) # folding @pytest.mark.parametrize("fold", [-1, 2, 1]) +# include Abs output node or not +@pytest.mark.parametrize("do_abs", [True, False]) # execution mode @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.fpgadataflow @pytest.mark.vivado -def test_fpgadataflow_eltwise(dt0, ch, fold, exec_mode): +def test_fpgadataflow_eltwise(dt0, ch, fold, do_abs, exec_mode): if fold == -1: pe = 1 else: @@ -63,12 +72,12 @@ def test_fpgadataflow_eltwise(dt0, ch, fold, exec_mode): assert ch % pe == 0 dt1 = DataType["UINT8"] shp = [1, 4, 2, ch] - model = build_model(shp, dt0, dt1) + model = build_model(shp, dt0, dt1, do_abs) in0 = gen_finn_dt_tensor(dt0, shp) in1 = gen_finn_dt_tensor(dt1, shp) idict = {"in0": in0, "in1": in1} y_expected = execute_onnx(model, idict)["out0"] - model = model.transform(to_hls.InferStreamingEltwiseAbsDiff()) + model = model.transform(to_hls.InferStreamingEltwise()) assert len(model.graph.node) == 1 assert model.graph.node[0].op_type == "StreamingEltwise" getCustomOp(model.graph.node[0]).set_nodeattr("PE", pe) From 5dce1e32beab4cdac02f975f0caff8d128576d45 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 14:26:15 +0200 Subject: [PATCH 057/628] [Deps] update hlslib --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..b9a81613c1 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" +HLSLIB_COMMIT="38591586d4d1012a556d2b0bfef7be254d0a8016" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From e5114ef0a8ad521f32d172fc48facce2404cc7b4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 19:06:52 +0200 Subject: [PATCH 058/628] [VVAU] port mem_mode=decoupled support from MVAU --- .../fpgadataflow/vectorvectoractivation.py | 572 ++++++++++++++++-- 1 file changed, 519 insertions(+), 53 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 27b23dd328..9d0a9ee520 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -29,6 +29,7 @@ import math import numpy as np import os +import textwrap import warnings from qonnx.core.datatype import DataType from qonnx.util.basic import ( @@ -41,6 +42,7 @@ from finn.util.data_packing import ( npy_to_rtlsim_input, numpy_to_hls_code, + pack_innermost_dim_as_hex_string, rtlsim_output_to_npy, ) @@ -67,6 +69,33 @@ def get_nodeattr_types(self): "accDataType": ("s", False, "INT32"), 
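            # The memory-related attributes added below (mem_mode, ram_style,
            # runtime_writeable_weights) are typically set after conversion via
            # getCustomOp, e.g. (illustrative sketch, assumes a ModelWrapper
            # `model` that already contains a VectorVectorActivation node):
            #   vvau_node = model.get_nodes_by_op_type("VectorVectorActivation")[0]
            #   vvau = getCustomOp(vvau_node)
            #   vvau.set_nodeattr("mem_mode", "decoupled")
            #   vvau.set_nodeattr("ram_style", "block")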
# no-activation mode (produce accumulators) "noActivation": ("i", False, 0, {0, 1}), + # memory mode for the layer weights + # const -- embedded weights, default, long compile/synth times + # decoupled -- streaming weights with weight streamer packaged inside IP + # external -- streaming weights with external streamer + "mem_mode": ("s", False, "const", {"const", "decoupled", "external"}), + # (mem_mode = decoupled only) whether weights will be writable through + # an AXI-lite interface during runtime + # 1 for enabled, 0 for disabled. + # see finn-rtllib/memstream/doc/README for more about the memory + # address map used for writable weights + # IMPORTANT: After using AXI lite to either read or write the weights, + # always "flush" the accelerator by first passing a dummy input + # vector through the accelerator. This will get rid of any old + # weight data from the weight FIFOs. + "runtime_writeable_weights": ("i", False, 0, {0, 1}), + # FPGA resource type for memories in decoupled mode + # auto -- let Vivado decide + # block -- use BRAM + # distributed -- use LUTRAM + # ultra -- use UltraRAM (URAM), must have runtime_writeable_weights=1 + # see also https://www.xilinx.com/support/answers/38070.html + "ram_style": ( + "s", + False, + "auto", + {"auto", "block", "distributed", "ultra"}, + ), } my_attrs.update(super().get_nodeattr_types()) return my_attrs @@ -198,14 +227,23 @@ def get_outstream_width(self): out_width = o_bits * self.get_nodeattr("PE") return out_width - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): k_h, k_w = self.get_nodeattr("Kernel") sf = k_h * k_w dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") nf = ch // pe - folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, pe]) + + if ind == 0: + # calculate shape of input 0 + folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, pe]) + elif ind == 1 and self.get_nodeattr("mem_mode") == "external": + # calculate shape of input 1 (weights) + folded_input_shape = tuple([1, sf * nf, pe]) + else: + raise Exception("Undefined input shape for requested input") + return folded_input_shape def get_folded_output_shape(self): @@ -319,43 +357,173 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): rows between PEs is not as expected (n_thres_steps)""" return ret.reshape(1, pe, tmem, n_thres_steps) - def generate_params(self, model, path): - # weights - weights = model.get_initializer(self.onnx_node.input[1]) + def make_weight_file(self, weights, weight_file_mode, weight_file_name): + """Produce a file containing given weights in appropriate format for this + layer. This file can be used for either synthesis or run-time reconfig + of weights. 
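+        (A typical illustrative call, as issued by generate_params below, is
+        make_weight_file(weights, "decoupled_npy", code_gen_dir + "/weights.npy").)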
+ + Arguments: + * weights : numpy array with weights to be put into the file + * weight_file_mode : one of {hls_header, decoupled_verilog_dat, + decoupled_runtime} + * weight_file_name : filename for the weight file to be generated + """ # convert weights into hlslib-compatible format weight_tensor = self.get_hls_compatible_weight_tensor(weights) - wdt = self.get_weight_datatype() - code_gen_dir = path - - """Saves weights into params.h""" - weight_hls_code = numpy_to_hls_code(weight_tensor, wdt, "weights", True, True) - # write weights into params.h - f_weights = open("{}/params.h".format(code_gen_dir), "w") - - if wdt.bitwidth() != 1: - f_weights.write( - "const FixedPointWeights<1,{},{},{}> weights = ".format( - wdt.get_hls_datatype_str(), - self.get_nodeattr("PE"), - self.calc_wmem(), + export_wdt = self.get_weight_datatype() + # we have converted bipolar weights to binary for export, + # so use it as such for weight generation + if self.get_weight_datatype() == DataType["BIPOLAR"]: + export_wdt = DataType["BINARY"] + if weight_file_mode == "hls_header": + weight_hls_code = numpy_to_hls_code( + weight_tensor, export_wdt, "weights", True, True + ) + # write weights into C++ header file as dictated by finn-hlslib + f_weights = open(weight_file_name, "w") + if export_wdt.bitwidth() != 1: + f_weights.write( + "const FixedPointWeights<1,{},{},{}> weights = ".format( + export_wdt.get_hls_datatype_str(), + self.get_nodeattr("PE"), + self.calc_wmem(), + ) ) + else: + f_weights.write( + "const BinaryWeights<1,{},{}> weights = ".format( + self.get_nodeattr("PE"), + self.calc_wmem(), + ) + ) + f_weights.write(weight_hls_code) + f_weights.close() + elif "decoupled" in weight_file_mode: + # create a weight stream for various flavors of decoupled mode: + # transpose weight tensor from (1, PE, WMEM, SIMD) to (1, WMEM, PE, SIMD) + weight_tensor_unflipped = np.transpose(weight_tensor, (0, 2, 1, 3)) + # reverse SIMD flip for saving weights in .npy + weight_tensor_simd_flipped = np.flip(weight_tensor_unflipped, axis=-1) + # PE flip for saving weights in .dat + weight_tensor_pe_flipped = np.flip(weight_tensor_unflipped, axis=-2) + # reshape weight tensor (simd_flipped and pe_flipped) to desired shape + pe = self.get_nodeattr("PE") + simd = 1 + # simd_flipped + weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape( + 1, -1, pe * simd + ) + weight_tensor_simd_flipped = weight_tensor_simd_flipped.copy() + # flipped + weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape( + 1, -1, pe * simd ) + weight_tensor_pe_flipped = weight_tensor_pe_flipped.copy() + if weight_file_mode == "decoupled_npy": + # save weight stream into npy for cppsim + np.save(weight_file_name, weight_tensor_simd_flipped) + elif weight_file_mode == "decoupled_verilog_dat": + # convert weight values into hexstring + weight_width = self.get_weightstream_width() + # pad to nearest 4 bits to get hex strings + weight_width_padded = roundup_to_integer_multiple(weight_width, 4) + weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string( + weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix="" + ) + # add zeroes to pad out file to 1024 entries + weight_stream = weight_tensor_pe_flipped.flatten() + weight_stream = weight_stream.copy() + with open(weight_file_name, "w") as f: + for val in weight_stream: + f.write(val + "\n") + elif weight_file_mode == "decoupled_runtime": + # memstream axi-lite interface will map each mem line to + # one or multiple 32-bit words + weight_width = self.get_weightstream_width() + 
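+                # e.g. (illustrative): a 72-bit weight stream gives
+                # ceil(log2(72/32)) = 2, so words_per_memwidth = 4 and each
+                # memory line is padded to 4*32 = 128 bits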
words_per_memwidth = 2 ** math.ceil(math.log2(weight_width / 32)) + if words_per_memwidth < 1: + words_per_memwidth = 1 + weight_width_padded = words_per_memwidth * 32 + # first, pack and ensure padding to 32 bits + weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string( + weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix="" + ) + weight_stream = weight_tensor_pe_flipped.flatten() + weight_stream = weight_stream.copy() + with open(weight_file_name, "w") as f: + for val in weight_stream: + # split into groups of 8 hex digits (= 32 bits) + words_32b = textwrap.wrap(val, 8) + words_32b.reverse() + for word_32b in words_32b: + f.write(word_32b + "\n") + else: + raise Exception("Unknown weight_file_mode") + else: - f_weights.write( - "const BinaryWeights<1,{},{}> weights = ".format( - self.get_nodeattr("PE"), self.calc_wmem() + raise Exception("Unknown weight_file_mode") + + def generate_params(self, model, path): + mem_mode = self.get_nodeattr("mem_mode") + code_gen_dir = path + # weights, if not external + weights = model.get_initializer(self.onnx_node.input[1]) + if mem_mode == "const": + # save hlslib-compatible weights in params.h + weight_filename = "{}/params.h".format(code_gen_dir) + self.make_weight_file(weights, "hls_header", weight_filename) + elif mem_mode == "decoupled" or mem_mode == "external": + weight_filename_sim = "{}/weights.npy".format(code_gen_dir) + # save decoupled weights for cppsim + self.make_weight_file(weights, "decoupled_npy", weight_filename_sim) + if mem_mode == "decoupled": + # also save weights as Verilog .dat file + # note that we provide two different .dat files, one for synth + # and one for synthesis. this is because URAM-based weights always + # need zero weights for synthesis, otherwise they get inferred + # as BRAM + weight_filename_rtl_synth = "{}/memblock_synth_0.dat".format( + code_gen_dir + ) + weight_filename_rtl_sim = "{}/memblock_sim_0.dat".format(code_gen_dir) + # sim weights are always the true weights + self.make_weight_file( + weights, "decoupled_verilog_dat", weight_filename_rtl_sim + ) + ram_style = self.get_nodeattr("ram_style") + if ram_style == "ultra": + # UltraRAM must have no memory initializer, or only zeroes + # otherwise BRAM will be inferred instead of URAM + # as a workaround we provide a zero-weight init here + synth_weights = np.zeros_like(weights, dtype=np.float32) + else: + synth_weights = weights + self.make_weight_file( + synth_weights, "decoupled_verilog_dat", weight_filename_rtl_synth ) + else: + raise Exception( + """Please set mem_mode to "const", "decoupled", or "external", + currently no other parameter value is supported!""" ) - f_weights.write(weight_hls_code) - f_weights.close() # save thresholds in thresh.h if len(self.onnx_node.input) > 2: thresholds = model.get_initializer(self.onnx_node.input[2]) if thresholds is not None: threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + # use UINT32 threshold export for bipolar times bipolar + inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"] + wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"] + # reinterpret inp/wt as bipolar if bin_xnor_mode is iset + inp_is_binary = self.get_input_datatype() == DataType["BINARY"] + wt_is_binary = self.get_weight_datatype() == DataType["BINARY"] + bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1 + inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode) + wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode) # get computed threshold 
datatype from attribute tdt = DataType[self.get_nodeattr("accDataType")] + assert np.vectorize(tdt.allowed)( threshold_tensor ).all(), "Thresholds in %s can't be expressed with type %s" % ( @@ -368,8 +536,11 @@ def generate_params(self, model, path): # write thresholds into thresh.h f_thresh = open("{}/thresh.h".format(code_gen_dir), "w") tdt_hls = tdt.get_hls_datatype_str() - odt = self.get_output_datatype() - odt_hls = odt.get_hls_datatype_str() + # use binary to export bipolar activations + export_odt = self.get_output_datatype() + if self.get_output_datatype() == DataType["BIPOLAR"]: + export_odt = DataType["BINARY"] + odt_hls = export_odt.get_hls_datatype_str() f_thresh.write( "static ThresholdsActivation<{},{},{},{},{},{},{}> threshs \ = ".format( @@ -387,6 +558,7 @@ def generate_params(self, model, path): def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") + mem_mode = self.get_nodeattr("mem_mode") node = self.onnx_node # TODO ensure codegen dir exists @@ -440,7 +612,26 @@ def execute_node(self, context, graph): inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) - output = self.rtlsim(sim, inp) + + if mem_mode == "external" or mem_mode == "decoupled": + wnbits = self.get_weightstream_width() + export_wdt = self.get_weight_datatype() + # we have converted bipolar weights to binary for export, + # so use it as such for weight generation + if self.get_weight_datatype() == DataType["BIPOLAR"]: + export_wdt = DataType["BINARY"] + wei = npy_to_rtlsim_input( + "{}/weights.npy".format(code_gen_dir), export_wdt, wnbits + ) + num_w_reps = 1 + io_dict = { + "inputs": {"in0": inp, "weights": wei * num_w_reps}, + "outputs": {"out": []}, + } + self.rtlsim_multi_io(sim, io_dict) + output = io_dict["outputs"]["out"] + else: + output = self.rtlsim(sim, inp) odt = self.get_output_datatype() target_bits = odt.bitwidth() packed_bits = self.get_outstream_width() @@ -466,6 +657,12 @@ def execute_node(self, context, graph): def global_includes(self): self.code_gen_dict["$GLOBALS$"] = ['#include "weights.hpp"'] self.code_gen_dict["$GLOBALS$"] += ['#include "activations.hpp"'] + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode not in ["const", "decoupled", "external"]: + raise Exception( + """Please set mem_mode to "const", "decoupled", or "external", + currently no other parameter value is supported!""" + ) if self.calc_tmem() != 0: self.code_gen_dict["$GLOBALS$"] += ['#include "thresh.h"'] @@ -474,6 +671,8 @@ def defines(self, var): numReps = 1 * dim_h * dim_w k_h, k_w = self.get_nodeattr("Kernel") innerProdDim = k_h * k_w + mem_mode = self.get_nodeattr("mem_mode") + self.code_gen_dict["$DEFINES$"] = [ """#define Channels1 {}\n #define InnerProdDim {}\n #define SIMD1 1\n #define PE1 {}\n #define numReps {}""".format( @@ -483,6 +682,11 @@ def defines(self, var): numReps, ) ] + if mem_mode == "decoupled" or mem_mode == "external": + wdt = self.get_weight_datatype() + self.code_gen_dict["$DEFINES$"].append( + "#define WP1 {}\n".format(wdt.bitwidth()) + ) def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -500,7 +704,23 @@ def read_npy_data(self): % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) ) + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode == "decoupled" or mem_mode == "external": + wdt = self.get_weight_datatype() + elem_bits = wdt.bitwidth() + packed_bits = self.get_weightstream_width() + packed_hls_type = "ap_uint<%d>" % packed_bits + 
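+            # weights.npy is the stream written by generate_params() for
+            # decoupled/external mode and is replayed here for cppsim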
elem_hls_type = wdt.get_hls_datatype_str() + npy_type = "float" + npy_in = "%s/weights.npy" % code_gen_dir + + self.code_gen_dict["$READNPYDATA$"].append( + 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, numReps);' + % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + ) + def strm_decl(self): + mem_mode = self.get_nodeattr("mem_mode") self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) @@ -508,8 +728,15 @@ def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream> out ("out");'.format(self.get_outstream_width()) ) + if mem_mode == "decoupled" or mem_mode == "external": + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream> weights ("weights");'.format( + self.get_weightstream_width() + ) + ) def docompute(self): + mem_mode = self.get_nodeattr("mem_mode") map_to_hls_mult_style = { "auto": "ap_resource_dflt()", "lut": "ap_resource_lut()", @@ -521,16 +748,42 @@ def docompute(self): threshs = "PassThroughActivation<%s>()" % odtype_hls_str else: threshs = "threshs" - self.code_gen_dict["$DOCOMPUTE$"] = [ - """Vector_Vector_Activate_Batch - (in0, out, weights, {}, numReps, {});""".format( - tmpl_args["TSrcI"], - tmpl_args["TDstI"], - tmpl_args["TWeightI"], - threshs, - map_to_hls_mult_style[self.get_nodeattr("resType")], + + if mem_mode == "const": + self.code_gen_dict["$DOCOMPUTE$"] = [ + """Vector_Vector_Activate_Batch + (in0, out, weights, {}, numReps, {});""".format( + tmpl_args["TSrcI"], + tmpl_args["TDstI"], + tmpl_args["TWeightI"], + threshs, + map_to_hls_mult_style[self.get_nodeattr("resType")], + ) + ] + elif mem_mode == "decoupled" or mem_mode == "external": + wdt = self.get_weight_datatype() + if wdt == DataType["BIPOLAR"]: + export_wdt = DataType["BINARY"] + else: + export_wdt = wdt + wdtype_hls_str = export_wdt.get_hls_datatype_str() + self.code_gen_dict["$DOCOMPUTE$"] = [ + """{} + (in0, out, weights, {}, numReps, {});""".format( + "Vector_Vector_Activate_Stream_Batch", + tmpl_args["TSrcI"], + tmpl_args["TDstI"], + tmpl_args["TWeightI"], + wdtype_hls_str, + threshs, + map_to_hls_mult_style[self.get_nodeattr("resType")], + ) + ] + else: + raise Exception( + """Please set mem_mode to "const", "decoupled", or "external", + currently no other parameter value is supported!""" ) - ] def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -561,17 +814,38 @@ def save_as_npy(self): self.code_gen_dict["$SAVEASCNPY$"] = [] def blackboxfunction(self): - self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out - )""".format( - self.onnx_node.name, - self.get_instream_width(), - self.get_outstream_width(), + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode == "const": + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void {}(hls::stream> &in0, + hls::stream> &out + )""".format( + self.onnx_node.name, + self.get_instream_width(), + self.get_outstream_width(), + ) + ] + elif mem_mode == "decoupled" or mem_mode == "external": + self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ + """void {}( + hls::stream> &in0, + hls::stream> &weights, + hls::stream> &out + )""".format( + self.onnx_node.name, + self.get_instream_width(), + self.get_weightstream_width(), + self.get_outstream_width(), + ) + ] + else: + raise Exception( + """Please set mem_mode to "const" or "decoupled", currently no other + parameter value is supported!""" ) - ] def pragmas(self): + mem_mode = 
self.get_nodeattr("mem_mode") self.code_gen_dict["$PRAGMAS$"] = [ "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() ] @@ -593,12 +867,30 @@ def pragmas(self): "#pragma HLS INTERFACE ap_ctrl_none port=return" ) - self.code_gen_dict["$PRAGMAS$"].append('#include "params.h"') - # the weight tensor is ap_uint [PE][WMEM] - # partition for parallel access along the PE dimension (dim 1) - self.code_gen_dict["$PRAGMAS$"].append( - ("#pragma HLS ARRAY_PARTITION variable=weights.m_weights " "complete dim=1") - ) + if mem_mode == "const": + self.code_gen_dict["$PRAGMAS$"].append('#include "params.h"') + # the weight tensor is ap_uint [PE][WMEM] + # partition for parallel access along the PE dimension (dim 1) + self.code_gen_dict["$PRAGMAS$"].append( + ( + "#pragma HLS ARRAY_PARTITION variable=weights.m_weights " + "complete dim=1" + ) + ) + elif mem_mode == "decoupled" or mem_mode == "external": + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS INTERFACE axis port=weights name=weights_" + + self.hls_sname() + ) + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS stream depth=8 variable=weights" + ) + else: + raise Exception( + """Please set mem_mode to "const", "decoupled", or external, + currently no other parameter value is supported!""" + ) + if self.calc_tmem() != 0: # TODO find a better way of checking for no pregenerated thresholds self.code_gen_dict["$PRAGMAS$"].append( @@ -614,6 +906,157 @@ def pragmas(self): ) ) + def get_verilog_top_module_intf_names(self): + intf_names = super().get_verilog_top_module_intf_names() + mem_mode = self.get_nodeattr("mem_mode") + sname = self.hls_sname() + if mem_mode == "external": + intf_names["s_axis"].append( + ("weights_" + sname, self.get_weightstream_width_padded()) + ) + if mem_mode == "decoupled": + # only expose axilite interface if attribute is set + runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 1 + if runtime_writable: + intf_names["axilite"] = ["s_axilite"] + return intf_names + + def code_generation_ipi(self): + cmd = [] + # add streamer if needed + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode == "decoupled": + runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 1 + if self.get_nodeattr("ram_style") == "ultra": + assert ( + runtime_writable == 1 + ), "Layer with URAM weights must have runtime_writeable_weights=1" + node_name = self.onnx_node.name + sname = self.hls_sname() + # create a hierarchy for this layer, with the same port names + clk_name = self.get_verilog_top_module_intf_names()["clk"][0] + rst_name = self.get_verilog_top_module_intf_names()["rst"][0] + dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0] + din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0] + cmd.append("create_bd_cell -type hier %s" % node_name) + cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name)) + cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) + cmd.append( + "create_bd_intf_pin -mode Master " + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" + % (node_name, dout_name) + ) + cmd.append( + "create_bd_intf_pin -mode Slave " + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name) + ) + # instantiate the hls ip + cmd.append( + "create_bd_cell -type ip -vlnv %s /%s/%s" + % (self.get_nodeattr("ip_vlnv"), node_name, node_name) + ) + # instantiate a streamer and connect it to the HLS IP + strm_vlnv = "xilinx.com:user:memstream:1.0" + strm_inst = node_name + "_wstrm" + cmd.append( + 
"create_bd_cell -type ip -vlnv %s /%s/%s" + % (strm_vlnv, node_name, strm_inst) + ) + cmd.append( + "set_property -dict [list " + "CONFIG.NSTREAMS {1} " + "CONFIG.MEM_DEPTH {%d} " + "CONFIG.MEM_WIDTH {%d} " + "CONFIG.MEM_INIT {%s} " + "CONFIG.RAM_STYLE {%s} " + "CONFIG.STRM0_DEPTH {%d} " + "CONFIG.STRM0_WIDTH {%d} " + "CONFIG.STRM0_OFFSET {0} " + "] [get_bd_cells /%s/%s]" + % ( + self.calc_wmem(), + self.get_weightstream_width_padded(), + self.get_nodeattr("code_gen_dir_ipgen") + "/", + self.get_nodeattr("ram_style"), + self.calc_wmem(), + self.get_weightstream_width_padded(), + node_name, + strm_inst, + ) + ) + cmd.append( + "connect_bd_intf_net [get_bd_intf_pins %s/%s/m_axis_0] " + "[get_bd_intf_pins %s/%s/weights_%s]" + % (node_name, strm_inst, node_name, node_name, sname) + ) + cmd.append( + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]" + % (node_name, rst_name, node_name, strm_inst) + ) + cmd.append( + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]" + % (node_name, clk_name, node_name, strm_inst) + ) + cmd.append( + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]" + % (node_name, rst_name, node_name, node_name, rst_name) + ) + cmd.append( + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]" + % (node_name, clk_name, node_name, node_name, clk_name) + ) + cmd.append( + "connect_bd_intf_net [get_bd_intf_pins %s/%s] " + "[get_bd_intf_pins %s/%s/%s]" + % (node_name, din_name, node_name, node_name, din_name) + ) + cmd.append( + "connect_bd_intf_net [get_bd_intf_pins %s/%s] " + "[get_bd_intf_pins %s/%s/%s]" + % (node_name, dout_name, node_name, node_name, dout_name) + ) + if runtime_writable: + # expose axi lite interface for writeable weights + axilite_name = self.get_verilog_top_module_intf_names()["axilite"][0] + cmd.append( + "create_bd_intf_pin -mode Slave " + "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" + % (node_name, axilite_name) + ) + cmd.append( + "connect_bd_intf_net [get_bd_intf_pins %s/%s] " + "[get_bd_intf_pins %s/%s/%s]" + % (node_name, axilite_name, node_name, strm_inst, axilite_name) + ) + # TODO calculate and pass in segment size here + cmd.append("assign_bd_address") + cmd.append("save_bd_design") + elif mem_mode == "const" or mem_mode == "external": + # base class impl sufficient for const/external modes + return super().code_generation_ipi() + else: + raise Exception("Unrecognized mem_mode for VectorVectorActivation") + return cmd + + def uram_estimation(self): + P = self.get_nodeattr("PE") + Q = 1 + wdt = self.get_weight_datatype() + W = wdt.bitwidth() + omega = self.calc_wmem() + mem_width = Q * W * P + mmode = self.get_nodeattr("mem_mode") + mstyle = self.get_nodeattr("ram_style") + if ( + (mmode == "decoupled" and mstyle != "ultra") + or (mmode == "const" and self.calc_wmem() <= 128) + or (mmode == "external") + ): + return 0 + width_multiplier = math.ceil(mem_width / 72) + depth_multiplier = math.ceil(omega / 4096) + return width_multiplier * depth_multiplier + def bram_estimation(self): """Calculates resource estimation for BRAM""" # TODO add in/out FIFO contributions @@ -624,7 +1067,13 @@ def bram_estimation(self): # assuming SDP mode RAMB18s (see UG573 Table 1-10) # since this is HLS memory, not using the full width of a BRAM # assuming memories up to 128 deep get implemented in LUTs - if self.calc_wmem() <= 128: + mmode = self.get_nodeattr("mem_mode") + mstyle = self.get_nodeattr("ram_style") + if ( + (mmode == "decoupled" and mstyle in ["distributed", "ultra"]) + or (mmode == "const" and 
self.calc_wmem() <= 128) + or (mmode == "external") + ): return 0 if W == 1: @@ -671,8 +1120,12 @@ def lut_estimation(self): c0 = 300 c1 = 1.1 c2 = 0 - if self.calc_wmem() <= 128: - c2 = P * W * math.ceil(self.calc_wmem() / 64) + mmode = self.get_nodeattr("mem_mode") + mstyle = self.get_nodeattr("ram_style") + if (mmode == "decoupled" and mstyle == "distributed") or ( + mmode == "const" and self.calc_wmem() <= 128 + ): + c2 = (P * W) * math.ceil(self.calc_wmem() / 64) # multiplication res_type = self.get_nodeattr("resType") @@ -710,6 +1163,19 @@ def dsp_estimation(self): mult_dsp = 0 return int(mult_dsp) + def get_weightstream_width(self): + """Returns weight stream width. Used only in decoupled mode.""" + if ( + self.get_nodeattr("mem_mode") == "decoupled" + or self.get_nodeattr("mem_mode") == "external" + ): + pe = self.get_nodeattr("PE") + wp = self.get_weight_datatype().bitwidth() + w_width = pe * wp + return w_width + else: + return 0 + def get_op_and_param_counts(self): k_h, k_w = self.get_nodeattr("Kernel") fm = self.get_nodeattr("Channels") From 87529826556586ab49d91dd8ad5847f28653690b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 19:07:27 +0200 Subject: [PATCH 059/628] [InferVVAU] add suport for setting mem_mode at conversion time --- .../transformation/fpgadataflow/convert_to_hls_layers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 429bc34ffc..7f3f8bff9a 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -870,6 +870,10 @@ class InferVectorVectorActivation(Transformation): a depthwise convolution. 
Any immediately following MultiThreshold layers will also be absorbed into the VVAU.""" + def __init__(self, mem_mode="const"): + super().__init__() + self.mem_mode = mem_mode + def apply(self, model): graph = model.graph node_ind = 0 @@ -970,6 +974,7 @@ def apply(self, model): ActVal=actval, noActivation=0, name="VectorVectorActivation_" + n.name, + mem_mode=self.mem_mode, ) graph.node.insert(node_ind, new_node) # remove old nodes From 206f4002821062427212cd4071ace1776347de1e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 19:25:39 +0200 Subject: [PATCH 060/628] [VVAU] binaryXnorMode support (untested) --- .../fpgadataflow/vectorvectoractivation.py | 57 +++++++++++++++++-- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 9d0a9ee520..24cb3101fa 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -96,6 +96,9 @@ def get_nodeattr_types(self): "auto", {"auto", "block", "distributed", "ultra"}, ), + # use xnor-popcount for binary weights/inputs, thus treating them + # as bipolar + "binaryXnorMode": ("i", False, 0, {0, 1}), } my_attrs.update(super().get_nodeattr_types()) return my_attrs @@ -289,13 +292,31 @@ def get_template_param_values(self): ret = dict() inp_hls_str = self.get_input_datatype().get_hls_datatype_str() out_hls_str = self.get_output_datatype().get_hls_datatype_str() + inp_is_binary = self.get_input_datatype() == DataType["BINARY"] + # out_is_binary = self.get_output_datatype() == DataType["BINARY"] + wt_is_binary = self.get_weight_datatype() == DataType["BINARY"] + bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1 + if (inp_is_binary or wt_is_binary) and (not bin_xnor_mode): + raise Exception("True binary (non-bipolar) inputs not yet supported") inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"] + # out_is_bipolar = self.get_output_datatype() == DataType["BIPOLAR"] wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"] + # reinterpret inp/wt as bipolar if bin_xnor_mode is iset + inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode) + wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode) # fill in TSrcI and TWeightI - # TODO handle bipolar inputs - if inp_is_bipolar or wt_is_bipolar: - raise Exception("VVAU node doesn't support bipolar values yet.") - else: + # TODO check these with Giulio + # TODO handle non-bipolar binary inputs + if inp_is_bipolar and wt_is_bipolar: + ret["TSrcI"] = "Recast" + ret["TWeightI"] = "Identity" + elif (not inp_is_bipolar) and wt_is_bipolar: + ret["TSrcI"] = "Slice<%s>" % inp_hls_str + ret["TWeightI"] = "Recast" + elif inp_is_bipolar and (not wt_is_bipolar): + ret["TSrcI"] = "Recast" + ret["TWeightI"] = "Identity" + elif (not inp_is_bipolar) and (not wt_is_bipolar): ret["TSrcI"] = "Slice<%s>" % inp_hls_str ret["TWeightI"] = "Identity" @@ -324,6 +345,13 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): return ret def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): + """Convert the original numpy weight matrix orig_weight_matrix into + a form suitable for passing to the hlslib call: + * ensure MH % PE == 0 + * for bipolar weights&inputs, ensure thresholds are positive + * interleave rows between PEs + * reshape into (PE, TMEM, n_thres_steps) and return + """ ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") 
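        # tmem = threshold memory depth per PE (Channels // PE); e.g. with
        # Channels=4, PE=2 this gives tmem=2, and a (4, n_steps) threshold
        # matrix is interleaved across PEs into shape (1, 2, 2, n_steps)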
tmem = self.calc_tmem() @@ -333,14 +361,33 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): ), """Threshold matrix dimension is not as expected (2).""" n_thres_steps = orig_thres_matrix.shape[1] + inp_is_bipolar = self.get_input_datatype() == DataType["BIPOLAR"] + wt_is_bipolar = self.get_weight_datatype() == DataType["BIPOLAR"] + # reinterpret inp/wt as bipolar if bin_xnor_mode is iset + inp_is_binary = self.get_input_datatype() == DataType["BINARY"] + wt_is_binary = self.get_weight_datatype() == DataType["BINARY"] + bin_xnor_mode = self.get_nodeattr("binaryXnorMode") == 1 + inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode) + wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode) + if inp_is_bipolar and wt_is_bipolar: + # ensure all thresholds are nonnegative + assert (orig_thres_matrix >= 0).all() + # ensure all thresholds are integer + assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all() ret = orig_thres_matrix # workaround for vivado_hls threshold bug - if ret[0][0] == 0: + if ret[0][0] == 0 and n_thres_steps == 1: ret = np.copy(ret) ret[0][0] = 1 warnings.warn( "Setting 0-valued first threshold to 1 to avoid vivado_hls bug" ) + # ensure channels = mh , duplicating if necessary + if ret.shape[0] == 1: + ret = np.tile(ret, (ch, 1)) + assert ( + ret.shape[0] == ch + ), "Channels of threshold matrix are not as expected (ch)" # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) assert ( From b443dbff9a7ac5daa185607dbb84c2802226cad5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 19:32:24 +0200 Subject: [PATCH 061/628] [Test] add decoupled mode tests for VVAU --- tests/fpgadataflow/test_fpgadataflow_vvau.py | 21 +++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index c48448787d..5adc9ef3db 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -75,7 +75,19 @@ def _calculate_dot_prod_range(dt_a, dt_b, len): def _make_single_vvau_modelwrapper( - W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T=None, tdt=None + W, + pe, + k_h, + k_w, + channels, + dim_h, + dim_w, + wdt, + idt, + odt, + T=None, + tdt=None, + mem_mode="const", ): in_shape = [1, dim_h, dim_w, k_h * k_w * channels] # [N, H, W, K*K*CH] out_shape = [ @@ -113,6 +125,7 @@ def _make_single_vvau_modelwrapper( weightDataType=wdt.name, outputDataType=odt.name, noActivation=no_act, + mem_mode=mem_mode, ) graph = helper.make_graph( @@ -156,13 +169,15 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("k_w", [3, 1]) # Number of input and output channels @pytest.mark.parametrize("channels", [3, 4]) +# memory mode +@pytest.mark.parametrize("mem_mode", ["const", "decoupled"]) # execution mode @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_vvau( - idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, exec_mode + idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, mem_mode, exec_mode ): if pe == "channels": pe = channels @@ -198,7 +213,7 @@ def test_fpgadataflow_vvau( tdt = DataType["INT32"] model = _make_single_vvau_modelwrapper( - W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt + W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt, mem_mode ) if exec_mode == "cppsim": From 
8c894ed550da7600758ba2fafad10867f0210242 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 22 Jul 2022 19:43:25 +0200 Subject: [PATCH 062/628] [VVAU] bugfix in weight reps for node-by-node rtlsim --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 24cb3101fa..77fed5e3ab 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -670,7 +670,9 @@ def execute_node(self, context, graph): wei = npy_to_rtlsim_input( "{}/weights.npy".format(code_gen_dir), export_wdt, wnbits ) - num_w_reps = 1 + dim_h, dim_w = self.get_nodeattr("Dim") + num_w_reps = dim_h * dim_w + io_dict = { "inputs": {"in0": inp, "weights": wei * num_w_reps}, "outputs": {"out": []}, From ef41a94d6f63dff613f4a51cc9065e8fa047f054 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 25 Jul 2022 09:17:21 +0100 Subject: [PATCH 063/628] [actions] Remove all other commands from quicktest workflow but shell script --- .github/workflows/quicktest-dev-pr.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index 7e610425ee..ec92c84665 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -17,17 +17,6 @@ jobs: - name: checkout uses: actions/checkout@v2 - - name: set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: cache Docker layers - uses: actions/cache@v2 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx- - - name: DockerRunQuicktest run: | export FINN_ROOT=$(pwd) From 1bf72b169e50b4677a088846545ccd9c059247cb Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 25 Jul 2022 10:11:33 +0100 Subject: [PATCH 064/628] [tests] temporarily exclude mobilenet qonnx test --- tests/transformation/test_qonnx_to_finn.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 43055f6704..7e438b4b8b 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -94,6 +94,9 @@ def analysis_testing_for_no_quant_nodes(model): @pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("model_name", ["TFC", "SFC", "LFC", "CNV", "mobilenet"]) def test_QONNX_to_FINN(model_name, wbits, abits): + if model_name == "mobilenet": + pytest.xfail("MobileNet test is temporarily excluded from QONNX testing.") + if wbits > abits: pytest.skip("No wbits > abits cases at the moment") if model_name == "LFC" and wbits == 2 and abits == 2: From 005aea52bcf26459b4a5c000147d40f6871b665d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 25 Jul 2022 11:32:04 +0200 Subject: [PATCH 065/628] [Deps] update hlslib --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..fe585b2190 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" 
+HLSLIB_COMMIT="8d582d0992b17866447a943fa93301bc15f92104" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From 07f43817846274235c37d537e09474b4b02f90cf Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 25 Jul 2022 18:27:14 +0200 Subject: [PATCH 066/628] [Downsample] reflect changes to HLS kernel name change now with 2D --- src/finn/custom_op/fpgadataflow/downsampler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index da29a524b6..b8645e04ef 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -36,7 +36,7 @@ class DownSampler(HLSCustomOp): - """Corresponds to finn-hlslib ConvolutionInputGenerator_kernel1 function. + """Corresponds to finn-hlslib ConvolutionInputGenerator_*_kernel1 function. Basically performs a down sampling of the image removing rows and columns.""" def __init__(self, onnx_node): @@ -205,7 +205,7 @@ def strm_decl(self): def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ - """ConvolutionInputGenerator_kernel1 (in0, out, numReps);""" ] From 56d01b1a11d709f69d97b2867f041411a90ae02e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 25 Jul 2022 18:29:45 +0200 Subject: [PATCH 067/628] [Test] new test for Downsampler --- .../test_fpgadataflow_downsampler.py | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 tests/fpgadataflow/test_fpgadataflow_downsampler.py diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py new file mode 100644 index 0000000000..8b5c5c77fd --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -0,0 +1,140 @@ +# Copyright (c) 2022, Xilinx, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
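+
+# The DownSampler op under test corresponds to a kernel=1, stride>1 convolution
+# input generator, i.e. plain spatial subsampling. A rough NumPy reference
+# (illustrative only, NHWC layout, 2D case):
+#   y = x[:, ::stride, ::stride, :]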
+ +import pytest + +import numpy as np +import onnx.parser as oprs +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.general.im2col import compute_conv_output_dim +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul +from qonnx.util.basic import gen_finn_dt_tensor + +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer +from finn.core.onnx_exec import execute_onnx +from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode + + +def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0): + np.random.seed(0) + out_dim = compute_conv_output_dim(in_dim, k, stride, 2 * pad_half) + ifm = 8 + ofm = 16 + if is_1d: + shape_in = [1, ifm, in_dim, 1] + shape_out = [1, ofm, out_dim, 1] + shape_k = [k, 1] + shape_s = [stride, 1] + shape_p = [pad_half, 0, pad_half, 0] + else: + shape_in = [1, ifm, in_dim, in_dim] + shape_out = [1, ofm, out_dim, out_dim] + shape_k = [k, k] + shape_s = [stride, stride] + shape_p = [pad_half, pad_half, pad_half, pad_half] + shape_w = [ofm, ifm] + shape_k + + sstr_in = str(shape_in) + sstr_out = str(shape_out) + sstr_k = str(shape_k) + sstr_s = str(shape_s) + sstr_p = str(shape_p) + sstr_w = str(shape_w) + + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{sstr_in} in0) => (float{sstr_out} out0) + < + float{sstr_w} param_w_conv0 + > + {{ + out0 = Conv(in0, param_w_conv0) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) + model.set_tensor_datatype("in0", dt_in) + model.set_tensor_datatype("param_w_conv0", dt_w) + model.set_initializer("param_w_conv0", gen_finn_dt_tensor(dt_w, shape_w)) + model = model.transform(InferShapes()) + model = model.transform(LowerConvsToMatMul()) + model = model.transform(InferShapes()) + return model + + +@pytest.mark.parametrize("is_1d", [False]) +@pytest.mark.parametrize("exec_mode", ["cppsim"]) +def test_fpgadataflow_downsampler(is_1d, exec_mode): + in_dim = 32 + k = 1 + stride = 2 + dt_in = DataType["UINT8"] + dt_w = DataType["INT2"] + model = build_model(is_1d, in_dim, k, stride, dt_in, dt_w) + inp = gen_finn_dt_tensor(dt_in, model.get_tensor_shape("in0")) + idict = {"in0": inp} + y_expected = execute_onnx(model, idict)["out0"] + model = model.transform(to_hls.InferConvInpGen()) + if exec_mode == "cppsim": + model = model.transform(SetExecMode("cppsim")) + model = model.transform(PrepareCppSim()) + model = model.transform(CompileCppSim()) + elif exec_mode == "rtlsim": + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + else: + raise Exception("Unknown exec_mode") + y_produced = execute_onnx(model, idict)["out0"] + assert len(model.get_nodes_by_op_type("DownSampler")) == 1 + 
assert (y_produced == y_expected).all() + + if exec_mode == "rtlsim": + node = model.get_nodes_by_op_type("DownSampler")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + assert exp_cycles != 0 From b9ac818352efa713d15b101c073e1ade7e57e470 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 26 Jul 2022 10:08:22 +0200 Subject: [PATCH 068/628] [Downsample] add 1D downsample support + conversion --- .../custom_op/fpgadataflow/downsampler.py | 31 ++++++++++++++++--- .../fpgadataflow/convert_to_hls_layers.py | 23 ++++++++------ 2 files changed, 40 insertions(+), 14 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index b8645e04ef..e9009e1856 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -55,6 +55,10 @@ def get_nodeattr_types(self): "inputDataType": ("s", True, ""), # Batch size "numInputVectors": ("i", False, 1), + # 1D (True) or 2D (False) spatial data + "is1D": ("i", False, 0), + # for 1D only: (D, 1) (True) or (1, D) dims + "is1D_unitx": ("i", False, 1), } my_attrs.update(super().get_nodeattr_types()) return my_attrs @@ -66,25 +70,43 @@ def get_downsampled_odim(self): return int(np.floor((idim - 1) / stride) + 1) def get_exp_cycles(self): + is_1D = self.get_nodeattr("is1D") idim = self.get_nodeattr("ImgDim") + idim_total = idim if is_1D else idim * idim channels = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") batch_size = self.get_nodeattr("numInputVectors") - exp_cycles = channels / simd * batch_size * idim * idim + exp_cycles = channels / simd * batch_size * idim_total return int(exp_cycles) def get_normal_input_shape(self): + is_1D = self.get_nodeattr("is1D") + is_1D_unitx = self.get_nodeattr("is1D_unitx") idim = self.get_nodeattr("ImgDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") - ishape = (batch, idim, idim, num_ch) + if is_1D: + if is_1D_unitx: + ishape = (batch, idim, 1, num_ch) + else: + ishape = (batch, 1, idim, num_ch) + else: + ishape = (batch, idim, idim, num_ch) return ishape def get_normal_output_shape(self): + is_1D = self.get_nodeattr("is1D") + is_1D_unitx = self.get_nodeattr("is1D_unitx") odim = self.get_downsampled_odim() num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") - oshape = (batch, odim, odim, num_ch) + if is_1D: + if is_1D_unitx: + oshape = (batch, odim, 1, num_ch) + else: + oshape = (batch, 1, odim, num_ch) + else: + oshape = (batch, odim, odim, num_ch) return oshape def get_folded_input_shape(self): @@ -204,8 +226,9 @@ def strm_decl(self): ) def docompute(self): + dim_var = "1D" if (self.get_nodeattr("is1D") == 1) else "2D" self.code_gen_dict["$DOCOMPUTE$"] = [ - """ConvolutionInputGenerator_2D_kernel1 (in0, out, numReps);""" ] diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 429bc34ffc..9059e023a0 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -138,17 +138,18 @@ def apply(self, model): ) if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: - assert is_square_image, ( - "%s : DownSampler currently only supports square input images." 
- % n.name - ) - assert is_equal_stride, ( - """%s : DownSampler currently only supports equal stride value - along different axes.""" - % n.name + downsample_1D = (ifm_dim_h == 1) or (ifm_dim_w == 1) + is1D_unitx = ifm_dim_w == 1 + downsample_2D = ( + (not downsample_1D) and is_square_image and is_equal_stride ) - ConvInpGen_idim = ConvInpGen_idim_h - stride = stride_h + if not (downsample_1D or downsample_2D): + warnings.warn( + f"Couldn't infer Downsample from {n.name}, check config." + ) + continue + ConvInpGen_idim = max(ConvInpGen_idim_h, ConvInpGen_idim_w) + stride = max(stride_h, stride_w) # create DownSampler node ConvInpGen_node = helper.make_node( "DownSampler", @@ -162,6 +163,8 @@ def apply(self, model): Stride=stride, inputDataType=dt.name, name="DownSampler_" + n.name, + is1D=downsample_1D, + is1D_unitx=is1D_unitx, ) graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) else: From 94c184587b278c546fe5e4fa1bc2566076191a88 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 26 Jul 2022 11:34:59 +0100 Subject: [PATCH 069/628] [Tests] Remove saving of .onnx files in tests --- tests/fpgadataflow/test_fpgadataflow_concat.py | 1 - .../streamline/test_absorb_opposite_transposes.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_concat.py b/tests/fpgadataflow/test_fpgadataflow_concat.py index dddc470ec2..8488a34dff 100644 --- a/tests/fpgadataflow/test_fpgadataflow_concat.py +++ b/tests/fpgadataflow/test_fpgadataflow_concat.py @@ -144,6 +144,5 @@ def test_fpgadataflow_concat_stitchedip(): ) model.set_metadata_prop("exec_mode", "rtlsim") model.set_metadata_prop("rtlsim_trace", "trace.vcd") - model.save("dbg.onnx") ret_sim = execute_onnx(model, inp_dict) assert (exp_out == ret_sim[oname]).all() diff --git a/tests/transformation/streamline/test_absorb_opposite_transposes.py b/tests/transformation/streamline/test_absorb_opposite_transposes.py index 88cbd5657e..6d8d2b9f0c 100644 --- a/tests/transformation/streamline/test_absorb_opposite_transposes.py +++ b/tests/transformation/streamline/test_absorb_opposite_transposes.py @@ -72,10 +72,8 @@ def test_absorb_opposite_transposes(): model = oprs.parse_model(input) model = ModelWrapper(model) model = model.transform(InferShapes()) - model.save("dbg.onnx") new_model = model.transform(AbsorbConsecutiveTransposes()) new_model = new_model.transform(InferShapes()) - new_model.save("newdbg.onnx") inp_dict = {"top_in": np.random.rand(*shp).astype(np.float32)} assert ox.compare_execution(model, model, inp_dict) assert len(new_model.graph.node) == 6 From 6af98efea5681026d2d158fa138f1bb4e487afaa Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 26 Jul 2022 16:39:28 +0200 Subject: [PATCH 070/628] [Test] broaden scope of Downsampler tests --- .../test_fpgadataflow_downsampler.py | 41 +++++++++++++------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py index 8b5c5c77fd..11e5cca284 100644 --- a/tests/fpgadataflow/test_fpgadataflow_downsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -50,17 +50,24 @@ from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0): +def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=False): np.random.seed(0) out_dim = compute_conv_output_dim(in_dim, k, stride, 2 * pad_half) ifm = 8 ofm = 16 if is_1d: - shape_in = [1, ifm, in_dim, 1] - 
shape_out = [1, ofm, out_dim, 1] - shape_k = [k, 1] - shape_s = [stride, 1] - shape_p = [pad_half, 0, pad_half, 0] + if flip_1d: + shape_in = [1, ifm, 1, in_dim] + shape_out = [1, ofm, 1, out_dim] + shape_k = [1, k] + shape_s = [1, stride] + shape_p = [0, pad_half, 0, pad_half] + else: + shape_in = [1, ifm, in_dim, 1] + shape_out = [1, ofm, out_dim, 1] + shape_k = [k, 1] + shape_s = [stride, 1] + shape_p = [pad_half, 0, pad_half, 0] else: shape_in = [1, ifm, in_dim, in_dim] shape_out = [1, ofm, out_dim, out_dim] @@ -101,19 +108,25 @@ def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0): return model -@pytest.mark.parametrize("is_1d", [False]) -@pytest.mark.parametrize("exec_mode", ["cppsim"]) -def test_fpgadataflow_downsampler(is_1d, exec_mode): +@pytest.mark.parametrize("is_1d", [True, False]) +@pytest.mark.parametrize("flip_1d", [True, False]) +@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +def test_fpgadataflow_downsampler(is_1d, flip_1d, exec_mode): + if flip_1d and not is_1d: + pytest.skip("flip_1d only applicable for is_1d") in_dim = 32 k = 1 stride = 2 dt_in = DataType["UINT8"] dt_w = DataType["INT2"] - model = build_model(is_1d, in_dim, k, stride, dt_in, dt_w) + model = build_model( + is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=flip_1d + ) inp = gen_finn_dt_tensor(dt_in, model.get_tensor_shape("in0")) idict = {"in0": inp} y_expected = execute_onnx(model, idict)["out0"] model = model.transform(to_hls.InferConvInpGen()) + assert len(model.get_nodes_by_op_type("DownSampler")) == 1 if exec_mode == "cppsim": model = model.transform(SetExecMode("cppsim")) model = model.transform(PrepareCppSim()) @@ -127,14 +140,18 @@ def test_fpgadataflow_downsampler(is_1d, exec_mode): else: raise Exception("Unknown exec_mode") y_produced = execute_onnx(model, idict)["out0"] - assert len(model.get_nodes_by_op_type("DownSampler")) == 1 assert (y_produced == y_expected).all() - if exec_mode == "rtlsim": node = model.get_nodes_by_op_type("DownSampler")[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = model.analysis(exp_cycles_per_layer) exp_cycles = exp_cycles_dict[node.name] + # small adjustment for 2D testcase due to how rtlsim works: + # output is finished before all pixels are read, since last + # row is dropped (rtlsim finishes based on # of expected + # pixels) + if not is_1d: + exp_cycles = exp_cycles - in_dim assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) assert exp_cycles != 0 From ae61af04eefb6ca5be8e3631ea8606ab03eb04ea Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 26 Jul 2022 17:14:55 +0200 Subject: [PATCH 071/628] [Test] add missing test markings to Downsample --- tests/fpgadataflow/test_fpgadataflow_downsampler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py index 11e5cca284..e815a3d800 100644 --- a/tests/fpgadataflow/test_fpgadataflow_downsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -111,6 +111,8 @@ def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=False @pytest.mark.parametrize("is_1d", [True, False]) @pytest.mark.parametrize("flip_1d", [True, False]) @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +@pytest.mark.slow +@pytest.mark.vivado def test_fpgadataflow_downsampler(is_1d, flip_1d, exec_mode): if flip_1d and not is_1d: pytest.skip("flip_1d only applicable for is_1d") From 
ee76eebb7e8839337d57fb0aa194e20e381b23c4 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 27 Jul 2022 09:59:16 +0100 Subject: [PATCH 072/628] [actions] Update precommit version to work with Act Signed-off-by: Fionn O'Donohoe --- .github/workflows/pre-commit.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 2fbb9265be..20f5b48f7a 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -13,10 +13,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 - name: Run Lint - uses: pre-commit/action@v2.0.0 + uses: pre-commit/action@v3.0.0 From f63738174eaf40b42c400aa739191dba477f4529 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Thu, 28 Jul 2022 20:01:42 +0100 Subject: [PATCH 073/628] [floorplan] Now attributes different partition ids to nodes that need an axilite interface --- src/finn/transformation/fpgadataflow/floorplan.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 6792017223..4871a86943 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -151,6 +151,7 @@ def apply(self, model): node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 continue + elif not ( node.op_type == "MatrixVectorActivation" and node_inst.get_nodeattr("mem_mode") is not None @@ -164,9 +165,12 @@ def apply(self, model): for pre_node in pre_nodes: pre_inst = getCustomOp(pre_node) pre_slr = pre_inst.get_nodeattr("slr") + axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] if node_slr == pre_slr: partition_id = pre_inst.get_nodeattr("partition_id") node_inst.set_nodeattr("partition_id", partition_id) + if len(axilite_intf_name) != '0': + partition_cnt += 1 break else: # no matching, new partition From 4a0cc01596c0e72856bf88e7d625257302d3d4c8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 28 Jul 2022 18:42:39 -0400 Subject: [PATCH 074/628] [HLSCustomOp] add new attrs for FIFO sizing --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index b202e95a28..f73868df4f 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -111,6 +111,8 @@ def get_nodeattr_types(self): "inFIFODepth": ("i", False, 2), "outFIFODepth": ("i", False, 2), "output_hook": ("s", False, ""), + # characterization of stream input-output behavior per cycle + "io_characteristic": ("ints", False, []), } def get_verilog_top_module_name(self): From fe2ea73cd84644c14ca33a4e911a5c3d58215bca Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 28 Jul 2022 18:44:33 -0400 Subject: [PATCH 075/628] [FIFO] Introduce new DeriveCharacteristic transform --- .../fpgadataflow/derive_characteristic.py | 141 ++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 src/finn/transformation/fpgadataflow/derive_characteristic.py diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py new file mode 100644 index 0000000000..9029d3ed5f --- /dev/null +++ 
b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -0,0 +1,141 @@ +# Copyright (c) 2022, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +import numpy as np +import qonnx.custom_op.registry as registry +from pyverilator.axi_utils import _read_signal, rtlsim_multi_io +from qonnx.transformation.base import NodeLocalTransformation + +from finn.util.fpgadataflow import is_fpgadataflow_node + + +class DeriveCharacteristic(NodeLocalTransformation): + """For each node in the graph, run rtlsim to obtain the i/o + characteristic function for FIFO sizing and set the attribute. + It is assumed that the PrepareRTLSim transformation was already + called on the graph. + + This transformation performs rtlsim for each node, so it will run for + some time (minutes to hours depending on configuration). + + * period (int) desired period over which the characteristic function + will be derived. + + * num_workers (int or None) number of parallel workers, see documentation in + NodeLocalTransformation for more details. + """ + + def __init__(self, period, num_workers=None): + super().__init__(num_workers=num_workers) + self.period = period + + def applyNodeLocal(self, node): + op_type = node.op_type + if is_fpgadataflow_node(node) is True: + try: + # lookup op_type in registry of CustomOps + inst = registry.getCustomOp(node) + # TODO move into HLSCustomOp? 
+ # ideally, call execute with rtlsim mode and + # specify some way of setting up a hook + # ensure rtlsim is ready + assert inst.get_nodeattr("rtlsim_so") != "", ( + "rtlsim not ready for " + node.name + ) + # restricted to single input and output nodes for now + multistream_optypes = [ + "AddStreams_Batch", + "DuplicateStreams_Batch", + "StreamingConcat", + ] + assert ( + node.op_type not in multistream_optypes + ), f"{node.name} unsupported" + exp_cycles = inst.get_exp_cycles() + n_inps = np.prod(inst.get_folded_input_shape()[:-1]) + n_outs = np.prod(inst.get_folded_output_shape()[:-1]) + if exp_cycles == 0: + # try to come up with an optimistic estimate + exp_cycles = min(n_inps, n_outs) + assert ( + self.period < exp_cycles + ), "Period %d too short to characterize %s" % (self.period, node.name) + sim = inst.get_rtlsim + # signal name + sname = "_" + inst.hls_sname() + "_" + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + # "weights": wei * num_w_reps + }, + "outputs": {"out": []}, + } + + txns_in = [] + txns_out = [] + + def monitor_txns(sim_obj): + for inp in io_dict["inputs"]: + in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 + in_valid = _read_signal(sim, inp + sname + "TVALID") == 1 + if in_ready and in_valid: + txns_in.append(1) + else: + txns_in.append(0) + for outp in io_dict["outputs"]: + if ( + _read_signal(sim, outp + sname + "TREADY") == 1 + and _read_signal(sim, outp + sname + "TVALID") == 1 + ): + txns_out.append(1) + else: + txns_out.append(0) + + total_cycle_count = rtlsim_multi_io( + sim, + io_dict, + n_outs, + sname=sname, + liveness_threshold=self.period, + hook_preclk=monitor_txns, + ) + assert total_cycle_count <= self.period + if len(txns_in) < self.period: + txns_in += [0 for x in range(self.period - len(txns_in))] + if len(txns_out) < self.period: + txns_out += [0 for x in range(self.period - len(txns_out))] + io_characteristic = txns_in + txns_out + inst.set_nodeattr("io_characteristic", io_characteristic) + inst.set_nodeattr("io_characteristic_period", self.period) + except KeyError: + # exception if op_type is not supported + raise Exception( + "Custom op_type %s is currently not supported." 
% op_type + ) + return (node, False) From 8fbb609ca31bd439d319555fae44214a532d3dab Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 29 Jul 2022 06:01:31 -0400 Subject: [PATCH 076/628] [FIFO] bugfix in DeriveCharacteristic --- .../fpgadataflow/derive_characteristic.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 9029d3ed5f..834fd092a7 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -29,7 +29,7 @@ import numpy as np import qonnx.custom_op.registry as registry -from pyverilator.axi_utils import _read_signal, rtlsim_multi_io +from pyverilator.util.axi_utils import _read_signal, rtlsim_multi_io from qonnx.transformation.base import NodeLocalTransformation from finn.util.fpgadataflow import is_fpgadataflow_node @@ -84,9 +84,13 @@ def applyNodeLocal(self, node): # try to come up with an optimistic estimate exp_cycles = min(n_inps, n_outs) assert ( - self.period < exp_cycles - ), "Period %d too short to characterize %s" % (self.period, node.name) - sim = inst.get_rtlsim + exp_cycles < self.period + ), "Period %d too short to characterize %s : expects min %d cycles" % ( + self.period, + node.name, + exp_cycles, + ) + sim = inst.get_rtlsim() # signal name sname = "_" + inst.hls_sname() + "_" io_dict = { From 77f319121d80d22d3874ef0d9788bda1d615e22f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 29 Jul 2022 06:03:18 -0400 Subject: [PATCH 077/628] [Test] add test_fclayer_fifocharacterize --- tests/fpgadataflow/test_fpgadataflow_mvau.py | 53 ++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index d1895a1267..5050bf245a 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -42,6 +42,7 @@ from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.analysis.fpgadataflow.hls_synth_res_estimation import hls_synth_res_estimation from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim +from finn.transformation.fpgadataflow.derive_characteristic import DeriveCharacteristic from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP @@ -417,3 +418,55 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( exp_cycles = exp_cycles_dict[node.name] assert np.isclose(exp_cycles, cycles_rtlsim, atol=15) assert exp_cycles != 0 + + +# mem_mode: const or decoupled +@pytest.mark.parametrize("mem_mode", ["const"]) +# activation: None or DataType +@pytest.mark.parametrize("act", [DataType["INT4"]]) +# weight datatype +@pytest.mark.parametrize("wdt", [DataType["INT4"]]) +# input datatype +@pytest.mark.parametrize("idt", [DataType["INT4"]]) +# neuron folding, -1 is maximum possible +@pytest.mark.parametrize("nf", [8]) +# synapse folding, -1 is maximum possible +@pytest.mark.parametrize("sf", [8]) +# HLS matrix width (input features) +@pytest.mark.parametrize("mw", [128]) +# HLS matrix height (output features) +@pytest.mark.parametrize("mh", [128]) +@pytest.mark.fpgadataflow +@pytest.mark.vivado +def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): + if nf 
== -1: + nf = mh + if sf == -1: + sf = mw + pe = mh // nf + simd = mw // sf + assert mh % pe == 0 + assert mw % sf == 0 + # generate weights + W = gen_finn_dt_tensor(wdt, (mw, mh)) + + # no activation, produce accumulators + T = None + tdt = None + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + odt = DataType["UINT32"] + else: + odt = DataType["INT32"] + + model = make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T, tdt) + for node in model.graph.node: + # lookup op_type in registry of CustomOps + inst = getCustomOp(node) + inst.set_nodeattr("mem_mode", mem_mode) + + model = model.transform(SetExecMode("rtlsim")) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + model = model.transform(DeriveCharacteristic(1000)) From 98aeb90c52fe3e1bb6721fb4e70d515ec23ce536 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Mon, 1 Aug 2022 17:32:10 +0100 Subject: [PATCH 078/628] Lookup through external memory with input bounds checking. --- custom_hls/lookup.hpp | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/custom_hls/lookup.hpp b/custom_hls/lookup.hpp index 3001f6613e..dac586c38c 100644 --- a/custom_hls/lookup.hpp +++ b/custom_hls/lookup.hpp @@ -26,14 +26,15 @@ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - *******************************************************************************/ +*******************************************************************************/ +#ifndef LOOKUP_HPP +#define LOOKUP_HPP #include #include -#ifndef LOOKUP_HPP -#define LOOKUP_HPP +#include "utils.hpp" + template < unsigned NumEmbeddings, @@ -57,4 +58,37 @@ void StreamingLookup( } } +/** + * Lookup implementation over a table stored in AXI-accessible memory. + */ +template < + unsigned EmbeddingSize, // Number of memory words per embedding + unsigned EmbeddingAlign = clog2(EmbeddingSize), // Alignment of entries = number of word index bits + typename T_SRC, + typename T_DST +> +void StreamingLookup_ext( + hls::stream &in0, + hls::stream &out, + T_DST const *const mem, + unsigned const size, + unsigned &oob_count +) { +#pragma HLS pipeline II=EmbeddingSize+8 style=flp + if(!in0.empty()) { + T_SRC const x = in0.read(); + + // Map out-of-bounds inputs to an offset of zero and increment counter + bool const oob = x >= T_SRC(size); + ap_uint const ofs = + ((oob? 
T_SRC(0) : x), ap_uint(0)); + oob_count += oob; + + // Stream lookup data (burst inferred) + for(unsigned i = 0; i < EmbeddingSize; i++) { +#pragma HLS pipeline II=1 style=flp + out.write(mem[ofs+i]); + } + } +} #endif From e7fd0214f2b4fe5acd7760dcb22e58446bb63dce Mon Sep 17 00:00:00 2001 From: Le Blevec Date: Wed, 3 Aug 2022 16:22:21 +0100 Subject: [PATCH 079/628] [Floorplan] temporary commit --- src/finn/transformation/fpgadataflow/floorplan.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 4871a86943..00e613b508 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -167,11 +167,14 @@ def apply(self, model): pre_slr = pre_inst.get_nodeattr("slr") axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] if node_slr == pre_slr: - partition_id = pre_inst.get_nodeattr("partition_id") - node_inst.set_nodeattr("partition_id", partition_id) if len(axilite_intf_name) != '0': + node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 - break + else: + partition_id = pre_inst.get_nodeattr("partition_id") + node_inst.set_nodeattr("partition_id", partition_id) + + else: # no matching, new partition node_inst.set_nodeattr("partition_id", partition_cnt) From affff430cff803ee2881e6db1dab9e5c982cd5ff Mon Sep 17 00:00:00 2001 From: Le Blevec Date: Wed, 3 Aug 2022 16:26:42 +0100 Subject: [PATCH 080/628] [Floorplan] temporary commit --- src/finn/transformation/fpgadataflow/floorplan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 00e613b508..564de40db9 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -165,7 +165,7 @@ def apply(self, model): for pre_node in pre_nodes: pre_inst = getCustomOp(pre_node) pre_slr = pre_inst.get_nodeattr("slr") - axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] + axilite_intf_name = node_inst.get_verilog_top_module_intf_names()["axilite"] if node_slr == pre_slr: if len(axilite_intf_name) != '0': node_inst.set_nodeattr("partition_id", partition_cnt) From 944c858f70ee807f057bc0a6daffc14450e65716 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Thu, 4 Aug 2022 10:07:00 +0100 Subject: [PATCH 081/628] [Floorplan] Hot fix to correct issues in VitisBuild when using multiple axilite interfaces. 
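The intent is that a node whose predecessor exposes an AXI-lite interface
always opens a fresh partition, so that a single floorplan partition never
bundles several AXI-lite endpoints (which caused the VitisBuild issues).
A simplified sketch of the per-node rule, assuming producer and consumer
sit on the same SLR (names follow the diff below):

    axilite = pre_inst.get_verilog_top_module_intf_names()["axilite"]
    if len(axilite) != 0:
        # predecessor carries an AXI-lite interface -> start a new partition
        node_inst.set_nodeattr("partition_id", partition_cnt)
        partition_cnt += 1
    else:
        # otherwise the node joins the predecessor's partition
        partition_id = pre_inst.get_nodeattr("partition_id")
        node_inst.set_nodeattr("partition_id", partition_id)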
--- src/finn/transformation/fpgadataflow/floorplan.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 564de40db9..c3a0567e31 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -165,14 +165,15 @@ def apply(self, model): for pre_node in pre_nodes: pre_inst = getCustomOp(pre_node) pre_slr = pre_inst.get_nodeattr("slr") - axilite_intf_name = node_inst.get_verilog_top_module_intf_names()["axilite"] if node_slr == pre_slr: - if len(axilite_intf_name) != '0': + axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] + if len(axilite_intf_name) != 0: node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 else: partition_id = pre_inst.get_nodeattr("partition_id") node_inst.set_nodeattr("partition_id", partition_id) + break else: From 51defdae6bd1d12bdf49e71ca037efe290824e19 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Thu, 4 Aug 2022 10:25:43 +0100 Subject: [PATCH 082/628] [Floorplan] Hot fix to correct issues in VitisBuild when using multiple axilite interfaces. --- src/finn/transformation/fpgadataflow/floorplan.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index c3a0567e31..549b94d9f2 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -151,7 +151,7 @@ def apply(self, model): node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 continue - + elif not ( node.op_type == "MatrixVectorActivation" and node_inst.get_nodeattr("mem_mode") is not None @@ -166,7 +166,9 @@ def apply(self, model): pre_inst = getCustomOp(pre_node) pre_slr = pre_inst.get_nodeattr("slr") if node_slr == pre_slr: - axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] + axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()[ + "axilite" + ] if len(axilite_intf_name) != 0: node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 @@ -175,7 +177,6 @@ def apply(self, model): node_inst.set_nodeattr("partition_id", partition_id) break - else: # no matching, new partition node_inst.set_nodeattr("partition_id", partition_cnt) From 8aad25b04f6dc75735edae7513c110127a6ca867 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 8 Aug 2022 11:14:45 +0300 Subject: [PATCH 083/628] [Deps] update QONNX to latest version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..fb00faccea 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="398a0ecfcb32407c0a3df39246cf6d2bca02886c" +QONNX_COMMIT="92184fea2dd417bc7a53c82811fef271e4833c4c" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" From 5ade63a9e31220b92aa2bc84632f412043f2278c Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 9 Aug 2022 12:51:17 +0100 Subject: [PATCH 084/628] [LookUp layer] Change hls implementation of LU external --- src/finn/custom_op/fpgadataflow/lookup.py | 25 +++++++++++------------ 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index d90fa0f05a..d9788dce61 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -159,8 +159,8 @@ def get_number_output_values(self): def global_includes(self): mem_mode = self.get_nodeattr("mem_mode") global_incls = [] + global_incls.append('#include "lookup.hpp"') if mem_mode == "const": - global_incls.append('#include "lookup.hpp"') global_incls.append('#include "embeddings.hpp"') self.code_gen_dict["$GLOBALS$"] = global_incls @@ -258,17 +258,10 @@ def docompute(self): InputType, EmbeddingType >(in0, out, embeddings);""" ] elif mem_mode == "external": - hls_impl = """ - if(!in0.empty()) { - ap_uint const base = - (in0.read(), ap_uint(0)); - for(unsigned j = 0; j < EmbeddingSize; j++) { -#pragma HLS PIPELINE II=1 - out.write(mem[base+j]); - } - } - """ - self.code_gen_dict["$DOCOMPUTE$"] = [hls_impl] + self.code_gen_dict["$DOCOMPUTE$"] = [ + """StreamingLookup_ext + (in0, out, mem, size, oob_count);""" + ] def blackboxfunction(self): mem_mode = self.get_nodeattr("mem_mode") @@ -286,7 +279,7 @@ def blackboxfunction(self): "void " + self.onnx_node.name + "(hls::stream &in0, hls::stream &out, " - + "T_DST const *const mem)" + + "T_DST const *const mem, unsigned const size, unsigned &oob_count)" ] def pragmas(self): @@ -305,6 +298,12 @@ def pragmas(self): elif mem_mode == "external": my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem") my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control") + my_pragmas.append( + "#pragma HLS_INTERFACE s_axilite port=size bundle=control" + ) + my_pragmas.append( + "#pragma HLS_INTERFACE s_axilite port=oob_count bundle=control" + ) else: raise Exception("Unrecognized mem_mode: " + mem_mode) self.code_gen_dict["$PRAGMAS$"] = my_pragmas From 860842354c3fcaad3a467c4a69eda0cfcc9df120 Mon Sep 17 00:00:00 2001 From: Le Blevec Date: Tue, 9 Aug 2022 14:32:53 +0100 Subject: [PATCH 085/628] [Prepare_IP] Fix on a bug where two IPs could have the same name and not correspond to the same object. 
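Previously a node could keep reusing a cached code_gen_dir_ipgen that was
created for a different node (e.g. after the graph changed and node names
shifted), so two distinct nodes could end up pointing at the same IP
generation directory. The guard now also checks that the cached path
actually belongs to this node; a sketch of the resulting condition,
mirroring the diff below:

    code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen")
    if (
        code_gen_dir == ""
        or not os.path.isdir(code_gen_dir)
        or str(node.name) not in code_gen_dir
    ):
        # stale or foreign directory -> generate a fresh one for this node
        code_gen_dir = make_build_dir(
            prefix="code_gen_ipgen_" + str(node.name) + "_"
        )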
--- src/finn/transformation/fpgadataflow/prepare_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/prepare_ip.py b/src/finn/transformation/fpgadataflow/prepare_ip.py index 2ebd6310f0..8bf653faf1 100644 --- a/src/finn/transformation/fpgadataflow/prepare_ip.py +++ b/src/finn/transformation/fpgadataflow/prepare_ip.py @@ -46,7 +46,7 @@ def _codegen_single_node(node, model, fpgapart, clk): # get the path of the code generation directory code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen") # ensure that there is a directory - if code_gen_dir == "" or not os.path.isdir(code_gen_dir): + if code_gen_dir == "" or not os.path.isdir(code_gen_dir) or not str(node.name) in code_gen_dir: code_gen_dir = make_build_dir( prefix="code_gen_ipgen_" + str(node.name) + "_" ) From 162d0f26709bd4913a1a53b4c2041389b466545f Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 9 Aug 2022 15:23:22 +0100 Subject: [PATCH 086/628] [LookUp layer] Fix typo in pragma insertion --- src/finn/custom_op/fpgadataflow/lookup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index d9788dce61..d3c8af56ed 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -299,10 +299,10 @@ def pragmas(self): my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem") my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control") my_pragmas.append( - "#pragma HLS_INTERFACE s_axilite port=size bundle=control" + "#pragma HLS INTERFACE s_axilite port=size bundle=control" ) my_pragmas.append( - "#pragma HLS_INTERFACE s_axilite port=oob_count bundle=control" + "#pragma HLS INTERFACE s_axilite port=oob_count bundle=control" ) else: raise Exception("Unrecognized mem_mode: " + mem_mode) From b778d53aa0a9816ed7fd7979a3938bd715ee94a4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 9 Aug 2022 19:24:03 +0300 Subject: [PATCH 087/628] [HLSCustomOp] add missing attribute for characterization period --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index f73868df4f..85f5bfd3f1 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -113,6 +113,8 @@ def get_nodeattr_types(self): "output_hook": ("s", False, ""), # characterization of stream input-output behavior per cycle "io_characteristic": ("ints", False, []), + # the period for which the characterization was run + "io_characteristic_period": ("i", False, 0), } def get_verilog_top_module_name(self): From 20d0b6b42265767d23fd8afd5114d5003887156d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 9 Aug 2022 19:37:16 +0300 Subject: [PATCH 088/628] [FIFO] bugfix: add reset, extra checks --- .../transformation/fpgadataflow/derive_characteristic.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 834fd092a7..474cd10d84 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -29,7 +29,7 @@ import numpy as np import qonnx.custom_op.registry as registry -from pyverilator.util.axi_utils import _read_signal, 
rtlsim_multi_io +from pyverilator.util.axi_utils import _read_signal, reset_rtlsim, rtlsim_multi_io from qonnx.transformation.base import NodeLocalTransformation from finn.util.fpgadataflow import is_fpgadataflow_node @@ -77,6 +77,11 @@ def applyNodeLocal(self, node): assert ( node.op_type not in multistream_optypes ), f"{node.name} unsupported" + try: + mem_mode = inst.get_nodeattr("mem_mode") + assert mem_mode == "const", "Only mem_mode=const supported for now" + except AttributeError: + pass exp_cycles = inst.get_exp_cycles() n_inps = np.prod(inst.get_folded_input_shape()[:-1]) n_outs = np.prod(inst.get_folded_output_shape()[:-1]) @@ -121,6 +126,7 @@ def monitor_txns(sim_obj): else: txns_out.append(0) + reset_rtlsim(sim) total_cycle_count = rtlsim_multi_io( sim, io_dict, From c3e01a5d3e21d684f3910c977d3b2150267aaae4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 9 Aug 2022 19:39:02 +0300 Subject: [PATCH 089/628] [Test] flesh out FIFO characterizatio test for MVAU --- tests/fpgadataflow/test_fpgadataflow_mvau.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index 5050bf245a..87c30a00bf 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -433,9 +433,9 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( # synapse folding, -1 is maximum possible @pytest.mark.parametrize("sf", [8]) # HLS matrix width (input features) -@pytest.mark.parametrize("mw", [128]) +@pytest.mark.parametrize("mw", [32]) # HLS matrix height (output features) -@pytest.mark.parametrize("mh", [128]) +@pytest.mark.parametrize("mh", [32]) @pytest.mark.fpgadataflow @pytest.mark.vivado def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): @@ -463,10 +463,21 @@ def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): # lookup op_type in registry of CustomOps inst = getCustomOp(node) inst.set_nodeattr("mem_mode", mem_mode) - + total_fold = nf * sf + exp_total_cycles = total_fold + 10 model = model.transform(SetExecMode("rtlsim")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP("xc7z020clg400-1", 5)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) - model = model.transform(DeriveCharacteristic(1000)) + model = model.transform(DeriveCharacteristic(exp_total_cycles)) + node_inst = getCustomOp(model.graph.node[0]) + period_attr = node_inst.get_nodeattr("io_characteristic_period") + assert period_attr == exp_total_cycles + chrc = node_inst.get_nodeattr("io_characteristic") + assert len(chrc) == 2 * exp_total_cycles + chrc = np.asarray(chrc, dtype=np.uint8).reshape(2, -1) + # first sf cycles should read input continuously + assert (chrc[0, :sf] == 1).all() + # all outputs should be produced within the exp n of cycles + assert sum(chrc[1]) == nf From 88d37baf50e09bc5c08f8725860a5818664a53d7 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 10 Aug 2022 13:09:50 +0300 Subject: [PATCH 090/628] [Deps] update finn-hlslib --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 34727cb88c..1dd07c98a0 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" 
CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="2c7caccb5ecd2af448acac7d150e3cabc1119433" +HLSLIB_COMMIT="36e6c8cb1019ba0307e1886011692a58e02f3bfa" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From 131f93aa02a88727603b867e63e20ab6f453e36a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 10 Aug 2022 13:31:24 +0300 Subject: [PATCH 091/628] [Test] add copyright header to eltwise --- .../fpgadataflow/test_fpgadataflow_eltwise.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_eltwise.py b/tests/fpgadataflow/test_fpgadataflow_eltwise.py index bfc007421e..6028a9b9f0 100644 --- a/tests/fpgadataflow/test_fpgadataflow_eltwise.py +++ b/tests/fpgadataflow/test_fpgadataflow_eltwise.py @@ -1,3 +1,31 @@ +# Copyright (c) 2022, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + import pytest import numpy as np From 2c6290528950af025989f51d1362a0152e23d6b8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 10 Aug 2022 13:31:41 +0300 Subject: [PATCH 092/628] [Eltwise] reflect latest hlslib updates --- src/finn/custom_op/fpgadataflow/eltwise.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index f17eb6fdf3..2395d451d1 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -59,6 +59,15 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs + def get_eltwise_op_lambda(self): + eltwise_op = self.get_nodeattr("eltwiseOp") + eltwise_ops = { + "Add": "[](auto a, auto b) { return a + b; }", + "Sub": "[](auto a, auto b) { return a - b; }", + "AbsDiff": "[](auto a, auto b) { return a>b? 
a-b : b-a; }", + } + return eltwise_ops[eltwise_op] + def get_normal_input_shape(self, ind=0): ich = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) @@ -338,7 +347,8 @@ def docompute(self): slice_in0 = "Slice<%s>" % elem_hls_type_0 slice_in1 = "Slice<%s>" % elem_hls_type_1 slice_out = "Slice<%s>" % out_hls_type - eltwise_op_str = "%sEltwiseFunction<%s, %s, %s>()" % ( + eltwise_op_str = self.get_eltwise_op_lambda() + "%sEltwiseFunction<%s, %s, %s>()" % ( op, elem_hls_type_0, elem_hls_type_1, From 07bb0d30eb2ea3d7878a7ce7732b8914224ce391 Mon Sep 17 00:00:00 2001 From: Le Blevec Date: Wed, 10 Aug 2022 12:13:45 +0100 Subject: [PATCH 093/628] [Prepare_IP] Fix on a bug where two IPs could have the same name and not correspond to the same object. --- src/finn/transformation/fpgadataflow/hlssynth_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/hlssynth_ip.py b/src/finn/transformation/fpgadataflow/hlssynth_ip.py index 1fede06678..c0b9df5bad 100644 --- a/src/finn/transformation/fpgadataflow/hlssynth_ip.py +++ b/src/finn/transformation/fpgadataflow/hlssynth_ip.py @@ -64,7 +64,7 @@ def applyNodeLocal(self, node): ), """Node attribute "code_gen_dir_ipgen" is empty. Please run transformation PrepareIP first.""" - if not os.path.isdir(inst.get_nodeattr("ipgen_path")): + if not os.path.isdir(inst.get_nodeattr("ipgen_path")) or not inst.get_nodeattr("code_gen_dir_ipgen") in inst.get_nodeattr("ipgen_path"): # call the compilation function for this node inst.ipgen_singlenode_code() else: From a842867793b0a50aafbfa8c500a1c575e945a6dc Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 10 Aug 2022 23:48:53 +0300 Subject: [PATCH 094/628] [FIFO] fix boundary condition in DeriveCharacteristic --- src/finn/transformation/fpgadataflow/derive_characteristic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 474cd10d84..0a32ba7e73 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -89,7 +89,7 @@ def applyNodeLocal(self, node): # try to come up with an optimistic estimate exp_cycles = min(n_inps, n_outs) assert ( - exp_cycles < self.period + exp_cycles <= self.period ), "Period %d too short to characterize %s : expects min %d cycles" % ( self.period, node.name, From 4c04af8da866d2dde88455bd920c6fc17b06493b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Thu, 11 Aug 2022 06:14:48 +0100 Subject: [PATCH 095/628] Add IRQ signalling output for input bounds violation (to be externalized). 
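The external-memory lookup now drives an oob_irq output in addition to the
AXI-lite-readable size/oob_count registers: the flag is asserted whenever
the out-of-bounds counter is non-zero and is exported as a plain ap_none
port so it can be wired to external interrupt logic. Sketch of the
generated compute body (simplified, most template arguments elided):

    StreamingLookup_ext<EmbeddingSize>(in0, out, mem, size, oob_count);
    oob_irq = oob_count != 0;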
--- src/finn/custom_op/fpgadataflow/lookup.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index d3c8af56ed..6425edd56e 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -259,8 +259,8 @@ def docompute(self): ] elif mem_mode == "external": self.code_gen_dict["$DOCOMPUTE$"] = [ - """StreamingLookup_ext - (in0, out, mem, size, oob_count);""" + """StreamingLookup_ext(in0, out, mem, size, oob_count); + oob_irq = oob_count != 0;""" ] def blackboxfunction(self): @@ -279,7 +279,7 @@ def blackboxfunction(self): "void " + self.onnx_node.name + "(hls::stream &in0, hls::stream &out, " - + "T_DST const *const mem, unsigned const size, unsigned &oob_count)" + + "T_DST const *const mem, unsigned const size, unsigned &oob_count, bool &oob_irq)" ] def pragmas(self): @@ -298,12 +298,9 @@ def pragmas(self): elif mem_mode == "external": my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem") my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control") - my_pragmas.append( - "#pragma HLS INTERFACE s_axilite port=size bundle=control" - ) - my_pragmas.append( - "#pragma HLS INTERFACE s_axilite port=oob_count bundle=control" - ) + my_pragmas.append("#pragma HLS INTERFACE s_axilite port=size bundle=control") + my_pragmas.append("#pragma HLS INTERFACE s_axilite port=oob_count bundle=control") + my_pragmas.append("#pragma HLS INTERFACE ap_none port=oob_irq") else: raise Exception("Unrecognized mem_mode: " + mem_mode) self.code_gen_dict["$PRAGMAS$"] = my_pragmas @@ -474,4 +471,5 @@ def get_verilog_top_module_intf_names(self): if mem_mode == "external": intf_names["axilite"] = ["s_axi_control"] intf_names["aximm"] = [("m_axi_gmem", self.get_nodeattr("ext_mem_width"))] + intf_names["oob_irq"] = ["ap_none"] return intf_names From a8fe9b31e484e3e6e2e4f5d3983e9d0cc4a273d8 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Thu, 11 Aug 2022 13:34:37 +0100 Subject: [PATCH 096/628] [PrepareIP][HLSSynthIP] Fix on a bug where two IPs could have the same name and not correspond to the same object. --- src/finn/transformation/fpgadataflow/hlssynth_ip.py | 6 +++++- src/finn/transformation/fpgadataflow/prepare_ip.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/hlssynth_ip.py b/src/finn/transformation/fpgadataflow/hlssynth_ip.py index c0b9df5bad..c091dbd5ed 100644 --- a/src/finn/transformation/fpgadataflow/hlssynth_ip.py +++ b/src/finn/transformation/fpgadataflow/hlssynth_ip.py @@ -64,7 +64,11 @@ def applyNodeLocal(self, node): ), """Node attribute "code_gen_dir_ipgen" is empty. 
Please run transformation PrepareIP first.""" - if not os.path.isdir(inst.get_nodeattr("ipgen_path")) or not inst.get_nodeattr("code_gen_dir_ipgen") in inst.get_nodeattr("ipgen_path"): + if not os.path.isdir( + inst.get_nodeattr("ipgen_path") + ) or not inst.get_nodeattr("code_gen_dir_ipgen") in inst.get_nodeattr( + "ipgen_path" + ): # call the compilation function for this node inst.ipgen_singlenode_code() else: diff --git a/src/finn/transformation/fpgadataflow/prepare_ip.py b/src/finn/transformation/fpgadataflow/prepare_ip.py index 8bf653faf1..7c2dfd9beb 100644 --- a/src/finn/transformation/fpgadataflow/prepare_ip.py +++ b/src/finn/transformation/fpgadataflow/prepare_ip.py @@ -46,7 +46,11 @@ def _codegen_single_node(node, model, fpgapart, clk): # get the path of the code generation directory code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen") # ensure that there is a directory - if code_gen_dir == "" or not os.path.isdir(code_gen_dir) or not str(node.name) in code_gen_dir: + if ( + code_gen_dir == "" + or not os.path.isdir(code_gen_dir) + or not str(node.name) in code_gen_dir + ): code_gen_dir = make_build_dir( prefix="code_gen_ipgen_" + str(node.name) + "_" ) From d5292fcb179d33e6f414fd6140a2b8c1f154a8ab Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 11 Aug 2022 16:34:58 +0300 Subject: [PATCH 097/628] [Test] add first sketch for FIFO sizing end2end test --- tests/end2end/test_end2end_fifosizing.py | 103 +++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 tests/end2end/test_end2end_fifosizing.py diff --git a/tests/end2end/test_end2end_fifosizing.py b/tests/end2end/test_end2end_fifosizing.py new file mode 100644 index 0000000000..3266267230 --- /dev/null +++ b/tests/end2end/test_end2end_fifosizing.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022 Xilinx, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import pkg_resources as pk + +import numpy as np +from qonnx.custom_op.registry import getCustomOp + +import finn.builder.build_dataflow as build +import finn.builder.build_dataflow_config as build_cfg +from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance +from finn.transformation.fpgadataflow.derive_characteristic import DeriveCharacteristic +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim +from finn.util.basic import make_build_dir + + +def custom_step_fifosize(model, cfg): + # TODO convert to NodeLocalTransformation + def accumulate_char_fxn(chrc): + p = len(chrc) + ret = [] + for t in range(2 * p): + if t == 0: + ret.append(chrc[0]) + else: + ret.append(ret[-1] + chrc[t % p]) + return ret + + # TODO handle chrc for input and output nodes + all_act_tensors = [x.name for x in model.graph.value_info] + for tensor_nm in all_act_tensors: + # generate accumulated characteristic functions + prod = getCustomOp(model.find_producer(tensor_nm)) + prod_chrc = prod.get_nodeattr("io_characteristic") + prod_chrc = np.asarray(prod_chrc, dtype=np.uint8).reshape(2, -1)[1] + prod_chrc = accumulate_char_fxn(prod_chrc) + cons = getCustomOp(model.find_consumer(tensor_nm)) + cons_chrc = cons.get_nodeattr("io_characteristic") + cons_chrc = np.asarray(cons_chrc, dtype=np.uint8).reshape(2, -1)[0] + cons_chrc = accumulate_char_fxn(cons_chrc) + # TODO find minimum phase shift + + for node in model.graph.node: + inst = getCustomOp(node) + chrc = inst.get_nodeattr("io_characteristic") + chrc = np.asarray(chrc, dtype=np.uint8).reshape(2, -1) + + return model + + +def custom_step_fifocharacterize(model, cfg): + model = model.transform(PrepareRTLSim()) + period = model.analysis(dataflow_performance)["max_cycles"] + 10 + model = model.transform(DeriveCharacteristic(period)) + return model + + +def test_end2end_fifosizing(): + chkpt_name = pk.resource_filename("finn.qnn-data", "build_dataflow/model.onnx") + tmp_output_dir = make_build_dir("build_fifosizing_") + # tmp_output_dir = "/tmp/finn_dev_maltanar/build_fifosizing_5mt0o6s_" + steps = build_cfg.default_build_dataflow_steps + steps = steps[:10] + steps.append(custom_step_fifocharacterize) + # steps.append(custom_step_fifosize) + cfg = build_cfg.DataflowBuildConfig( + output_dir=tmp_output_dir, + auto_fifo_depths=False, + target_fps=10000, + synth_clk_period_ns=10.0, + board="Pynq-Z1", + shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, + generate_outputs=[], + steps=steps, + default_mem_mode=build_cfg.ComputeEngineMemMode.CONST, + start_step="custom_step_fifocharacterize", + ) + build.build_dataflow_cfg(chkpt_name, cfg) From e05154e60dcf05c69408c5c951476df15c57ed44 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 11 Aug 2022 16:22:26 +0200 Subject: [PATCH 098/628] [FIFO] also do accumulation as part of DeriveCharacteristic --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 2 +- .../fpgadataflow/derive_characteristic.py | 13 +++++++++++++ tests/fpgadataflow/test_fpgadataflow_mvau.py | 6 +++--- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 85f5bfd3f1..bb359ef0b5 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -111,7 +111,7 @@ def get_nodeattr_types(self): "inFIFODepth": ("i", False, 2), "outFIFODepth": ("i", False, 2), "output_hook": ("s", False, ""), - # characterization of stream input-output behavior per cycle + 
# accumulated characteristic function over two periods "io_characteristic": ("ints", False, []), # the period for which the characterization was run "io_characteristic_period": ("i", False, 0), diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 0a32ba7e73..72573f250b 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -140,6 +140,19 @@ def monitor_txns(sim_obj): txns_in += [0 for x in range(self.period - len(txns_in))] if len(txns_out) < self.period: txns_out += [0 for x in range(self.period - len(txns_out))] + + def accumulate_char_fxn(chrc): + p = len(chrc) + ret = [] + for t in range(2 * p): + if t == 0: + ret.append(chrc[0]) + else: + ret.append(ret[-1] + chrc[t % p]) + return ret + + txns_in = accumulate_char_fxn(txns_in) + txns_out = accumulate_char_fxn(txns_out) io_characteristic = txns_in + txns_out inst.set_nodeattr("io_characteristic", io_characteristic) inst.set_nodeattr("io_characteristic_period", self.period) diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index 87c30a00bf..22ff36f537 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -475,9 +475,9 @@ def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): period_attr = node_inst.get_nodeattr("io_characteristic_period") assert period_attr == exp_total_cycles chrc = node_inst.get_nodeattr("io_characteristic") - assert len(chrc) == 2 * exp_total_cycles + assert len(chrc) == 4 * exp_total_cycles chrc = np.asarray(chrc, dtype=np.uint8).reshape(2, -1) # first sf cycles should read input continuously - assert (chrc[0, :sf] == 1).all() + assert (chrc[0, :sf] == range(1, sf + 1)).all() # all outputs should be produced within the exp n of cycles - assert sum(chrc[1]) == nf + assert chrc[1, exp_total_cycles] == nf From a88d25b6d1a976d8a9852aee3ffab9ede246fb9f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 11 Aug 2022 16:52:08 +0200 Subject: [PATCH 099/628] [FIFO] support characterizing components with decoupled mode weights --- .../fpgadataflow/derive_characteristic.py | 33 ++++++++++++------- tests/fpgadataflow/test_fpgadataflow_mvau.py | 2 +- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 72573f250b..8617960abf 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -77,11 +77,6 @@ def applyNodeLocal(self, node): assert ( node.op_type not in multistream_optypes ), f"{node.name} unsupported" - try: - mem_mode = inst.get_nodeattr("mem_mode") - assert mem_mode == "const", "Only mem_mode=const supported for now" - except AttributeError: - pass exp_cycles = inst.get_exp_cycles() n_inps = np.prod(inst.get_folded_input_shape()[:-1]) n_outs = np.prod(inst.get_folded_output_shape()[:-1]) @@ -106,25 +101,38 @@ def applyNodeLocal(self, node): "outputs": {"out": []}, } - txns_in = [] - txns_out = [] + txns_in = {"in0": []} + txns_out = {"out": []} + + try: + # fill out weight stream for decoupled-mode components + mem_mode = inst.get_nodeattr("mem_mode") + if mem_mode in ["decoupled", "external"]: + if op_type == "Thresholding_Batch": + n_weight_inps = inst.calc_tmem() + else: + 
n_weight_inps = inst.calc_wmem() + io_dict["inputs"]["weights"] = [0 for i in range(n_weight_inps)] + txns_in["weights"] = [] + except AttributeError: + pass def monitor_txns(sim_obj): for inp in io_dict["inputs"]: in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 in_valid = _read_signal(sim, inp + sname + "TVALID") == 1 if in_ready and in_valid: - txns_in.append(1) + txns_in[inp].append(1) else: - txns_in.append(0) + txns_in[inp].append(0) for outp in io_dict["outputs"]: if ( _read_signal(sim, outp + sname + "TREADY") == 1 and _read_signal(sim, outp + sname + "TVALID") == 1 ): - txns_out.append(1) + txns_out[outp].append(1) else: - txns_out.append(0) + txns_out[outp].append(0) reset_rtlsim(sim) total_cycle_count = rtlsim_multi_io( @@ -136,6 +144,9 @@ def monitor_txns(sim_obj): hook_preclk=monitor_txns, ) assert total_cycle_count <= self.period + # restrict to single input-output stream only for now + txns_in = txns_in["in0"] + txns_out = txns_out["out"] if len(txns_in) < self.period: txns_in += [0 for x in range(self.period - len(txns_in))] if len(txns_out) < self.period: diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index 22ff36f537..a2f3448592 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -421,7 +421,7 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( # mem_mode: const or decoupled -@pytest.mark.parametrize("mem_mode", ["const"]) +@pytest.mark.parametrize("mem_mode", ["decoupled", "const"]) # activation: None or DataType @pytest.mark.parametrize("act", [DataType["INT4"]]) # weight datatype From 7233107e7a40e858789b9e944948216bcc0fd83c Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 11 Aug 2022 21:56:36 +0200 Subject: [PATCH 100/628] [Test] flesh out new FIFO sizing test --- .../test_fifosizing.py} | 91 ++++++++++++------- 1 file changed, 60 insertions(+), 31 deletions(-) rename tests/{end2end/test_end2end_fifosizing.py => fpgadataflow/test_fifosizing.py} (56%) diff --git a/tests/end2end/test_end2end_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py similarity index 56% rename from tests/end2end/test_end2end_fifosizing.py rename to tests/fpgadataflow/test_fifosizing.py index 3266267230..d93c5630db 100644 --- a/tests/end2end/test_end2end_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -28,76 +28,105 @@ import pkg_resources as pk +import pytest + +import json import numpy as np +import shutil from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance from finn.transformation.fpgadataflow.derive_characteristic import DeriveCharacteristic +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.insert_dwc import InsertDWC +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.util.basic import make_build_dir def custom_step_fifosize(model, cfg): # TODO convert to NodeLocalTransformation - def accumulate_char_fxn(chrc): - p = len(chrc) - ret = [] - for t in range(2 * p): - if t == 0: - ret.append(chrc[0]) - else: - ret.append(ret[-1] + chrc[t % p]) - return ret - # TODO handle chrc for input and output nodes all_act_tensors = [x.name for x in 
model.graph.value_info] for tensor_nm in all_act_tensors: # generate accumulated characteristic functions - prod = getCustomOp(model.find_producer(tensor_nm)) + prod = model.find_producer(tensor_nm) + cons = model.find_consumer(tensor_nm) + if prod is None or cons is None: + continue + prod = getCustomOp(prod) + period = prod.get_nodeattr("io_characteristic_period") prod_chrc = prod.get_nodeattr("io_characteristic") - prod_chrc = np.asarray(prod_chrc, dtype=np.uint8).reshape(2, -1)[1] - prod_chrc = accumulate_char_fxn(prod_chrc) - cons = getCustomOp(model.find_consumer(tensor_nm)) + prod_chrc = np.asarray(prod_chrc).reshape(2, -1)[1] + cons = getCustomOp(cons) cons_chrc = cons.get_nodeattr("io_characteristic") - cons_chrc = np.asarray(cons_chrc, dtype=np.uint8).reshape(2, -1)[0] - cons_chrc = accumulate_char_fxn(cons_chrc) - # TODO find minimum phase shift - - for node in model.graph.node: - inst = getCustomOp(node) - chrc = inst.get_nodeattr("io_characteristic") - chrc = np.asarray(chrc, dtype=np.uint8).reshape(2, -1) - + cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] + # find minimum phase shift satisfying the constraint + pshift_min = period + for pshift_cand in range(period): + pshift_condition = [ + (prod_chrc[i + pshift_cand] >= cons_chrc[i]) + for i in range(period - pshift_cand) + ] + if all(pshift_condition): + pshift_min = pshift_cand + break + fifo_depth = max( + [(prod_chrc[i + pshift_cand] - cons_chrc[i]) for i in range(pshift_min)] + ) + prod.set_nodeattr("outFIFODepth", fifo_depth) + cons.set_nodeattr("inFIFODepth", fifo_depth) return model def custom_step_fifocharacterize(model, cfg): + model = model.transform(InsertDWC()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) + ) + model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) period = model.analysis(dataflow_performance)["max_cycles"] + 10 model = model.transform(DeriveCharacteristic(period)) return model -def test_end2end_fifosizing(): +@pytest.mark.slow +@pytest.mark.vivado +def test_fifosizing(): chkpt_name = pk.resource_filename("finn.qnn-data", "build_dataflow/model.onnx") tmp_output_dir = make_build_dir("build_fifosizing_") - # tmp_output_dir = "/tmp/finn_dev_maltanar/build_fifosizing_5mt0o6s_" steps = build_cfg.default_build_dataflow_steps - steps = steps[:10] - steps.append(custom_step_fifocharacterize) - # steps.append(custom_step_fifosize) + steps.insert(10, custom_step_fifocharacterize) + steps.insert(11, custom_step_fifosize) cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=False, target_fps=10000, synth_clk_period_ns=10.0, board="Pynq-Z1", + rtlsim_batch_size=100, shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, - generate_outputs=[], + generate_outputs=[ + build_cfg.DataflowOutputType.ESTIMATE_REPORTS, + build_cfg.DataflowOutputType.STITCHED_IP, + build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE, + ], steps=steps, - default_mem_mode=build_cfg.ComputeEngineMemMode.CONST, - start_step="custom_step_fifocharacterize", + default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, ) build.build_dataflow_cfg(chkpt_name, cfg) + with open(tmp_output_dir + "/report/estimate_network_performance.json") as f: + est_data = json.load(f) + with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: + sim_data = json.load(f) + assert ( + float(sim_data["throughput[images/s]"]) + / float(est_data["estimated_throughput_fps"]) + > 0.9 + ) + 
shutil.rmtree(tmp_output_dir) From 45a730ed4c6242154c2a95056dec6b1e9eb407d8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 11 Aug 2022 22:03:17 +0200 Subject: [PATCH 101/628] [FIFO] allow skipping nodes with existing characteristic --- .../transformation/fpgadataflow/derive_characteristic.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 8617960abf..5556399ee6 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -29,6 +29,7 @@ import numpy as np import qonnx.custom_op.registry as registry +import warnings from pyverilator.util.axi_utils import _read_signal, reset_rtlsim, rtlsim_multi_io from qonnx.transformation.base import NodeLocalTransformation @@ -68,6 +69,11 @@ def applyNodeLocal(self, node): assert inst.get_nodeattr("rtlsim_so") != "", ( "rtlsim not ready for " + node.name ) + if inst.get_nodeattr("io_characteristic_period") > 0: + warnings.warn( + "Skipping node %s: already has FIFO characteristic" % node.name + ) + return (node, False) # restricted to single input and output nodes for now multistream_optypes = [ "AddStreams_Batch", From d765762dbdd112b4f4031a11214b735a74b1e052 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 12 Aug 2022 12:34:29 +0200 Subject: [PATCH 102/628] [Deps] update QONNX --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..97427ec9da 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="398a0ecfcb32407c0a3df39246cf6d2bca02886c" +QONNX_COMMIT="34ecaa73398c85201b325bcff1beeca1e45f4541" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" From 1292fd5cab01b9fcd5839ac65e63f6edf7cb169e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 12 Aug 2022 12:35:41 +0200 Subject: [PATCH 103/628] [FIFO] Add DeriveFIFOSizes as NodeLocalTransformation --- .../fpgadataflow/derive_characteristic.py | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 5556399ee6..fcec750245 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -179,3 +179,71 @@ def accumulate_char_fxn(chrc): "Custom op_type %s is currently not supported." % op_type ) return (node, False) + + +class DeriveFIFOSizes(NodeLocalTransformation): + """Prerequisite: DeriveCharacteristic already called on graph. + For each node in the graph, use the accumulated I/O characteristic function + to perform FIFO sizing, setting the in/outFIFODepth attributes of HLSCustomOp + nodes. + + * num_workers (int or None) number of parallel workers, see documentation in + NodeLocalTransformation for more details. 
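+
+    The depth chosen for each producer/consumer pair is the worst-case gap
+    between the producer's accumulated output count and the consumer's
+    accumulated input count, after shifting the producer curve by the smallest
+    phase that keeps it at or above the consumer curve.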
+ """ + + def __init__(self, num_workers=None): + super().__init__(num_workers=num_workers) + + def applyNodeLocal(self, node): + op_type = node.op_type + if is_fpgadataflow_node(node) is True: + try: + # lookup op_type in registry of CustomOps + prod = registry.getCustomOp(node) + assert op_type != "StreamingFIFO", "Found existing FIFOs" + period = prod.get_nodeattr("io_characteristic_period") + prod_chrc = prod.get_nodeattr("io_characteristic") + assert ( + len(prod_chrc) == 4 * period + ), "Found unexpected characterization attribute" + if prod.get_nodeattr("outFIFODepth") > 2: + # FIFO depth already set, can skip this node + return (node, False) + prod_chrc = np.asarray(prod_chrc).reshape(2, -1)[1] + # find consumers + model = self.ref_input_model + consumers = model.find_consumers(node.output[0]) + # compute FIFO depth for each consumer + out_fifo_depth = 0 + for cons_node in consumers: + cons = registry.getCustomOp(cons_node) + cons_chrc = cons.get_nodeattr("io_characteristic") + cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] + # find minimum phase shift satisfying the constraint + pshift_min = period + for pshift_cand in range(period): + pshift_condition = [ + (prod_chrc[i + pshift_cand] >= cons_chrc[i]) + for i in range(period - pshift_cand) + ] + if all(pshift_condition): + pshift_min = pshift_cand + break + fifo_depth = max( + [ + (prod_chrc[i + pshift_cand] - cons_chrc[i]) + for i in range(pshift_min) + ] + ) + out_fifo_depth = max(out_fifo_depth, fifo_depth) + # set output FIFO depth for this (producing) node + # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) + # for each tensor + prod.set_nodeattr("outFIFODepth", out_fifo_depth) + + except KeyError: + # exception if op_type is not supported + raise Exception( + "Custom op_type %s is currently not supported." 
% op_type + ) + return (node, False) From 067e6744826d82d2366bb60c03a0fb781efd35b3 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 12 Aug 2022 12:37:20 +0200 Subject: [PATCH 104/628] [Test] switch to new DeriveFIFOSizes --- tests/fpgadataflow/test_fifosizing.py | 46 ++++----------------------- 1 file changed, 6 insertions(+), 40 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index d93c5630db..34875e8975 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -31,15 +31,16 @@ import pytest import json -import numpy as np import shutil -from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance -from finn.transformation.fpgadataflow.derive_characteristic import DeriveCharacteristic +from finn.transformation.fpgadataflow.derive_characteristic import ( + DeriveCharacteristic, + DeriveFIFOSizes, +) from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_dwc import InsertDWC from finn.transformation.fpgadataflow.prepare_ip import PrepareIP @@ -48,41 +49,6 @@ def custom_step_fifosize(model, cfg): - # TODO convert to NodeLocalTransformation - # TODO handle chrc for input and output nodes - all_act_tensors = [x.name for x in model.graph.value_info] - for tensor_nm in all_act_tensors: - # generate accumulated characteristic functions - prod = model.find_producer(tensor_nm) - cons = model.find_consumer(tensor_nm) - if prod is None or cons is None: - continue - prod = getCustomOp(prod) - period = prod.get_nodeattr("io_characteristic_period") - prod_chrc = prod.get_nodeattr("io_characteristic") - prod_chrc = np.asarray(prod_chrc).reshape(2, -1)[1] - cons = getCustomOp(cons) - cons_chrc = cons.get_nodeattr("io_characteristic") - cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] - # find minimum phase shift satisfying the constraint - pshift_min = period - for pshift_cand in range(period): - pshift_condition = [ - (prod_chrc[i + pshift_cand] >= cons_chrc[i]) - for i in range(period - pshift_cand) - ] - if all(pshift_condition): - pshift_min = pshift_cand - break - fifo_depth = max( - [(prod_chrc[i + pshift_cand] - cons_chrc[i]) for i in range(pshift_min)] - ) - prod.set_nodeattr("outFIFODepth", fifo_depth) - cons.set_nodeattr("inFIFODepth", fifo_depth) - return model - - -def custom_step_fifocharacterize(model, cfg): model = model.transform(InsertDWC()) model = model.transform(GiveUniqueNodeNames()) model = model.transform( @@ -92,6 +58,7 @@ def custom_step_fifocharacterize(model, cfg): model = model.transform(PrepareRTLSim()) period = model.analysis(dataflow_performance)["max_cycles"] + 10 model = model.transform(DeriveCharacteristic(period)) + model = model.transform(DeriveFIFOSizes()) return model @@ -101,8 +68,7 @@ def test_fifosizing(): chkpt_name = pk.resource_filename("finn.qnn-data", "build_dataflow/model.onnx") tmp_output_dir = make_build_dir("build_fifosizing_") steps = build_cfg.default_build_dataflow_steps - steps.insert(10, custom_step_fifocharacterize) - steps.insert(11, custom_step_fifosize) + steps.insert(10, custom_step_fifosize) cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=False, From 0242e6fe76490ce398f31d411a9386ae65294daf Mon Sep 17 00:00:00 2001 From: Yaman 
Umuroglu Date: Fri, 12 Aug 2022 17:25:05 +0200 Subject: [PATCH 105/628] [FIFO] handle weight reps correctly for decoupled mode --- .../transformation/fpgadataflow/derive_characteristic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index fcec750245..82512b3f1f 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -118,7 +118,10 @@ def applyNodeLocal(self, node): n_weight_inps = inst.calc_tmem() else: n_weight_inps = inst.calc_wmem() - io_dict["inputs"]["weights"] = [0 for i in range(n_weight_inps)] + num_w_reps = np.prod(inst.get_nodeattr("numInputVectors")) + io_dict["inputs"]["weights"] = [ + 0 for i in range(num_w_reps * n_weight_inps) + ] txns_in["weights"] = [] except AttributeError: pass From bea06b8aaae00474b7c3af5f875795f90ba8bed3 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 12 Aug 2022 17:49:36 +0200 Subject: [PATCH 106/628] [Test] generalize fifosizing test to enable more nets --- tests/fpgadataflow/test_fifosizing.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 34875e8975..7287861d58 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -26,12 +26,11 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest import json import shutil +from brevitas.export.onnx.generic.manager import BrevitasONNXManager from qonnx.transformation.general import GiveUniqueNodeNames import finn.builder.build_dataflow as build @@ -46,6 +45,7 @@ from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.util.basic import make_build_dir +from finn.util.test import get_trained_network_and_ishape def custom_step_fifosize(model, cfg): @@ -62,11 +62,18 @@ def custom_step_fifosize(model, cfg): return model +def fetch_test_model(topology, wbits=2, abits=2): + tmp_output_dir = make_build_dir("build_fifosizing_%s_" % topology) + (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) + chkpt_name = tmp_output_dir + "/model.onnx" + BrevitasONNXManager.export(model, ishape, chkpt_name) + return tmp_output_dir + + @pytest.mark.slow @pytest.mark.vivado def test_fifosizing(): - chkpt_name = pk.resource_filename("finn.qnn-data", "build_dataflow/model.onnx") - tmp_output_dir = make_build_dir("build_fifosizing_") + tmp_output_dir = fetch_test_model("tfc") steps = build_cfg.default_build_dataflow_steps steps.insert(10, custom_step_fifosize) cfg = build_cfg.DataflowBuildConfig( @@ -85,7 +92,7 @@ def test_fifosizing(): steps=steps, default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, ) - build.build_dataflow_cfg(chkpt_name, cfg) + build.build_dataflow_cfg(tmp_output_dir + "/model.onnx", cfg) with open(tmp_output_dir + "/report/estimate_network_performance.json") as f: est_data = json.load(f) with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: From 2139cb952ea23e8e7ba289dc1e4ffe52ad713e2b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 15 Aug 2022 13:36:53 +0200 Subject: [PATCH 107/628] [Eltwise] use struct member fxn to workaround 
HLS pipeline style bug --- src/finn/custom_op/fpgadataflow/eltwise.py | 41 ++++++++++++++++++++-- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index 2395d451d1..d8c55b2283 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -61,10 +61,19 @@ def get_nodeattr_types(self): def get_eltwise_op_lambda(self): eltwise_op = self.get_nodeattr("eltwiseOp") + idt0 = self.get_input_datatype(0) + idt1 = self.get_input_datatype(1) + odt = self.get_output_datatype() + tin0 = idt0.get_hls_datatype_str() + tin1 = idt1.get_hls_datatype_str() + tout = odt.get_hls_datatype_str() eltwise_ops = { - "Add": "[](auto a, auto b) { return a + b; }", - "Sub": "[](auto a, auto b) { return a - b; }", - "AbsDiff": "[](auto a, auto b) { return a>b? a-b : b-a; }", + # "Add": "[](auto a, auto b) { return a + b; }", + # "Sub": "[](auto a, auto b) { return a - b; }", + # "AbsDiff": "[](auto a, auto b) { return a>b? a-b : b-a; }", + "Add": f"add<{tin0}, {tin1}, {tout}>()", + "Sub": f"sub<{tin0}, {tin1}, {tout}>()", + "AbsDiff": f"absdiff<{tin0}, {tin1}, {tout}>()", } return eltwise_ops[eltwise_op] @@ -296,6 +305,32 @@ def global_includes(self): '#include "interpret.hpp"', ] + self.code_gen_dict["$GLOBALS$"].extend( + [ + "template", + "struct absdiff {", + "TO operator()(TI1 const &a, TI2 const &b) const {", + "#pragma HLS inline", + "return a>b? a-b : b-a;", + "}", + "};", + "template", + "struct sub {", + "TO operator()(TI1 const &a, TI2 const &b) const {", + "#pragma HLS inline", + "return a-b;", + "}", + "};", + "template", + "struct add {", + "TO operator()(TI1 const &a, TI2 const &b) const {", + "#pragma HLS inline", + "return a+b;", + "}", + "};", + ] + ) + def defines(self, var): self.code_gen_dict["$DEFINES$"] = [] From aa228f23b7e603de6384ddfcf338a1cd7109c562 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 15 Aug 2022 13:38:15 +0200 Subject: [PATCH 108/628] [Deps] manually clone and install particular verilator version --- docker/Dockerfile.finn | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index a3f40d52ef..9c18c03d7b 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -46,7 +46,6 @@ RUN apt-get update && \ libsm6 \ libxext6 \ libxrender-dev \ - verilator \ nano \ zsh \ rsync \ @@ -62,6 +61,16 @@ RUN apt-get update && \ RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config RUN locale-gen "en_US.UTF-8" +# install Verilator from source to get the right version +RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev +RUN git clone https://github.com/verilator/verilator +RUN cd verilator && \ + git checkout v4.012 && \ + autoconf && \ + ./configure && \ + make -j4 && \ + make install + # install XRT RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb From 5dcc89cb11260e97f70455152fbffa13ee93717b Mon Sep 17 00:00:00 2001 From: Le Blevec Date: Thu, 18 Aug 2022 16:25:28 +0100 Subject: [PATCH 109/628] [Streamline] Updating the transformation to check if the input is in the expected format. 
--- src/finn/transformation/streamline/reorder.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index e36de2aa54..2fd8f36d63 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -735,6 +735,12 @@ def apply(self, model): for n in graph.node: node_ind += 1 if n.op_type == "Upsample" or n.op_type == "Resize": + if model.get_tensor_layout(n.input[0]) != DataLayout.NCHW: + warnings.warn( + "%s: Input not NCHW. Can't operate transformation on node." + % n.name + ) + continue consumer = model.find_consumer(n.output[0]) producer = model.find_producer(n.input[0]) if n.op_type == "Upsample": From e73a07190cdbe690a46136c603421b1d3dba8efb Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 19 Aug 2022 14:31:51 +0100 Subject: [PATCH 110/628] [IP stitch] make new lookup layer output external --- src/finn/custom_op/fpgadataflow/lookup.py | 13 +++++++++---- .../fpgadataflow/create_stitched_ip.py | 17 +++++++++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index 6425edd56e..31344c9f1b 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -279,7 +279,8 @@ def blackboxfunction(self): "void " + self.onnx_node.name + "(hls::stream &in0, hls::stream &out, " - + "T_DST const *const mem, unsigned const size, unsigned &oob_count, bool &oob_irq)" + + "T_DST const *const mem, unsigned const size, " + + "unsigned &oob_count, bool &oob_irq)" ] def pragmas(self): @@ -298,8 +299,12 @@ def pragmas(self): elif mem_mode == "external": my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem") my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control") - my_pragmas.append("#pragma HLS INTERFACE s_axilite port=size bundle=control") - my_pragmas.append("#pragma HLS INTERFACE s_axilite port=oob_count bundle=control") + my_pragmas.append( + "#pragma HLS INTERFACE s_axilite port=size bundle=control" + ) + my_pragmas.append( + "#pragma HLS INTERFACE s_axilite port=oob_count bundle=control" + ) my_pragmas.append("#pragma HLS INTERFACE ap_none port=oob_irq") else: raise Exception("Unrecognized mem_mode: " + mem_mode) @@ -471,5 +476,5 @@ def get_verilog_top_module_intf_names(self): if mem_mode == "external": intf_names["axilite"] = ["s_axi_control"] intf_names["aximm"] = [("m_axi_gmem", self.get_nodeattr("ext_mem_width"))] - intf_names["oob_irq"] = ["ap_none"] + intf_names["ap_none"] = ["oob_irq"] return intf_names diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 892ab09fdf..5b0b0cb600 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -228,6 +228,22 @@ def connect_s_axis_external(self, node, idx=None): ) self.s_axis_idx += 1 + def connect_ap_none_external(self, node): + inst_name = node.name + node_inst = getCustomOp(node) + input_intf_names = node_inst.get_verilog_top_module_intf_names()["ap_none"] + # make external + for i in range(len(input_intf_names)): + input_intf_name = input_intf_names[i] + self.connect_cmds.append( + "make_bd_pins_external [get_bd_pins %s/%s]" + % (inst_name, input_intf_name) + ) + self.connect_cmds.append( + "set_property name %s [get_bd_ports %s_0]" + % (input_intf_name, 
input_intf_name) + ) + def insert_signature(self, checksum_count): signature_vlnv = "AMD:user:axi_info_top:1.0" signature_name = "axi_info_top0" @@ -305,6 +321,7 @@ def apply(self, model): ip_dirs += [ip_dir_value] self.create_cmds += node_inst.code_generation_ipi() self.connect_clk_rst(node) + self.connect_ap_none_external(node) self.connect_axi(node) for i in range(len(node.input)): if not is_external_input(model, node, i): From 2afe6ecebd475f25cfa68eb846392804e9a5044a Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 22 Aug 2022 14:11:31 +0100 Subject: [PATCH 111/628] [HLSCustomOp] Adding ap_none interface string --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index b202e95a28..c5041acd46 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -138,6 +138,7 @@ def get_verilog_top_module_intf_names(self): intf_names["m_axis"] = [("out_" + sname, self.get_outstream_width_padded())] intf_names["aximm"] = [] intf_names["axilite"] = [] + intf_names["ap_none"] = [] return intf_names def get_verilog_top_filename(self): From fe457fab422086b1af2db0c5346b4b5305a76719 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 12:37:00 +0200 Subject: [PATCH 112/628] [FIFO] Speed up DeriveFIFOSizes considerably with numpy --- .../fpgadataflow/derive_characteristic.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 82512b3f1f..a9b291ba5b 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -223,21 +223,16 @@ def applyNodeLocal(self, node): cons_chrc = cons.get_nodeattr("io_characteristic") cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] # find minimum phase shift satisfying the constraint - pshift_min = period + pshift_min = period - 1 for pshift_cand in range(period): - pshift_condition = [ - (prod_chrc[i + pshift_cand] >= cons_chrc[i]) - for i in range(period - pshift_cand) - ] - if all(pshift_condition): + prod_chrc_part = prod_chrc[pshift_cand:period] + cons_chrc_part = cons_chrc[: period - pshift_cand] + if (prod_chrc_part >= cons_chrc_part).all(): pshift_min = pshift_cand break - fifo_depth = max( - [ - (prod_chrc[i + pshift_cand] - cons_chrc[i]) - for i in range(pshift_min) - ] - ) + prod_chrc_part = prod_chrc[pshift_min : (pshift_min + period)] + cons_chrc_part = cons_chrc[:period] + fifo_depth = (prod_chrc_part - cons_chrc_part).max() out_fifo_depth = max(out_fifo_depth, fifo_depth) # set output FIFO depth for this (producing) node # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) From d04e15abd374eea65778502a428db8bf108b48cb Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 13:22:58 +0200 Subject: [PATCH 113/628] [FIFO] round up depth to power-of-2 for impl_style=vivado --- .../custom_op/fpgadataflow/streamingfifo.py | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index a7c3cd0be5..06b557982d 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -73,6 +73,22 @@ def 
get_nodeattr_types(self): return my_attrs + def get_adjusted_depth(self): + impl = self.get_nodeattr("impl_style") + depth = self.get_nodeattr("depth") + if impl == "vivado": + old_depth = depth + # round up depth to nearest power-of-2 + # Vivado FIFO impl may fail otherwise + depth = (1 << (depth - 1).bit_length()) if impl == "vivado" else depth + if old_depth != depth: + warnings.warn( + "%s: rounding-up FIFO depth from %d to %d for impl_style=vivado" + % (self.onnx_node.name, old_depth, depth) + ) + + return depth + def make_shape_compatible_op(self, model): exp_ishape = self.get_normal_input_shape() oshape = self.get_normal_output_shape() @@ -181,7 +197,7 @@ def ipgen_singlenode_code(self): self.code_gen_dict.clear() def get_normal_input_shape(self): - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() # depth has to be between 2 and 256 with the current # StreamingFIFO implementation assert depth >= 2, """Depth is too low""" @@ -328,7 +344,7 @@ def code_generation_ipi(self): elif impl_style == "vivado": cmd = [] node_name = self.onnx_node.name - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() ram_style = self.get_nodeattr("ram_style") # create a hierarchy for this layer, with the same port names clk_name = self.get_verilog_top_module_intf_names()["clk"][0] @@ -393,7 +409,7 @@ def bram_estimation(self): """Calculates resource estimation for BRAM""" impl = self.get_nodeattr("impl_style") ram_type = self.get_nodeattr("ram_style") - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() W = self.get_instream_width() if impl == "rtl" or (impl == "vivado" and ram_type != "block"): @@ -418,7 +434,7 @@ def uram_estimation(self): impl = self.get_nodeattr("impl_style") ram_type = self.get_nodeattr("ram_style") - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() W = self.get_instream_width() if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"): @@ -428,7 +444,7 @@ def uram_estimation(self): return (math.ceil(depth / 4096)) * (math.ceil(W / 72)) def bram_efficiency_estimation(self): - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() W = self.get_instream_width() bram16_est = self.bram_estimation() if bram16_est == 0: @@ -441,7 +457,7 @@ def lut_estimation(self): """Calculates resource estimations for LUTs""" impl = self.get_nodeattr("impl_style") ram_type = self.get_nodeattr("ram_style") - depth = self.get_nodeattr("depth") + depth = self.get_adjusted_depth() W = self.get_instream_width() address_luts = 2 * math.ceil(math.log(depth, 2)) From d299636d56b3a73e92eb135e926bf84b64ef843b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 13:23:52 +0200 Subject: [PATCH 114/628] [InsertFIFO] add optional args for max QSRL depth and ram_style --- .../fpgadataflow/insert_fifo.py | 31 +++++++++++++++---- 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 78200b2809..c5b7005145 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -70,16 +70,26 @@ class InsertFIFO(Transformation): node attribute 'outFIFODepth' of the previous and node attribute 'inFIFODepth' of the subsequent node. max() of these two values sets the FIFO depth. - Normally, shallow-depth (<=2) FIFOs won't be created since HLS streaming - interfaces already have a degree of buffering. 
You can set - create_shallow_fifos=True to override this default behavior. + Constructor arguments: + - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of + Verilog FIFOs (Q_srl.v) + - vivado_ram_style : the StreamingFIFO.ram_style attribute to be used for + large FIFOs implemented by Vivado + - create_shallow_fifos : Normally, shallow-depth (<=2) FIFOs won't be created since + HLS streaming interfaces already have a degree of buffering. + Override with this parameter. + The other node attributes necessary to create a FIFO node are taken from the node the FIFO node is inserted after: 'folded_shape' and 'dtype'""" - def __init__(self, create_shallow_fifos=False): + def __init__( + self, create_shallow_fifos=False, max_qsrl_depth=256, vivado_ram_style="auto" + ): super().__init__() self.create_shallow_fifos = create_shallow_fifos + self.max_qsrl_depth = max_qsrl_depth + self.vivado_ram_style = vivado_ram_style def apply(self, model): graph = model.graph @@ -142,7 +152,9 @@ def apply(self, model): ) graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) - + impl_style = ( + "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" + ) fifo_node = oh.make_node( "StreamingFIFO", [n_output], @@ -152,6 +164,8 @@ def apply(self, model): depth=fifo_depth, folded_shape=fld_shape, dataType=str(dtype.name), + impl_style=impl_style, + ram_style=self.vivado_ram_style, ) # insert fifo graph.node.insert(node_ind + 1, fifo_node) @@ -197,6 +211,7 @@ def apply(self, model): ) graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) + impl_style = "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" fifo_node = oh.make_node( "StreamingFIFO", @@ -207,6 +222,8 @@ def apply(self, model): depth=fifo_depth, folded_shape=fld_shape, dataType=str(dtype.name), + impl_style=impl_style, + ram_style=self.vivado_ram_style, ) # insert fifo graph.node.insert(0, fifo_node) @@ -244,7 +261,7 @@ def apply(self, model): ) graph.value_info.append(fifo_input_tensor) model.set_tensor_datatype(fifo_input_tensor.name, dtype) - + impl_style = "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" fifo_node = oh.make_node( "StreamingFIFO", [fifo_input_tensor.name], @@ -254,6 +271,8 @@ def apply(self, model): depth=fifo_depth, folded_shape=fld_shape, dataType=str(dtype.name), + impl_style=impl_style, + ram_style=self.vivado_ram_style, ) # insert fifo graph.node.append(fifo_node) From f6ee5365078be63aaf6d29aee996ec1bf4dff4e6 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 13:24:20 +0200 Subject: [PATCH 115/628] [Build] add new option and logic for chrc-based auto FIFO sizing --- src/finn/builder/build_dataflow_config.py | 13 ++++++++ src/finn/builder/build_dataflow_steps.py | 40 ++++++++++++++++++----- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 09e9ec3a56..b03a8cb5dc 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -37,6 +37,13 @@ from finn.util.basic import alveo_default_platform, alveo_part_map, pynq_part_map +class AutoFIFOSizingMethod(str, Enum): + "Select the type of automatic FIFO sizing strategy." 
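+    # CHARACTERIZE: derive FIFO depths analytically from the per-node I/O
+    # characteristic functions (DeriveCharacteristic followed by DeriveFIFOSizes).
+    # LARGEFIFO_RTLSIM: determine depths via stitched-IP rtlsim using the
+    # InsertAndSetFIFODepths transformation; may take considerably longer.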
+ + CHARACTERIZE = "characterize" + LARGEFIFO_RTLSIM = "largefifo_rtlsim" + + class ShellFlowType(str, Enum): """For builds that produce a bitfile, select the shell flow that will integrate the FINN-generated accelerator.""" @@ -246,6 +253,12 @@ class DataflowBuildConfig: #: for each FIFO. auto_fifo_depths: Optional[bool] = True + #: When `auto_fifo_depths = True`, select which method will be used for + #: setting the FIFO sizes. + auto_fifo_strategy: Optional[ + AutoFIFOSizingMethod + ] = AutoFIFOSizingMethod.CHARACTERIZE + #: Memory resource type for large FIFOs #: Only relevant when `auto_fifo_depths = True` large_fifo_mem_style: Optional[LargeFIFOMemStyle] = LargeFIFOMemStyle.AUTO diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 59f77650da..a87bdd5807 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -64,6 +64,7 @@ res_estimation_complete, ) from finn.builder.build_dataflow_config import ( + AutoFIFOSizingStrategy, DataflowBuildConfig, DataflowOutputType, ShellFlowType, @@ -78,6 +79,10 @@ CreateDataflowPartition, ) from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP +from finn.transformation.fpgadataflow.derive_characteristic import ( + DeriveCharacteristic, + DeriveFIFOSizes, +) from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_dwc import InsertDWC from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO @@ -85,6 +90,7 @@ from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.replace_verilog_relpaths import ( ReplaceVerilogRelPaths, ) @@ -446,9 +452,9 @@ def step_hls_ipgen(model: ModelWrapper, cfg: DataflowBuildConfig): def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): """ Depending on the auto_fifo_depths setting, do one of the following: - * if auto_fifo_depths=True: Run the `InsertAndSetFIFODepths` transformation - to attempt to determine the FIFO sizes that provide full throughput. Involves - running stitched-IP rtlsim and may take a long time. + * if auto_fifo_depths=True: Run the appropriate auto-sizing transformation + to attempt to determine the FIFO sizes that provide full throughput. + May take a long time. * if auto_fifo_depths=False: Assume the folding config file contains FIFO sizes as well. Runs the `InsertFIFO` transformation, then `ApplyConfig(cfg.folding_config_file)`, and finally `RemoveShallowFIFOs`. 
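A minimal usage sketch for the new auto_fifo_strategy option (assuming the
DataflowBuildConfig fields exercised in test_fifosizing.py above; the model
path and output_dir are placeholders):

    import finn.builder.build_dataflow as build
    import finn.builder.build_dataflow_config as build_cfg

    cfg = build_cfg.DataflowBuildConfig(
        output_dir="build_fifosizing_out",
        target_fps=10000,
        synth_clk_period_ns=10.0,
        board="Pynq-Z1",
        shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
        auto_fifo_depths=True,
        # new in this patch: characteristic-function based sizing
        auto_fifo_strategy=build_cfg.AutoFIFOSizingMethod.CHARACTERIZE,
        generate_outputs=[build_cfg.DataflowOutputType.STITCHED_IP],
    )
    build.build_dataflow_cfg("model.onnx", cfg)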
@@ -457,13 +463,29 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): """ if cfg.auto_fifo_depths: - model = model.transform( - InsertAndSetFIFODepths( - cfg._resolve_fpga_part(), - cfg._resolve_hls_clk_period(), - vivado_ram_style=cfg.large_fifo_mem_style, + if cfg.auto_fifo_strategy == AutoFIFOSizingStrategy.CHARACTERIZE: + model = model.transform(InsertDWC()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) + ) + model = model.transform(HLSSynthIP()) + model = model.transform(PrepareRTLSim()) + model = model.transform(AnnotateCycles()) + period = model.analysis(dataflow_performance)["max_cycles"] + 10 + model = model.transform(DeriveCharacteristic(period)) + model = model.transform(DeriveFIFOSizes()) + model = model.transform( + InsertFIFO(vivado_ram_style=cfg.large_fifo_mem_style) + ) + elif cfg.auto_fifo_strategy == AutoFIFOSizingStrategy.LARGEFIFO_RTLSIM: + model = model.transform( + InsertAndSetFIFODepths( + cfg._resolve_fpga_part(), + cfg._resolve_hls_clk_period(), + vivado_ram_style=cfg.large_fifo_mem_style, + ) ) - ) else: # assume folding cfg json contains FIFO sizes too # insert DWCs, FIFOs and run ApplyConfig once more From c1ce443ebff397b57794bcf015f6963a16d31d64 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 18:07:28 +0200 Subject: [PATCH 116/628] [Stitch] print failing dir if IP stitch fails --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 892ab09fdf..1287f677ee 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -565,6 +565,10 @@ def apply(self, model): if os.path.isfile(wrapper_filename_alt): model.set_metadata_prop("wrapper_filename", wrapper_filename_alt) else: - raise Exception("CreateStitchedIP failed, no wrapper HDL found.") + raise Exception( + """CreateStitchedIP failed, no wrapper HDL found under %s or %s. 
+ Please check logs under the parent directory.""" + % (wrapper_filename, wrapper_filename_alt) + ) return (model, False) From d2b0d7be0a0cc2d78391032ed478363b1ad0d121 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 18:07:59 +0200 Subject: [PATCH 117/628] [Build] small fixes to new FIFO sizing integration --- src/finn/builder/build_dataflow_steps.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index a87bdd5807..141f7caafe 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -64,7 +64,6 @@ res_estimation_complete, ) from finn.builder.build_dataflow_config import ( - AutoFIFOSizingStrategy, DataflowBuildConfig, DataflowOutputType, ShellFlowType, @@ -463,7 +462,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): """ if cfg.auto_fifo_depths: - if cfg.auto_fifo_strategy == AutoFIFOSizingStrategy.CHARACTERIZE: + if cfg.auto_fifo_strategy == "characterize": model = model.transform(InsertDWC()) model = model.transform(GiveUniqueNodeNames()) model = model.transform( @@ -478,7 +477,9 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform( InsertFIFO(vivado_ram_style=cfg.large_fifo_mem_style) ) - elif cfg.auto_fifo_strategy == AutoFIFOSizingStrategy.LARGEFIFO_RTLSIM: + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + elif cfg.auto_fifo_strategy == "largefifo_rtlsim": model = model.transform( InsertAndSetFIFODepths( cfg._resolve_fpga_part(), @@ -486,6 +487,8 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): vivado_ram_style=cfg.large_fifo_mem_style, ) ) + else: + assert "Unsupported auto_fifo_strategy: " + cfg.auto_fifo_strategy else: # assume folding cfg json contains FIFO sizes too # insert DWCs, FIFOs and run ApplyConfig once more From f46e2d0a79a6a19cd09e4ee3d0503d81a42cc87e Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 25 Aug 2022 00:16:02 +0200 Subject: [PATCH 118/628] Restructure, basic resource estimation --- finn-rtllib/swg/swg_template_parallel.sv | 74 +- .../convolutioninputgenerator_rtl.py | 1474 +++++++++-------- ...est_fpgadataflow_convinputgenerator_rtl.py | 81 +- 3 files changed, 915 insertions(+), 714 deletions(-) diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv index 7c1e042227..19638d8a1d 100755 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -3,13 +3,15 @@ module $TOP_MODULE_NAME$_controller ( CLK, - cycle, + RST, + advance, cmd_read, cmd_write ); input CLK; -input [31:0] cycle; //todo: minimize width or switch to single bit flag +input RST; +input advance; output cmd_read; output cmd_write; @@ -39,10 +41,6 @@ integer counter_loop_inter; assign cmd_read = READ_CMD_MAP[state_next]; //read command indicates read in *upcoming* cycle, due to how schedule is constructed assign cmd_write = WRITE_CMD_MAP[state]; -reg cycle_last; -wire cycle_advance; -assign cycle_advance = !(cycle == cycle_last); - //combinational next state logic always @ (state, counter_current, counter_loop_main, counter_loop_inter) begin state_next = state; //default @@ -67,7 +65,7 @@ always @ (state, counter_current, counter_loop_main, counter_loop_inter) begin if (LOOP_END_1_COUNTER != 0) state_next = STATE_END_1; else - state_next = STATE_START; + state_next = STATE_LOOP_MAIN_2; 
//wait in current state until reset end end end @@ -91,49 +89,46 @@ always @ (state, counter_current, counter_loop_main, counter_loop_inter) begin if (LOOP_END_2_COUNTER != 0) state_next = STATE_END_2; else - state_next = STATE_START; + state_next = STATE_END_1; //wait in current state until reset end end STATE_END_2: if (counter_current == LOOP_END_2_COUNTER-1) - state_next = STATE_START; + state_next = STATE_END_2; //wait in current state until reset endcase end //sequential logic always @ (posedge CLK) begin - if (cycle == 0) begin - counter_current <= 0; + if (RST) begin + counter_current <= -1; counter_loop_main <= 0; counter_loop_inter <= 0; - cycle_last <= 0; state <= STATE_START; end else begin - cycle_last <= cycle; - state <= state_next; - - if (cycle_advance) begin + if (advance) begin counter_current <= counter_current+1; - end + state <= state_next; - if (state != state_next) begin - counter_current <= 0; + if (state != state_next) begin + counter_current <= 0; - //count up main loop upon re-entering this loop (not on first enter from start) - if ((state_next == STATE_LOOP_MAIN_1) && (state != STATE_START)) begin - if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin - counter_loop_main <= 0; - end else begin - counter_loop_main <= counter_loop_main+1; + //count up main loop upon re-entering this loop (not on first enter from start) + if ((state_next == STATE_LOOP_MAIN_1) && (state != STATE_START)) begin + if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin + counter_loop_main <= 0; + end else begin + counter_loop_main <= counter_loop_main+1; + end end - end - if (state_next == STATE_LOOP_INTER_1) begin - if (counter_loop_inter == LOOP_INTER_COUNTER) begin //no -1 because this counter marks the currently active iteration, not finished iterations - counter_loop_inter <= 0; - end else begin - counter_loop_inter <= counter_loop_inter+1; + if (state_next == STATE_LOOP_INTER_1) begin + if (counter_loop_inter == LOOP_INTER_COUNTER) begin //no -1 because this counter marks the currently active iteration, not finished iterations + counter_loop_inter <= 0; + end else begin + counter_loop_inter <= counter_loop_inter+1; + end end end end @@ -169,8 +164,8 @@ output [WIDTH*DEPTH-1:0] data_out; // File: shift_registers_1.v // //module shift_registers_1 (clk, clken, SI, SO); -//parameter WIDTH = 32; -//input clk, clken, SI; +//parameter WIDTH = 32; +//input clk, clken, SI; //output SO; //reg [WIDTH-1:0] shreg; // @@ -181,7 +176,7 @@ output [WIDTH*DEPTH-1:0] data_out; // begin // for (i = 0; i < WIDTH-1; i = i+1) // shreg[i+1] <= shreg[i]; -// shreg[0] <= SI; +// shreg[0] <= SI; // end //end //assign SO = shreg[WIDTH-1]; @@ -227,7 +222,7 @@ integer addr_w, addr_r; //todo: minimize width (as reg), make r addr depend on w $RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; -always @(posedge CLK) begin +always @(posedge CLK) begin if (RST == 1'b0) begin addr_w <= 0; addr_r <= 1; @@ -349,11 +344,15 @@ wire read_cmd; wire write_cmd; reg write_done; //keep track if W of current cycle was already completed, but we still wait on a R in the same cycle +wire controller_reset; +wire controller_advance; + $TOP_MODULE_NAME$_controller controller_inst ( .CLK(ap_clk), - .cycle(cycle), + .RST(controller_reset), + .advance(controller_advance), .cmd_read(read_cmd), .cmd_write(write_cmd) ); @@ -379,6 +378,9 @@ assign advance = read_ok || (!read_cmd && write_ok) || (!read_c //todo: if mmv_out < k: might not shift and/or write for multiple read_cmd cycles assign window_buffer_shift_enable = advance; +assign 
controller_reset = !ap_rst_n || ((cycle == CYCLES_TOTAL-1) && advance); +assign controller_advance = advance; + //assign I/O ports assign window_buffer_in = in0_V_V_TDATA; assign out_V_V_TDATA = window_buffer_out; diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 9369542582..f1e0f53a7a 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -27,21 +27,17 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math -from math import copysign import numpy as np import os - +from math import copysign from qonnx.core.datatype import DataType from qonnx.custom_op.general import im2col from qonnx.custom_op.general.im2col import compute_conv_output_dim + from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp +from finn.util.basic import get_rtlsim_trace_depth, make_build_dir from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy -from finn.util.basic import ( - get_rtlsim_trace_depth, - make_build_dir, -) - try: from pyverilator import PyVerilator except ModuleNotFoundError: @@ -57,9 +53,124 @@ # * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) # * depthwise SWG: (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD) + +# helper functions for parallel mode buffer scheduling (to be superseded by improved implementation): + + +def schedule_append(schedule, op): + if len(schedule) > 0 and schedule[-1][1] == op: + count, op_ = schedule[-1] + schedule[-1] = (count + 1, op_) + else: + schedule.append((1, op)) + return schedule + + +def schedule_map_cmds(seq): + mapping = { + "w": ("1'b1", "1'b0"), + "r": ("1'b0", "1'b1"), + "wr": ("1'b1", "1'b1"), + "n": ("1'b0", "1'b0"), + } + if seq: + if len(seq) == 2: + return (seq[0], mapping[seq[1]], 0, mapping["n"]) + if len(seq) == 4: + return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) + else: + return (0, mapping["n"], 0, mapping["n"]) + + +def schedule_map_controller(schedule): + # Experimental implementation to map fixed controller loop structure to R/W schedule by analyzing + # the access pattern given by Im2Col, rather than direct computation. + # TODO: Probably replace this with a directly-computed schedule, similar to the default implementation style. 
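+    # The incoming schedule is a run-length encoded list of (count, op) tuples
+    # built by schedule_append() above, with op in {"w", "r", "wr", "n"}
+    # (write / read / read+write / idle), e.g. [(1, "w"), (3, "wr"), (2, "n")].
+    # The code below compacts it into a start sequence, an inner and an outer
+    # loop sequence with repeat counts, and an optional end sequence, matching
+    # the fixed loop structure of the parallel controller template.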
+ + # leave first sequence (pre-load) as is + start_sequence = schedule[0] + loop_sequence_1_counter = 1 + loop_sequence_1 = schedule[1] + loop_counter = 0 + loop_sequence_2 = None + end_sequence = None + + i = 2 + if i < len(schedule): + loop_sequence_1 += schedule[i] + i += 1 + while i + 1 < len(schedule): + candidate = schedule[i] + schedule[i + 1] + if candidate == loop_sequence_1: + loop_sequence_1_counter += 1 + i += 2 + else: + break + + if i < len(schedule): + loop_sequence_2 = schedule[i] + i += 1 + if i + 1 < len(schedule): + candidate = schedule[i] + schedule[i + 1] + if candidate != loop_sequence_1: + loop_sequence_2 += schedule[i] + i -= 1 + loop_sequence_total_len = ( + int(len(loop_sequence_2) / 2) + ) + loop_sequence_1_counter * (int(len(loop_sequence_1) / 2)) + loop_sequence_total = ( + loop_sequence_2 + loop_sequence_1_counter * loop_sequence_1 + ) + while i + loop_sequence_total_len < len(schedule): + candidate = schedule[i] + for x in range(i + 1, i + loop_sequence_total_len): + candidate += schedule[x] + + if candidate == loop_sequence_total: + loop_counter += 1 + i += loop_sequence_total_len + else: + break + else: + if i < len(schedule): + end_sequence = loop_sequence_2 + schedule[i] + i += 1 + loop_sequence_2 = None + else: + end_sequence = loop_sequence_2 + loop_sequence_2 = None + + if i < len(schedule): + end_sequence = schedule[i] + i += 1 + if i < len(schedule): + end_sequence = end_sequence + schedule[i] + i += 1 + + assert len(start_sequence) == 1 * 2, "ERROR: invalid start sequence" + assert len(loop_sequence_1) == 2 * 2, "ERROR: invalid loop 1 sequence" + if loop_sequence_2: + assert len(loop_sequence_2) <= 2 * 2, "ERROR: invalid loop 2 sequence" + if end_sequence: + assert len(end_sequence) <= 2 * 2, "ERROR: invalid end sequence" + assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" % ( + i, + len(schedule), + ) + + return ( + start_sequence, + loop_counter, + loop_sequence_1_counter, + loop_sequence_1, + loop_sequence_2, + end_sequence, + ) + + class ConvolutionInputGenerator_rtl(HLSCustomOp): """Class that does not correspond to one of the finn-hlslib ConvolutionInputGenerator - (sliding window) function variants! ... """ + (sliding window) function variants! 
...""" def __init__(self, onnx_node): super().__init__(onnx_node) @@ -108,12 +219,12 @@ def get_folded_input_shape(self): M = self.get_nodeattr("M") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" wf = int(ifm_ch / simd) - #folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) - #round up to support ifm_dim % M != 0 + # folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) + # round up to support ifm_dim % M != 0 if ifm_dim_w == 1: - folded_ishape = (1, math.ceil(ifm_dim_h/M), ifm_dim_w, wf, int(simd*M)) + folded_ishape = (1, math.ceil(ifm_dim_h / M), ifm_dim_w, wf, int(simd * M)) else: - folded_ishape = (1, ifm_dim_h, math.ceil(ifm_dim_w/M), wf, int(simd*M)) + folded_ishape = (1, ifm_dim_h, math.ceil(ifm_dim_w / M), wf, int(simd * M)) return folded_ishape def get_normal_output_shape(self): @@ -140,13 +251,25 @@ def get_folded_output_shape(self): ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" - if (self.get_nodeattr("parallel_window")): + if self.get_nodeattr("parallel_window"): wf = int((ifm_ch) // simd) - #folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) + # folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) if ofm_dim_w == 1: - folded_oshape = (1, int(ofm_dim_h/M), ofm_dim_w, wf, k_h * k_w * int(simd*M)) + folded_oshape = ( + 1, + int(ofm_dim_h / M), + ofm_dim_w, + wf, + k_h * k_w * int(simd * M), + ) else: - folded_oshape = (1, ofm_dim_h, int(ofm_dim_w/M), wf, k_h * k_w * int(simd*M)) + folded_oshape = ( + 1, + ofm_dim_h, + int(ofm_dim_w / M), + wf, + k_h * k_w * int(simd * M), + ) else: wf = int((k_h * k_w * ifm_ch) // simd) folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd) @@ -186,7 +309,7 @@ def get_instream_width(self): return in_width def get_outstream_width(self): - if (self.get_nodeattr("parallel_window")): + if self.get_nodeattr("parallel_window"): # feed all window pixels in parallel k_h, k_w = self.get_nodeattr("ConvKernelDim") return self.get_instream_width() * k_h * k_w @@ -205,25 +328,31 @@ def get_number_output_values(self): return num_output_elems def get_exp_cycles(self): - # TODO: update simd = self.get_nodeattr("SIMD") + m = self.get_nodeattr("M") ifm_ch = self.get_nodeattr("IFMChannels") k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") ofm_dim = self.get_nodeattr("OFMDim") stride = self.get_nodeattr("Stride") dilation = self.get_nodeattr("Dilation") + depthwise = self.get_nodeattr("depthwise") ifm_dim_h, ifm_dim_w = ifm_dim ofm_dim_h, ofm_dim_w = ofm_dim k_h, k_w = k stride_h, stride_w = stride dilation_h, dilation_w = dilation - mmv = 1 + k_h, k_w = k + stride_h, stride_w = stride + dilation_h, dilation_w = dilation - if (self.get_nodeattr("parallel_window")): - exp_cycles = ifm_dim_w + 1 + impl_style = self.select_impl_style() + if impl_style == "parallel": + exp_cycles = self.get_number_input_values() + 2 else: - cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv + # based on 2D HLS SWG estimate + # FIXME: increase accuracy for newly supported parameter scenarios + cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / 1 cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) max_cycles = max(cycles_write_block, cycles_read_block) exp_cycles = ( @@ -233,15 +362,21 @@ def get_exp_cycles(self): return int(exp_cycles) def bram_estimation(self): - # TODO: update simd = self.get_nodeattr("SIMD") - ifm_ch = 
self.get_nodeattr("IFMChannels") - ifm_dim = np.prod(self.get_nodeattr("IFMDim")) - k = np.prod(self.get_nodeattr("ConvKernelDim")) - stride = np.prod(self.get_nodeattr("Stride")) ram_style = self.get_nodeattr("ram_style") + + impl_style = self.select_impl_style() + # call codegen preparation to populate self.buffer_depth + if impl_style == "default": + template_path, code_gen_dict = self.prepare_codegen_default() + elif impl_style == "parallel": + template_path, code_gen_dict = self.prepare_codegen_parallel() + + buffer_width = simd * self.get_input_datatype().bitwidth() + buffer_depth = self.buffer_depth + if ram_style == "block" or ram_style == "auto": - ram_depth = ifm_dim * ifm_ch / simd + ram_depth = buffer_depth if ram_depth <= 512: ram_width = 36 elif ram_depth <= 1024: @@ -254,57 +389,37 @@ def bram_estimation(self): ram_width = 2 else: ram_width = 1 - return int( - (k + stride) - * ( - math.ceil(simd * self.get_input_datatype().bitwidth() / ram_width) - * math.ceil(ifm_dim * ifm_ch / simd / ram_depth) - ) - ) + + ram_cascade_depth = math.ceil(buffer_depth / 16384) + ram_cascade_width = math.ceil(buffer_width / ram_width) + + return int(ram_cascade_depth * ram_cascade_width) else: return 0 def lut_estimation(self): - # TODO: update - # NOTE: not tested for correctness simd = self.get_nodeattr("SIMD") - ifm_ch = self.get_nodeattr("IFMChannels") - ifm_dim = np.prod(self.get_nodeattr("IFMDim")) - k = np.prod(self.get_nodeattr("ConvKernelDim")) - stride = np.prod(self.get_nodeattr("Stride")) ram_style = self.get_nodeattr("ram_style") + + impl_style = self.select_impl_style() + # call codegen preparation to populate self.buffer_depth + if impl_style == "default": + template_path, code_gen_dict = self.prepare_codegen_default() + elif impl_style == "parallel": + template_path, code_gen_dict = self.prepare_codegen_parallel() + + buffer_width = simd * self.get_input_datatype().bitwidth() + buffer_depth = self.buffer_depth + if ram_style == "distributed": - ram_luts = int( - (k + stride) - * ( - simd - * self.get_input_datatype().bitwidth() - * math.ceil(ifm_dim * ifm_ch / simd / 64) - ) - ) + ram_luts = int(buffer_width * math.ceil(buffer_depth / 32)) else: ram_luts = 0 return 300 + ram_luts def uram_estimation(self): - # TODO: update - # NOTE: not tested for correctness - simd = self.get_nodeattr("SIMD") - ifm_ch = self.get_nodeattr("IFMChannels") - ifm_dim = np.prod(self.get_nodeattr("IFMDim")) - k = np.prod(self.get_nodeattr("ConvKernelDim")) - stride = np.prod(self.get_nodeattr("Stride")) - ram_style = self.get_nodeattr("ram_style") - if ram_style == "ultra": - return int( - (k + stride) - * ( - math.ceil(simd * self.get_input_datatype().bitwidth() / 64) - * math.ceil(ifm_dim * ifm_ch / simd / 4096) - ) - ) - else: - return 0 + # TODO: implement URAM estimation + return 0 def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") @@ -314,14 +429,8 @@ def execute_node(self, context, graph): folded_ishape = self.get_folded_input_shape() folded_oshape = self.get_folded_output_shape() - # TODO ensure codegen dir exists if mode == "cppsim": - #code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") - raise Exception( - """cppsim not possible for RTL SWG""".format( - mode - ) - ) + raise Exception("""cppsim not possible for RTL SWG""".format(mode)) elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: @@ -335,10 +444,10 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype 
is not float32" # disable this check to allow for IFMdim % M != 0 case (see below) where input comes from MMV-output capable node - #assert ( + # assert ( # inp.shape == exp_ishape - #), """Input shape doesn't - #match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" + # ), """Input shape doesn't + # match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" if self.get_input_datatype() == DataType["BIPOLAR"]: # store bipolar activations as binary inp = (inp + 1) / 2 @@ -349,11 +458,17 @@ def execute_node(self, context, graph): # pad test input stream to work when IFMdim % M != 0 # during normal operation, the AXI Stream should not care, in the last cycle garbage elements are read but not used # TODO: only works for 1D case - mmv_stream_padding_px = int((np.prod(folded_ishape) - np.prod(inp.shape)) / exp_ishape[-1]) - if exp_ishape [2] == 1: - inp = np.pad(inp, ((0,0),(0,mmv_stream_padding_px),(0,0),(0,0)), 'constant') + mmv_stream_padding_px = int( + (np.prod(folded_ishape) - np.prod(inp.shape)) / exp_ishape[-1] + ) + if exp_ishape[2] == 1: + inp = np.pad( + inp, ((0, 0), (0, mmv_stream_padding_px), (0, 0), (0, 0)), "constant" + ) else: - inp = np.pad(inp, ((0,0),(0,0),(0,mmv_stream_padding_px),(0,0)), 'constant') + inp = np.pad( + inp, ((0, 0), (0, 0), (0, mmv_stream_padding_px), (0, 0)), "constant" + ) # reshape input into folded form inp = inp.reshape(folded_ishape) # make copy before saving array @@ -391,633 +506,660 @@ def execute_node(self, context, graph): ), """Output shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch).""" - def global_includes(self): - pass + def prepare_codegen_default(self): + # Default implementation style for MMV_out = 1: addressable cyclic buffer + # Computing incremental addressing scheme directly.. 
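+        # The buffer must hold at least one complete window:
+        # ((k_h-1)*dilation_h*w + (k_w-1)*dilation_w + 1) * (ifm_ch/simd) elements,
+        # e.g. (2*32 + 2 + 1) * 4 = 268 for k=3x3, dilation=1, w=32, ifm_ch=16,
+        # simd=4 (illustrative values only); extra slots are added for stride > 1.
+        # Read addresses then step through this cyclic buffer using the per-loop
+        # increments (SIMD innermost, then KW, KH, W, H) computed below.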
+ template_path = ( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_default.sv" + ) + code_gen_dict = {} - def defines(self, var): - pass + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + depthwise = self.get_nodeattr("depthwise") + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") - def read_npy_data(self): - pass + k_h, k_w = k + h, w = ifm_dim + pad = [0, 0, 0, 0] # padding happens in separate padding node for now + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) + out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) - def strm_decl(self): - pass + if self.get_nodeattr("parallel_window"): + mmv_in = M * 1 + mmv_out = M * k_h * k_w + else: + mmv_in = 1 + mmv_out = 1 - def docompute(self): - pass + # compute index/address increments for each nested loop + channel_factor = int(ifm_ch / simd) + + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_min_size = ( + (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + + # add additional buffer space in case of stride > 1 + # this minimizes cycle count, as it allows an earlier pre-load of skipped input elements + buffer_actual_size = ( + buffer_min_size + + max( + 0, + ((stride_w - 1) - (int(mmv_out * k_h * k_w / mmv_in))) * channel_factor, + ) + + max( + 0, + ((stride_h - 1) * w - (int(mmv_out * k_h * k_w / mmv_in))) + * channel_factor, + ) + ) + self.buffer_depth = buffer_actual_size # for resource estimation + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] + + # compute some intermediate values, e.g., kernel "width" = k_w incl. 
dilation + # or cols/rows that are skipped due to imperfect stride<->dim combination + kernel_width = (k_w - 1) * dilation_w + 1 + kernel_height = (k_h - 1) * dilation_h + 1 + skip_columns = w % (kernel_width + (out_dim_w - 1) * stride_w) + skip_rows = h % (kernel_height + (out_dim_h - 1) * stride_h) + + # compute address increment values for 5-loop nest + addr_incr_end_simd = 1 + addr_incr_end_window_elem = (dilation_w - 1) * channel_factor + 1 + addr_incr_end_window_row = ( + ((w - kernel_width) * channel_factor) # remaining line + + ((dilation_h - 1) * w * channel_factor) # skip lines + + 1 # wrap-around of minimally sized buffer + ) + addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 + addr_incr_end_row = ( + -buffer_min_size + + ((skip_columns + kernel_width) * channel_factor) # remaining line + + ((stride_h - 1) * w * channel_factor) # skip lines + + 1 + ) - def dataoutstrm(self): - pass + # re-use same controller structure -> re-assign address increments for the dw case + if depthwise: + addr_incr_end_window_elem = dilation_w * channel_factor + addr_incr_end_window_row = ( + channel_factor + + (w - kernel_width) * channel_factor + + (dilation_h - 1) * w * channel_factor + ) + addr_incr_end_simd = -buffer_min_size + (channel_factor + 1) + + # sanity check + assert not ( + abs(addr_incr_end_window) > buffer_actual_size + ), "ERROR: W increment > buffer size, wrap logic doesn't account for this" + assert not ( + abs(addr_incr_end_row) > buffer_actual_size + ), "ERROR: H increment > buffer size, wrap logic doesn't account for this" + + # set certain threshold indices to detect when reading/writing finishes + code_gen_dict["$LAST_READ_ELEM$"] = [str(h * w * channel_factor - 1)] + code_gen_dict["$LAST_WRITE_ELEM$"] = [ + str(((h - skip_rows - 1) * w + (w - skip_columns)) * channel_factor - 1) + ] + + # default controller loop structure: # iterations (counters) map directly + loop_h_iterations = out_dim_h + loop_w_iterations = out_dim_w + loop_kh_iterations = k_h + loop_kw_iterations = k_w + loop_simd_iterations = channel_factor + + if depthwise and channel_factor > 1: + # re-arrange existing controller loop structure for depthwise convolutions + loop_kh_iterations = channel_factor + loop_kw_iterations = k_h + loop_simd_iterations = k_w + addr_incr_end_simd_ = addr_incr_end_simd + addr_incr_end_simd = addr_incr_end_window_elem + addr_incr_end_window_elem = addr_incr_end_window_row + addr_incr_end_window_row = addr_incr_end_simd_ + elem_per_window = k_h * k_w + + tail_incr_w = addr_incr_end_window + buffer_min_size - channel_factor + tail_incr_h = addr_incr_end_row + buffer_min_size - channel_factor + tail_incr_last_window = buffer_min_size - 1 + code_gen_dict["$TAIL_INCR_GENERATION$"] = [ + """ + always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin + if (counter_loop_kh >= 0) + tail_incr_reg = 1; + else if (counter_loop_w >= 0) + tail_incr_reg = {}; + else if (counter_loop_h >= 0) + tail_incr_reg = {}; + else + tail_incr_reg = {}; + end + """.format( + tail_incr_w, tail_incr_h, tail_incr_last_window + ) + ] + else: + # depthwise output format is equivalent to non-depthwise if SIMD=C + elem_per_window = k_h * k_w * channel_factor + + tail_incr_w = addr_incr_end_window + buffer_min_size - 1 + tail_incr_h = addr_incr_end_row + buffer_min_size - 1 + tail_incr_last_window = buffer_min_size - 1 + code_gen_dict["$TAIL_INCR_GENERATION$"] = [ + """ + always @ (counter_loop_w, counter_loop_h) begin + if (counter_loop_w >= 0) + tail_incr_reg = {}; + else if 
(counter_loop_h >= 0) + tail_incr_reg = {}; + else + tail_incr_reg = {}; + end + """.format( + tail_incr_w, tail_incr_h, tail_incr_last_window + ) + ] + + # support SIMD = C and k_w = 1 cases + # for k = [k_h, k_w] = [1, k_w], no adjustment is needed + # for k = [k_h, k_w] = [1, 1], do not use this impl. style (mmv_out=K=1) + # innermost loop is executed at least once -> adjust if needed + if loop_simd_iterations == 1: + # skip innermost SIMD loop completely + if loop_kw_iterations == 1: + # skip innermost KW loop completely + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KH"] + loop_kh_iterations -= 1 # -1 because state is initial state + else: + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KW"] + loop_kw_iterations -= 1 # -1 because state is initial state + else: + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_SIMD"] + loop_simd_iterations -= 1 # -1 because state is initial state + + code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 1)] + code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 1)] + code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 1)] + code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 1)] + code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 1)] + + incr_bitwidth = 1 + math.ceil( + math.log2( + max( + abs(addr_incr_end_simd) + 1, + abs(addr_incr_end_window_elem) + 1, + abs(addr_incr_end_window_row) + 1, + abs(addr_incr_end_window) + 1, + abs(addr_incr_end_row) + 1, + abs(tail_incr_w) + 1, + abs(tail_incr_h) + 1, + abs(tail_incr_last_window) + 1, + ) + ) + ) + code_gen_dict["$INCR_BITWIDTH$"] = [str(incr_bitwidth)] + code_gen_dict["$ADDR_INCREMENT_MAP$"] = [ + "'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format( + incr_bitwidth, + int(copysign(incr_bitwidth, addr_incr_end_simd)), + abs(addr_incr_end_simd), + int(copysign(incr_bitwidth, addr_incr_end_window_elem)), + abs(addr_incr_end_window_elem), + int(copysign(incr_bitwidth, addr_incr_end_window_row)), + abs(addr_incr_end_window_row), + int(copysign(incr_bitwidth, addr_incr_end_window)), + abs(addr_incr_end_window), + int(copysign(incr_bitwidth, addr_incr_end_row)), + abs(addr_incr_end_row), + ) + ] - def save_as_npy(self): - pass + code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] + code_gen_dict["$SIMD$"] = [str(simd)] + code_gen_dict["$MMV_IN$"] = [str(mmv_in)] + code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - def blackboxfunction(self): - pass + return template_path, code_gen_dict - def pragmas(self): - pass - - def generate_hdl(self): - code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - #f_debug = open(os.path.join(code_gen_dir, "swg_hdl_debuginfo.log"), "w") + def prepare_codegen_parallel(self): + # Parallel implementation style for MMV_out = K: + # mix of shift-registers (for parallel read) and line buffers (BRAM or LUTRAM) + # compute a static schedule by analyzing access pattern (from im2col function) + template_path = ( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" + ) code_gen_dict = {} - ##### BEGIN INITIALIZE/CHECK CONFIGURATION ##### ifm_ch = self.get_nodeattr("IFMChannels") k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") - ofm_dim = self.get_nodeattr("OFMDim") stride = self.get_nodeattr("Stride") dilation = self.get_nodeattr("Dilation") - depthwise = self.get_nodeattr("depthwise") + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") - n = 1 - h, w = ifm_dim - c = 1 # assume SIMD=C (parallelize across all channels) k_h, k_w = k - pad 
= [0,0,0,0] # padding happens in separate padding node for now - pad_val = 0 + h, w = ifm_dim + n = c = 1 # no need to consider fully-parallel C dimension + in_shape = (n, c, h, w) + pad = [0, 0, 0, 0] stride_h, stride_w = stride dilation_h, dilation_w = dilation - - in_shape = (n,c,h,w) #NCHW - in_image = np.empty(in_shape, dtype=int) in_image_padded = np.pad( in_image, ((0, 0), (0, 0), (pad[0], pad[2]), (pad[1], pad[3])), mode="constant", - constant_values=pad_val, + constant_values=0, ) in_shape_padded = in_image_padded.shape h_padded = in_shape_padded[2] w_padded = in_shape_padded[3] - pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) - # init folding config - simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") - if (self.get_nodeattr("parallel_window")): - mmv_in = M*1 - mmv_out = M*k_h*k_w - assert ifm_ch==simd, "Constraint violated: SIMD must be equal to C" + if self.get_nodeattr("parallel_window"): + mmv_in = M * 1 + mmv_out = M * k_h * k_w + assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" else: mmv_in = 1 mmv_out = 1 - assert ifm_ch%simd==0, "Constraint violated: SIMD must divide C" + assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide C" - # TODO: check allowed hyperparams - # for 1D case: it does not matter if dummy dim is x or y - # TODO: move/duplicate these checks in corresponding convert_to_hls transformation (?) - - # choose implementation style - if (mmv_out > 1 or (k_h==1 and k_w==1)): - impl_style = "parallel" - else: - impl_style = "default" - - ##### END INITIALIZE/CHECK CONFIGURATION ##### - - ##### BEGIN CODE GEN FOR DEFAULT STYLE ##### - if (impl_style == "default"): - # Default implementation style for MMV_out = 1: addressable cyclic buffer - # Computing incremental addressing scheme directly.. - - # compute index/address increments for each nested loop - channel_factor = int(ifm_ch/simd) - - # compute minimal buffer length (assuming it holds 1 complete window) - buffer_min_size = ((k_h-1) * dilation_h * w + (k_w-1) * dilation_w + 1) * channel_factor - - kernel_width = (k_w-1)*dilation_w+1 # incl. 
dilation - addr_incr_end_simd = 1 - addr_incr_end_window_elem = (dilation_w-1) * channel_factor + 1 - - remaining_line = (w - kernel_width) * channel_factor - skip_lines = (dilation_h-1) * w * channel_factor - addr_incr_end_window_row = remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer - - addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 # 1 = wrap around of minimally sized buffer - - # rows that are skipped due to imperfect stride<->W combination - skip_columns = w%(kernel_width + (out_dim_w-1)*stride_w) - remaining_line = (skip_columns + kernel_width) * channel_factor # increment from oldest buffer position (top left) to end of line - skip_lines = (stride_h-1) * w * channel_factor - addr_incr_end_row = -buffer_min_size + remaining_line + skip_lines + 1 # 1 = wrap around of minimally sized buffer - - if (depthwise): - addr_incr_end_window_elem = dilation_w * channel_factor - addr_incr_end_window_row = (channel_factor - + (w - kernel_width) * channel_factor - + (dilation_h-1) * w * channel_factor - ) - addr_incr_end_simd = -buffer_min_size + (channel_factor + 1) - - # add additional buffer space in case of stride > 1 - # this minimizes cycle count, as it allows an earlier pre-load of skipped input elements - buffer_actual_size = (buffer_min_size + max(0,((stride_w-1) - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor) - + max(0,((stride_h-1)*w - (int(mmv_out*k_h*k_w/mmv_in)))*channel_factor)) - code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] - - assert not(abs(addr_incr_end_window) > buffer_actual_size), "ERROR: W increment > buffer size, wrap logic doesn't account for this" - assert not(abs(addr_incr_end_row) > buffer_actual_size), "ERROR: H increment > buffer size, wrap logic doesn't account for this" - - kernel_width = (k_w-1)*dilation_w+1 # incl. dilation - kernel_height = (k_h-1)*dilation_h+1 # incl. 
dilation - skip_columns = w%(kernel_width + (out_dim_w-1)*stride_w) - skip_rows = h%(kernel_height + (out_dim_h-1)*stride_h) - code_gen_dict["$LAST_READ_ELEM$"] = [str(h*w*channel_factor-1)] - code_gen_dict["$LAST_WRITE_ELEM$"] = [str(((h - skip_rows - 1) * w + (w - skip_columns))*channel_factor -1)] - - loop_h_iterations = out_dim_h - loop_w_iterations = out_dim_w - loop_kh_iterations = k_h - loop_kw_iterations = k_w - loop_simd_iterations = channel_factor - - if (depthwise and channel_factor > 1): - # re-arrange existing controller loop structure for depthwise convolutions - loop_kh_iterations = channel_factor - loop_kw_iterations = k_h - loop_simd_iterations = k_w - addr_incr_end_simd_ = addr_incr_end_simd - addr_incr_end_simd = addr_incr_end_window_elem - addr_incr_end_window_elem = addr_incr_end_window_row - addr_incr_end_window_row = addr_incr_end_simd_ - elem_per_window = k_h*k_w - - tail_incr_w = addr_incr_end_window + buffer_min_size - channel_factor - tail_incr_h = addr_incr_end_row + buffer_min_size - channel_factor - tail_incr_last_window = buffer_min_size-1 - code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" - always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin - if (counter_loop_kh >= 0) - tail_incr_reg = 1; - else if (counter_loop_w >= 0) - tail_incr_reg = {}; - else if (counter_loop_h >= 0) - tail_incr_reg = {}; - else - tail_incr_reg = {}; - end - """.format(tail_incr_w, tail_incr_h, tail_incr_last_window)] - else: - # depthwise output format is equivalent to non-depthwise if SIMD=C - elem_per_window = k_h*k_w*channel_factor - - tail_incr_w = addr_incr_end_window + buffer_min_size - 1 - tail_incr_h = addr_incr_end_row + buffer_min_size - 1 - tail_incr_last_window = buffer_min_size-1 - code_gen_dict["$TAIL_INCR_GENERATION$"] = [""" - always @ (counter_loop_w, counter_loop_h) begin - if (counter_loop_w >= 0) - tail_incr_reg = {}; - else if (counter_loop_h >= 0) - tail_incr_reg = {}; - else - tail_incr_reg = {}; - end - """.format(tail_incr_w, tail_incr_h, tail_incr_last_window)] - - # support SIMD = C and k_w = 1 cases - # for k = [k_h, k_w] = [1, k_w], no adjustment is needed - # for k = [k_h, k_w] = [1, 1], do not use this impl. 
style (mmv_out=K=1) - # innermost loop is executed at least once -> adjust if needed - if (loop_simd_iterations == 1): - # skip innermost SIMD loop completely - if (loop_kw_iterations == 1): - # skip innermost KW loop completely - code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_KH"] - loop_kh_iterations -= 1 # -1 because state is initial state - else: - code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_KW"] - loop_kw_iterations -= 1 # -1 because state is initial state - else: - code_gen_dict["$INNERMOST_STATE$"]=["STATE_LOOP_SIMD"] - loop_simd_iterations -= 1 # -1 because state is initial state - - code_gen_dict["$LOOP_H_ITERATIONS$"]=[str(loop_h_iterations-1)] - code_gen_dict["$LOOP_W_ITERATIONS$"]=[str(loop_w_iterations-1)] - code_gen_dict["$LOOP_KH_ITERATIONS$"]=[str(loop_kh_iterations-1)] - code_gen_dict["$LOOP_KW_ITERATIONS$"]=[str(loop_kw_iterations-1)] - code_gen_dict["$LOOP_SIMD_ITERATIONS$"]=[str(loop_simd_iterations-1)] - - incr_bitwidth = 1 + math.ceil(math.log2(max(abs(addr_incr_end_simd)+1, - abs(addr_incr_end_window_elem)+1, - abs(addr_incr_end_window_row)+1, - abs(addr_incr_end_window)+1, - abs(addr_incr_end_row)+1, - abs(tail_incr_w)+1, - abs(tail_incr_h)+1, - abs(tail_incr_last_window)+1))) - code_gen_dict["$INCR_BITWIDTH$"] = [str(incr_bitwidth)] - code_gen_dict["$ADDR_INCREMENT_MAP$"]=["'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format(incr_bitwidth, - int(copysign(incr_bitwidth,addr_incr_end_simd)),abs(addr_incr_end_simd), - int(copysign(incr_bitwidth,addr_incr_end_window_elem)),abs(addr_incr_end_window_elem), - int(copysign(incr_bitwidth,addr_incr_end_window_row)),abs(addr_incr_end_window_row), - int(copysign(incr_bitwidth,addr_incr_end_window)),abs(addr_incr_end_window), - int(copysign(incr_bitwidth,addr_incr_end_row)),abs(addr_incr_end_row))] - - code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] - - with open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_default.sv", "r") as f: - template = f.read() - - ##### END CODE GEN FOR DEFAULT STYLE ##### - - ##### BEGIN CODE GEN FOR PARALLEL STYLE ##### - elif (impl_style == "parallel"): - # Out width > In width: Parallel implementation style using registers + line buffers - idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( - in_shape, - k_h, - k_w, - pad, - stride_h, - stride_w, - dilation_h, - dilation_w - ) + # Out width > In width: Parallel implementation style using registers + line buffers + idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( + in_shape, k_h, k_w, pad, stride_h, stride_w, dilation_h, dilation_w + ) - cols = in_image_padded[:, idx_c, idx_h, idx_w] - cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) - - # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW - out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) - # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) - out_image = out_image.transpose(0, 4, 5, 2, 3, 1) - out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) - - idx_px = idx_h*w+idx_w # sequential pixel indices - - k, cycles = idx_px.shape - - output_elements = mmv_out - output_cycles = int(cycles/(mmv_out/k)) - - # TODO: what happens when output_cycles=OFMdim % M != 0 - # ...try to support IFMdim % M != 0 first, so we can work with the usual k=3 where OFMdim = IFMdim - -2 - # the additional garbage input elements that are read in the last cycle are not read by any window anyway - idx_px = idx_px.transpose() - idx_px = idx_px.reshape(output_cycles, output_elements) - idx_px = idx_px.transpose() - # result: 
first dim is number of parallel output elements, - # second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle - - buffer = [] - buffer_max_size = 0 - schedule = [] - next_in_px = 0 - oldest_px = 0 - - def schedule_append(schedule, op): - if len(schedule) > 0 and schedule[-1][1] == op: - count, op_ = schedule[-1] - schedule[-1] = (count+1, op_) - else: - schedule.append((1, op)) - return schedule - - # compute schedule and buffer read pattern (output driven) - idx_px_relative = idx_px.copy() - output_elem, output_cycles = idx_px_relative.shape - - for x in range(output_cycles): - # load missing inputs into buffer - for y in range(output_elem): - while int(idx_px_relative[y,x]) not in buffer: - # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 - schedule = schedule_append(schedule,'w') - - # discard unused buffer elements - oldest_px = np.min(idx_px_relative[:,x:]) - #check whether M elements can be shifted out, not just the single oldest one - #while all([buffer[i] < oldest_px for i in range(M)]): - if all([buffer[i] < oldest_px for i in range(M)]): - # M buffer elements are shifted out at once - for m in range(M): - buffer.pop(0) - - # adjust relative buffer index of current x (according to last discarded buffer elements) - for y in range(output_elem): - idx_px_relative[y,x] -= oldest_px - - # read from buffer - # + simultaneously load next pixel(s) into buffer if there are any left - if (next_in_px > (h_padded*w_padded-1)): - # read only (append above) - schedule = schedule_append(schedule,'r') - else: - # load M inputs at once + cols = in_image_padded[:, idx_c, idx_h, idx_w] + cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) + # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW + out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) + # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) + out_image = out_image.transpose(0, 4, 5, 2, 3, 1) + out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) + idx_px = idx_h * w + idx_w # sequential pixel indices + k, cycles = idx_px.shape + output_elements = mmv_out + output_cycles = int(cycles / (mmv_out / k)) + + idx_px = idx_px.transpose() + idx_px = idx_px.reshape(output_cycles, output_elements) + idx_px = idx_px.transpose() + # result: first dim is number of parallel output elements, + # second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle + + buffer = [] + buffer_max_size = 0 + schedule = [] + next_in_px = 0 + oldest_px = 0 + + # compute schedule and buffer read pattern (output driven) + idx_px_relative = idx_px.copy() + output_elem, output_cycles = idx_px_relative.shape + + for x in range(output_cycles): + # load missing inputs into buffer + for y in range(output_elem): + while int(idx_px_relative[y, x]) >= next_in_px: + # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) for m in range(M): buffer.append(next_in_px) next_in_px += 1 - schedule = schedule_append(schedule,'wr') - - # record max needed buffer depth - if len(buffer) > buffer_max_size: - buffer_max_size = len(buffer) - - # insert dummy write operations for data at the input FM tail-end that is never read (e.g. 
in case of stride > 1) - while next_in_px <= (h_padded*w_padded-1): - next_in_px += 1 - schedule = schedule_append(schedule,'w') - - # find buffer access patterns - buffer_access_patterns = [] - for x in range(output_cycles): - if idx_px_relative[:,x].tolist() not in buffer_access_patterns: - buffer_access_patterns.append(idx_px_relative[:,x].tolist()) - - # Experimental implementation to map fixed controller loop structure to R/W schedule by analyzing - # the access pattern given by Im2Col, rather than direct computation. - # TODO: Probably replace this with a directly-computed schedule, similar to the default implementation style. - def compact_schedule(schedule): - # leave first sequence (pre-load) as is - start_sequence = schedule[0] - loop_sequence_1_counter = 1 - loop_sequence_1 = schedule[1] - loop_counter = 0 - loop_sequence_2 = None - end_sequence = None - - i = 2 - if i < len(schedule): - loop_sequence_1 += schedule[i] - i += 1 - while i+1 < len(schedule): - candidate = schedule[i] + schedule[i+1] - if candidate == loop_sequence_1: - loop_sequence_1_counter += 1 - i += 2 - else: - break - - if i < len(schedule): - loop_sequence_2 = schedule[i] - i += 1 - if i+1 < len(schedule): - candidate = schedule[i] + schedule[i+1] - if candidate != loop_sequence_1: - loop_sequence_2 += schedule[i] - i -= 1 - loop_sequence_total_len = (int(len(loop_sequence_2)/2)) + loop_sequence_1_counter*(int(len(loop_sequence_1)/2)) - loop_sequence_total = loop_sequence_2 + loop_sequence_1_counter*loop_sequence_1 - while i+loop_sequence_total_len < len(schedule): - candidate = schedule[i] - for x in range (i+1, i+loop_sequence_total_len): - candidate += schedule[x] - - if candidate == loop_sequence_total: - loop_counter += 1 - i += loop_sequence_total_len - else: - break - else: - if i < len(schedule): - end_sequence = loop_sequence_2 + schedule[i] - i += 1 - loop_sequence_2 = None - else: - end_sequence = loop_sequence_2 - loop_sequence_2 = None - - if i < len(schedule): - end_sequence = schedule[i] - i += 1 - if i < len(schedule): - end_sequence = end_sequence + schedule[i] - i += 1 - - assert len(start_sequence) == 1*2, "ERROR: invalid start sequence" - assert len(loop_sequence_1) == 2*2, "ERROR: invalid loop 1 sequence" - if loop_sequence_2: - assert len(loop_sequence_2) <= 2*2, "ERROR: invalid loop 2 sequence" - if end_sequence: - assert len(end_sequence) <= 2*2, "ERROR: invalid end sequence" - assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" %(i, len(schedule)) - - return (start_sequence, loop_counter, loop_sequence_1_counter, - loop_sequence_1, loop_sequence_2, end_sequence) - - ### determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) - # TODO: this part doesn't fully account for M for 2D buffers yet - - # how many "unused" registers are allowed between buffer positions that will be accessed in parallel - # example: - # 0: only consecutive access patterns will be implemented in regs, rest in (LUTRAM/BRAM) line buffers - # 2: [0, 3, 6] access pattern is still allowed and will be implemented with one 7-position shift reg - REG_BRAM_THRESHOLD = 8 - - code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] - - assert len(buffer_access_patterns) == 1, "ERROR: Buffer access pattern is not static" - buf_static_access_pattern = buffer_access_patterns[0] - reg_fifos = [] - reg_fifos_depth = [] - bram_fifos = [] - bram_fifos_depth = [] - current = [] - for i in range(len(buf_static_access_pattern)): - access_idx = 
buf_static_access_pattern[i] - if len(current) == 0: + schedule = schedule_append(schedule, "w") + + # discard unused buffer elements + # FIXME: this is very slow for large feature maps (e.g., 4096x4096) + oldest_px = np.min(idx_px_relative[:, x:]) + # check whether M elements can be shifted out, not just the single oldest one + # while all([buffer[i] < oldest_px for i in range(M)]): + if all([buffer[i] < oldest_px for i in range(M)]): + # M buffer elements are shifted out at once + for m in range(M): + buffer.pop(0) + + # adjust relative buffer index of current x (according to last discarded buffer elements) + for y in range(output_elem): + idx_px_relative[y, x] -= oldest_px + + # read from buffer + # + simultaneously load next pixel(s) into buffer if there are any left + if next_in_px > (h_padded * w_padded - 1): + # read only (append above) + schedule = schedule_append(schedule, "r") + else: + # load M inputs at once + for m in range(M): + buffer.append(next_in_px) + next_in_px += 1 + schedule = schedule_append(schedule, "wr") + + # record max needed buffer depth + if len(buffer) > buffer_max_size: + buffer_max_size = len(buffer) + + # insert dummy write operations for data at the input FM tail-end that is never read (e.g. in case of stride > 1) + while next_in_px <= (h_padded * w_padded - 1): + next_in_px += 1 + schedule = schedule_append(schedule, "w") + + # add 1 extra cycle after final READ+WRITE cycle for transition b/w feature maps + if schedule[-1][1] == "wr": + schedule_append(schedule, "n") + + # find buffer access patterns + buffer_access_patterns = [] + for x in range(output_cycles): + if idx_px_relative[:, x].tolist() not in buffer_access_patterns: + buffer_access_patterns.append(idx_px_relative[:, x].tolist()) + + ### determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) + # TODO: this part doesn't fully account for M>1 for 2D buffers yet + REG_BRAM_THRESHOLD = 8 + # how many "unused" registers are allowed between buffer positions that will be accessed in parallel + # example: + # 0: only consecutive access patterns will be implemented in regs, rest in (LUTRAM/BRAM) line buffers + # 2: [0, 3, 6] access pattern is still allowed and will be implemented with one 7-position shift reg + + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] + self.buffer_depth = buffer_max_size # for resource estimation + + assert ( + len(buffer_access_patterns) == 1 + ), "ERROR: Buffer access pattern is not static" + buf_static_access_pattern = buffer_access_patterns[0] + reg_fifos = [] + reg_fifos_depth = [] + bram_fifos = [] + bram_fifos_depth = [] + current = [] + for i in range(len(buf_static_access_pattern)): + access_idx = buf_static_access_pattern[i] + if len(current) == 0: + current.append(access_idx) + else: + # assume non-decreasing index order in access pattern + # TODO: this assumption does not hold for M>1 for the 2D case + distance = access_idx - max(current) + if not (distance - 1 > REG_BRAM_THRESHOLD): + for i in range(distance - 1): + # insert dummy into REG FIFO (not read as part of window) + current.append(-1) + # assign this access to same REG FIFO as previous one current.append(access_idx) else: - # assume non-decreasing index order in access pattern - # TODO: this assumption does not hold for M>1 case (2D buffer) - distance = access_idx - max(current) - if not (distance-1 > REG_BRAM_THRESHOLD): - for i in range(distance-1): - # insert dummy into REG FIFO (not read as part of window) - current.append(-1) - # assign this access to 
same REG FIFO as previous one - current.append(access_idx) - else: - # assign skipped accesses to new BRAM FIFO - bram_fifos.append([-1]*(distance-1)) - bram_fifos_depth.append(math.ceil((distance-1)/M)) # really ceil? - # start with new REG FIFO - reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # fix for M again - reg_fifos_depth.append(len(current)) - current = [] - current.append(access_idx) - reg_fifos.append(current) - #reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # fix for M again - reg_fifos_depth.append(len(current)) - - code_gen_dict["$GENERATE_REG_FIFOS$"] = [] - for i in range(len(reg_fifos)): - code_gen_dict["$GENERATE_REG_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] reg_fifo_{id}_in; - wire [IN_WIDTH-1:0] reg_fifo_{id}_out; - wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; - {name}_reg_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) + # assign skipped accesses to new BRAM FIFO + bram_fifos.append([-1] * (distance - 1)) + bram_fifos_depth.append( + math.ceil((distance - 1) / M) + ) # really ceil? + # start with new REG FIFO + reg_fifos.append(current) + # reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # allows for MMV in the 1D case + reg_fifos_depth.append(len(current)) + current = [] + current.append(access_idx) + reg_fifos.append(current) + # reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # allows for MMV in the 1D case + reg_fifos_depth.append(len(current)) + + code_gen_dict["$GENERATE_REG_FIFOS$"] = [] + for i in range(len(reg_fifos)): + code_gen_dict["$GENERATE_REG_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] reg_fifo_{id}_in; + wire [IN_WIDTH-1:0] reg_fifo_{id}_out; + wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; + {name}_reg_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + reg_buffer_inst_{id} + ( + .CLK(CLK), + .shift_enable(shift_enable), + .shift_in(reg_fifo_{id}_in), + .shift_out(reg_fifo_{id}_out), + .data_out(reg_fifo_{id}) + );""".format( + name=self.get_verilog_top_module_name(), + id=i, + len=reg_fifos_depth[i], + ) + ) + + code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] + for i in range(len(bram_fifos)): + code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] bram_fifo_{id}_in; + wire [IN_WIDTH-1:0] bram_fifo_{id}_out; + {name}_ram_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + ram_buffer_inst_{id} + ( + .CLK(CLK), + .RST(RST), + .shift_enable(shift_enable), + .shift_in(bram_fifo_{id}_in), + .shift_out(bram_fifo_{id}_out) + );""".format( + name=self.get_verilog_top_module_name(), + id=i, + len=bram_fifos_depth[i], + ) + ) + + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] + out_idx = mmv_out - 1 + for fifo_id, reg_fifo in enumerate(reg_fifos): + for fifo_idx, access_idx in enumerate(reg_fifo): + if access_idx != -1: + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( + out_idx=out_idx, + fifo_id=fifo_id, + access_idx=reg_fifos_depth[fifo_id] + - 1 + - int((max(reg_fifo) - access_idx) / M), + mmv_idx=(max(reg_fifo) - access_idx) % M, + mmv=M, + ) ) - reg_buffer_inst_{id} - ( - .CLK(CLK), - .shift_enable(shift_enable), - .shift_in(reg_fifo_{id}_in), - .shift_out(reg_fifo_{id}_out), - .data_out(reg_fifo_{id}) - );""".format(name=self.get_verilog_top_module_name(), id=i, len=reg_fifos_depth[i])) - - code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] - for i in range(len(bram_fifos)): - 
code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] bram_fifo_{id}_in; - wire [IN_WIDTH-1:0] bram_fifo_{id}_out; - {name}_ram_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) + # reversal: out_idx=0 -> oldest buffer element -> highest access_idx + out_idx = out_idx - 1 + assert out_idx == -1, "ERROR: Not all output vector elements connected" + + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] + for i in range(len(reg_fifos)): + if i == 0: + # first FIFO containing newest elements -> input comes from input reg + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = reg_input;""".format( + fifo_id=i, ) - ram_buffer_inst_{id} - ( - .CLK(CLK), - .RST(RST), - .shift_enable(shift_enable), - .shift_in(bram_fifo_{id}_in), - .shift_out(bram_fifo_{id}_out) - );""".format(name=self.get_verilog_top_module_name(), id=i, len=bram_fifos_depth[i])) - - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] - out_idx = mmv_out-1 - for fifo_id, reg_fifo in enumerate(reg_fifos): - for fifo_idx, access_idx in enumerate(reg_fifo): - if(access_idx != -1): - #code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - # "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{fifo_idx}]; //{access_idx}".format( - # out_idx=out_idx, fifo_id=fifo_id, fifo_idx=fifo_idx, access_idx=access_idx - # ) - #) - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( - out_idx=out_idx, fifo_id=fifo_id, - access_idx=reg_fifos_depth[fifo_id]-1-int((max(reg_fifo)-access_idx)/M), - mmv_idx=(max(reg_fifo)-access_idx)%M, - mmv = M - ) - ) - # reversal: out_idx=0 -> oldest buffer element -> highest access_idx - out_idx = out_idx-1 - assert out_idx==-1, "ERROR: Not all output vector elements connected" - - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] - for i in range(len(reg_fifos)): - if i == 0: - # first FIFO containing newest elements -> input comes from input reg - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = reg_input;""".format(fifo_id=i,)) - else: - # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) - input_fifo_id = i-1 - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) - for i in range(len(bram_fifos)): - input_fifo_id = i + ) + else: + # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) + input_fifo_id = i - 1 code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format(fifo_id=i, input_fifo_id=input_fifo_id)) - - def convert_tuple(seq): - mapping = {'w': ("1'b1", "1'b0"), - 'r': ("1'b0", "1'b1"), - 'wr':("1'b1", "1'b1"), - 'n': ("1'b0", "1'b0")} - if seq: - if len(seq) == 2: - return (seq[0], mapping[seq[1]], 0, mapping['n']) - if len(seq) == 4: - return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) - else: - return (0, mapping['n'], 0, mapping['n']) + """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + ) + for i in range(len(bram_fifos)): + input_fifo_id = i + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + 
) - start_sequence,loop_counter,loop_sequence_1_counter,loop_sequence_1,loop_sequence_2,end_sequence = compact_schedule(schedule) + ( + start_sequence, + loop_counter, + loop_sequence_1_counter, + loop_sequence_1, + loop_sequence_2, + end_sequence, + ) = schedule_map_controller(schedule) + + start_sequence = schedule_map_cmds(start_sequence) + loop_sequence_1 = schedule_map_cmds(loop_sequence_1) + loop_sequence_2 = schedule_map_cmds(loop_sequence_2) + end_sequence = schedule_map_cmds(end_sequence) + + cycles_total = 0 + for t in schedule: + cycles_total += t[0] + # add extra cycle if schedule ends on READ + if schedule[-1][1] == "r": + cycles_total += 1 + code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] + + code_gen_dict["$START_COUNTER$"] = [str(start_sequence[0])] + code_gen_dict["$LOOP_MAIN_COUNTER$"] = [str(loop_sequence_1_counter)] + code_gen_dict["$LOOP_INTER_COUNTER$"] = [str(loop_counter)] + + code_gen_dict["$LOOP_MAIN_1_COUNTER$"] = [str(loop_sequence_1[0])] + code_gen_dict["$LOOP_MAIN_2_COUNTER$"] = [str(loop_sequence_1[2])] + + code_gen_dict["$LOOP_INTER_1_COUNTER$"] = [str(loop_sequence_2[0])] + code_gen_dict["$LOOP_INTER_2_COUNTER$"] = [str(loop_sequence_2[2])] + + code_gen_dict["$LOOP_END_1_COUNTER$"] = [str(end_sequence[0])] + code_gen_dict["$LOOP_END_2_COUNTER$"] = [str(end_sequence[2])] + + code_gen_dict["$READ_CMD_MAP$"] = [ + "{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][0], + loop_sequence_1[1][0], + loop_sequence_1[3][0], + loop_sequence_2[1][0], + loop_sequence_2[3][0], + end_sequence[1][0], + end_sequence[3][0], + ) + ] + code_gen_dict["$WRITE_CMD_MAP$"] = [ + "{{ {}, {}, {}, {}, {}, {}, {} }}".format( + start_sequence[1][1], + loop_sequence_1[1][1], + loop_sequence_1[3][1], + loop_sequence_2[1][1], + loop_sequence_2[3][1], + end_sequence[1][1], + end_sequence[3][1], + ) + ] - start_sequence = convert_tuple(start_sequence) - loop_sequence_1 = convert_tuple(loop_sequence_1) - loop_sequence_2 = convert_tuple(loop_sequence_2) - end_sequence = convert_tuple(end_sequence) + code_gen_dict["$SIMD$"] = [str(simd)] + code_gen_dict["$MMV_IN$"] = [str(mmv_in)] + code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - cycles_total = 0 - for t in schedule: - cycles_total += t[0] - code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] + return template_path, code_gen_dict - code_gen_dict["$START_COUNTER$"]=[str(start_sequence[0])] - code_gen_dict["$LOOP_MAIN_COUNTER$"]=[str(loop_sequence_1_counter)] - code_gen_dict["$LOOP_INTER_COUNTER$"]=[str(loop_counter)] + def select_impl_style(self): + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") - code_gen_dict["$LOOP_MAIN_1_COUNTER$"]=[str(loop_sequence_1[0])] - code_gen_dict["$LOOP_MAIN_2_COUNTER$"]=[str(loop_sequence_1[2])] + k_h, k_w = k + # init folding config + if self.get_nodeattr("parallel_window"): + mmv_in = M * 1 + mmv_out = M * k_h * k_w + assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" + else: + mmv_in = 1 + mmv_out = 1 + assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide C" - code_gen_dict["$LOOP_INTER_1_COUNTER$"]=[str(loop_sequence_2[0])] - code_gen_dict["$LOOP_INTER_2_COUNTER$"]=[str(loop_sequence_2[2])] + # TODO: check allowed hyperparams + # for 1D case: it does not matter if dummy dim is x or y + # TODO: move/duplicate these checks in corresponding convert_to_hls transformation (?) 
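The schedule handed to schedule_map_controller and schedule_map_cmds above is run-length encoded as (repeat_count, command) tuples, which is why cycles_total is essentially the sum of the counts (plus one trailing cycle when the schedule ends on a read). A small sketch of that encoding, mirroring the schedule_append helper called in prepare_codegen_parallel:

    def schedule_append(schedule, op):
        # merge a repeated command into the last (count, op) entry
        if schedule and schedule[-1][1] == op:
            count, _ = schedule[-1]
            schedule[-1] = (count + 1, op)
        else:
            schedule.append((1, op))
        return schedule

    sched = []
    for op in ["w", "w", "w", "wr", "wr", "r"]:  # w=write, r=read, wr=both, n=no-op
        schedule_append(sched, op)
    # sched == [(3, "w"), (2, "wr"), (1, "r")] -> 3 + 2 + 1 = 6 cycles
    assert sum(count for count, _ in sched) == 6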
- code_gen_dict["$LOOP_END_1_COUNTER$"]=[str(end_sequence[0])] - code_gen_dict["$LOOP_END_2_COUNTER$"]=[str(end_sequence[2])] + # choose implementation style + if mmv_out > 1 or (k_h == 1 and k_w == 1): + impl_style = "parallel" + assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" + else: + impl_style = "default" - code_gen_dict["$READ_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][0],loop_sequence_1[1][0],loop_sequence_1[3][0],loop_sequence_2[1][0],loop_sequence_2[3][0],end_sequence[1][0],end_sequence[3][0])] - code_gen_dict["$WRITE_CMD_MAP$"]=["{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][1],loop_sequence_1[1][1],loop_sequence_1[3][1],loop_sequence_2[1][1],loop_sequence_2[3][1],end_sequence[1][1],end_sequence[3][1])] + return impl_style - with open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_parallel.sv", "r") as f: - template = f.read() + def generate_hdl(self): + impl_style = self.select_impl_style() - ##### END CODE GEN FOR PARALLEL STYLE ##### + # prepare code generation by filling out dictionaries + if impl_style == "default": + template_path, code_gen_dict = self.prepare_codegen_default() + elif impl_style == "parallel": + template_path, code_gen_dict = self.prepare_codegen_parallel() - ##### BEGIN GENERAL CODE GEN ##### + # add general parameters to dictionary code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] - # save top module name so we can refer to it even after this node has been renamed + # save top module name so we can refer to it even after this node has been renamed # (e.g. by GiveUniqueNodeNames(prefix) during MakeZynqProject) self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] - code_gen_dict["$SIMD$"] = [str(simd)] - code_gen_dict["$MMV_IN$"] = [str(mmv_in)] - code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] - ram_style = self.get_nodeattr("ram_style") if ram_style == "auto": - code_gen_dict["$RAM_STYLE$"]=[""] + code_gen_dict["$RAM_STYLE$"] = [""] else: - code_gen_dict["$RAM_STYLE$"]=["(* ram_style = \"{}\" *)".format(ram_style)] + code_gen_dict["$RAM_STYLE$"] = ['(* ram_style = "{}" *)'.format(ram_style)] - with open(os.environ['FINN_ROOT']+"/finn-rtllib/swg/swg_template_wrapper.v", "r") as f: + # apply code generation to templates + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + with open(template_path, "r") as f: + template = f.read() + with open( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_wrapper.v", "r" + ) as f: template_wrapper = f.read() - for key in code_gen_dict: # transform list into long string separated by '\n' code_gen_line = "\n".join(code_gen_dict[key]) template = template.replace(key, code_gen_line) template_wrapper = template_wrapper.replace(key, code_gen_line) + with open( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" + ), + "w", + ) as f: + f.write(template) + with open( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v" + ), + "w", + ) as f: + f.write(template_wrapper) - f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv"), "w") - f.write(template) - f.close() - f = open(os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v"), "w") - f.write(template_wrapper) - f.close() - #f_debug.close() - - #set ipgen_path and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain + # set ipgen_path 
and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain self.set_nodeattr("ipgen_path", code_gen_dir) self.set_nodeattr("ip_path", code_gen_dir) - ##### END GENERAL CODE GEN ##### def prepare_rtlsim(self): """Creates a Verilator emulation library for the RTL code generated @@ -1029,9 +1171,11 @@ def prepare_rtlsim(self): raise ImportError("Installation of PyVerilator is required.") code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - verilog_paths = [code_gen_dir] - verilog_files = [self.get_nodeattr("gen_top_module") + "_wrapper.v", - self.get_nodeattr("gen_top_module") + "_impl.sv"] + verilog_paths = [code_gen_dir] + verilog_files = [ + self.get_nodeattr("gen_top_module") + "_wrapper.v", + self.get_nodeattr("gen_top_module") + "_impl.sv", + ] # build the Verilator emu library sim = PyVerilator.build( @@ -1045,31 +1189,69 @@ def prepare_rtlsim(self): self.set_nodeattr("rtlsim_so", sim.lib._name) return sim - def code_generation_ipi(self): """Constructs and returns the TCL for node instantiation in Vivado IPI.""" vlnv = self.get_nodeattr("ip_vlnv") code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - cmd = ["add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v")), - "add_files -norecurse %s" % (os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv")), - "create_bd_cell -type module -reference %s %s" % (self.get_nodeattr("gen_top_module"), self.onnx_node.name)] + cmd = [ + "add_files -norecurse %s" + % ( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v" + ) + ), + "add_files -norecurse %s" + % ( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" + ) + ), + "create_bd_cell -type module -reference %s %s" + % (self.get_nodeattr("gen_top_module"), self.onnx_node.name), + ] return cmd def code_generation_ipgen(self, model, fpgapart, clk): - """Normally: Generates c++ code and tcl script for ip generation. - Here: Generates (System-)Verilog code for ip generation.""" + """Normally: Generates C++ code and tcl script for IP generation. 
+ Here: Generates (System-)Verilog code for IP generation.""" self.generate_hdl() def ipgen_singlenode_code(self): - """Normally: Builds the bash script for ip generation using the CallHLS from - finn.util.hls.""" + """Normally: Builds the bash script for IP generation.""" pass def code_generation_cppsim(self, model): - """Normally: Generates c++ code for simulation (cppsim).""" + """Normally: Generates C++ code for simulation (cppsim).""" pass def compile_singlenode_code(self): pass + + def global_includes(self): + pass + + def defines(self, var): + pass + + def read_npy_data(self): + pass + + def strm_decl(self): + pass + + def docompute(self): + pass + + def dataoutstrm(self): + pass + + def save_as_npy(self): + pass + + def blackboxfunction(self): + pass + + def pragmas(self): + pass diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index c94aa1eab6..d3ea9d117c 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -30,22 +30,21 @@ import numpy as np from onnx import TensorProto, helper - -import finn.core.onnx_exec as oxe -from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.util.basic import gen_finn_dt_tensor + +import finn.core.onnx_exec as oxe +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def make_single_im2col_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt -): + +def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim stride_h, stride_w = stride @@ -134,10 +133,10 @@ def make_single_slidingwindow_modelwrapper( model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", odt) - #DEBUG - swg_node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] - swg_inst = getCustomOp(swg_node) - swg_inst.set_nodeattr("rtlsim_trace", "/workspace/finn/finn-rtllib/swg/swg_test_trace.vcd") + # DEBUG + # swg_node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] + # swg_inst = getCustomOp(swg_node) + # swg_inst.set_nodeattr("rtlsim_trace", "/home/felixj/WD/finn/finn-rtllib/swg/swg_test_trace.vcd") return model @@ -159,39 +158,46 @@ def prepare_inputs(input_tensor): # ], # ) # kernel size -@pytest.mark.parametrize("k", [[1,1],[2,2],[3,3],[4,5],[1,3]]) +@pytest.mark.parametrize("k", [[1, 1], [2, 2], [3, 3], [1, 2], [1, 3]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[8,8],[13,13],[1,12]]) +@pytest.mark.parametrize( + "ifm_dim", [[8, 8], [13, 13], [1, 11], [1, 12], [1, 13], [1, 14]] +) # input channels @pytest.mark.parametrize("ifm_ch", [6]) # Stride -@pytest.mark.parametrize("stride", [[1,1],[2,2],[3,4]]) +@pytest.mark.parametrize("stride", [[1, 1], [2, 2], [1, 2]]) # Dilation -@pytest.mark.parametrize("dilation", [[1,1],[2,2],[4,3]]) +@pytest.mark.parametrize("dilation", [[1, 1], [2, 2], [1, 3]]) # depthwise 
-@pytest.mark.parametrize("dw", [0,1]) +@pytest.mark.parametrize("dw", [0, 1]) # input channel parallelism ("SIMD") -@pytest.mark.parametrize("simd", [1,2,3,6]) +@pytest.mark.parametrize("simd", [1, 2, 3, 6]) # parallel_window enable (MMV_out = M*K) -@pytest.mark.parametrize("parallel_window", [0,1]) +@pytest.mark.parametrize("parallel_window", [0, 1]) # in/out MMV ("M") @pytest.mark.parametrize("m", [1]) # Flip dimensions -@pytest.mark.parametrize("flip", [False,True]) +@pytest.mark.parametrize("flip", [False]) @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_slidingwindow_rtl( idt, k, ifm_dim, ifm_ch, stride, dilation, dw, simd, m, parallel_window, flip ): - #ifm_dim = conv_config[0] - #k = conv_config[1] - #stride = conv_config[2] - #dilation= conv_config[3] + # ifm_dim = conv_config[0] + # k = conv_config[1] + # stride = conv_config[2] + # dilation= conv_config[3] if flip: - if (ifm_dim[0]==ifm_dim[1] and k[0]==k[1] and stride[0]==stride[1] and dilation[0] == dilation[1]): + if ( + ifm_dim[0] == ifm_dim[1] + and k[0] == k[1] + and stride[0] == stride[1] + and dilation[0] == dilation[1] + ): pytest.skip("Dimension flip would have no effect") k = k[::-1] ifm_dim = ifm_dim[::-1] @@ -203,21 +209,31 @@ def test_fpgadataflow_slidingwindow_rtl( stride_h, stride_w = stride dilation_h, dilation_w = dilation - kernel_width = (k_w-1)*dilation_w+1 # incl. dilation - kernel_height = (k_h-1)*dilation_h+1 # incl. dilation + kernel_width = (k_w - 1) * dilation_w + 1 # incl. dilation + kernel_height = (k_h - 1) * dilation_h + 1 # incl. dilation if simd > ifm_ch: pytest.skip("SIMD cannot be larger than number of input channels") if ifm_ch % simd != 0: pytest.skip("SIMD must divide number of input channels") if kernel_height > ifm_dim_h or stride_h > ifm_dim_h: - pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") + pytest.skip( + "Illegal convolution configuration: kernel or stride > FM dimension" + ) if kernel_width > ifm_dim_w or stride_w > ifm_dim_w: - pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") - if (k_h==1 and (stride_h!=1 or dilation_h!=1)) or (k_w==1 and (stride_w!=1 or dilation_w!=1)): - pytest.skip("Illegal convolution configuration: stride or dilation defined for unitary kernel dim") - if k_h==1 and k_w==1 and simd != ifm_ch: + pytest.skip( + "Illegal convolution configuration: kernel or stride > FM dimension" + ) + if (k_h == 1 and (stride_h != 1 or dilation_h != 1)) or ( + k_w == 1 and (stride_w != 1 or dilation_w != 1) + ): + pytest.skip( + "Illegal convolution configuration: stride or dilation defined for unitary kernel dim" + ) + if k_h == 1 and k_w == 1 and simd != ifm_ch: pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") + if parallel_window and simd != ifm_ch: + pytest.skip("Parallel window requires SIMD=C") ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) @@ -258,7 +274,7 @@ def test_fpgadataflow_slidingwindow_rtl( ) y_expected = oxe.execute_onnx(golden, input_dict)["outp"] - #DEBUG + # DEBUG print("-------expected:") print(y_expected) print("--------produced:") @@ -267,7 +283,7 @@ def test_fpgadataflow_slidingwindow_rtl( node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") - print("RTLSIM cycles: %d"%cycles_rtlsim) + print("RTLSIM cycles: %d" % cycles_rtlsim) if dw == 0: assert (y_produced == 
y_expected).all() @@ -279,6 +295,7 @@ def test_fpgadataflow_slidingwindow_rtl( y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) assert (y_produced == y_expected).all() + # exp_cycles_dict = model.analysis(exp_cycles_per_layer) # exp_cycles = exp_cycles_dict[node.name] # assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) From 8268ec254985382f40184e0e986442c2b5ab1f0a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 22:53:45 +0200 Subject: [PATCH 119/628] [Deps] update pyverilator --- docker/Dockerfile.finn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 9c18c03d7b..b3c669ec10 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -65,7 +65,7 @@ RUN locale-gen "en_US.UTF-8" RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev RUN git clone https://github.com/verilator/verilator RUN cd verilator && \ - git checkout v4.012 && \ + git checkout v4.224 && \ autoconf && \ ./configure && \ make -j4 && \ From c50b34827ec73dcd2bab22a584adb032fe9677d0 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 22:54:02 +0200 Subject: [PATCH 120/628] [Deps] update pyverilator and verilator --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 74d910478e..bf0e3b33de 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -30,7 +30,7 @@ QONNX_COMMIT="398a0ecfcb32407c0a3df39246cf6d2bca02886c" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" -PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" +PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" From 0b158c2e304d227bc2f849ea71e957146e39845b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 22:54:47 +0200 Subject: [PATCH 121/628] [rtlsim] experimental support for impl_style=vivado FIFO and DWC sim --- src/finn/util/pyverilator.py | 50 +++++++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index f6a51da8e4..5396281397 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,7 +26,10 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
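As a quick cross-check of the convolution geometry exercised by the test above, assuming the usual dilated-kernel output formula behind qonnx's compute_conv_output_dim:

    from qonnx.custom_op.general.im2col import compute_conv_output_dim

    # 1D test point k=[1, 3], ifm_dim=[1, 13], stride=[1, 2], dilation=[1, 3]:
    # effective kernel width = (3 - 1) * 3 + 1 = 7
    # out_dim_w = (13 - 7) // 2 + 1 = 4
    assert compute_conv_output_dim(13, 3, 2, 0, 3) == 4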
+import pkg_resources as pk + import os +import shutil from pyverilator import PyVerilator from finn.util.basic import get_rtlsim_trace_depth, make_build_dir @@ -74,14 +77,34 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - all_verilog_files = list( - set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + src_exts = [".v", ".sv"] + + all_verilog_src_files = list( + set( + filter( + lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs + ) + ) + ) + + verilog_header_dir = make_build_dir("pyverilator_vh_") + # use custom version of axis infrastructure vh + custom_vh = pk.resource_filename( + "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" ) + shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") + for fn in all_verilog_srcs: + if fn.endswith(".vh"): + if "axis_infrastructure_v1_1_0.vh" in fn: + # skip, we use a custom version for this file without recursive gcd + continue + else: + shutil.copy(fn, verilog_header_dir) # remove all but one instances of regslice_core.v filtered_verilog_files = [] remove_entry = False - for vfile in all_verilog_files: + for vfile in all_verilog_src_files: if "regslice_core" in vfile: if not remove_entry: filtered_verilog_files.append(vfile) @@ -94,7 +117,12 @@ def file_to_basename(x): for vfile in filtered_verilog_files: with open(vfile) as rf: wf.write("//Added from " + vfile + "\n\n") - wf.write(rf.read()) + lines = rf.read() + for line in lines.split("\n"): + # break down too-long lines, Verilator complains otherwise + if len(line) > 20000: + line = line.replace("&", "\n&") + wf.write("\n" + line) verilator_args = [] # disable common verilator warnings that should be harmless but commonly occur @@ -108,10 +136,20 @@ def file_to_basename(x): # force inlining of all submodules to ensure we can read internal signals properly if read_internal_signals: verilator_args += ["--inline-mult", "0"] + # add defines to make certain XPM src files work with Verilator + verilator_args.append("-DDISABLE_XPM_ASSERTIONS") + verilator_args.append("-DOBSOLETE") + verilator_args.append("-DONESPIN") + verilator_args.append("--bbox-unsup") + vivado_path = os.environ["VIVADO_PATH"] + # additional SystemVerilog modules to make XPMs work with Verilator + xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" + xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" + xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" sim = PyVerilator.build( - top_module_file_name, - verilog_path=[vivado_stitch_proj_dir], + [top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], + verilog_path=[vivado_stitch_proj_dir, verilog_header_dir], build_dir=build_dir, trace_depth=get_rtlsim_trace_depth(), top_module_name=top_module_name, From e1d09b957b4008a58986108c09f5b300bff9bebe Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 24 Aug 2022 23:12:14 +0200 Subject: [PATCH 122/628] [Build] add option (enabled) to keep impl_style=vivado for rtlsim --- src/finn/builder/build_dataflow_config.py | 4 ++ src/finn/builder/build_dataflow_steps.py | 70 ++++++++++++----------- 2 files changed, 41 insertions(+), 33 deletions(-) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 92263bd82c..49538939d7 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -316,6 +316,10 @@ class DataflowBuildConfig: #: 
Override the number of inputs for rtlsim performance measurement. rtlsim_batch_size: Optional[int] = 1 + #: If set to True, FIFOs and DWCs with impl_style=vivado will be kept during + #: rtlsim, otherwise they will be replaced by HLS implementations. + rtlsim_use_vivado_comps: Optional[bool] = True + def _resolve_hls_clk_period(self): if self.hls_clk_period_ns is None: # use same clk for synth and hls if not explicitly specified diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 59f77650da..c983432e1e 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -162,40 +162,44 @@ def verify_step( def prepare_for_stitched_ip_rtlsim(verify_model, cfg): - need_restitch = False - # rtlsim only supports certain impl_style for some nodes - # StreamingFIFO must have impl_style=rtl - for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): - inst = getCustomOp(fifo_layer) - if inst.get_nodeattr("impl_style") != "rtl": - inst.set_nodeattr("impl_style", "rtl") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # StreamingDataWidthConverter must have impl_style=hls - for dwc_layer in verify_model.get_nodes_by_op_type( - "StreamingDataWidthConverter_Batch" - ): - inst = getCustomOp(dwc_layer) - if inst.get_nodeattr("impl_style") != "hls": - inst.set_nodeattr("impl_style", "hls") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # if we've made alterations to the model, need to do some re-prep - if need_restitch: - print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") - verify_model = verify_model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) - verify_model = verify_model.transform(HLSSynthIP()) - verify_model = verify_model.transform( - CreateStitchedIP( - cfg._resolve_fpga_part(), - cfg.synth_clk_period_ns, - vitis=False, + if not cfg.rtlsim_use_vivado_comps: + need_restitch = False + # switch impl_style=vivado components to rtl/hls + # StreamingFIFO must have impl_style=rtl + for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): + inst = getCustomOp(fifo_layer) + if inst.get_nodeattr("impl_style") != "rtl": + inst.set_nodeattr("impl_style", "rtl") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # StreamingDataWidthConverter must have impl_style=hls + for dwc_layer in verify_model.get_nodes_by_op_type( + "StreamingDataWidthConverter_Batch" + ): + inst = getCustomOp(dwc_layer) + if inst.get_nodeattr("impl_style") != "hls": + inst.set_nodeattr("impl_style", "hls") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # if we've made alterations to the model, need to do some re-prep + if need_restitch: + print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") + verify_model = verify_model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) ) - ) + verify_model = verify_model.transform(HLSSynthIP()) + verify_model = verify_model.transform( + CreateStitchedIP( + cfg._resolve_fpga_part(), + cfg.synth_clk_period_ns, + vitis=False, + ) + ) + else: + print("rtlsim_use_vivado_comps is enabled, may yield incorrect results") + # set top-level prop for stitched-ip rtlsim and launch verify_model.set_metadata_prop("exec_mode", "rtlsim") # TODO make configurable From 
e7d58388331f0932c2520a96f4594ba29e98e65b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 25 Aug 2022 14:42:32 +0200 Subject: [PATCH 123/628] [rtlsim] add custom variant of axis_infrastructure.vh --- .../verilog/custom_axis_infrastructure.vh | 346 ++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 src/finn/qnn-data/verilog/custom_axis_infrastructure.vh diff --git a/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh new file mode 100644 index 0000000000..1c8b6403e8 --- /dev/null +++ b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh @@ -0,0 +1,346 @@ +// (c) Copyright 2011-2013 Xilinx, Inc. All rights reserved. +// +// This file contains confidential and proprietary information +// of Xilinx, Inc. and is protected under U.S. and +// international copyright and other intellectual property +// laws. +// +// DISCLAIMER +// This disclaimer is not a license and does not grant any +// rights to the materials distributed herewith. Except as +// otherwise provided in a valid license issued to you by +// Xilinx, and to the maximum extent permitted by applicable +// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND +// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES +// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING +// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- +// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and +// (2) Xilinx shall not be liable (whether in contract or tort, +// including negligence, or under any other theory of +// liability) for any loss or damage of any kind or nature +// related to, arising under or in connection with these +// materials, including for any direct, or any indirect, +// special, incidental, or consequential loss or damage +// (including loss of data, profits, goodwill, or any type of +// loss or damage suffered as a result of any action brought +// by a third party) even if such damage or loss was +// reasonably foreseeable or Xilinx had been advised of the +// possibility of the same. +// +// CRITICAL APPLICATIONS +// Xilinx products are not designed or intended to be fail- +// safe, or for use in any application requiring fail-safe +// performance, such as life-support or safety devices or +// systems, Class III medical devices, nuclear facilities, +// applications related to the deployment of airbags, or any +// other applications that could lead to death, personal +// injury, or severe property or environmental damage +// (individually and collectively, "Critical +// Applications"). Customer assumes the sole risk and +// liability of any use of Xilinx products in Critical +// Applications, subject only to applicable laws and +// regulations governing limitations on product liability. +// +// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS +// PART OF THIS FILE AT ALL TIMES. 
+//----------------------------------------------------------------------------- +// +// Generic Functions used by AXIS-Interconnect and Infrastrucutre Modules +// +// Verilog-standard: Verilog 2001 +//-------------------------------------------------------------------------- +// Global Parameters: +// +// Functions: +// f_clogb2 +// f_gcd +// f_lcm +// f_get_tdata_indx +// f_get_tstrb_indx +// f_get_tkeep_indx +// f_get_tlast_indx +// f_get_tid_indx +// f_get_tdest_indx +// f_get_tuser_indx +// f_payload_width +// Tasks: +// t_display_tdata_error +//-------------------------------------------------------------------------- +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Global Parameters +/////////////////////////////////////////////////////////////////////////////// +// Define Signal Set indices +localparam G_INDX_SS_TREADY = 0; +localparam G_INDX_SS_TDATA = 1; +localparam G_INDX_SS_TSTRB = 2; +localparam G_INDX_SS_TKEEP = 3; +localparam G_INDX_SS_TLAST = 4; +localparam G_INDX_SS_TID = 5; +localparam G_INDX_SS_TDEST = 6; +localparam G_INDX_SS_TUSER = 7; +localparam G_MASK_SS_TREADY = 32'h1 << G_INDX_SS_TREADY; +localparam G_MASK_SS_TDATA = 32'h1 << G_INDX_SS_TDATA; +localparam G_MASK_SS_TSTRB = 32'h1 << G_INDX_SS_TSTRB; +localparam G_MASK_SS_TKEEP = 32'h1 << G_INDX_SS_TKEEP; +localparam G_MASK_SS_TLAST = 32'h1 << G_INDX_SS_TLAST; +localparam G_MASK_SS_TID = 32'h1 << G_INDX_SS_TID ; +localparam G_MASK_SS_TDEST = 32'h1 << G_INDX_SS_TDEST; +localparam G_MASK_SS_TUSER = 32'h1 << G_INDX_SS_TUSER; + +// Task DRC error levels +localparam G_TASK_SEVERITY_ERR = 2; +localparam G_TASK_SEVERITY_WARNING = 1; +localparam G_TASK_SEVERITY_INFO = 0; + +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Functions +/////////////////////////////////////////////////////////////////////////////// +// ceiling logb2 + function integer f_clogb2 (input integer size); + integer s; + begin + s = size; + s = s - 1; + for (f_clogb2=1; s>1; f_clogb2=f_clogb2+1) + s = s >> 1; + end + endfunction // clogb2 + + // Calculates the Greatest Common Divisor between two integers using the + // euclidean algorithm. + function automatic integer f_gcd ( + input integer a, + input integer b + ); + begin : main + integer A, B, done, swap; + A = a; + B = b; + done = 0; + while(!done) + begin + if (A < B ) begin + swap = A; + A = B; + B = swap; + end else if ( B != 0 ) begin + A = A - B; + end else begin + done = 1; + end + end + + f_gcd = A; + end + endfunction + + + // Calculates the Lowest Common Denominator between two integers + function integer f_lcm ( + input integer a, + input integer b + ); + begin : main + f_lcm = ( a / f_gcd(a, b)) * b; + end + endfunction + + // Returns back the index to the TDATA portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdata_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + f_get_tdata_indx = 0; + end + endfunction + + // Returns back the index to the tstrb portion of TPAYLOAD, returns 0 if the + // signal is not enabled. 
+ function integer f_get_tstrb_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdata_indx(DAW, IDW, DEW, USW, SST); + // If TDATA exists, then add its width to its base to get the tstrb index + f_get_tstrb_indx = SST[G_INDX_SS_TDATA] ? cur_indx + DAW : cur_indx; + end + endfunction + + // Returns back the index to the tkeep portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tkeep_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tstrb_indx(DAW, IDW, DEW, USW, SST); + f_get_tkeep_indx = SST[G_INDX_SS_TSTRB] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tlast portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tlast_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tkeep_indx(DAW, IDW, DEW, USW, SST); + f_get_tlast_indx = SST[G_INDX_SS_TKEEP] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tid portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tid_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tlast_indx(DAW, IDW, DEW, USW, SST); + f_get_tid_indx = SST[G_INDX_SS_TLAST] ? cur_indx + 1 : cur_indx; + end + endfunction + + // Returns back the index to the tdest portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdest_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tid_indx(DAW, IDW, DEW, USW, SST); + f_get_tdest_indx = SST[G_INDX_SS_TID] ? cur_indx + IDW : cur_indx; + end + endfunction + + // Returns back the index to the tuser portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tuser_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdest_indx(DAW, IDW, DEW, USW, SST); + f_get_tuser_indx = SST[G_INDX_SS_TDEST] ? cur_indx + DEW : cur_indx; + end + endfunction + + // Payload is the sum of all the AXIS signals present except for + // TREADY/TVALID + function integer f_payload_width ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tuser_indx(DAW, IDW, DEW, USW, SST); + f_payload_width = SST[G_INDX_SS_TUSER] ? 
cur_indx + USW : cur_indx; + // Ensure that the return value is never less than 1 + f_payload_width = (f_payload_width < 1) ? 1 : f_payload_width; + end + endfunction + + task t_check_tdata_width( + input integer data_width, + input [8*80-1:0] var_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tdata_width + if (data_width%8 != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s (%2d) must be a multiple of 8.", var_name, data_width); + $display(" AXI4-Stream data width is only defined for byte multiples. See the "); + $display(" AMBA4 AXI4-Stream Protocol Specification v1.0 Section 2.1 for more"); + $display(" information."); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask + + task t_check_tuser_width( + input integer tuser_width, + input [8*80-1:0] tuser_name, + input integer tdata_width, + input [8*80-1:0] tdata_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tuser_width + integer tdata_bytes; + tdata_bytes = tdata_width/8; + if ((tuser_width%tdata_bytes) != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s == %2d is not the recommended value of 'an integer ", tuser_name, tuser_width); + $display(" multiple of the width of the interface (%s == %2d) in bytes.' AXI4-Stream", tdata_name, tdata_width); + $display(" TUSER width in this module is only defined when the TUSER is the"); + $display(" recommended value. See the AMBA4 AXI4-Stream Protocol Specification v1.0"); + $display(" Section 2.1, 2.3.3 and 2.8 for more information. 
"); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask From 7ab679462cca64990924336f7f54c48cfc64ad18 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 26 Aug 2022 00:16:46 +0200 Subject: [PATCH 124/628] [InsertFIFO] change default value of max_qsrl_depth to None --- src/finn/transformation/fpgadataflow/insert_fifo.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index c5b7005145..e77774df72 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -84,11 +84,14 @@ class InsertFIFO(Transformation): node the FIFO node is inserted after: 'folded_shape' and 'dtype'""" def __init__( - self, create_shallow_fifos=False, max_qsrl_depth=256, vivado_ram_style="auto" + self, create_shallow_fifos=False, max_qsrl_depth=None, vivado_ram_style="auto" ): super().__init__() self.create_shallow_fifos = create_shallow_fifos - self.max_qsrl_depth = max_qsrl_depth + if max_qsrl_depth is None: + self.max_qsrl_depth = 1000000 + else: + self.max_qsrl_depth = max_qsrl_depth self.vivado_ram_style = vivado_ram_style def apply(self, model): From 4ca27c4a1132ad51e38613ae5f11c8a65ed4660f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 26 Aug 2022 00:17:19 +0200 Subject: [PATCH 125/628] [Build] adjust chrc FIFO sizing for new InsertFIFO behavior --- src/finn/builder/build_dataflow_steps.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index a31f37a0ee..175863e84d 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -479,7 +479,9 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform(DeriveCharacteristic(period)) model = model.transform(DeriveFIFOSizes()) model = model.transform( - InsertFIFO(vivado_ram_style=cfg.large_fifo_mem_style) + InsertFIFO( + vivado_ram_style=cfg.large_fifo_mem_style, max_qsrl_depth=256 + ) ) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) From fc9e880a646ed19e35e2cf32f4a8d085453a4561 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 26 Aug 2022 00:17:44 +0200 Subject: [PATCH 126/628] Revert "Merge branch 'feature/rtlsim-vivado-ip' into feature/new-fifo-sizing-residual" This reverts commit 0297b55f13ea5180d1dbcde5a203432e0727afe9, reversing changes made to b3110eecc78567910907e7751dbfd51062e3fc0c. 
--- docker/Dockerfile.finn | 2 +- fetch-repos.sh | 2 +- src/finn/builder/build_dataflow_config.py | 4 - src/finn/builder/build_dataflow_steps.py | 70 ++-- .../verilog/custom_axis_infrastructure.vh | 346 ------------------ src/finn/util/pyverilator.py | 50 +-- 6 files changed, 41 insertions(+), 433 deletions(-) delete mode 100644 src/finn/qnn-data/verilog/custom_axis_infrastructure.vh diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index b3c669ec10..9c18c03d7b 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -65,7 +65,7 @@ RUN locale-gen "en_US.UTF-8" RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev RUN git clone https://github.com/verilator/verilator RUN cd verilator && \ - git checkout v4.224 && \ + git checkout v4.012 && \ autoconf && \ ./configure && \ make -j4 && \ diff --git a/fetch-repos.sh b/fetch-repos.sh index ce91b9d2f6..97427ec9da 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -30,7 +30,7 @@ QONNX_COMMIT="34ecaa73398c85201b325bcff1beeca1e45f4541" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" -PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" +PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 0f2d4de209..13946c9d1e 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -329,10 +329,6 @@ class DataflowBuildConfig: #: Override the number of inputs for rtlsim performance measurement. rtlsim_batch_size: Optional[int] = 1 - #: If set to True, FIFOs and DWCs with impl_style=vivado will be kept during - #: rtlsim, otherwise they will be replaced by HLS implementations. 
- rtlsim_use_vivado_comps: Optional[bool] = True - def _resolve_hls_clk_period(self): if self.hls_clk_period_ns is None: # use same clk for synth and hls if not explicitly specified diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 175863e84d..ad7e1da054 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -167,44 +167,40 @@ def verify_step( def prepare_for_stitched_ip_rtlsim(verify_model, cfg): - if not cfg.rtlsim_use_vivado_comps: - need_restitch = False - # switch impl_style=vivado components to rtl/hls - # StreamingFIFO must have impl_style=rtl - for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): - inst = getCustomOp(fifo_layer) - if inst.get_nodeattr("impl_style") != "rtl": - inst.set_nodeattr("impl_style", "rtl") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # StreamingDataWidthConverter must have impl_style=hls - for dwc_layer in verify_model.get_nodes_by_op_type( - "StreamingDataWidthConverter_Batch" - ): - inst = getCustomOp(dwc_layer) - if inst.get_nodeattr("impl_style") != "hls": - inst.set_nodeattr("impl_style", "hls") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # if we've made alterations to the model, need to do some re-prep - if need_restitch: - print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") - verify_model = verify_model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) - verify_model = verify_model.transform(HLSSynthIP()) - verify_model = verify_model.transform( - CreateStitchedIP( - cfg._resolve_fpga_part(), - cfg.synth_clk_period_ns, - vitis=False, - ) + need_restitch = False + # rtlsim only supports certain impl_style for some nodes + # StreamingFIFO must have impl_style=rtl + for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): + inst = getCustomOp(fifo_layer) + if inst.get_nodeattr("impl_style") != "rtl": + inst.set_nodeattr("impl_style", "rtl") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # StreamingDataWidthConverter must have impl_style=hls + for dwc_layer in verify_model.get_nodes_by_op_type( + "StreamingDataWidthConverter_Batch" + ): + inst = getCustomOp(dwc_layer) + if inst.get_nodeattr("impl_style") != "hls": + inst.set_nodeattr("impl_style", "hls") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # if we've made alterations to the model, need to do some re-prep + if need_restitch: + print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") + verify_model = verify_model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) + ) + verify_model = verify_model.transform(HLSSynthIP()) + verify_model = verify_model.transform( + CreateStitchedIP( + cfg._resolve_fpga_part(), + cfg.synth_clk_period_ns, + vitis=False, ) - else: - print("rtlsim_use_vivado_comps is enabled, may yield incorrect results") - + ) # set top-level prop for stitched-ip rtlsim and launch verify_model.set_metadata_prop("exec_mode", "rtlsim") # TODO make configurable diff --git a/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh deleted file mode 100644 index 1c8b6403e8..0000000000 --- a/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh +++ /dev/null @@ -1,346 +0,0 
@@ -// (c) Copyright 2011-2013 Xilinx, Inc. All rights reserved. -// -// This file contains confidential and proprietary information -// of Xilinx, Inc. and is protected under U.S. and -// international copyright and other intellectual property -// laws. -// -// DISCLAIMER -// This disclaimer is not a license and does not grant any -// rights to the materials distributed herewith. Except as -// otherwise provided in a valid license issued to you by -// Xilinx, and to the maximum extent permitted by applicable -// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND -// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES -// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING -// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- -// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and -// (2) Xilinx shall not be liable (whether in contract or tort, -// including negligence, or under any other theory of -// liability) for any loss or damage of any kind or nature -// related to, arising under or in connection with these -// materials, including for any direct, or any indirect, -// special, incidental, or consequential loss or damage -// (including loss of data, profits, goodwill, or any type of -// loss or damage suffered as a result of any action brought -// by a third party) even if such damage or loss was -// reasonably foreseeable or Xilinx had been advised of the -// possibility of the same. -// -// CRITICAL APPLICATIONS -// Xilinx products are not designed or intended to be fail- -// safe, or for use in any application requiring fail-safe -// performance, such as life-support or safety devices or -// systems, Class III medical devices, nuclear facilities, -// applications related to the deployment of airbags, or any -// other applications that could lead to death, personal -// injury, or severe property or environmental damage -// (individually and collectively, "Critical -// Applications"). Customer assumes the sole risk and -// liability of any use of Xilinx products in Critical -// Applications, subject only to applicable laws and -// regulations governing limitations on product liability. -// -// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS -// PART OF THIS FILE AT ALL TIMES. 
-//----------------------------------------------------------------------------- -// -// Generic Functions used by AXIS-Interconnect and Infrastrucutre Modules -// -// Verilog-standard: Verilog 2001 -//-------------------------------------------------------------------------- -// Global Parameters: -// -// Functions: -// f_clogb2 -// f_gcd -// f_lcm -// f_get_tdata_indx -// f_get_tstrb_indx -// f_get_tkeep_indx -// f_get_tlast_indx -// f_get_tid_indx -// f_get_tdest_indx -// f_get_tuser_indx -// f_payload_width -// Tasks: -// t_display_tdata_error -//-------------------------------------------------------------------------- -/////////////////////////////////////////////////////////////////////////////// -// BEGIN Global Parameters -/////////////////////////////////////////////////////////////////////////////// -// Define Signal Set indices -localparam G_INDX_SS_TREADY = 0; -localparam G_INDX_SS_TDATA = 1; -localparam G_INDX_SS_TSTRB = 2; -localparam G_INDX_SS_TKEEP = 3; -localparam G_INDX_SS_TLAST = 4; -localparam G_INDX_SS_TID = 5; -localparam G_INDX_SS_TDEST = 6; -localparam G_INDX_SS_TUSER = 7; -localparam G_MASK_SS_TREADY = 32'h1 << G_INDX_SS_TREADY; -localparam G_MASK_SS_TDATA = 32'h1 << G_INDX_SS_TDATA; -localparam G_MASK_SS_TSTRB = 32'h1 << G_INDX_SS_TSTRB; -localparam G_MASK_SS_TKEEP = 32'h1 << G_INDX_SS_TKEEP; -localparam G_MASK_SS_TLAST = 32'h1 << G_INDX_SS_TLAST; -localparam G_MASK_SS_TID = 32'h1 << G_INDX_SS_TID ; -localparam G_MASK_SS_TDEST = 32'h1 << G_INDX_SS_TDEST; -localparam G_MASK_SS_TUSER = 32'h1 << G_INDX_SS_TUSER; - -// Task DRC error levels -localparam G_TASK_SEVERITY_ERR = 2; -localparam G_TASK_SEVERITY_WARNING = 1; -localparam G_TASK_SEVERITY_INFO = 0; - -/////////////////////////////////////////////////////////////////////////////// -// BEGIN Functions -/////////////////////////////////////////////////////////////////////////////// -// ceiling logb2 - function integer f_clogb2 (input integer size); - integer s; - begin - s = size; - s = s - 1; - for (f_clogb2=1; s>1; f_clogb2=f_clogb2+1) - s = s >> 1; - end - endfunction // clogb2 - - // Calculates the Greatest Common Divisor between two integers using the - // euclidean algorithm. - function automatic integer f_gcd ( - input integer a, - input integer b - ); - begin : main - integer A, B, done, swap; - A = a; - B = b; - done = 0; - while(!done) - begin - if (A < B ) begin - swap = A; - A = B; - B = swap; - end else if ( B != 0 ) begin - A = A - B; - end else begin - done = 1; - end - end - - f_gcd = A; - end - endfunction - - - // Calculates the Lowest Common Denominator between two integers - function integer f_lcm ( - input integer a, - input integer b - ); - begin : main - f_lcm = ( a / f_gcd(a, b)) * b; - end - endfunction - - // Returns back the index to the TDATA portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tdata_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - f_get_tdata_indx = 0; - end - endfunction - - // Returns back the index to the tstrb portion of TPAYLOAD, returns 0 if the - // signal is not enabled. 
- function integer f_get_tstrb_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tdata_indx(DAW, IDW, DEW, USW, SST); - // If TDATA exists, then add its width to its base to get the tstrb index - f_get_tstrb_indx = SST[G_INDX_SS_TDATA] ? cur_indx + DAW : cur_indx; - end - endfunction - - // Returns back the index to the tkeep portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tkeep_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tstrb_indx(DAW, IDW, DEW, USW, SST); - f_get_tkeep_indx = SST[G_INDX_SS_TSTRB] ? cur_indx + DAW/8 : cur_indx; - end - endfunction - - // Returns back the index to the tlast portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tlast_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tkeep_indx(DAW, IDW, DEW, USW, SST); - f_get_tlast_indx = SST[G_INDX_SS_TKEEP] ? cur_indx + DAW/8 : cur_indx; - end - endfunction - - // Returns back the index to the tid portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tid_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tlast_indx(DAW, IDW, DEW, USW, SST); - f_get_tid_indx = SST[G_INDX_SS_TLAST] ? cur_indx + 1 : cur_indx; - end - endfunction - - // Returns back the index to the tdest portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tdest_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tid_indx(DAW, IDW, DEW, USW, SST); - f_get_tdest_indx = SST[G_INDX_SS_TID] ? cur_indx + IDW : cur_indx; - end - endfunction - - // Returns back the index to the tuser portion of TPAYLOAD, returns 0 if the - // signal is not enabled. - function integer f_get_tuser_indx ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tdest_indx(DAW, IDW, DEW, USW, SST); - f_get_tuser_indx = SST[G_INDX_SS_TDEST] ? cur_indx + DEW : cur_indx; - end - endfunction - - // Payload is the sum of all the AXIS signals present except for - // TREADY/TVALID - function integer f_payload_width ( - input integer DAW, // TDATA Width - input integer IDW, // TID Width - input integer DEW, // TDEST Width - input integer USW, // TUSER Width - input [31:0] SST // Signal Set - ); - begin : main - integer cur_indx; - cur_indx = f_get_tuser_indx(DAW, IDW, DEW, USW, SST); - f_payload_width = SST[G_INDX_SS_TUSER] ? 
cur_indx + USW : cur_indx; - // Ensure that the return value is never less than 1 - f_payload_width = (f_payload_width < 1) ? 1 : f_payload_width; - end - endfunction - - task t_check_tdata_width( - input integer data_width, - input [8*80-1:0] var_name, - input [8*80-1:0] inst_name, - input integer severity_lvl, - output integer ret_val - ); - // Severity levels: - // 0 = INFO - // 1 = WARNING - // 2 = ERROR - begin : t_check_tdata_width - if (data_width%8 != 0) begin - // 000 1 2 3 4 5 6 7 8 - // 012 0 0 0 0 0 0 0 0 - if (severity_lvl >= 2) begin - $display("ERROR: %m::%s", inst_name); - end else if (severity_lvl == 1) begin - $display("WARNING: %m::%s", inst_name); - end else begin - $display("INFO: %m::%s", inst_name); - end - $display(" Parameter %s (%2d) must be a multiple of 8.", var_name, data_width); - $display(" AXI4-Stream data width is only defined for byte multiples. See the "); - $display(" AMBA4 AXI4-Stream Protocol Specification v1.0 Section 2.1 for more"); - $display(" information."); - ret_val = 1; - end else begin - ret_val = 0; - end - end - endtask - - task t_check_tuser_width( - input integer tuser_width, - input [8*80-1:0] tuser_name, - input integer tdata_width, - input [8*80-1:0] tdata_name, - input [8*80-1:0] inst_name, - input integer severity_lvl, - output integer ret_val - ); - // Severity levels: - // 0 = INFO - // 1 = WARNING - // 2 = ERROR - begin : t_check_tuser_width - integer tdata_bytes; - tdata_bytes = tdata_width/8; - if ((tuser_width%tdata_bytes) != 0) begin - // 000 1 2 3 4 5 6 7 8 - // 012 0 0 0 0 0 0 0 0 - if (severity_lvl >= 2) begin - $display("ERROR: %m::%s", inst_name); - end else if (severity_lvl == 1) begin - $display("WARNING: %m::%s", inst_name); - end else begin - $display("INFO: %m::%s", inst_name); - end - $display(" Parameter %s == %2d is not the recommended value of 'an integer ", tuser_name, tuser_width); - $display(" multiple of the width of the interface (%s == %2d) in bytes.' AXI4-Stream", tdata_name, tdata_width); - $display(" TUSER width in this module is only defined when the TUSER is the"); - $display(" recommended value. See the AMBA4 AXI4-Stream Protocol Specification v1.0"); - $display(" Section 2.1, 2.3.3 and 2.8 for more information. "); - ret_val = 1; - end else begin - ret_val = 0; - end - end - endtask diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 5396281397..f6a51da8e4 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,10 +26,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import os -import shutil from pyverilator import PyVerilator from finn.util.basic import get_rtlsim_trace_depth, make_build_dir @@ -77,34 +74,14 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - src_exts = [".v", ".sv"] - - all_verilog_src_files = list( - set( - filter( - lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs - ) - ) - ) - - verilog_header_dir = make_build_dir("pyverilator_vh_") - # use custom version of axis infrastructure vh - custom_vh = pk.resource_filename( - "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" + all_verilog_files = list( + set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) ) - shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") - for fn in all_verilog_srcs: - if fn.endswith(".vh"): - if "axis_infrastructure_v1_1_0.vh" in fn: - # skip, we use a custom version for this file without recursive gcd - continue - else: - shutil.copy(fn, verilog_header_dir) # remove all but one instances of regslice_core.v filtered_verilog_files = [] remove_entry = False - for vfile in all_verilog_src_files: + for vfile in all_verilog_files: if "regslice_core" in vfile: if not remove_entry: filtered_verilog_files.append(vfile) @@ -117,12 +94,7 @@ def file_to_basename(x): for vfile in filtered_verilog_files: with open(vfile) as rf: wf.write("//Added from " + vfile + "\n\n") - lines = rf.read() - for line in lines.split("\n"): - # break down too-long lines, Verilator complains otherwise - if len(line) > 20000: - line = line.replace("&", "\n&") - wf.write("\n" + line) + wf.write(rf.read()) verilator_args = [] # disable common verilator warnings that should be harmless but commonly occur @@ -136,20 +108,10 @@ def file_to_basename(x): # force inlining of all submodules to ensure we can read internal signals properly if read_internal_signals: verilator_args += ["--inline-mult", "0"] - # add defines to make certain XPM src files work with Verilator - verilator_args.append("-DDISABLE_XPM_ASSERTIONS") - verilator_args.append("-DOBSOLETE") - verilator_args.append("-DONESPIN") - verilator_args.append("--bbox-unsup") - vivado_path = os.environ["VIVADO_PATH"] - # additional SystemVerilog modules to make XPMs work with Verilator - xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" - xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" - xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" sim = PyVerilator.build( - [top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], - verilog_path=[vivado_stitch_proj_dir, verilog_header_dir], + top_module_file_name, + verilog_path=[vivado_stitch_proj_dir], build_dir=build_dir, trace_depth=get_rtlsim_trace_depth(), top_module_name=top_module_name, From a258b803e9c47790e702eb84336a2418c00a3ae6 Mon Sep 17 00:00:00 2001 From: aziz bahri Date: Fri, 26 Aug 2022 10:49:41 +0100 Subject: [PATCH 127/628] VVAU: add weightstream width helper Signed-off-by: aziz bahri --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 77fed5e3ab..b0c05d1ad6 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -1225,6 +1225,12 @@ def get_weightstream_width(self): else: 
return 0 + def get_weightstream_width_padded(self): + """Returns weight stream width padded to a multiple of 8. This is required + by the AXI Stream spec. Used in decoupled mode.""" + weight_width = self.get_weightstream_width() + return roundup_to_integer_multiple(weight_width, 8) + def get_op_and_param_counts(self): k_h, k_w = self.get_nodeattr("Kernel") fm = self.get_nodeattr("Channels") From 5356ace57554e8ca27bdd7fd7bc96c6d9d672edd Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 26 Aug 2022 13:10:07 +0200 Subject: [PATCH 128/628] [Test] add residual FIFO sizing testcase, path needs fix --- .../qnn-data/testcase/residual_testcase.onnx | Bin 0 -> 30334 bytes tests/fpgadataflow/test_fifosizing.py | 25 +++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 src/finn/qnn-data/testcase/residual_testcase.onnx diff --git a/src/finn/qnn-data/testcase/residual_testcase.onnx b/src/finn/qnn-data/testcase/residual_testcase.onnx new file mode 100644 index 0000000000000000000000000000000000000000..e22bd6396dfbc9834d62d7ab61e8f4a12fa558ac GIT binary patch literal 30334 zcmbuI4XmJ5edf>bIbKnQ5oH(=0jVgZI(&+V^2eDfT2Wf5TBR8VW`xmUW;zTCyEZvX zsioGo)KW{`#+({sOk*~rX&Se+dK7A@wU&16uG#Hw-gHf48ndz1T57HC?{_}#T*d)4 zp7Px1|Mi^j_kAzxhP(dwjmPi5@~L?+<)7_(a)x%Ogv%bsB3<- z*KA%n{)QblzIE5$ooAi!=KVMA*tz52jyLbw_x7^x=yNx%JZbk{wA+YXd&mA=XC3p3 zS6_RPnbTHI+7OC%x?IYj*71xqI(Ts~dUZVWD$R zS~>0&x7@UC_brDOa->Z?dFA-g6-#da=aQeea?ERAarL$zWKVzUh`Scw+pZn97`E&7 zo3`wJ`vq^k@Vjo;pB}w(T;HaR$G!D!mt6VkgPWheb>H5%?b~zP!QK1zwnKjTuD!bs z?BJZafu}xZ^4zDbJjvvb?K;3EbSICFqurg8R@QIZy=RBrxn+jf+E={n>PO#KPvB)& ztGja@|8(VXuj!%H+x4ieK5^wZZuah-yJo+Scb_+`JYnaq{ReM;`|ewJZKC_59_NOi zy7J_G?ZNrZ%m37sCmh&u%l2Cj-mz!bS;yY6XWxx)^_jTwi0Ajp@9q2^b5A+R3og0i zEpOg_!<)bBN&a6>vY%i-hwT?`-@k*YZdp0y>f81l+r05+~K~ z#k>?h@Jr|^D<|yTcWADC9-F`8+>ZUp`PMpV<#;xry~&Q|a(ieT^V@grzUk&8yyH$; zdE$;6Z(Ke8W9jAmEoy$C*V~bIlT&{4_FMnM&I|9j>ARlt7mryPr#4RcqQ^Srnb>&T z9j|@mkMDo$=1o`c;9Lb2Y27HYx~;|>^`__#|?Y1 z+Eu)@@4ydq&5nGWJoj(ia>@B`{+S!!_Fb>p@A4Jxzm2Sn>%-^aHG6hneDmgwSMJ<- z`0phf4v!u2H87ui(QMz%#k$!tJ+RxOdCXe7DWoxS{=S_akrF zeeg$i-M)X{frICM;Pmye{=^OIUV8YecRrM_%%`XP(+{Q^;8_o*bo_%UfBu1#{xjGL z%BNB~;iV~^vNffj-(wXs_6o z((BM(22KY@q5pHVzmDz&;8)O|jdn9y=A8Z*{4@HeG3Et*+|0Q7>EK)FKCK3v2lk-7 z7VWdq{1W4?nR8;!`Sq-maVzUh>%c=p`&{^1w6~+Z0PU}%{p{8hM>F3uwy}<`jTf=r zXQDeAe2%ey1KnldUbLs7-Gufov>yXs19Q%=VXhaVJq6v@ssA|IpNBUC_v#+BFGTwr zX#Rq=9>ti?Wb79)?yJ#VfbMDZ@lVwM5!yT9lfemS-;XxXvCqkUJ_hZXXqj`p2Hiz~ zar4)P_CLWJz?VLm(nrv~0^R%3{|~Sp-4CIEDY%w#-Dl7D->Co811aAPpAODr-1nfp z9L;Z{XWZa?6X#oBvMpuC4aeOC9zpjBG~SJ8fGud>h4vD(>~nITS#O+Ws~^a;%~nDYy} zwxZ44jqq-K2JNq*y8zsYb{pEW(EK9WPr=^+r=lItQLGua-K$?k=bgM6?R99MOZ_i{ zPlIpM$7!tfQt$@Gx}0(6z4{9@_kv5n-DodGdj|D$T%N(;8LZytalU!ntNh0gqy{Rz5X2Iqpg zSLdL8H+T>@F6(XYoS#DTV#Yj^`5ue?SLmH{fcG=*dd7VUnlWzrC+-L1Cf*yFalOys zxZk4w)8H}Q8}nUP$N9z$=zf$jpTl0Q!2b*F?|`ep{fzsiD^uFIHKm{1nzHY!N0>9( zHfQdn?>g^Qd&fS6#=GI2ydJH0@<-6zgZ9tRtz*92=jyY-J}36Mp3eA3(FgCz%YXJjzt0z)wl&JDGk9 z{5N#Rp?ekxz&l569%tm;$nQq?9PkFTd(g61`LO3VUdXsV!I&4L-30yx9nV7YS$HZ~ zkM;v-e{^d~zm5KH!13EsoQ>{E_L}FY^3GA4{+0SqqQ4tH6Kq49(QZNO8S&l--Wze+ zw)Xt`p7eX=J^f2 zDT!}Nd$<2HW4T_xDW`%n7}xjn3%9O*Qxe~@^vphPx*Fa2todZ-{*ToESMZ;}Ng$yu zXs=|g_ib(O_RKe>@*S*~Gv_y1(X@kI?)a@QgN~^?mgcbc~xH24mdxV&*-bh0L>%jsiRj?M`~1??k%| z?OABw1$bZ0_eSMenBN7Tg&5z3_U!x4xel%O>Un59-!;!*`yP3(E&-bv)BF4Y@O+;Q z=AAqb?XRHa-JZS;^Pa3P*w*KK8T))1+Ha%dZsd2NI|H1@xc4%y&%*oAd>(KoGk3CH z+2=gJNAvkjzlQF7@D8+Bp?wybvEKQ+S!TV#donnq#Cx(H!=C>Yn%@Fff)AigXiq|$ zw{rIIKl0u^Gu&N!Nzj&!PDZa0&38<368_?%nAA1h`(G1)jnA-U#;DIj??0 
z=R3#u)icrFji!P}0PmdaJBMds{?5>z-;X;Ucsh6v(;i+!u}4-`L^%iVb5TD=lBe624|yn&X=OO7kn0sXTQDM(Z*rVZ+rGPp*;<)@5#&2 zyqEF*lJoKT~!nJJDW?mOGh$4KQxm zGIuiZ9JS~7GicV{t5>1@A#}e4`2Oa9XRpV7PS-IeccXHzGIt~Wvkk;7*3`E5^-Z&e=J8pI-p(MjL2-j$AMI zxjn!0KL0pl@=b|T(SHLS-?HTM>%09LwENNeJz~A>_quPv>{XjH_u1cL+xO@Zv=!Wq z&SyW+X0+#1?>mQYN>1n;_tI_cJ(*Z<wQk_v;R(4)jl1q@2furzl!epV7_zCqW%{F&qDfF z^lP5q`FneQto3f8eylfG@9H=7jf}_p%HQiM_p05E`xx^j;6CPjGIKr!?Olxd$KdPO zr!eP>wmtU0B+vhcOaCP~l?s9b;}^AufDiRW;5y(z+zUPoJ_a5JUj&bU;}|TV+k)o9 z;1O^(aUT*7|J|T~-vpckW0UrYP<39a8_y$;y&bgQd1@8d&0>_@(f51+R zw*sE(xWPYY?gxpS+H1h=!2S9F_yh16FwgjCbWZ{qZAP~M_exy`&i8)s5Lmz?z_amR z#?AnI6xv!n*R#nv*TZ#=<^H-y-Va{N?LJ-t^RF=9v0p*$&EQ?YJLbLqH29xj?XytF zoF8WF4dmAD^ZW4s3wRKG0el%eYOfgg40Im_p0Q&d1n&W!kGeVUx!yCGulL$#KcVya zT~D3k?g#e(&u41gt0z$ZG{6U;{lf2k-h$3EI)?lfa2>b{IOqF;Yjy0efq5srSJ$Jd zW6bl&x!2wc=lmh?QSe#dIjQyeeKO!hw>oAbKD8P9Uf`TR4n7aQ1il5Fw`c5GUBj5} zlWV;QTn`SPuV?cW;C*sT-x;nsqU-0p)z9_bhwT}C3d}Q_@40rL8S@^nfYYei2KE5& zMFGDL9s=&wSHT>cU#=dtSMKo}z+CT#u|EbL244h^0N=~=*-wmDz%j&~@6Et>hI?Fr zb9Fp5^L_Wj*!*JF#(fCw3FHsotHamoee|8~T78x>dv!h93f$}OXWozXJ^F6z_I~+( zUKrcG_BnR1uL1{w>;544L-0A^etiSXXE~2KuU+%n^Zf|5p6`DL{{XyOzOTGnnQ<%n zyBYHfU=z6jYtMMTuNLB81J2iPl4msU#>vDl2Rp$#!F|ASJ*&@yzXx;9zN6>3nLZu{ z8^~V@t_ODk_4BOO&f90g=l2XW1$+gZP3^0Idvy=^AowWwEciR%dv89&bAQ!b3o>x5 z`@yGy_xNjI-mCc?^B2*zV|qT@fctbe_-$YxhwroRE$?Syp7(*ZXS<(z&-Mdg&euI& zJKy#lj7Q(e^Qdv|?*P7c)~;8*^Y=OQo8lgMk3Ad5v;HH%wOZ?$co%#>oejctc#L@|b=QNtzPeJ4+EX+^_-5O<~(pMQ0smj>0Zt6+2Oi3P@loQ zz%f4t{u=x}So_?c=D1_dcQdZAxb?*!m8;raUe#+I>9Cy>7s+zj3g+#~O!YxQg$Q?1`WpC|9eezXs- zS*O~&!TZ75XW^^(bASDItM@)T_WdA{KM#0DbMEf7=R5cMo7mn9@0Rf-m z2KEEzUceuK&wvGd4LH8fL=NsBnhMqv-vW+w#t#$!B6tM&9QyqZwAY}U&;QikNsZ5v z`#taEm$04ld>%aC1)M?bRls$s@jiSS{4H>v^Zo4aM$ZMlpJUCOPoU-^a2>b{+y~a) zsW0Qt`{B3WaUTXJ6Tckn1oJN5kNqI{0+_n_JA?n8b@+YWK;5O_dN9YHYo6yc_p8+Y z{1(Pe)H~;!!MnkSKn0HL-I!~3%=5OM`&)S(|5I}CKP9*S&sEzG{SU}_U7u88Lox$o z*hvaot=Q3X3D&o!8slGnGQN3pWiVdv$5gBtT{5p~m0uLK)6Rd4C=K$}k-(j;u>qc$6d)mNaODv)W`RA7x918sle zP~7^KgH!GEcMRhhzrf~=$@*+Hj3&D2?^onY%#)?jNF*k(-v<^wFC>etkCbDt}|`3%e%x6ZayY|!$|)%jW%#IiZ; zxdwF!e_|PShARmD3TRDFU;3?28GVHpP~y-f}&+!wnf-hoV&bN+u%SF%I zM;6Q@VEwX&6Sn&-RPs~sGEBl zt{K-MqgB5eOo)W^;I9cz>Bthz_Udq z7CpaqOzVxO!RAWsbuEXz0Bg)yXPf)xnC8@2SFuF^bJ}SQYZqd5aPzUP7YQt2uB~8O zJKKU?qmQK>pnz-)n9~+B9y8{iRB}KzzQC=?(fwI7*J$4U^h@pYms`KYt-`H&m}5R&}u)?`&&k=$h4@$1$moe*8nqm$^mHF#+Nm`MAwF_4z|wts*PXqGf=B-YvV@mQ%%8cz)s*%^CIWG zW5f%r#y-`w{ekNmzs2hs9r{*3Vhgp)uoL#$b?eWzcAmy+j5)&@Si3qO##r=6e4bmb zW1j4hF>8zkP_JLH%~k9&VgU>1uo$bUwbn1TdgJ;DPPKE>jByI_EXc_fwi!=gslAWD zwnmP@dDyG{<0o?Z>TGI6k)vz%ZLLq{UV|K*>KG3$#1>$S+WWBHnpu~%X{&ne>W@BF ziZNGw`?9`)u?oH|!XJQEYLA=K&akakegyu|8J{`V+H}-Z<8a8-%4)>|3TS&1ofE#A z1j-s-uygHDQ$cINPf@!*U~O4AW`^x6gMw{q=TrMw_HEo)#ZI8=3-soKSO(ff9=0_J zETBfW=lU1?Aa1^`Q3er*xY>+bMl8d&Iqw>ME~OI07PJ4NU;CV}R>x}bFhK2K^$T{u z6>jk~#&LY&=JEuwh}2q(;a74gMlEa~)-1L6nPBq`+CYtUqJTxzUZ3IS6Ki#!=)Um; z)W{Vsut?a`&-gLWYDpLK#H@9n%o}TYxW(ZBOC9yYwpJwksZIFR7_blH=9|xa?Zu}6e57k&J=03L^dHcz*e$%0~&Q@#hGvE@3=C~a(zt)$5wR1}~wVDE(UqF&;XX6>j5s8j^%>q}@Vua<4Z;N`UUb_ZCc!B5o3$_|L z58GPv#unrF39d4b=iWw)FJq=yCI{KYc;I5K~(Ow#~XKqT31ETpqFJlWS`oY#$3yV{L$hZLM`SYs@)E z07t*VlvJH1O=BR+6^LKcGZsB7MI2HtTEj{xh7wQi+2y*mTc#v0{fwnBHFK_}Cdo7#K#9AI+fO9uy z&UghWx;A9|R>znL+uQ;-oh*o%vo7EQw^sYyv|9|{+NReQj9=j(ZnLiKXN4KF`NjnP ztX=R$1+7(nf)YR*o<3G+E>Pp!3A5GH`jb~%r}U)7=99!_+Y%&tf?Y8 z>z4s-p|-6RGvkLoXmR{9?XeHdH%)vYUqQxBpbnU`@0Q0-*k!<4VSbLg;Maj%|eZsbx*DgR^ z#cwToF2M^h-)dlM3J7dlvnxJG0ch6fTcb{H{lm6)Mzw332HTh#>*g6wUE}C|71)|Y zZN*N&ywJAg+H)mvup48yea5%WJ{EZPQ?YC9wc2eS`Xi<$j0@(vz!faD*P65C+Vk2a 
z_yLSnU|XX16BP%$f(1KZ{ZvOSwlR?hr_r37dUfV(welnSeATpm;0mm1tWF=cUbZd; zpjlHY2COZ#qxL@Lxh%#&7FYDTOicxIZ(3Y`jHxE+!>u25<_dlVj*(#dDL_sW(fhX6 z8uPNKpnwI;xde6+v^l=H*|e=uW8G4_w$)-6Vhz|~4&NH>1v|;0Vh5<$at6jl3>0Ds z7^`rB%_Xoz*9F|>CllAVPJ3z+twi@Laq~9qre`mg=x`mjmaC%{PSNLReC|~xrhYM| z4%QES_Q{y_`fK&pWMTzgfL!n^Y^|K&1e>$31z)xahbmU^OjH-v!(8Wl?Ktx{RH%3ousk6NmxxQB!N?tg$wMC1B9jg>Q4sC5FBd zz!nv#GdJgvu@_^&ngAI)K*hGn8JJ78`!jFOcsna{Nn55lMvmUMHB)PizWEB*I_3r2 z@0@=oXVX{P#z0eI)Wf!xhnL#>Q&+I9Tfo}9HPa7s^k?4*yTWROxdpUb?YXHlmdyd{ z^aE6E{RLcPZHp1Fz#8)z&VvKHfY#szr0De(+q`SYa3yXo)uG3pdE)^p zxdLaNtYV9d zJO z?;~JQqWjin{6(hk3>W+gtV>{yY0i4%?nS{a*fZ8_eOlWqYHIX8Ywh*=0h0bei)lwk zn+Mv3SR!VoA}7?OWhm-{RU}#tZQUlo3zZHfs_v z-ukV*pNem80WC*7X~V{4`?k-DZ%o@h8t`Xr^PL}gV|DPX3;e}cq&7yv&ciO)2|L4e zFp)NP5VMa8o3m*z1KI&f9IDr!{b;)e^9!(k?S5r^bGByJ&gui0({4IrwIjhZH|vb0 z0F!m`n+9Hp7hp|>Gh79B=nL0MOuxW3V<`^#sdkMqHOMmMS+#1|;^-w?7iY#11?en*< zLQLPhcEPR#=G%QlvtWyPRxxUtJ`Uw(AH`b7qD2fF)>hl{aK+ECFmArZ)MB^sujt%- z;+48)n+Gvqt*ykNby#P!Ce;qd(w-dHRm2zvd%-TSnEPvu{mi<>9Db6W7xtnLid=jB zwB-PEO@+KzKqWpA_|_P28aM@@IgYSqiLS}mwpL$9|1s)d`>4jzscC(USRpRdws}!w zZ2~nqw|e~02liu|bFr@3R}icKzt!Pq>;R==>w^W~`Y;AtBiokRZMo&6&qLdOthL!k ztsTa-ZB4@%HK>Ji?K*v%b!yY#1y0cs*lG&C$oMS>&%P>ljYIvkIGP}q$SrUd*a3{` zSJ)b3bM599Ae$@0Hea#zMUB*&u>|H`$t601xOutAF@S9|K6Cb`c8;@}BW`~SwRv#D zZuRg&Y_7NBw-&u#-?{+SsM8L30a@U;amhDZA6Q#Kiax%zWx!k^ZnNK|_C5oyAcG{L z$D5C9zcU}$=7sqN)?e@gzvZAhe0c%JOYL>ed%+H3vbnYvY-j>wkXk& zvF)qn;6)$UZ)0O1h>6ys*V#wY5iE7|183rDY;v0;w!G9HYjNzhX1Mhoqnmg$6H{ZYEytKQY+cKbaY(n?Y8))l{RMljsbE*) z=4Xw5>tAN97B`PCTHd&M##+d=7@P%hV`YtBh*=XdaoJky7VLl%?0EXN8jV(eSYL=) zTYz~vY0J_5z|PplKiK*nB%UEzXXVC^JF=ZtN0jHUK?s|kW2K&f449~qc$F>GrxeiG5+t2x8O z)hxihpZj0*K~xy1v9-0<&TBR1^eeDlo|=Te09%f(HD7C=f52I|SFo+rL;59p&bfs) zXnHtRK(pSu0z%+Vv~}TU;?|f?pw?cq+An?rHK=7Qd9@DP`so*J{dO+VZT$cXNZQfI zGoFWSzxJVDDmKUi+G+x}MmD#|>`ftNQ>$-{ErK7l_h(H7<{KEVlCx$tXH1{?0;`>Q z?FsAcBZ!p|&)UZ6!(9nPilN80?*PWk3)|!+x+cT+Q|hC&>K0<=tZnxN-_|tI>uc>}Ss&Pw zE51#g@dAu3pv`;4^b@wN+0p9?Y_oRit*gMCF{^<2bum1M|4*j?DHumA{s!(2)0K3u8u&r%T#fTmYZ0oFPF{Yr#df9yQ$JENm znXly326lqYRj^>oE+q|rjw`EcdAQ}Nkpq}-z+TqG3U(bxqnWp_1ZQ}TYpek4tr4wH z=F@C)$=I0RYL0j&Za=mXl-1r>hMNXn0visv0&~lNeYU)DV7$NuHttw~U4e0Hv=^YZ z#juM$wX-hp#WGksfj0NR6!;0Y&w_1@v1QoC60kN8Hn!j|aDnwh8&qIl=JT+V%sj&w z^D`I3(qv3^1%H9fSCFx7)|yLj9-QFTC$Rz+<6yzI#$1KX88;Td`~nwkkU^^Ont7Sa z#KO2XFlJM4yv89mmVs<*9MsfavuJ}1(y*iUy0tNNLEc;eHe(4a(Q_3pK;Jdy;U`$# zT!)&NG3)S+$u+vIrrA!4F_7!H?RZcIsrIRt~U$W)rKR z^)X@@Hop*0a0u)ou(LjBHn9XMzA^0uWRPl)%dIAQZox0cXmSm$-mf`p3&`5E9NmxF z`%}~Q0A9$|;hR@$&Kms;7MYkiZJYO}1Uyj*f+~+@Z2_%M>h+hvx5c4xjaMMsO6~pY zJ5Gy7A5UEYa){dd)UMbGXcxEvTYttE(@&Ezt!aAG6={qGuWe9W4+Joh{Pg4+DAFl~JD$_aG5o{j^Yc!bjV3)y#+KJ&0Y;B{C* zyoIUtzufgWQ#)@lsTb?!lINZilS5BegDMZ)q#nJ2QF^<^( zBHZJQNX__rfsfv>JME1h?1cS{Kf(Cunl&45P5%Q^k3H6bHyysZr>;Dy{|QO{Cgnl? 
zCgsNVHz_w>xN_pbU3+ik4;0?YpS-(u_rW_hp79^g+4=T!E`#3M{vP4>TW{X6e;0u_ zU3lIZ7yk6m{_KHgAHM%b?)<>{kA3?5#luHW$3GKy;RQ#KtlomhlDus7e2zSgjX$*V zq_M_5JMQ3*6K*{DP3OG&ob%2&=PG}^-XB}f`RN1nGybaR_a@kQ~^r^o>Tt literal 0 HcmV?d00001 diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 7287861d58..9d8b02ebe4 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -72,7 +72,7 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.slow @pytest.mark.vivado -def test_fifosizing(): +def test_fifosizing_linear(): tmp_output_dir = fetch_test_model("tfc") steps = build_cfg.default_build_dataflow_steps steps.insert(10, custom_step_fifosize) @@ -103,3 +103,26 @@ def test_fifosizing(): > 0.9 ) shutil.rmtree(tmp_output_dir) + + +def test_fifosizing_residual(): + steps = build_cfg.default_build_dataflow_steps[8:] + tmp_output_dir = make_build_dir("build_fifosizing_residual") + cfg = build_cfg.DataflowBuildConfig( + output_dir=tmp_output_dir, + auto_fifo_depths=True, + auto_fifo_strategy="characterize", + synth_clk_period_ns=10.0, + board="Pynq-Z1", + verbose=True, + rtlsim_batch_size=10, + verify_save_rtlsim_waveforms=True, + shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, + generate_outputs=[ + build_cfg.DataflowOutputType.STITCHED_IP, + build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE, + ], + steps=steps, + default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, + ) + build.build_dataflow_cfg("residual_testcase.onnx", cfg) From bce0e6ca2bd2f72abae19eefcb81d3d48e0d8c74 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 26 Aug 2022 13:11:01 +0200 Subject: [PATCH 129/628] [FIFO] try sizing bypass FIFOs with a new approach --- .../fpgadataflow/derive_characteristic.py | 58 ++++++++++++++++++- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index a9b291ba5b..f857cdb5ef 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -31,6 +31,7 @@ import qonnx.custom_op.registry as registry import warnings from pyverilator.util.axi_utils import _read_signal, reset_rtlsim, rtlsim_multi_io +from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.base import NodeLocalTransformation from finn.util.fpgadataflow import is_fpgadataflow_node @@ -80,9 +81,9 @@ def applyNodeLocal(self, node): "DuplicateStreams_Batch", "StreamingConcat", ] - assert ( - node.op_type not in multistream_optypes - ), f"{node.name} unsupported" + if node.op_type in multistream_optypes: + warnings.warn(f"Skipping {node.name} for rtlsim characterization") + return (node, False) exp_cycles = inst.get_exp_cycles() n_inps = np.prod(inst.get_folded_input_shape()[:-1]) n_outs = np.prod(inst.get_folded_output_shape()[:-1]) @@ -183,6 +184,57 @@ def accumulate_char_fxn(chrc): ) return (node, False) + def apply(self, model: ModelWrapper): + (model, run_again) = super().apply(model) + # apply manual fix for DuplicateStreams and AddStreams for + # simple residual reconvergent paths with bypass + addstrm_nodes = model.get_nodes_by_op_type("AddStreams") + for addstrm_node in addstrm_nodes: + # we currently only support the case where one branch is + # a bypass + b0 = model.find_producer(addstrm_node.input[0]) + b1 = model.find_producer(addstrm_node.input[1]) + if (b0 is None) or (b1 is None): + warnings.warn("Found unsupported 
AddStreams, skipping") + return (model, run_again) + b0_is_bypass = b0.op_type == "DuplicateStreams" + b1_is_bypass = b1.op_type == "DuplicateStreams" + if (not b0_is_bypass) and (not b1_is_bypass): + warnings.warn("Found unsupported AddStreams, skipping") + return (model, run_again) + ds_node = b0 if b0_is_bypass else b1 + comp_branch_last = b1 if b0_is_bypass else b0 + + ds_comp_bout = ds_node.output[0] if b0_is_bypass else ds_node.output[1] + comp_branch_first = model.find_consumer(ds_comp_bout) + if comp_branch_first is None or comp_branch_last is None: + warnings.warn("Found unsupported DuplicateStreams, skipping") + return (model, run_again) + comp_branch_last = registry.getCustomOp(comp_branch_last) + comp_branch_first = registry.getCustomOp(comp_branch_first) + # for DuplicateStreams, use comp_branch_first's input characterization + # for AddStreams, use comp_branch_last's output characterization + period = comp_branch_first.get_nodeattr("io_characteristic_period") + comp_branch_first_f = comp_branch_first.get_nodeattr("io_characteristic")[ + : 2 * period + ] + comp_branch_last_f = comp_branch_last.get_nodeattr("io_characteristic")[ + 2 * period : + ] + ds_node_inst = registry.getCustomOp(ds_node) + addstrm_node_inst = registry.getCustomOp(addstrm_node) + ds_node_inst.set_nodeattr("io_characteristic_period", period) + ds_node_inst.set_nodeattr("io_characteristic", comp_branch_first_f * 2) + addstrm_node_inst.set_nodeattr("io_characteristic_period", period) + addstrm_node_inst.set_nodeattr("io_characteristic", comp_branch_last_f * 2) + warnings.warn( + f"Set {ds_node.name} chrc. from {comp_branch_first.onnx_node.name}" + ) + warnings.warn( + f"Set {addstrm_node.name} chrc. from {comp_branch_last.onnx_node.name}" + ) + return (model, run_again) + class DeriveFIFOSizes(NodeLocalTransformation): """Prerequisite: DeriveCharacteristic already called on graph. 
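The bypass fix above relies on io_characteristic being a flat list of 4 * period counters, where the first 2 * period entries describe the input side and the remaining 2 * period entries the output side. That layout is inferred from the [:2 * period] and [2 * period:] slices in the patch rather than documented anywhere, so the sketch below, with made-up counter values, is an assumption for illustration only.

period = 4
# hypothetical cumulative transaction counts over 2 * period cycles
in_chrc = [0, 1, 2, 3, 4, 4, 4, 4]   # input half of the first compute-branch node
out_chrc = [0, 0, 1, 2, 3, 4, 4, 4]  # output half of the last compute-branch node

# what the fix stores on the two bypass nodes: each half is repeated so the
# attribute keeps its full 4 * period length
duplicate_streams_chrc = in_chrc * 2  # DuplicateStreams_Batch takes the input half
add_streams_chrc = out_chrc * 2       # AddStreams_Batch takes the output half

assert len(duplicate_streams_chrc) == 4 * period
assert len(add_streams_chrc) == 4 * period

In effect the bypass DuplicateStreams_Batch is made to look like the first node of the compute branch and the AddStreams_Batch like its last node, which matches the warnings the transformation prints.
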
From 5072b696b42d395aa70cbbc8bf853ccc14303592 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 29 Aug 2022 22:54:30 +0200 Subject: [PATCH 130/628] [FIFO] typo fix for residual experiment --- .../transformation/fpgadataflow/derive_characteristic.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index f857cdb5ef..6514620664 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -188,7 +188,7 @@ def apply(self, model: ModelWrapper): (model, run_again) = super().apply(model) # apply manual fix for DuplicateStreams and AddStreams for # simple residual reconvergent paths with bypass - addstrm_nodes = model.get_nodes_by_op_type("AddStreams") + addstrm_nodes = model.get_nodes_by_op_type("AddStreams_Batch") for addstrm_node in addstrm_nodes: # we currently only support the case where one branch is # a bypass @@ -197,8 +197,8 @@ def apply(self, model: ModelWrapper): if (b0 is None) or (b1 is None): warnings.warn("Found unsupported AddStreams, skipping") return (model, run_again) - b0_is_bypass = b0.op_type == "DuplicateStreams" - b1_is_bypass = b1.op_type == "DuplicateStreams" + b0_is_bypass = b0.op_type == "DuplicateStreams_Batch" + b1_is_bypass = b1.op_type == "DuplicateStreams_Batch" if (not b0_is_bypass) and (not b1_is_bypass): warnings.warn("Found unsupported AddStreams, skipping") return (model, run_again) From 246a52f3c8be0d7b50393078a3ded741cd204d08 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 29 Aug 2022 22:55:37 +0200 Subject: [PATCH 131/628] [Test] baseline for residual FIFO sizing test --- .../qnn-data/testcase/residual_testcase.onnx | Bin 30334 -> 7453 bytes tests/fpgadataflow/test_fifosizing.py | 13 ++++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/finn/qnn-data/testcase/residual_testcase.onnx b/src/finn/qnn-data/testcase/residual_testcase.onnx index e22bd6396dfbc9834d62d7ab61e8f4a12fa558ac..c96e8c694e3a39cdb9e5d984e1c069ceb55b3f2a 100644 GIT binary patch delta 879 zcmaJ;OKcKR6wR9%n8zzRucIwfN|oX#7PW%%Nt;Heg{n{iwFYCyNDxT}T0Sy>u(3@v zF>xing>E!m8n?tf#*KToZtPYU-MKTqnbImmT%6pOlXK6x_r700z?<1o$cucrSuIsE zs-5#rdp)x|Gw4gprU2a1jsZ-tWmd8yp;#+Kisf20naxnKCIi_iJ6qqMfcK;X0+RwV zM+&RqTtTO|0PhyjP`p+Msp(=dyRJ!?d@_iG3-#273b2$hQUlrL3ISJDMfbQ6+0rxwVPVj&Nv zf<$r4L^&NMsWWp#IZNv_t>-A`q3}|ir?^1jnhk8EujM E4P-F7QUCw| literal 30334 zcmbuI4XmJ5edf>bIbKnQ5oH(=0jVgZI(&+V^2eDfT2Wf5TBR8VW`xmUW;zTCyEZvX zsioGo)KW{`#+({sOk*~rX&Se+dK7A@wU&16uG#Hw-gHf48ndz1T57HC?{_}#T*d)4 zp7Px1|Mi^j_kAzxhP(dwjmPi5@~L?+<)7_(a)x%Ogv%bsB3<- z*KA%n{)QblzIE5$ooAi!=KVMA*tz52jyLbw_x7^x=yNx%JZbk{wA+YXd&mA=XC3p3 zS6_RPnbTHI+7OC%x?IYj*71xqI(Ts~dUZVWD$R zS~>0&x7@UC_brDOa->Z?dFA-g6-#da=aQeea?ERAarL$zWKVzUh`Scw+pZn97`E&7 zo3`wJ`vq^k@Vjo;pB}w(T;HaR$G!D!mt6VkgPWheb>H5%?b~zP!QK1zwnKjTuD!bs z?BJZafu}xZ^4zDbJjvvb?K;3EbSICFqurg8R@QIZy=RBrxn+jf+E={n>PO#KPvB)& ztGja@|8(VXuj!%H+x4ieK5^wZZuah-yJo+Scb_+`JYnaq{ReM;`|ewJZKC_59_NOi zy7J_G?ZNrZ%m37sCmh&u%l2Cj-mz!bS;yY6XWxx)^_jTwi0Ajp@9q2^b5A+R3og0i zEpOg_!<)bBN&a6>vY%i-hwT?`-@k*YZdp0y>f81l+r05+~K~ z#k>?h@Jr|^D<|yTcWADC9-F`8+>ZUp`PMpV<#;xry~&Q|a(ieT^V@grzUk&8yyH$; zdE$;6Z(Ke8W9jAmEoy$C*V~bIlT&{4_FMnM&I|9j>ARlt7mryPr#4RcqQ^Srnb>&T z9j|@mkMDo$=1o`c;9Lb2Y27HYx~;|>^`__#|?Y1 z+Eu)@@4ydq&5nGWJoj(ia>@B`{+S!!_Fb>p@A4Jxzm2Sn>%-^aHG6hneDmgwSMJ<- z`0phf4v!u2H87ui(QMz%#k$!tJ+RxOdCXe7DWoxS{=S_akrF 
z#juM$wX-hp#WGksfj0NR6!;0Y&w_1@v1QoC60kN8Hn!j|aDnwh8&qIl=JT+V%sj&w z^D`I3(qv3^1%H9fSCFx7)|yLj9-QFTC$Rz+<6yzI#$1KX88;Td`~nwkkU^^Ont7Sa z#KO2XFlJM4yv89mmVs<*9MsfavuJ}1(y*iUy0tNNLEc;eHe(4a(Q_3pK;Jdy;U`$# zT!)&NG3)S+$u+vIrrA!4F_7!H?RZcIsrIRt~U$W)rKR z^)X@@Hop*0a0u)ou(LjBHn9XMzA^0uWRPl)%dIAQZox0cXmSm$-mf`p3&`5E9NmxF z`%}~Q0A9$|;hR@$&Kms;7MYkiZJYO}1Uyj*f+~+@Z2_%M>h+hvx5c4xjaMMsO6~pY zJ5Gy7A5UEYa){dd)UMbGXcxEvTYttE(@&Ezt!aAG6={qGuWe9W4+Joh{Pg4+DAFl~JD$_aG5o{j^Yc!bjV3)y#+KJ&0Y;B{C* zyoIUtzufgWQ#)@lsTb?!lINZilS5BegDMZ)q#nJ2QF^<^( zBHZJQNX__rfsfv>JME1h?1cS{Kf(Cunl&45P5%Q^k3H6bHyysZr>;Dy{|QO{Cgnl? zCgsNVHz_w>xN_pbU3+ik4;0?YpS-(u_rW_hp79^g+4=T!E`#3M{vP4>TW{X6e;0u_ zU3lIZ7yk6m{_KHgAHM%b?)<>{kA3?5#luHW$3GKy;RQ#KtlomhlDus7e2zSgjX$*V zq_M_5JMQ3*6K*{DP3OG&ob%2&=PG}^-XB}f`RN1nGybaR_a@kQ~^r^o>Tt diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 9d8b02ebe4..1680624611 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -26,6 +26,8 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import pkg_resources as pk + import pytest import json @@ -106,16 +108,19 @@ def test_fifosizing_linear(): def test_fifosizing_residual(): + model_fname = pk.resource_filename( + "finn.qnn-data", "testcase/residual_testcase.onnx" + ) steps = build_cfg.default_build_dataflow_steps[8:] tmp_output_dir = make_build_dir("build_fifosizing_residual") cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=True, - auto_fifo_strategy="characterize", + auto_fifo_strategy="largefifo_rtlsim", synth_clk_period_ns=10.0, board="Pynq-Z1", verbose=True, - rtlsim_batch_size=10, + rtlsim_batch_size=1, verify_save_rtlsim_waveforms=True, shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ @@ -124,5 +129,7 @@ def test_fifosizing_residual(): ], steps=steps, default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, + # start_step="step_set_fifo_depths", + # stop_step="step_set_fifo_depths" ) - build.build_dataflow_cfg("residual_testcase.onnx", cfg) + build.build_dataflow_cfg(model_fname, cfg) From eca7b071550aaa9d79966b7b3338d9748ec4202e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 21:11:10 +0200 Subject: [PATCH 132/628] [HLSCustomOp] new attributes for multi-IO FIFO sizes, chrc. padding --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index bb359ef0b5..c20ce5b25c 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -110,11 +110,16 @@ def get_nodeattr_types(self): # input and output FIFO depths "inFIFODepth": ("i", False, 2), "outFIFODepth": ("i", False, 2), + # input and output FIFO depths for multi-I/O nodes + "inFIFODepths": ("ints", False, []), + "outFIFODepths": ("ints", False, []), "output_hook": ("s", False, ""), # accumulated characteristic function over two periods "io_characteristic": ("ints", False, []), # the period for which the characterization was run "io_characteristic_period": ("i", False, 0), + # amount of zero padding inserted during chrc. 
+ "io_characteristic_pads": ("ints", False, []), } def get_verilog_top_module_name(self): From 5abfe83f75acead8a25da63e4d96023bc8d78710 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 21:11:44 +0200 Subject: [PATCH 133/628] [FIFO] characterize Add/DuplicateStreams, optional bypass fix, pad --- .../fpgadataflow/derive_characteristic.py | 33 +++++++++++++------ 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 6514620664..53540e22d8 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -53,9 +53,10 @@ class DeriveCharacteristic(NodeLocalTransformation): NodeLocalTransformation for more details. """ - def __init__(self, period, num_workers=None): + def __init__(self, period, num_workers=None, manual_bypass=False): super().__init__(num_workers=num_workers) self.period = period + self.manual_bypass = manual_bypass def applyNodeLocal(self, node): op_type = node.op_type @@ -77,8 +78,6 @@ def applyNodeLocal(self, node): return (node, False) # restricted to single input and output nodes for now multistream_optypes = [ - "AddStreams_Batch", - "DuplicateStreams_Batch", "StreamingConcat", ] if node.op_type in multistream_optypes: @@ -107,9 +106,13 @@ def applyNodeLocal(self, node): }, "outputs": {"out": []}, } - - txns_in = {"in0": []} - txns_out = {"out": []} + # override for certain fork/join nodes + if node.op_type == "DuplicateStreams_Batch": + del io_dict["outputs"]["out"] + io_dict["outputs"]["out0"] = [] + io_dict["outputs"]["out1"] = [] + elif node.op_type == "AddStreams_Batch": + io_dict["inputs"]["in1"] = [0 for i in range(n_inps)] try: # fill out weight stream for decoupled-mode components @@ -123,10 +126,13 @@ def applyNodeLocal(self, node): io_dict["inputs"]["weights"] = [ 0 for i in range(num_w_reps * n_weight_inps) ] - txns_in["weights"] = [] except AttributeError: pass + # extra dicts to keep track of cycle-by-cycle transaction behavior + txns_in = {key: [] for (key, value) in io_dict["inputs"].items()} + txns_out = {key: [] for (key, value) in io_dict["outputs"].items()} + def monitor_txns(sim_obj): for inp in io_dict["inputs"]: in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 @@ -156,11 +162,15 @@ def monitor_txns(sim_obj): assert total_cycle_count <= self.period # restrict to single input-output stream only for now txns_in = txns_in["in0"] - txns_out = txns_out["out"] + txns_out = txns_out[ + "out" if node.op_type != "DuplicateStreams_Batch" else "out0" + ] if len(txns_in) < self.period: - txns_in += [0 for x in range(self.period - len(txns_in))] + pad_in = self.period - len(txns_in) + txns_in += [0 for x in range(pad_in)] if len(txns_out) < self.period: - txns_out += [0 for x in range(self.period - len(txns_out))] + pad_out = self.period - len(txns_out) + txns_out += [0 for x in range(pad_out)] def accumulate_char_fxn(chrc): p = len(chrc) @@ -177,6 +187,7 @@ def accumulate_char_fxn(chrc): io_characteristic = txns_in + txns_out inst.set_nodeattr("io_characteristic", io_characteristic) inst.set_nodeattr("io_characteristic_period", self.period) + inst.set_nodeattr("io_characteristic_pads", [pad_in, pad_out]) except KeyError: # exception if op_type is not supported raise Exception( @@ -186,6 +197,8 @@ def accumulate_char_fxn(chrc): def apply(self, model: ModelWrapper): (model, run_again) = super().apply(model) + if not 
self.manual_bypass: + return (model, run_again) # apply manual fix for DuplicateStreams and AddStreams for # simple residual reconvergent paths with bypass addstrm_nodes = model.get_nodes_by_op_type("AddStreams_Batch") From 84c58adad2719b9daf11a740a26ddcf1d60e72b2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 21:15:31 +0200 Subject: [PATCH 134/628] [FIFO] use outFIFODepths attr for multi-out --- .../transformation/fpgadataflow/derive_characteristic.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 53540e22d8..1bd7182ca8 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -282,7 +282,7 @@ def applyNodeLocal(self, node): model = self.ref_input_model consumers = model.find_consumers(node.output[0]) # compute FIFO depth for each consumer - out_fifo_depth = 0 + out_fifo_depths = [] for cons_node in consumers: cons = registry.getCustomOp(cons_node) cons_chrc = cons.get_nodeattr("io_characteristic") @@ -298,11 +298,13 @@ def applyNodeLocal(self, node): prod_chrc_part = prod_chrc[pshift_min : (pshift_min + period)] cons_chrc_part = cons_chrc[:period] fifo_depth = (prod_chrc_part - cons_chrc_part).max() - out_fifo_depth = max(out_fifo_depth, fifo_depth) + out_fifo_depths.append(fifo_depth) # set output FIFO depth for this (producing) node # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) # for each tensor - prod.set_nodeattr("outFIFODepth", out_fifo_depth) + prod.set_nodeattr("outFIFODepth", out_fifo_depths[0]) + # used only for multi-consumer. nodes + prod.set_nodeattr("outFIFODepths", out_fifo_depths) except KeyError: # exception if op_type is not supported From 0366250475f79ec32c84c68a05f81852a0fe2c6e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 23:36:00 +0200 Subject: [PATCH 135/628] [AddStreams] default inputFIFODepths attr --- .../fpgadataflow/addstreams_batch.py | 27 ++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index 13a4c5892c..153699197b 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -42,18 +42,21 @@ def __init__(self, onnx_node): super().__init__(onnx_node) def get_nodeattr_types(self): - my_attrs = { - "NumChannels": ("i", True, ""), - "PE": ("i", True, ""), - # FINN DataTypes for inputs; output datatype inferred from input - "inputDataType": ("s", True, ""), - # number of input vectors, examples: - # [1] is a single vector (like a FC layer with batch=1) - # [4] is four vectors (like a FC layer with batch=4) - # [1, 4, 4] is four * four vectors (like a conv layer with batch=1) - "numInputVectors": ("ints", False, [1]), - } - my_attrs.update(super().get_nodeattr_types()) + my_attrs = super().get_nodeattr_types() + my_attrs.update( + { + "NumChannels": ("i", True, ""), + "PE": ("i", True, ""), + # FINN DataTypes for inputs; output datatype inferred from input + "inputDataType": ("s", True, ""), + # number of input vectors, examples: + # [1] is a single vector (like a FC layer with batch=1) + # [4] is four vectors (like a FC layer with batch=4) + # [1, 4, 4] is four * four vectors (like a conv layer with batch=1) + "numInputVectors": ("ints", False, [1]), + 
"inFIFODepths": ("ints", False, [2, 2]), + } + ) return my_attrs def get_normal_input_shape(self, ind=0): From 2b5f93fefd32b7a7f8f8de3abc62fbd80f310764 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 23:36:15 +0200 Subject: [PATCH 136/628] [InsertFIFO] make use of multi FIFO depths attributes --- .../fpgadataflow/insert_fifo.py | 39 ++++++++++++++----- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index e77774df72..b9222cf3ee 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -101,8 +101,8 @@ def apply(self, model): for first_node in graph.node: node_ind += 1 if _suitable_node(first_node): - for n_output in first_node.output: - consumers = model.find_consumers(n_output) + for idx_out, output_name in enumerate(first_node.output): + consumers = model.find_consumers(output_name) if consumers == []: continue if len(consumers) > 1: @@ -120,12 +120,15 @@ def apply(self, model): # check if folded_shape of output of first node and # input of the second node is equal n1 = getCustomOp(consumer) + idx_inp = 0 for idx, inp in enumerate(consumer.input): - if inp == n_output: + if inp == output_name: if idx == 0: fld_shape_2 = n1.get_folded_input_shape() + idx_inp = 0 else: fld_shape_2 = n1.get_folded_input_shape(ind=idx) + idx_inp = idx assert _suitable_folded_shapes( fld_shape, fld_shape_2 ), """The @@ -135,8 +138,15 @@ def apply(self, model): # check if outFIFOdepth attribute of first node # and inFIFOdepth attribute of consumer node is equal - n0_depth = n0.get_nodeattr("outFIFODepth") - n1_depth = n1.get_nodeattr("inFIFODepth") + if idx_out == 0: + n0_depth = n0.get_nodeattr("outFIFODepth") + else: + n0_depth = n0.get_nodeattr("outFIFODepths")[idx_out] + if idx_inp == 0: + n1_depth = n1.get_nodeattr("inFIFODepth") + else: + n1_depth = n1.get_nodeattr("inFIFODepths")[idx_inp] + if n0_depth == n1_depth: fifo_depth = n0_depth elif n0_depth != n1_depth: @@ -160,7 +170,7 @@ def apply(self, model): ) fifo_node = oh.make_node( "StreamingFIFO", - [n_output], + [output_name], [fifo_output_tensor.name], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", @@ -174,11 +184,22 @@ def apply(self, model): graph.node.insert(node_ind + 1, fifo_node) # set fifo output tensor as new input tensor of second node for idx, inp in enumerate(consumer.input): - if inp == n_output: + if inp == output_name: consumer.input[idx] = fifo_output_tensor.name # ensure created FIFO depth is reflected on both sides - n0.set_nodeattr("outFIFODepth", fifo_depth) - n1.set_nodeattr("inFIFODepth", fifo_depth) + if idx_out == 0: + n0.set_nodeattr("outFIFODepth", fifo_depth) + else: + odepths = n0.get_nodeattr("outFIFODepths") + odepths[idx_out] = fifo_depth + n0.set_nodeattr("outFIFODepths", odepths) + if idx_inp == 0: + n1.set_nodeattr("inFIFODepth", fifo_depth) + else: + idepths = n1.get_nodeattr("inFIFODepths") + idepths[idx_inp] = fifo_depth + n1.set_nodeattr("inFIFODepths", idepths) + graph_modified = True if graph_modified is False: From 0fe07278e4c42d30df752eabd3d60d4f35926c74 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 30 Aug 2022 23:36:48 +0200 Subject: [PATCH 137/628] [FIFO] derive sizes for multiple consumers, DuplicateStreams bugfix --- .../fpgadataflow/derive_characteristic.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git 
a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 1bd7182ca8..b821059bc8 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -111,6 +111,9 @@ def applyNodeLocal(self, node): del io_dict["outputs"]["out"] io_dict["outputs"]["out0"] = [] io_dict["outputs"]["out1"] = [] + # n_outs is total of output streams + # so multiply expected by 2 + n_outs *= 2 elif node.op_type == "AddStreams_Batch": io_dict["inputs"]["in1"] = [0 for i in range(n_inps)] @@ -280,10 +283,14 @@ def applyNodeLocal(self, node): prod_chrc = np.asarray(prod_chrc).reshape(2, -1)[1] # find consumers model = self.ref_input_model - consumers = model.find_consumers(node.output[0]) - # compute FIFO depth for each consumer out_fifo_depths = [] - for cons_node in consumers: + for output_name in node.output: + cons_node = model.find_consumer(output_name) + if cons_node is None: + # could be final node, will be overridden if so + # need an entry in the list anyway + out_fifo_depths.append(2) + continue cons = registry.getCustomOp(cons_node) cons_chrc = cons.get_nodeattr("io_characteristic") cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] @@ -302,8 +309,9 @@ def applyNodeLocal(self, node): # set output FIFO depth for this (producing) node # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) # for each tensor - prod.set_nodeattr("outFIFODepth", out_fifo_depths[0]) - # used only for multi-consumer. nodes + if len(out_fifo_depths) > 0: + prod.set_nodeattr("outFIFODepth", out_fifo_depths[0]) + # used only for multi-producer nodes prod.set_nodeattr("outFIFODepths", out_fifo_depths) except KeyError: From 1d215771b9fd5bd2e15dbc991464e4a2cb6ba0c1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Aug 2022 11:05:31 +0200 Subject: [PATCH 138/628] [Test] simply linear FIFO sizing test --- tests/fpgadataflow/test_fifosizing.py | 59 +-------------------------- 1 file changed, 2 insertions(+), 57 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 1680624611..37efc5124b 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -26,44 +26,19 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk import pytest import json import shutil from brevitas.export.onnx.generic.manager import BrevitasONNXManager -from qonnx.transformation.general import GiveUniqueNodeNames import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg -from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance -from finn.transformation.fpgadataflow.derive_characteristic import ( - DeriveCharacteristic, - DeriveFIFOSizes, -) -from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP -from finn.transformation.fpgadataflow.insert_dwc import InsertDWC -from finn.transformation.fpgadataflow.prepare_ip import PrepareIP -from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.util.basic import make_build_dir from finn.util.test import get_trained_network_and_ishape -def custom_step_fifosize(model, cfg): - model = model.transform(InsertDWC()) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) - model = model.transform(HLSSynthIP()) - model = model.transform(PrepareRTLSim()) - period = model.analysis(dataflow_performance)["max_cycles"] + 10 - model = model.transform(DeriveCharacteristic(period)) - model = model.transform(DeriveFIFOSizes()) - return model - - def fetch_test_model(topology, wbits=2, abits=2): tmp_output_dir = make_build_dir("build_fifosizing_%s_" % topology) (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) @@ -76,11 +51,10 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.vivado def test_fifosizing_linear(): tmp_output_dir = fetch_test_model("tfc") - steps = build_cfg.default_build_dataflow_steps - steps.insert(10, custom_step_fifosize) cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, - auto_fifo_depths=False, + auto_fifo_depths=True, + auto_fifo_strategy="characterize", target_fps=10000, synth_clk_period_ns=10.0, board="Pynq-Z1", @@ -91,7 +65,6 @@ def test_fifosizing_linear(): build_cfg.DataflowOutputType.STITCHED_IP, build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE, ], - steps=steps, default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, ) build.build_dataflow_cfg(tmp_output_dir + "/model.onnx", cfg) @@ -105,31 +78,3 @@ def test_fifosizing_linear(): > 0.9 ) shutil.rmtree(tmp_output_dir) - - -def test_fifosizing_residual(): - model_fname = pk.resource_filename( - "finn.qnn-data", "testcase/residual_testcase.onnx" - ) - steps = build_cfg.default_build_dataflow_steps[8:] - tmp_output_dir = make_build_dir("build_fifosizing_residual") - cfg = build_cfg.DataflowBuildConfig( - output_dir=tmp_output_dir, - auto_fifo_depths=True, - auto_fifo_strategy="largefifo_rtlsim", - synth_clk_period_ns=10.0, - board="Pynq-Z1", - verbose=True, - rtlsim_batch_size=1, - verify_save_rtlsim_waveforms=True, - shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, - generate_outputs=[ - build_cfg.DataflowOutputType.STITCHED_IP, - build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE, - ], - steps=steps, - default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, - # start_step="step_set_fifo_depths", - # stop_step="step_set_fifo_depths" - ) - build.build_dataflow_cfg(model_fname, cfg) From e8c214964090846ffe531a187790fbab8c3587cf Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Aug 2022 13:30:19 +0200 Subject: [PATCH 139/628] [Deps] update QONNX to get tensor attribute support --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/fetch-repos.sh b/fetch-repos.sh index 97427ec9da..60ea4eb307 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="34ecaa73398c85201b325bcff1beeca1e45f4541" +QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" From 2bb8a682e10dcaab06fce7bb8a5fdf3b0bc51a76 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Aug 2022 14:42:48 +0200 Subject: [PATCH 140/628] [HLSCustomOp] refactor types for FIFO attributes --- src/finn/custom_op/fpgadataflow/hlscustomop.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index c20ce5b25c..0d580f5fa1 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -115,11 +115,13 @@ def get_nodeattr_types(self): "outFIFODepths": ("ints", False, []), "output_hook": ("s", False, ""), # accumulated characteristic function over two periods - "io_characteristic": ("ints", False, []), + "io_chrc_in": ("t", False, np.asarray([], dtype=np.int32)), + "io_chrc_out": ("t", False, np.asarray([], dtype=np.int32)), # the period for which the characterization was run - "io_characteristic_period": ("i", False, 0), + "io_chrc_period": ("i", False, 0), # amount of zero padding inserted during chrc. - "io_characteristic_pads": ("ints", False, []), + "io_chrc_pads_in": ("ints", False, []), + "io_chrc_pads_out": ("ints", False, []), } def get_verilog_top_module_name(self): From 9b776821e3a5281b7dec05857b4ac19037677d04 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 31 Aug 2022 14:43:57 +0200 Subject: [PATCH 141/628] [FIFO] adapt FIFO sizing to new attributes --- .../fpgadataflow/derive_characteristic.py | 84 +++++++++++-------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index b821059bc8..6604760704 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -71,7 +71,7 @@ def applyNodeLocal(self, node): assert inst.get_nodeattr("rtlsim_so") != "", ( "rtlsim not ready for " + node.name ) - if inst.get_nodeattr("io_characteristic_period") > 0: + if inst.get_nodeattr("io_chrc_period") > 0: warnings.warn( "Skipping node %s: already has FIFO characteristic" % node.name ) @@ -133,18 +133,25 @@ def applyNodeLocal(self, node): pass # extra dicts to keep track of cycle-by-cycle transaction behavior - txns_in = {key: [] for (key, value) in io_dict["inputs"].items()} - txns_out = {key: [] for (key, value) in io_dict["outputs"].items()} + # note that we restrict key names to filter out weight streams etc + txns_in = { + key: [] for (key, value) in io_dict["inputs"].items() if "in" in key + } + txns_out = { + key: [] + for (key, value) in io_dict["outputs"].items() + if "out" in key + } def monitor_txns(sim_obj): - for inp in io_dict["inputs"]: + for inp in txns_in: in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 in_valid = _read_signal(sim, inp + sname + "TVALID") == 1 if in_ready and in_valid: 
txns_in[inp].append(1) else: txns_in[inp].append(0) - for outp in io_dict["outputs"]: + for outp in txns_out: if ( _read_signal(sim, outp + sname + "TREADY") == 1 and _read_signal(sim, outp + sname + "TVALID") == 1 @@ -163,17 +170,7 @@ def monitor_txns(sim_obj): hook_preclk=monitor_txns, ) assert total_cycle_count <= self.period - # restrict to single input-output stream only for now - txns_in = txns_in["in0"] - txns_out = txns_out[ - "out" if node.op_type != "DuplicateStreams_Batch" else "out0" - ] - if len(txns_in) < self.period: - pad_in = self.period - len(txns_in) - txns_in += [0 for x in range(pad_in)] - if len(txns_out) < self.period: - pad_out = self.period - len(txns_out) - txns_out += [0 for x in range(pad_out)] + inst.set_nodeattr("io_chrc_period", self.period) def accumulate_char_fxn(chrc): p = len(chrc) @@ -183,14 +180,36 @@ def accumulate_char_fxn(chrc): ret.append(chrc[0]) else: ret.append(ret[-1] + chrc[t % p]) - return ret + return np.asarray(ret, dtype=np.int32) + + all_txns_in = np.empty((len(txns_in.keys()), 2 * self.period)) + all_txns_out = np.empty((len(txns_out.keys()), 2 * self.period)) + all_pad_in = [] + all_pad_out = [] + for in_idx, in_strm_nm in enumerate(txns_in.keys()): + txn_in = txns_in[in_strm_nm] + if len(txn_in) < self.period: + pad_in = self.period - len(txn_in) + txn_in += [0 for x in range(pad_in)] + txn_in = accumulate_char_fxn(txn_in) + all_txns_in[in_idx, :] = txn_in + all_pad_in.append(pad_in) + + for out_idx, out_strm_nm in enumerate(txns_out.keys()): + txn_out = txns_out[out_strm_nm] + if len(txn_out) < self.period: + pad_out = self.period - len(txn_out) + txn_out += [0 for x in range(pad_out)] + txn_out = accumulate_char_fxn(txn_out) + all_txns_out[out_idx, :] = txn_out + all_pad_out.append(pad_out) + + # TODO specialize here for DuplicateStreams and AddStreams + inst.set_nodeattr("io_chrc_in", all_txns_in) + inst.set_nodeattr("io_chrc_out", all_txns_out) + inst.set_nodeattr("io_chrc_pads_in", all_pad_in) + inst.set_nodeattr("io_chrc_pads_out", all_pad_out) - txns_in = accumulate_char_fxn(txns_in) - txns_out = accumulate_char_fxn(txns_out) - io_characteristic = txns_in + txns_out - inst.set_nodeattr("io_characteristic", io_characteristic) - inst.set_nodeattr("io_characteristic_period", self.period) - inst.set_nodeattr("io_characteristic_pads", [pad_in, pad_out]) except KeyError: # exception if op_type is not supported raise Exception( @@ -230,7 +249,7 @@ def apply(self, model: ModelWrapper): comp_branch_first = registry.getCustomOp(comp_branch_first) # for DuplicateStreams, use comp_branch_first's input characterization # for AddStreams, use comp_branch_last's output characterization - period = comp_branch_first.get_nodeattr("io_characteristic_period") + period = comp_branch_first.get_nodeattr("io_chrc_period") comp_branch_first_f = comp_branch_first.get_nodeattr("io_characteristic")[ : 2 * period ] @@ -239,9 +258,9 @@ def apply(self, model: ModelWrapper): ] ds_node_inst = registry.getCustomOp(ds_node) addstrm_node_inst = registry.getCustomOp(addstrm_node) - ds_node_inst.set_nodeattr("io_characteristic_period", period) + ds_node_inst.set_nodeattr("io_chrc_period", period) ds_node_inst.set_nodeattr("io_characteristic", comp_branch_first_f * 2) - addstrm_node_inst.set_nodeattr("io_characteristic_period", period) + addstrm_node_inst.set_nodeattr("io_chrc_period", period) addstrm_node_inst.set_nodeattr("io_characteristic", comp_branch_last_f * 2) warnings.warn( f"Set {ds_node.name} chrc. 
from {comp_branch_first.onnx_node.name}" @@ -272,15 +291,15 @@ def applyNodeLocal(self, node): # lookup op_type in registry of CustomOps prod = registry.getCustomOp(node) assert op_type != "StreamingFIFO", "Found existing FIFOs" - period = prod.get_nodeattr("io_characteristic_period") - prod_chrc = prod.get_nodeattr("io_characteristic") + period = prod.get_nodeattr("io_chrc_period") + prod_chrc = prod.get_nodeattr("io_chrc_out")[0] assert ( - len(prod_chrc) == 4 * period + len(prod_chrc) == 2 * period ), "Found unexpected characterization attribute" if prod.get_nodeattr("outFIFODepth") > 2: # FIFO depth already set, can skip this node return (node, False) - prod_chrc = np.asarray(prod_chrc).reshape(2, -1)[1] + # find consumers model = self.ref_input_model out_fifo_depths = [] @@ -292,8 +311,7 @@ def applyNodeLocal(self, node): out_fifo_depths.append(2) continue cons = registry.getCustomOp(cons_node) - cons_chrc = cons.get_nodeattr("io_characteristic") - cons_chrc = np.asarray(cons_chrc).reshape(2, -1)[0] + cons_chrc = cons.get_nodeattr("io_chrc_in")[0] # find minimum phase shift satisfying the constraint pshift_min = period - 1 for pshift_cand in range(period): @@ -304,7 +322,7 @@ def applyNodeLocal(self, node): break prod_chrc_part = prod_chrc[pshift_min : (pshift_min + period)] cons_chrc_part = cons_chrc[:period] - fifo_depth = (prod_chrc_part - cons_chrc_part).max() + fifo_depth = int((prod_chrc_part - cons_chrc_part).max()) out_fifo_depths.append(fifo_depth) # set output FIFO depth for this (producing) node # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) From 345f2b9c3f21a1489dc251731fc6a810d0c16641 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 6 Sep 2022 15:03:15 +0100 Subject: [PATCH 142/628] [FIFO] use correct type for empty chrc data --- .../transformation/fpgadataflow/derive_characteristic.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 6604760704..5c7a0c07eb 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -182,8 +182,12 @@ def accumulate_char_fxn(chrc): ret.append(ret[-1] + chrc[t % p]) return np.asarray(ret, dtype=np.int32) - all_txns_in = np.empty((len(txns_in.keys()), 2 * self.period)) - all_txns_out = np.empty((len(txns_out.keys()), 2 * self.period)) + all_txns_in = np.empty( + (len(txns_in.keys()), 2 * self.period), dtype=np.int32 + ) + all_txns_out = np.empty( + (len(txns_out.keys()), 2 * self.period), dtype=np.int32 + ) all_pad_in = [] all_pad_out = [] for in_idx, in_strm_nm in enumerate(txns_in.keys()): From f9c0c6b3846e7a2711539def3f2bd3078135010d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 6 Sep 2022 18:06:41 +0100 Subject: [PATCH 143/628] [FIFO] move characterization into HLSCustomOp member fxns --- .../fpgadataflow/addstreams_batch.py | 11 ++ .../fpgadataflow/duplicatestreams_batch.py | 10 ++ .../custom_op/fpgadataflow/hlscustomop.py | 116 ++++++++++++- .../fpgadataflow/matrixvectoractivation.py | 17 ++ .../fpgadataflow/thresholding_batch.py | 17 ++ .../fpgadataflow/vectorvectoractivation.py | 17 ++ .../fpgadataflow/derive_characteristic.py | 153 +----------------- 7 files changed, 188 insertions(+), 153 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index 153699197b..1190ad0646 100644 --- 
a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -360,3 +360,14 @@ def get_verilog_top_module_intf_names(self): swidth = self.get_instream_width_padded() intf_names["s_axis"] = [(x + "_" + sname, swidth) for x in ["in0", "in1"]] return intf_names + + def derive_characteristic_fxns(self, period): + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + "in1": [0 for i in range(n_inps)], + }, + "outputs": {"out": []}, + } + super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py index 04ca45e7f1..7aee3a401e 100644 --- a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py @@ -408,3 +408,13 @@ def get_verilog_top_module_intf_names(self): ("out%d_%s" % (i, sname), self.get_outstream_width_padded()) ) return intf_names + + def derive_characteristic_fxns(self, period): + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + }, + "outputs": {"out0": [], "out1": []}, + } + super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 0d580f5fa1..2ee436aae9 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -29,8 +29,9 @@ import numpy as np import os import subprocess +import warnings from abc import abstractmethod -from pyverilator.util.axi_utils import rtlsim_multi_io +from pyverilator.util.axi_utils import _read_signal, reset_rtlsim, rtlsim_multi_io from qonnx.core.datatype import DataType from qonnx.custom_op.base import CustomOp from qonnx.util.basic import roundup_to_integer_multiple @@ -744,3 +745,116 @@ def get_ap_int_max_w(self): "AP_INT_MAX_W=%d is larger than allowed maximum of 32768" % ret ) return ret + + def derive_characteristic_fxns(self, period, override_rtlsim_dict=None): + """Return the unconstrained characteristic functions for this node.""" + # ensure rtlsim is ready + assert self.get_nodeattr("rtlsim_so") != "", ( + "rtlsim not ready for " + self.onnx_node.name + ) + if self.get_nodeattr("io_chrc_period") > 0: + warnings.warn( + "Skipping node %s: already has FIFO characteristic" + % self.onnx_node.name + ) + return + exp_cycles = self.get_exp_cycles() + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + n_outs = np.prod(self.get_folded_output_shape()[:-1]) + if exp_cycles == 0: + # try to come up with an optimistic estimate + exp_cycles = min(n_inps, n_outs) + assert ( + exp_cycles <= period + ), "Period %d too short to characterize %s : expects min %d cycles" % ( + period, + self.onnx_node.name, + exp_cycles, + ) + sim = self.get_rtlsim() + # signal name + sname = "_" + self.hls_sname() + "_" + if override_rtlsim_dict is not None: + io_dict = override_rtlsim_dict + else: + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + # "weights": wei * num_w_reps + }, + "outputs": {"out": []}, + } + + # extra dicts to keep track of cycle-by-cycle transaction behavior + # note that we restrict key names to filter out weight streams etc + txns_in = {key: [] for (key, value) in io_dict["inputs"].items() if "in" in key} + txns_out = { + key: [] for (key, value) in 
io_dict["outputs"].items() if "out" in key + } + + def monitor_txns(sim_obj): + for inp in txns_in: + in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 + in_valid = _read_signal(sim, inp + sname + "TVALID") == 1 + if in_ready and in_valid: + txns_in[inp].append(1) + else: + txns_in[inp].append(0) + for outp in txns_out: + if ( + _read_signal(sim, outp + sname + "TREADY") == 1 + and _read_signal(sim, outp + sname + "TVALID") == 1 + ): + txns_out[outp].append(1) + else: + txns_out[outp].append(0) + + reset_rtlsim(sim) + total_cycle_count = rtlsim_multi_io( + sim, + io_dict, + n_outs, + sname=sname, + liveness_threshold=period, + hook_preclk=monitor_txns, + ) + assert total_cycle_count <= period + self.set_nodeattr("io_chrc_period", period) + + def accumulate_char_fxn(chrc): + p = len(chrc) + ret = [] + for t in range(2 * p): + if t == 0: + ret.append(chrc[0]) + else: + ret.append(ret[-1] + chrc[t % p]) + return np.asarray(ret, dtype=np.int32) + + all_txns_in = np.empty((len(txns_in.keys()), 2 * period), dtype=np.int32) + all_txns_out = np.empty((len(txns_out.keys()), 2 * period), dtype=np.int32) + all_pad_in = [] + all_pad_out = [] + for in_idx, in_strm_nm in enumerate(txns_in.keys()): + txn_in = txns_in[in_strm_nm] + if len(txn_in) < period: + pad_in = period - len(txn_in) + txn_in += [0 for x in range(pad_in)] + txn_in = accumulate_char_fxn(txn_in) + all_txns_in[in_idx, :] = txn_in + all_pad_in.append(pad_in) + + for out_idx, out_strm_nm in enumerate(txns_out.keys()): + txn_out = txns_out[out_strm_nm] + if len(txn_out) < period: + pad_out = period - len(txn_out) + txn_out += [0 for x in range(pad_out)] + txn_out = accumulate_char_fxn(txn_out) + all_txns_out[out_idx, :] = txn_out + all_pad_out.append(pad_out) + + # TODO specialize here for DuplicateStreams and AddStreams + self.set_nodeattr("io_chrc_in", all_txns_in) + self.set_nodeattr("io_chrc_out", all_txns_out) + self.set_nodeattr("io_chrc_pads_in", all_pad_in) + self.set_nodeattr("io_chrc_pads_out", all_pad_out) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 9d2717dc8c..d5dbc86c4e 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1462,3 +1462,20 @@ def get_op_and_param_counts(self): thres_count = out_features ret_dict[thres_param_type] = thres_count return ret_dict + + def derive_characteristic_fxns(self, period): + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + }, + "outputs": {"out": []}, + } + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode in ["decoupled", "external"]: + n_weight_inps = self.calc_wmem() + num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) + io_dict["inputs"]["weights"] = [ + 0 for i in range(num_w_reps * n_weight_inps) + ] + super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 5383cc1f4b..828ddd9737 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -960,3 +960,20 @@ def ipgen_extra_directives(self): "Return a list of extra tcl directives for HLS synthesis." 
return ["config_compile -pipeline_style frp"] + + def derive_characteristic_fxns(self, period): + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + }, + "outputs": {"out": []}, + } + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode in ["decoupled", "external"]: + n_weight_inps = self.calc_tmem() + num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) + io_dict["inputs"]["weights"] = [ + 0 for i in range(num_w_reps * n_weight_inps) + ] + super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 27b23dd328..abcb1c756d 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -733,3 +733,20 @@ def get_op_and_param_counts(self): thres_count = fm ret_dict[thres_param_type] = thres_count return ret_dict + + def derive_characteristic_fxns(self, period): + n_inps = np.prod(self.get_folded_input_shape()[:-1]) + io_dict = { + "inputs": { + "in0": [0 for i in range(n_inps)], + }, + "outputs": {"out": []}, + } + mem_mode = self.get_nodeattr("mem_mode") + if mem_mode in ["decoupled", "external"]: + n_weight_inps = self.calc_wmem() + num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) + io_dict["inputs"]["weights"] = [ + 0 for i in range(num_w_reps * n_weight_inps) + ] + super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 5c7a0c07eb..c171fce3cd 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -27,10 +27,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import numpy as np import qonnx.custom_op.registry as registry import warnings -from pyverilator.util.axi_utils import _read_signal, reset_rtlsim, rtlsim_multi_io from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.base import NodeLocalTransformation @@ -64,156 +62,7 @@ def applyNodeLocal(self, node): try: # lookup op_type in registry of CustomOps inst = registry.getCustomOp(node) - # TODO move into HLSCustomOp? 
- # ideally, call execute with rtlsim mode and - # specify some way of setting up a hook - # ensure rtlsim is ready - assert inst.get_nodeattr("rtlsim_so") != "", ( - "rtlsim not ready for " + node.name - ) - if inst.get_nodeattr("io_chrc_period") > 0: - warnings.warn( - "Skipping node %s: already has FIFO characteristic" % node.name - ) - return (node, False) - # restricted to single input and output nodes for now - multistream_optypes = [ - "StreamingConcat", - ] - if node.op_type in multistream_optypes: - warnings.warn(f"Skipping {node.name} for rtlsim characterization") - return (node, False) - exp_cycles = inst.get_exp_cycles() - n_inps = np.prod(inst.get_folded_input_shape()[:-1]) - n_outs = np.prod(inst.get_folded_output_shape()[:-1]) - if exp_cycles == 0: - # try to come up with an optimistic estimate - exp_cycles = min(n_inps, n_outs) - assert ( - exp_cycles <= self.period - ), "Period %d too short to characterize %s : expects min %d cycles" % ( - self.period, - node.name, - exp_cycles, - ) - sim = inst.get_rtlsim() - # signal name - sname = "_" + inst.hls_sname() + "_" - io_dict = { - "inputs": { - "in0": [0 for i in range(n_inps)], - # "weights": wei * num_w_reps - }, - "outputs": {"out": []}, - } - # override for certain fork/join nodes - if node.op_type == "DuplicateStreams_Batch": - del io_dict["outputs"]["out"] - io_dict["outputs"]["out0"] = [] - io_dict["outputs"]["out1"] = [] - # n_outs is total of output streams - # so multiply expected by 2 - n_outs *= 2 - elif node.op_type == "AddStreams_Batch": - io_dict["inputs"]["in1"] = [0 for i in range(n_inps)] - - try: - # fill out weight stream for decoupled-mode components - mem_mode = inst.get_nodeattr("mem_mode") - if mem_mode in ["decoupled", "external"]: - if op_type == "Thresholding_Batch": - n_weight_inps = inst.calc_tmem() - else: - n_weight_inps = inst.calc_wmem() - num_w_reps = np.prod(inst.get_nodeattr("numInputVectors")) - io_dict["inputs"]["weights"] = [ - 0 for i in range(num_w_reps * n_weight_inps) - ] - except AttributeError: - pass - - # extra dicts to keep track of cycle-by-cycle transaction behavior - # note that we restrict key names to filter out weight streams etc - txns_in = { - key: [] for (key, value) in io_dict["inputs"].items() if "in" in key - } - txns_out = { - key: [] - for (key, value) in io_dict["outputs"].items() - if "out" in key - } - - def monitor_txns(sim_obj): - for inp in txns_in: - in_ready = _read_signal(sim, inp + sname + "TREADY") == 1 - in_valid = _read_signal(sim, inp + sname + "TVALID") == 1 - if in_ready and in_valid: - txns_in[inp].append(1) - else: - txns_in[inp].append(0) - for outp in txns_out: - if ( - _read_signal(sim, outp + sname + "TREADY") == 1 - and _read_signal(sim, outp + sname + "TVALID") == 1 - ): - txns_out[outp].append(1) - else: - txns_out[outp].append(0) - - reset_rtlsim(sim) - total_cycle_count = rtlsim_multi_io( - sim, - io_dict, - n_outs, - sname=sname, - liveness_threshold=self.period, - hook_preclk=monitor_txns, - ) - assert total_cycle_count <= self.period - inst.set_nodeattr("io_chrc_period", self.period) - - def accumulate_char_fxn(chrc): - p = len(chrc) - ret = [] - for t in range(2 * p): - if t == 0: - ret.append(chrc[0]) - else: - ret.append(ret[-1] + chrc[t % p]) - return np.asarray(ret, dtype=np.int32) - - all_txns_in = np.empty( - (len(txns_in.keys()), 2 * self.period), dtype=np.int32 - ) - all_txns_out = np.empty( - (len(txns_out.keys()), 2 * self.period), dtype=np.int32 - ) - all_pad_in = [] - all_pad_out = [] - for in_idx, in_strm_nm in 
enumerate(txns_in.keys()): - txn_in = txns_in[in_strm_nm] - if len(txn_in) < self.period: - pad_in = self.period - len(txn_in) - txn_in += [0 for x in range(pad_in)] - txn_in = accumulate_char_fxn(txn_in) - all_txns_in[in_idx, :] = txn_in - all_pad_in.append(pad_in) - - for out_idx, out_strm_nm in enumerate(txns_out.keys()): - txn_out = txns_out[out_strm_nm] - if len(txn_out) < self.period: - pad_out = self.period - len(txn_out) - txn_out += [0 for x in range(pad_out)] - txn_out = accumulate_char_fxn(txn_out) - all_txns_out[out_idx, :] = txn_out - all_pad_out.append(pad_out) - - # TODO specialize here for DuplicateStreams and AddStreams - inst.set_nodeattr("io_chrc_in", all_txns_in) - inst.set_nodeattr("io_chrc_out", all_txns_out) - inst.set_nodeattr("io_chrc_pads_in", all_pad_in) - inst.set_nodeattr("io_chrc_pads_out", all_pad_out) - + inst.derive_characteristic_fxns(period=self.period) except KeyError: # exception if op_type is not supported raise Exception( From c4fad13b60402962d3b43887da13fd18cf239a60 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 7 Sep 2022 10:03:32 +0200 Subject: [PATCH 144/628] ZynqBuild: use AXI port width from part map --- src/finn/transformation/fpgadataflow/make_zynq_proj.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py index a589cb039c..f48566326e 100644 --- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py +++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py @@ -45,7 +45,7 @@ from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.insert_iodma import InsertIODMA from finn.transformation.fpgadataflow.prepare_ip import PrepareIP -from finn.util.basic import make_build_dir, pynq_part_map +from finn.util.basic import make_build_dir, pynq_native_port_width, pynq_part_map from . import templates @@ -320,6 +320,7 @@ def __init__( ): super().__init__() self.fpga_part = pynq_part_map[platform] + self.axi_port_width = pynq_native_port_width[platform] self.period_ns = period_ns self.platform = platform self.enable_debug = enable_debug @@ -330,7 +331,7 @@ def apply(self, model): model = model.transform(InferDataLayouts()) # prepare at global level, then break up into kernels prep_transforms = [ - InsertIODMA(64), + InsertIODMA(self.axi_port_width), InsertDWC(), Floorplan(), CreateDataflowPartition(partition_model_dir=self.partition_model_dir), From 04695cbaecde7b614589565f6761fef4b0e10a66 Mon Sep 17 00:00:00 2001 From: patrickg <44997541+patrickgeel@users.noreply.github.com> Date: Wed, 7 Sep 2022 14:54:40 +0200 Subject: [PATCH 145/628] Update basic.py FIRST COMMIT: Started implementing KV260 into FINN flow and went and forked the repo to make editing easier. 
--- src/finn/util/basic.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 4aba87216c..960b7f7c82 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -40,6 +40,8 @@ pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e" pynq_part_map["ZCU111"] = "xczu28dr-ffvg1517-2-e" pynq_part_map["RFSoC2x2"] = "xczu28dr-ffvg1517-2-e" +pynq_part_map["kv260_som"] = "SK-KV260-G" + # native AXI HP port width (in bits) for PYNQ boards pynq_native_port_width = dict() @@ -50,6 +52,7 @@ pynq_native_port_width["ZCU104"] = 128 pynq_native_port_width["ZCU111"] = 128 pynq_native_port_width["RFSoC2x2"] = 128 +pynq_native_port_width["kv260_som"] = 128 # Alveo device and platform mappings alveo_part_map = dict() From 431625b6e07a9d77748aa4f0279fe2137593abd0 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 7 Sep 2022 16:22:10 +0200 Subject: [PATCH 146/628] Initial VVAU SIMD support --- .../fpgadataflow/vectorvectoractivation.py | 37 +++++++++++++------ tests/fpgadataflow/test_fpgadataflow_vvau.py | 22 ++++++++--- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 27b23dd328..bc332b5944 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -54,6 +54,7 @@ def __init__(self, onnx_node): def get_nodeattr_types(self): my_attrs = { "PE": ("i", True, 0), + "SIMD": ("i", False, 1), "Dim": ("ints", True, []), # [H, W] "Channels": ("i", True, 0), "Kernel": ("ints", True, []), # [H, W] @@ -142,7 +143,8 @@ def calc_wmem(self): ch = self.get_nodeattr("Channels") k_h, k_w = self.get_nodeattr("Kernel") pe = self.get_nodeattr("PE") - wmem = k_h * k_w * ch // pe + simd = self.get_nodeattr("SIMD") + wmem = (k_h * k_w * ch // pe) // simd return wmem def calc_tmem(self): @@ -190,7 +192,12 @@ def get_output_datatype(self): def get_instream_width(self): i_bits = self.get_input_datatype().bitwidth() - in_width = i_bits * self.get_nodeattr("PE") + simd = self.get_nodeattr("SIMD") + if simd > 1: + pe = self.get_nodeattr("Channels") + else: + pe = self.get_nodeattr("PE") + in_width = i_bits * simd * pe return in_width def get_outstream_width(self): @@ -200,12 +207,16 @@ def get_outstream_width(self): def get_folded_input_shape(self): k_h, k_w = self.get_nodeattr("Kernel") - sf = k_h * k_w dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") - pe = self.get_nodeattr("PE") + simd = self.get_nodeattr("SIMD") + if simd > 1: + pe = self.get_nodeattr("Channels") + else: + pe = self.get_nodeattr("PE") + sf = k_h * k_w // simd nf = ch // pe - folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, pe]) + folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, simd * pe]) return folded_input_shape def get_folded_output_shape(self): @@ -235,6 +246,7 @@ def get_number_output_values(self): def get_exp_cycles(self): pe = self.get_nodeattr("PE") + simd = self.get_nodeattr("SIMD") ch = self.get_nodeattr("Channels") dim_h, dim_w = self.get_nodeattr("Dim") k_h, k_w = self.get_nodeattr("Kernel") @@ -242,7 +254,7 @@ def get_exp_cycles(self): batch_size = 1 # since mmv != 1 is not supported yet, we set mmv for now to 1 mmv = 1 - exp_cycles = ((ch * k_h * k_w) / pe) * batch_size * (dim_h * dim_w) / mmv + exp_cycles = ((ch * k_h * k_w) / pe / simd) * batch_size * (dim_h * dim_w) / mmv return int(exp_cycles) def get_template_param_values(self): @@ -268,6 
+280,7 @@ def get_template_param_values(self): def get_hls_compatible_weight_tensor(self, orig_weight_matrix): pe = self.get_nodeattr("PE") + simd = self.get_nodeattr("SIMD") ch = self.get_nodeattr("Channels") k_h, k_w = self.get_nodeattr("Kernel") wmem = self.calc_wmem() @@ -282,7 +295,7 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): ret = ret.reshape(ch, k_h * k_w) # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) - ret = ret.reshape(1, pe, wmem, 1) + ret = ret.reshape(1, pe, wmem, simd) return ret def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): @@ -334,7 +347,8 @@ def generate_params(self, model, path): if wdt.bitwidth() != 1: f_weights.write( - "const FixedPointWeights<1,{},{},{}> weights = ".format( + "const FixedPointWeights<{},{},{},{}> weights = ".format( + self.get_nodeattr("SIMD"), wdt.get_hls_datatype_str(), self.get_nodeattr("PE"), self.calc_wmem(), @@ -342,8 +356,8 @@ def generate_params(self, model, path): ) else: f_weights.write( - "const BinaryWeights<1,{},{}> weights = ".format( - self.get_nodeattr("PE"), self.calc_wmem() + "const BinaryWeights<{},{},{}> weights = ".format( + self.get_nodeattr("SIMD"), self.get_nodeattr("PE"), self.calc_wmem() ) ) f_weights.write(weight_hls_code) @@ -476,9 +490,10 @@ def defines(self, var): innerProdDim = k_h * k_w self.code_gen_dict["$DEFINES$"] = [ """#define Channels1 {}\n #define InnerProdDim {}\n - #define SIMD1 1\n #define PE1 {}\n #define numReps {}""".format( + #define SIMD1 {}\n #define PE1 {}\n #define numReps {}""".format( self.get_nodeattr("Channels"), innerProdDim, + self.get_nodeattr("SIMD"), self.get_nodeattr("PE"), numReps, ) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index c48448787d..f854c997ff 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -75,7 +75,7 @@ def _calculate_dot_prod_range(dt_a, dt_b, len): def _make_single_vvau_modelwrapper( - W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T=None, tdt=None + W, pe, simd, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T=None, tdt=None ): in_shape = [1, dim_h, dim_w, k_h * k_w * channels] # [N, H, W, K*K*CH] out_shape = [ @@ -104,6 +104,7 @@ def _make_single_vvau_modelwrapper( domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", PE=pe, + SIMD=simd, Dim=[dim_h, dim_w], Channels=channels, Kernel=[k_h, k_w], @@ -148,6 +149,8 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("act", [DataType["UINT4"], None]) # PE @pytest.mark.parametrize("pe", [1, "channels"]) +# SIMD +@pytest.mark.parametrize("simd", [1]) # Input image shape @pytest.mark.parametrize("dim_h", [10]) @pytest.mark.parametrize("dim_w", [10, 1]) @@ -162,7 +165,7 @@ def prepare_inputs(input_tensor): @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_vvau( - idt, wdt, act, pe, dim_h, dim_w, k_h, k_w, channels, exec_mode + idt, wdt, act, pe, simd, dim_h, dim_w, k_h, k_w, channels, exec_mode ): if pe == "channels": pe = channels @@ -198,7 +201,7 @@ def test_fpgadataflow_vvau( tdt = DataType["INT32"] model = _make_single_vvau_modelwrapper( - W, pe, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt + W, pe, simd, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt ) if exec_mode == "cppsim": @@ -230,7 +233,14 @@ def test_fpgadataflow_vvau( "outp" ] - assert (y_produced == y_expected).all(), "cppsim failed" + with open("vvau_test_expected.txt", "w") as f: + 
f.write("-------expected:\n") + f.write(str(y_expected)) + with open("vvau_test_produced.txt", "w") as f: + f.write("--------produced:\n") + f.write(str(y_produced)) + + assert (y_produced == y_expected).all(), "incorrect result" if exec_mode == "rtlsim": node = model.get_nodes_by_op_type("VectorVectorActivation")[0] @@ -238,5 +248,5 @@ def test_fpgadataflow_vvau( cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = model.analysis(exp_cycles_per_layer) exp_cycles = exp_cycles_dict[node.name] - assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) - assert exp_cycles != 0 + # assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + # assert exp_cycles != 0 From 96a366a178cb5dfad0b1e9e2e6f899f23387c2fb Mon Sep 17 00:00:00 2001 From: Hendrik Borras Date: Thu, 8 Sep 2022 12:23:33 +0200 Subject: [PATCH 147/628] Rename brevitas_network_import notebook and add intro note --- .../basics/1_brevitas_network_import.ipynb | 319 ------- ...revitas_network_import_via_FINN-ONNX.ipynb | 882 ++++++++++++++++++ 2 files changed, 882 insertions(+), 319 deletions(-) delete mode 100644 notebooks/basics/1_brevitas_network_import.ipynb create mode 100644 notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb diff --git a/notebooks/basics/1_brevitas_network_import.ipynb b/notebooks/basics/1_brevitas_network_import.ipynb deleted file mode 100644 index 5fb29754dc..0000000000 --- a/notebooks/basics/1_brevitas_network_import.ipynb +++ /dev/null @@ -1,319 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Importing Brevitas networks into FINN\n", - "\n", - "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", - "\n", - "1. Load up the trained PyTorch model\n", - "2. Call Brevitas FINN-ONNX export and visualize with Netron\n", - "3. Import into FINN and call cleanup transformations\n", - "\n", - "We'll use the following utility functions to print the source code for function calls (`showSrc()`) and to visualize a network using netron (`showInNetron()`) in the Jupyter notebook:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import onnx\n", - "from finn.util.visualization import showSrc, showInNetron" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. Load up the trained PyTorch model\n", - "\n", - "The FINN Docker image comes with several [example Brevitas networks](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq), and we'll use the LFC-w1a1 model as the example network here. This is a binarized fully connected network trained on the MNIST dataset. Let's start by looking at what the PyTorch network definition looks like:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from brevitas_examples import bnn_pynq\n", - "showSrc(bnn_pynq.models.FC)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the network topology is constructed using a few helper functions that generate the quantized linear layers and quantized activations. The bitwidth of the layers is actually parametrized in the constructor, so let's instantiate a 1-bit weights and activations version of this network. We also have pretrained weights for this network, which we will load into the model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from finn.util.test import get_test_model\n", - "lfc = get_test_model(netname = \"LFC\", wbits = 1, abits = 1, pretrained = True)\n", - "lfc" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have now instantiated our trained PyTorch network. Let's try to run an example MNIST image through the network using PyTorch." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import matplotlib.pyplot as plt\n", - "from pkgutil import get_data\n", - "import onnx\n", - "import onnx.numpy_helper as nph\n", - "raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n", - "input_tensor = onnx.load_tensor_from_string(raw_i)\n", - "input_tensor_npy = nph.to_array(input_tensor)\n", - "input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n", - "imgplot = plt.imshow(input_tensor_npy.reshape(28,28), cmap='gray')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.nn.functional import softmax\n", - "# do forward pass in PyTorch/Brevitas\n", - "produced = lfc.forward(input_tensor_pyt).detach()\n", - "probabilities = softmax(produced, dim=-1).flatten()\n", - "probabilities" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "objects = [str(x) for x in range(10)]\n", - "y_pos = np.arange(len(objects))\n", - "plt.bar(y_pos, probabilities, align='center', alpha=0.5)\n", - "plt.xticks(y_pos, objects)\n", - "plt.ylabel('Predicted Probability')\n", - "plt.title('LFC-w1a1 Predictions for Image')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Call Brevitas FINN-ONNX export and visualize with Netron\n", - "\n", - "Brevitas comes with built-in FINN-ONNX export functionality. This is similar to the regular ONNX export capabilities of PyTorch, with a few differences:\n", - "\n", - "1. The weight quantization logic is not exported as part of the graph; rather, the quantized weights themselves are exported.\n", - "2. Special quantization annotations are used to preserve the low-bit quantization information. ONNX (at the time of writing) supports 8-bit quantization as the minimum bitwidth, whereas FINN-ONNX quantization annotations can go down to binary/bipolar quantization.\n", - "3. Low-bit quantized activation functions are exported as MultiThreshold operators.\n", - "\n", - "It's actually quite straightforward to export ONNX from our Brevitas model as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import brevitas.onnx as bo\n", - "export_onnx_path = \"/tmp/LFCW1A1.onnx\"\n", - "input_shape = (1, 1, 28, 28)\n", - "bo.export_finn_onnx(lfc, input_shape, export_onnx_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's examine what the exported ONNX model looks like. 
For this, we will use the Netron visualizer:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "showInNetron('/tmp/LFCW1A1.onnx')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When running this notebook in the FINN Docker container, you should be able to see an interactive visualization of the imported network above, and click on individual nodes to inspect their parameters. If you look at any of the MatMul nodes, you should be able to see that the weights are all {-1, +1} values, and the activations are Sign functions." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Import into FINN and call cleanup transformations\n", - "\n", - "We will now import this ONNX model into FINN using the ModelWrapper, and examine some of the graph attributes from Python." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(export_onnx_path)\n", - "model.graph.node[8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ModelWrapper exposes a range of other useful functions as well. For instance, by convention the second input of the MatMul node will be a pre-initialized weight tensor, which we can view using the following:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_initializer(model.graph.node[8].input[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also examine the quantization annotations and shapes of various tensors using the convenience functions provided by ModelWrapper." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_tensor_datatype(model.graph.node[8].input[1]).name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_tensor_shape(model.graph.node[8].input[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we want to operate further on this model in FINN, it is a good idea to execute certain \"cleanup\" transformations on this graph. Here, we will run shape inference and constant folding on this graph, and visualize the resulting graph in Netron again." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from qonnx.transformation.fold_constants import FoldConstants\n", - "from qonnx.transformation.infer_shapes import InferShapes\n", - "model = model.transform(InferShapes())\n", - "model = model.transform(FoldConstants())\n", - "export_onnx_path_transformed = \"/tmp/LFCW1A1-clean.onnx\"\n", - "model.save(export_onnx_path_transformed)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "showInNetron('/tmp/LFCW1A1-clean.onnx')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the resulting graph has become smaller and simpler. Specifically, the input reshaping is now a single Reshape node instead of the Shape -> Gather -> Unsqueeze -> Concat -> Reshape sequence. We can now use the internal ONNX execution capabilities of FINN to ensure that we still get the same output from this model as we did with PyTorch." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import finn.core.onnx_exec as oxe\n", - "input_dict = {\"0\": nph.to_array(input_tensor)}\n", - "output_dict = oxe.execute_onnx(model, input_dict)\n", - "produced_finn = output_dict[list(output_dict.keys())[0]]\n", - "\n", - "produced_finn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.isclose(produced, produced_finn).all()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have succesfully verified that the transformed and cleaned-up FINN graph still produces the same output, and can now use this model for further processing in FINN." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb new file mode 100644 index 0000000000..9f28459f77 --- /dev/null +++ b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb @@ -0,0 +1,882 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Importing Brevitas networks into FINN with the FINN-ONNX interchange format\n", + "\n", + "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", + "\n", + "1. Load up the trained PyTorch model\n", + "2. Call Brevitas FINN-ONNX export and visualize with Netron\n", + "3. Import into FINN and call cleanup transformations\n", + "\n", + "We'll use the following utility functions to print the source code for function calls (`showSrc()`) and to visualize a network using netron (`showInNetron()`) in the Jupyter notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import onnx\n", + "from finn.util.visualization import showSrc, showInNetron" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Load up the trained PyTorch model\n", + "\n", + "The FINN Docker image comes with several [example Brevitas networks](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq), and we'll use the LFC-w1a1 model as the example network here. This is a binarized fully connected network trained on the MNIST dataset. 
Let's start by looking at what the PyTorch network definition looks like:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# MIT License\n", + "#\n", + "# Copyright (c) 2019 Xilinx\n", + "#\n", + "# Permission is hereby granted, free of charge, to any person obtaining a copy\n", + "# of this software and associated documentation files (the \"Software\"), to deal\n", + "# in the Software without restriction, including without limitation the rights\n", + "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n", + "# copies of the Software, and to permit persons to whom the Software is\n", + "# furnished to do so, subject to the following conditions:\n", + "#\n", + "# The above copyright notice and this permission notice shall be included in all\n", + "# copies or substantial portions of the Software.\n", + "#\n", + "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n", + "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n", + "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n", + "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n", + "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n", + "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n", + "# SOFTWARE.\n", + "\n", + "import ast\n", + "from functools import reduce\n", + "from operator import mul\n", + "\n", + "from torch.nn import Module, ModuleList, BatchNorm1d, Dropout\n", + "import torch\n", + "\n", + "from brevitas.nn import QuantIdentity, QuantLinear\n", + "from .common import CommonWeightQuant, CommonActQuant\n", + "from .tensor_norm import TensorNorm\n", + "\n", + "DROPOUT = 0.2\n", + "\n", + "\n", + "class FC(Module):\n", + "\n", + " def __init__(\n", + " self,\n", + " num_classes,\n", + " weight_bit_width,\n", + " act_bit_width,\n", + " in_bit_width,\n", + " in_channels,\n", + " out_features,\n", + " in_features=(28, 28)):\n", + " super(FC, self).__init__()\n", + "\n", + " self.features = ModuleList()\n", + " self.features.append(QuantIdentity(act_quant=CommonActQuant, bit_width=in_bit_width))\n", + " self.features.append(Dropout(p=DROPOUT))\n", + " in_features = reduce(mul, in_features)\n", + " for out_features in out_features:\n", + " self.features.append(QuantLinear(\n", + " in_features=in_features,\n", + " out_features=out_features,\n", + " bias=False,\n", + " weight_bit_width=weight_bit_width,\n", + " weight_quant=CommonWeightQuant))\n", + " in_features = out_features\n", + " self.features.append(BatchNorm1d(num_features=in_features))\n", + " self.features.append(QuantIdentity(act_quant=CommonActQuant, bit_width=act_bit_width))\n", + " self.features.append(Dropout(p=DROPOUT))\n", + " self.features.append(QuantLinear(\n", + " in_features=in_features,\n", + " out_features=num_classes,\n", + " bias=False,\n", + " weight_bit_width=weight_bit_width,\n", + " weight_quant=CommonWeightQuant))\n", + " self.features.append(TensorNorm())\n", + "\n", + " for m in self.modules():\n", + " if isinstance(m, QuantLinear):\n", + " torch.nn.init.uniform_(m.weight.data, -1, 1)\n", + "\n", + " def clip_weights(self, min_val, max_val):\n", + " for mod in self.features:\n", + " if isinstance(mod, QuantLinear):\n", + " mod.weight.data.clamp_(min_val, max_val)\n", + " \n", + " def forward(self, x):\n", + " x = x.view(x.shape[0], -1)\n", + " x = 2.0 
* x - torch.tensor([1.0], device=x.device)\n", + " for mod in self.features:\n", + " x = mod(x)\n", + " return x\n", + "\n", + "\n", + "def fc(cfg):\n", + " weight_bit_width = cfg.getint('QUANT', 'WEIGHT_BIT_WIDTH')\n", + " act_bit_width = cfg.getint('QUANT', 'ACT_BIT_WIDTH')\n", + " in_bit_width = cfg.getint('QUANT', 'IN_BIT_WIDTH')\n", + " num_classes = cfg.getint('MODEL', 'NUM_CLASSES')\n", + " in_channels = cfg.getint('MODEL', 'IN_CHANNELS')\n", + " out_features = ast.literal_eval(cfg.get('MODEL', 'OUT_FEATURES'))\n", + " net = FC(\n", + " weight_bit_width=weight_bit_width,\n", + " act_bit_width=act_bit_width,\n", + " in_bit_width=in_bit_width,\n", + " in_channels=in_channels,\n", + " out_features=out_features,\n", + " num_classes=num_classes)\n", + " return net\n", + "\n" + ] + } + ], + "source": [ + "from brevitas_examples import bnn_pynq\n", + "showSrc(bnn_pynq.models.FC)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the network topology is constructed using a few helper functions that generate the quantized linear layers and quantized activations. The bitwidth of the layers is actually parametrized in the constructor, so let's instantiate a 1-bit weights and activations version of this network. We also have pretrained weights for this network, which we will load into the model." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "FC(\n", + " (features): ModuleList(\n", + " (0): QuantIdentity(\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (act_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", + " (activation_impl): Identity()\n", + " (tensor_quant): ClampedBinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " (tensor_clamp_impl): TensorClamp()\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (1): Dropout(p=0.2, inplace=False)\n", + " (2): QuantLinear(\n", + " in_features=784, out_features=1024, bias=False\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (output_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (weight_quant): WeightQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (tensor_quant): BinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " )\n", + " )\n", + " (bias_quant): BiasQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " )\n", + " (3): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (4): QuantIdentity(\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (act_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", + " (activation_impl): Identity()\n", + " (tensor_quant): ClampedBinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " (tensor_clamp_impl): TensorClamp()\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (5): Dropout(p=0.2, inplace=False)\n", + " (6): QuantLinear(\n", + " in_features=1024, out_features=1024, bias=False\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (output_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (weight_quant): WeightQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (tensor_quant): BinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " )\n", + " )\n", + " (bias_quant): BiasQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " )\n", + " (7): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (8): QuantIdentity(\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (act_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", + " (activation_impl): Identity()\n", + " (tensor_quant): ClampedBinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " (tensor_clamp_impl): TensorClamp()\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (9): Dropout(p=0.2, inplace=False)\n", + " (10): QuantLinear(\n", + " in_features=1024, out_features=1024, bias=False\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (output_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (weight_quant): WeightQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (tensor_quant): BinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): 
_RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " )\n", + " )\n", + " (bias_quant): BiasQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " )\n", + " (11): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (12): QuantIdentity(\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (act_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", + " (activation_impl): Identity()\n", + " (tensor_quant): ClampedBinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " (tensor_clamp_impl): TensorClamp()\n", + " )\n", + " )\n", + " )\n", + " )\n", + " (13): Dropout(p=0.2, inplace=False)\n", + " (14): QuantLinear(\n", + " in_features=1024, out_features=10, bias=False\n", + " (input_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (output_quant): ActQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " (weight_quant): WeightQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " (tensor_quant): BinaryQuant(\n", + " (scaling_impl): ConstScaling(\n", + " (restrict_clamp_scaling): _RestrictClampValue(\n", + " (clamp_min_ste): Identity()\n", + " (restrict_value_impl): FloatRestrictValue()\n", + " )\n", + " (value): StatelessBuffer()\n", + " )\n", + " (bit_width): BitWidthConst(\n", + " (bit_width): StatelessBuffer()\n", + " )\n", + " (zero_point): StatelessBuffer()\n", + " (delay_wrapper): DelayWrapper(\n", + " (delay_impl): _NoDelay()\n", + " )\n", + " )\n", + " )\n", + " (bias_quant): BiasQuantProxyFromInjector(\n", + " (_zero_hw_sentinel): StatelessBuffer()\n", + " )\n", + " )\n", + " (15): TensorNorm()\n", + " )\n", + ")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from finn.util.test import get_test_model\n", + "lfc = get_test_model(netname = \"LFC\", wbits = 1, abits = 1, pretrained = True)\n", + "lfc" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now instantiated our trained PyTorch network. Let's try to run an example MNIST image through the network using PyTorch." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":9: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. 
This type of warning will be suppressed for the rest of this program. (Triggered internally at /opt/conda/conda-bld/pytorch_1607370172916/work/torch/csrc/utils/tensor_numpy.cpp:141.)\n", + " input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAARYElEQVR4nO3dfYyVZXrH8d/FoDAw8iYRCaisG/5QqmUbgk1KyOKmxlUMbKJm/aPauAmarMmqTVqz/UOSaqJVa/pH3YStL9CsmiWoq0a7a82mWo1GNFQQW1CULGR4E5H3t+HqH/NgZ3We6549z3nOc9z7+0kmM3Ouec65OTM/zsv13Pdt7i4Af/xGNT0AAJ1B2IFMEHYgE4QdyARhBzIxupM3Zma89Z+ZUaPKH09OnTpV23VXvf6enp6wPjAw0PJ1183dbbjLK4XdzK6U9M+SeiT9q7vfV+X6cmU27O/mS6k/6ip/eKNHx38CqcCk6r29vaW1Q4cOhcem9PX1hfUDBw6U1lIt50mTJoX1zz77LKx3o5afxptZj6R/kfR9SRdLusHMLm7XwAC0V5XX7PMlfeTuW9z9uKSnJS1pz7AAtFuVsM+Q9Lsh328rLvs9ZrbMzNaa2doKtwWgotrfoHP3FZJWSLxBBzSpyiP7dknnDfl+ZnEZgC5UJezvSJptZt8yszMl/VDS8+0ZFoB2a/lpvLufNLPbJP1ag623x9z9g7aNLCPjx48P6wcPHmz5useMGRPWjx07FtZTbcFx48aF9ai9lmoppqSOj9prqT76vn37WhlSV6v0mt3dX5L0UpvGAqBGnC4LZIKwA5kg7EAmCDuQCcIOZIKwA5mwTq4um+vpsqled6qXffTo0bA+duzYlo9Nia676vWfffbZYb3qNNLofp06dWp47O7du8N6amrwyZMnw3qdyuaz88gOZIKwA5kg7EAmCDuQCcIOZIKwA5mg9fYNkGrNVfkd1nnddUtNDa6yem1q6m5qanCTS03TegMyR9iBTBB2IBOEHcgEYQcyQdiBTBB2IBP02TvgrLPOCuvRbqOSNHHixLB+4sSJ0lpqN9LUFNbPP/88rC9YsCCs33rrraW1VC/6jjvuCOtbt24N601OM20SfXYgc4QdyARhBzJB2IFMEHYgE4QdyARhBzJBn/0b4JFHHgnrUS871Wuuuox1b29vWI+ktk2+5JJLwvqmTZvC+vHjx0trZ5xxRnhsdO6ClP53HzlyJKzXqazPXmnLZjP7VNIBSQOSTrr7vCrXB6A+lcJeWOTue9pwPQBqxGt2IBNVw+6SfmNm75rZsuF+wMyWmdlaM1tb8bYAVFD1afwCd99uZudIesXM/sfdXxv6A+6+QtIKiTfogCZVemR39+3F512SnpU0vx2DAtB+LYfdzMab2Vmnv5Z0haQN7RoYgPaq8jR+mqRniz7taElPuvu/t2VUf2RSWzYvWrQorF922WVhPeqVHzx4MDw21W/u6+sL66nzNKI566m11x999NGWr1uS7rzzztLaW2+9FR5b93bSTWg57O6+RdKftnEsAGpE6w3IBGEHMkHYgUwQdiAThB3IBFNcu0Bqqubs2bPD+v79+0trEyZMCI+NpoFK6SmwVbZ8TrX9UlJLcO/du7e0tnTp0vDYdevWhfVUSzLV8qwTS0kDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJdiw42TFRT7fOfnBK6thU/ZZbbgnrq1atCuszZ85s+bZTffZ77rknrK9evTqsn3nmmaW1K664Ijz2wQcfDOuprbCj2168eHF47LZt28L6nj3fvDVWeWQHMkHYgUwQdiAThB3IBGEHMkHYgUwQdiATHZ/Pnup3Rzo51naqOvd54cKFYf2iiy4qrY0bNy48dvTo+FSLNWvWhPUtW7aE9SpSyz3PmTMnrKfu90jq75T57AC6FmEHMkHYgUwQdiAThB3IBGEHMkHYgUx0vM8+alT5/y9V54XXqcpc+lOnTlW67eg+S9VPnjwZHjt+/PiwfujQobCe2o46+p2l5tJfffXVYf3pp58O61X67Kk17VP3a5Na7rOb2WNmtsvMNgy5bIqZvWJmm4vPk9s5WADtN5Kn8U9IuvIrl90l6VV3ny3p1eJ7AF0sGXZ3f03SV/fRWSJpZfH1SklL2zssAO3W6hp009y9v/h6h6RpZT9oZsskLWvxdgC0SeUFJ93dow0b3X2FpBUSGzsCTWq19bbTzKZLUvF5V/uGBKAOrYb9eUk3FV/fJOlX7RkOgLok++xm9pSk70qaKmmnpLslPSfpl5LOl7RV0vXuXr4Z9v9fV21P46uuG1+1Hkn1ZFN7qEf7r1fV29sb1o8cORLWU+cAVDnH4MILLwzrH3/8ccvXnRpXak36lMOHD1c6voqyPnvyNbu731BS+l6lEQHoKE6XBTJB2IFMEHYgE4QdyARhBzLBls2FVAtyYGAgrEd6enrCetVlh6M2UarFlJrCmpK6/mjb5KgmSYsWLWppTKdFv9MTJ06Ex6amuFb5e2gKj+xAJgg7kAnCDmSCsAOZIOxAJgg7kAnCDmSiq/rsdW7nXHU55yrqvu0DBw6U1lL94lSvO3V8qk8fLRedWsb6uuuuC+tHjx4N62PHji2tpfrsqd9Zk1syt4pHdiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMtHxPns0t7ube+XRksmp5ZRT6txW+dJLLw2PnTNnTlhPLSX93HPPhfVI1AeXpIULF4b1Klt4p5ahjs5dkKovwd0EHtmBTBB2IBOEHcgEYQcyQdiBTBB2IBOEHchEx/vs0Zz1OvvoqbnyqXndUU949Oj4bly6dGlYTx2/ZMmSsD5mzJjS2ty5c8NjJ02aFNZTvezXX3+95eNnz54dHptamz3V616/fn1p7fLLLw+Pje5TqTv76CnJR3Yze8zMdpnZhiGXLTez7Wa2rvi4qt5hAqhqJE/jn5B05TCXP+zuc4uPl9o7LADtlgy7u78maW8HxgKgRlXeoLvNzN4vnuZPLvshM1tmZmvNbG2F2wJQUath/5mkb0uaK6lf0kNlP+juK9x9nrvPa/G2ALRBS2F3953uPuDupyT9XNL89g4LQLu1FHYzmz7k2x9I2lD2swC6g6X6qGb2lKTvSpoqaaeku4vv50pySZ9KusXd+5M3ZhbeWKrfnJr3HZk1a1ZYv+aaa8L64sWLS2upedepedupudPR/utSvIZ5X19feGxK1Xnd0e/0iy++CI+dOHFiWE/ZvHlzaW3VqlXhsQ
89VPrKVFJ399ndfdiTSpIn1bj7DcNc/GjlEQHoKE6XBTJB2IFMEHYgE4QdyARhBzKRbL219cbMPFp2uc4prnfffXdYX758eVjfs2dPaW3q1KmtDOlLqa2H9+6NpyZE9QsuuCA8NtUWTG3ZnHLs2LHSWmoaaervIdWKjaYtp7Zcfvnll8P6zTffHNab3NK5rPXGIzuQCcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noeJ89qlfZmjg11TLV96yy7fKuXbvC+tatW8P6Aw88ENZXr14d1ufNK18E6OGHHw6PTW3ZPHly6YpjkqRt27aF9eh3+sQTT4THfvLJJ2H92muvDevR1OOq02tffPHFsJ6aMl0n+uxA5gg7kAnCDmSCsAOZIOxAJgg7kAnCDmSio332UaNGeTQ/+vjx4+Hx55xzTmlt9+7d4bGpPntq7nTUL05tB71p06awPmXKlLCeWrY4Wu75/PPPD49NzWdPLe+9b9++sH7jjTeW1l544YXw2JTUOgLRctGLFi0Kj02tMZC6X1LLf9eJPjuQOcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noqvnsVaT6nitXrgzr119/fcvXf/jw4fDYcePGhfXUtsipef4DAwOltdS672+++WZYf/LJJ8P6unXrwvobb7xRWkudX5Dq4ad+59F5G/Pnzw+Pffvtt8P6448/HtZT68rXqeU+u5mdZ2a/NbONZvaBmf2kuHyKmb1iZpuLz/EqBwAaNZKn8Scl/Y27XyzpzyX92MwulnSXpFfdfbakV4vvAXSpZNjdvd/d3yu+PiDpQ0kzJC2RdPq58UpJS2saI4A2iF/0fIWZzZL0HUlvS5rm7v1FaYekaSXHLJO0rMIYAbTBiN+NN7M+SWsk3e7u+4fWfPBdvmHffHP3Fe4+z93LV0UEULsRhd3MztBg0H/h7s8UF+80s+lFfbqkeIlVAI1Ktt5scP7mSkl73f32IZc/IOkzd7/PzO6SNMXd/zZxXeGNnXvuueFYduzYEdYj0fa9kjRz5sywfu+995bWZsyYER6b2nI5tXVxtF20JN1///2ltY0bN4bHpqa4prZFTklNW46k2oYnTpwI69HU49Tf/YQJE8J61SnTdSprvY3kNftfSPorSevNbF1x2U8l3Sfpl2b2I0lbJcWNagCNSobd3f9LUtl/kd9r73AA1IXTZYFMEHYgE4QdyARhBzJB2IFMdHSKa09Pj0d93dRU0aj3uX///tKaJPX19YX1VN806vlW6fdK6Z5v6hyBqJed6uEfO3YsrFcV/b5TyzWnpgan/l6q/M5Sqo6tTiwlDWSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrpqKenUHOKol55aVrjqvOzp06eX1vr7+0trI9Hb2xvWU1s213ndqWWsDx06FNarzClPGTUqfqyqMqe86fMTqqDPDmSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrqqzw6gOvrsQOYIO5AJwg5kgrADmSDsQCYIO5AJwg5kIhl2MzvPzH5rZhvN7AMz+0lx+XIz225m64qPq+ofLoBWJU+qMbPpkqa7+3tmdpakdyUt1eB+7Afd/cER3xgn1QC1KzupZiT7s/dL6i++PmBmH0qa0d7hAajbH/Sa3cxmSfqOpLeLi24zs/fN7DEzm1xyzDIzW2tma6sNFUAVIz433sz6JP2npHvd/RkzmyZpjySX9A8afKp/c+I6eBoP1KzsafyIwm5mZ0h6UdKv3f2fhqnPkvSiu/9J4noIO1CzlifC2ODyoI9K+nBo0Is37k77gaQNVQcJoD4jeTd+gaTXJa2XdHpt3p9KukHSXA0+jf9U0i3Fm3nRdfHIDtSs0tP4diHsQP2Yzw5kjrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmUguONlmeyRtHfL91OKybtStY+vWcUmMrVXtHNsFZYWOzmf/2o2brXX3eY0NINCtY+vWcUmMrVWdGhtP44FMEHYgE02HfUXDtx/p1rF167gkxtaqjoyt0dfsADqn6Ud2AB1C2IFMNBJ2M7vSzP7XzD4ys7uaGEMZM/vUzNYX21A3uj9dsYfeLjPbMOSyKWb2ipltLj4Pu8deQ2Prim28g23GG73vmt7+vOOv2c2sR9ImSX8paZukdyTd4O4bOzqQEmb2qaR57t74CRhmtlDSQUmrTm+tZWb/KGmvu99X/Ec52d3/rkvGtlx/4DbeNY2tbJvxv1aD9107tz9vRROP7PMlfeTuW9z9uKSnJS1pYBxdz91fk7T3KxcvkbSy+HqlBv9YOq5kbF3B3fvd/b3i6wOSTm8z3uh9F4yrI5oI+wxJvxvy/TZ1137vLuk3ZvaumS1rejDDmDZkm60dkqY1OZhhJLfx7qSvbDPeNfddK9ufV8UbdF+3wN3/TNL3Jf24eLralXzwNVg39U5/JunbGtwDsF/SQ00OpthmfI2k2919/9Bak/fdMOPqyP3WRNi3SzpvyPczi8u6grtvLz7vkvSsBl92dJOdp3fQLT7vang8X3L3ne4+4O6nJP1cDd53xTbjayT9wt2fKS5u/L4bblydut+aCPs7kmab2bfM7ExJP5T0fAPj+BozG1+8cSIzGy/pCnXfVtTPS7qp+PomSb9qcCy/p1u28S7bZlwN33eNb3/u7h3/kHSVBt+R/1jS3zcxhpJxXSjpv4uPD5oem6SnNPi07oQG39v4kaSzJb0qabOk/5A0pYvG9m8a3Nr7fQ0Ga3pDY1ugwafo70taV3xc1fR9F4yrI/cbp8sCmeANOiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMvF/rSIwqVQD1iIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from pkgutil import get_data\n", + "import onnx\n", + "import onnx.numpy_helper as nph\n", + "raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n", + "input_tensor = onnx.load_tensor_from_string(raw_i)\n", + "input_tensor_npy = nph.to_array(input_tensor)\n", + "input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n", + "imgplot = plt.imshow(input_tensor_npy.reshape(28,28), cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([0.1020, 0.0113, 0.4806, 0.0571, 0.0482, 0.0079, 0.0450, 0.0076, 0.1851,\n", + " 0.0552])" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from torch.nn.functional import softmax\n", + "# do forward pass in PyTorch/Brevitas\n", + "produced = lfc.forward(input_tensor_pyt).detach()\n", + "probabilities = softmax(produced, dim=-1).flatten()\n", + "probabilities" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEICAYAAABS0fM3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAbi0lEQVR4nO3debxdZXn28d9FIDKFQRIVEiDMNk6IERAVZWpDq2AREV4nrEwtsSi+VlTUSp3qhFWxCgRBKfACgo0WZShKHYGAKIRBwhzGMAmiLxC4+sd6Dm6O++yzMqx1yFnX9/PZn6z5vvc+sO+9nmetZ8k2ERHRXSuNdQIRETG2UggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgoiZJlrR5mf66pI8s5XF+L2nT5ZvdwHiS9E1JD0i6pK24seJIIRiHJN0sadc+y18r6cnyRTT0+l7P+i0lnSHpXkm/k/QbSYdLmrCM+cyWNE/So5JOXMJ995H0c0l/kPTjUbbtfX8PS7pO0juXJfeR2D7E9r+Mtp2kH0s6YNi+a9q+sYm8RvAqYDdgmu1tl/VgkqaXorjysqcWzwQpBN1zR/kiGnq9HkDSZsDFwG3Ai2yvDbwJmAlMWtaYwCeAE5Zi3/uBLwGfqRvL9prAWsAHgOMkzRi+Uce+xDYGbrb9yJLu2LHPqbNSCGLIx4Gf2z7c9p0Atq+z/X9sPzh8Y0k7SbqyZ/58SZf2zP9E0hvKcc6y/V3gvj7HWVfS9yUtKk0X35c0bWi97Qtsn05VTGpz5bvAA8AMSftL+pmkoyXdB/yzpGdJ+rykWyXdXZp7VuvJ7f2S7pR0h6S/G5b3iZI+0TO/p6QrJD0k6QZJsyR9Eng18NVylvLVsm1vE9Pakr5V3v8tko6UtFJZt7+kn5YcH5B0k6Tde2LuL+nGcvZzk6S39Pl83wUcD7yi5PDxsvxASQsk3S9prqQNevaxpEMlXQ9cP9pnXT6Lr0n6QYnxM0nPk/Slkve1kl7as/0R5TN6WNLVkv62Z90ESV8oZ6U3lbPJp84+yuc1p/xdbpf0iWU9Y40UgviTXYEzl2D7XwJbSJosaRXgxcAGkiaVL9OZwE9qHGcl4JtUv1o3Av4IfHWJMu9D0krlC2YdYKhgbQfcCDwX+CTVWcaWwNbA5sBU4KNl/1nA/6VqUtmC6vMZKda2wLeA95d4O1L9Av8w1Wcwu5x9ze6z+1eAtYFNgdcAbwd6m7O2A64DJgOfBeaosgbwZWB325OAHYArhh/c9hzgEOAXJYePSdoZ+DSwD7A+cAtw2rBd31Bi/9nZ1Aj2AY4seT4K/AK4vMyfCXyxZ9sbqArk2lQ/QE6WtH5ZdyCwO9XfZJuSR68TgcVUf6+XAn8JHEAsG9t5jbMXcDOwa5/lrwWeBB7see1T1j0OzFrCOD8B9gK2B84DTgdmATsBv+mz/SeAE0c55tbAA32WHwD8eJR9e9/f/VRfjPuWdfsDt/ZsK+ARYLOeZa8AbirTJwCf6Vm3JWBg8zJ/IvCJMv0N4OgRcvoxcMCwZab6IpsAPAbM6Fl38ND7LDkv6Fm3etn3ecAa5X2+EVhtlM9lf+CnPfNzgM/2zK9Z/v7Te/LbecDxppdtVu75LI7rWf9u4Jqe+RcBDw443hXAnmX6QuDgnnW7DsWiKuCP9r5fYD/gR23/PzbeXmn/6547bE/rs/w+ql+HfUn6OvDWMvsp258CLqL68l1Yph+g+lX7aJkflaTVgaOpCsi6ZfEkSRNsP1HnGMOM9P6g6v8YMoXqi/UySU+lQ/XlDLABcFnP9rcMiLkhcM6Sp8pkYJVhx76F6sxkyF1DE7b/UHJd0/Zdkt5MddYyR9LPgPfZvrZG3A2ofq0PHff3pblsKtWPCHj6Z1XH3T3Tf+wzv+bQjKS3A4dTFRTKusk9ufXG7p3emOrzurPnb7bSUuQaw6RpKIZcQPXrsi9XV8kMdTB/qiweKgQ7lumLqArBa6hZCID3AVsB29leqxwLqi/l5a13qN17qb6gXmB7nfJa21VHM8CdVF/wQzYacNzbgM1qxBzuXqpf4hsPi3P7gH3+dGD7XNu7URXwa4Hj6uxH1d/yVMzSzLTesLiNDEssaWOqPGcD69leB7iKP/297wR6C3nv3+A2qh8Zk3v+ZmvZfkETuXZJCsH4tYqkVXteo539fQzYQdLnJD0PQNLmkk6WtM4I+/yc6kt8W+AS2/OpvmC2A/5naCNJK0talerX9oRh+Uyi+kJ+UNK
zSx707Duh7LsysFLZd5X6H0N/tp+k+kI6WtJzSqypkv6qbHI6sL+kGeWs5WMjHAqqppZ3Stql9E1MlfT8su5uqvb/fjk8UeJ8svStbEz1S/nk0fKX9NzSQb0G1Zfj76maxeo4teS7taRnAZ8CLrZ9c839l8UaVEVmEYCqy3tf2LP+dOCw8hmuQ3XlFwCuLmI4D/iCpLXKZ72ZpNe0kPe4lkIwfp1D9QU79PrnQRvbvoGqjXw6MF/S74DvAPOAh0fY5xGqJob5th8ri38B3GL7np5Njyw5HEHVvPTHsgyqS0NXo/p1/Evgh8PCvK1s/+9UHYx/pP4v39F8AFgA/FLSQ1RnRVuV9/aDktuFZZsLRzqI7UuoOniPBn5HdTY09Iv734C9y9UzX+6z+7up+ipuBH4KnEK9y2xXoioad1D1h7wG+Psa+2H7AuAjVH/fO6nOZvats++ysn018AWq/07upuo/+FnPJsdRfdn/BvgV1X/Hi4GhZsK3AxOBq6maIs9kQJNm1KPS4RIR8YxTLpf9uu2NR904llrOCCLiGUPSapL+ujQnTqVqkjt7rPMa73JGEBHPGKU/5iLg+VTNgP8FHGb7oTFNbJxLIYiI6LhGm4ZU3WZ/XbmV/Yg+6/dXdWv9FeWVOwQjIlrW2A1lZfyPY6hu0V8IXCppbrlqoNf/c/9b7/uaPHmyp0+fvvwSjYjogMsuu+xe21P6rWvyzuJtqW6PvxFA0mnAnlSXfS216dOnM2/evOWQXkREd0ga8e74JpuGpvL0W78X8vRb54e8UdW492dK2rDPeiQdpGo8+3mLFi1qIteIiM4a68tHv0c10NWLgfOBk/ptZPtY2zNtz5wype+ZTURELKUmC8HtPH2ckGkMG0PF9n22Hy2zxwMvazCfiIjoo8lCcCnVePWbSJpIdQv73N4NesYgB9gDuKbBfCIioo/GOottL5Y0GziXarCxE2zPl3QUMM/2XOAfJe1BNZbI/VTjpkdERItWuBvKZs6c6Vw1FBGxZCRdZntmv3Vj3VkcERFjLIUgIqLjUggiIjouzyzugKPP/23jMd6725aNx4iIZuSMICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOq7RQiBplqTrJC2QdMSA7d4oyZJmNplPRET8ucYKgaQJwDHA7sAMYD9JM/psNwk4DLi4qVwiImJkTZ4RbAsssH2j7ceA04A9+2z3L8C/Av+/wVwiImIETRaCqcBtPfMLy7KnSNoG2ND2fw06kKSDJM2TNG/RokXLP9OIiA4bs85iSSsBXwTeN9q2to+1PdP2zClTpjSfXEREhzRZCG4HNuyZn1aWDZkEvBD4saSbge2BuekwjohoV5OF4FJgC0mbSJoI7AvMHVpp+3e2J9uebns68EtgD9vzGswpIiKGaawQ2F4MzAbOBa4BTrc9X9JRkvZoKm5ERCyZlZs8uO1zgHOGLfvoCNu+tslcIiKiv9xZHBHRcSkEEREdl0IQEdFxKQQRER2XQhAR0XEpBBERHZdCEBHRcSkEEREdN2ohkPRuSeu2kUxERLSvzhnBc4FLJZ1enjimppOKiIj2jFoIbB8JbAHMAfYHrpf0KUmbNZxbRES0oFYfgW0Dd5XXYmBd4ExJn20wt4iIaMGog85JOgx4O3AvcDzwftuPlwfLXA/8U7MpRkREk+qMPvpsYC/bt/QutP2kpNc1k1ZERLSlTtPQpsOLgKRvA9i+ppGsIiKiNXUKwQt6ZyRNAF7WTDoREdG2EQuBpA9Kehh4saSHyuth4B7gP1vLMCIiGjViIbD9aduTgM/ZXqu8Jtlez/YHW8wxIiIaNGJnsaTn274WOEPSNsPX27680cwiIqIVg64aeh9wIPCFPusM7NxIRhER0aoRC4HtA8u/O7WXTkREtG1Q09Beg3a0fdbyTyciIto2qGno9QPWGUghiIgYBwY1Db2zzUQiImJsDGoaeqvtkyUd3m+97S82l1ZERLRlUNPQGuXfSW0kEhERY2NQ09A3yr8fby+diIhoW51HVW4q6XuSFkm6R9J/Stq0jeQiIqJ5dQadOwU4HVgf2AA4Azi1yaQiIqI9dQrB6ra/bXtxeZ0MrNp0YhER0Y5BVw09u0z+QNIRwGlU9w+8GTinhdwiIqIFg64auozqi19l/uCedQYyAmlExDgw6KqhTdpMJCIixkadZxYj6YXADHr6Bmx/q6mkIiKiPXUuH/0Y8JXy2gn4LLBHnYNLmiXpOkkLSj/D8PWHSLpS0hWSfippxhLmHxERy6jOVUN7A7sAd5Xxh14CrD3aTuXZxscAu1OdTezX54v+FNsvsr01VYHJsBURES2rUwj+aPtJYLGktaieWbxhjf22BRbYvtH2Y1RXHe3Zu4Hth3pm16DqhI6IiBbV6SOYJ2kd4DiqK4l+D/yixn5Tgdt65hcC2w3fSNKhwOHARPLUs4iI1o16RmD7H2w/aPvrwG7AO5bnENW2j7G9GfAB4Mh+20g6SNI8SfMWLVq0vEJHRAT1moaQtJekLwLvBjareezbeXoT0rSybCSnAW/ot8L2sbZn2p45ZcqUmuEjIqKOOlcNfQ04BLgSuAo4WNIxNY59KbCFpE0kTQT2BeYOO/YWPbN/A1xfN/GIiFg+6vQR7Az8hW0DSDoJmD/aTrYXS5oNnAtMAE6wPV/SUcA823OB2ZJ2BR4HHgDesZTvIyIillKdQrAA2Ai4pcxvWJaNyvY5DBuXyPZHe6YPq5dmREQ0ZdCgc9+jupxzEnCNpEvKqm2BS0baLyIiViyDzgg+31oWERExZgYNOnfR0LSk5wIvL7OX2L6n6cQiIqIdda4a2oeqKehNwD7AxZL2bjqxiIhoR53O4g8DLx86C5A0BbgAOLPJxCIioh11bihbaVhT0H0194uIiBVAnTOCH0o6lz89sD6PqoyIGEcGFgJJAr5M1VH8qrL4WNtnN51YRES0Y2AhsG1J59h+EXBWSzlFRESL6rT1Xy7p5aNvFhERK6I6fQTbAW+VdDPwCCCqk4UXN5lYRES0o04h+KvGs4iIiDEzaKyh5wAfAjanGoL608MeLRkREePAoD6Cb1E1BX0FWJPq6qGIiBhnBjUNrW/7w2X6XEmXt5FQRES0a7T7CNal6hwGmNA7b/v+hnOLiIgWDCoEawOX8adCADB0VmBg06aSioiI9gwahnp6i3lERMQYyeBxEREdl0IQEdFxKQQRER036IayZw/aMVcNRUSMD4OuGrqM6uogARsBD5TpdYBbgU2aTi4iIpo3YtOQ7U1sb0r1WMrX255sez3gdcB5bSUYERHNqtNHsL3tp55IZvsHwA7NpRQREW2qM/roHZKOBE4u828B7mgupYiIaFOdM4L9gCnA2VRPKZ
tSlkVExDgw6hlBuTroMElr2H6khZwiIqJFo54RSNpB0tXANWX+JZK+1nhmERHRijpNQ0dTPaXsPgDbvwZ2bDKpiIhoT607i23fNmzREw3kEhERY6DOVUO3SdoBsKRVgMMozUQREbHiq3NGcAhwKDAVuB3YGviHBnOKiIgW1Tkj2Mr2W3oXSHol8LNmUoqIiDbVOSP4Ss1lf0bSLEnXSVog6Yg+6w+XdLWk30j6b0kb1zluREQsP4NGH30F1VASUyQd3rNqLWDCaAeWNAE4BtgNWAhcKmmu7at7NvsVMNP2HyT9PfBZ4M1L/jYiImJpDTojmAisSVUsJvW8HgL2rnHsbYEFtm+0/RhwGrBn7wa2f2T7D2X2l8C0JUs/IiKW1aBnFl8EXCTpRNu3LMWxpwK9l50uBLYbsP27gB8sRZyIiFgGdfoIjpe0ztCMpHUlnbs8k5D0VmAm8LkR1h8kaZ6keYsWLVqeoSMiOq9OIZhs+8GhGdsPAM+psd/twIY989PKsqeRtCvwYWAP24/2O5DtY23PtD1zypQpNUJHRERddQrBk5I2GpopV/a4xn6XAltI2kTSRGBfYG7vBpJeCnyDqgjcUz/tiIhYXurcR/Bh4KeSLqJ6VOWrgYNG28n2YkmzgXOprjI6wfZ8SUcB82zPpWoKWhM4QxLArbb3WLq3EhERS6POMNQ/lLQNsH1Z9B7b99Y5eHmy2TnDln20Z3rXJcg1IiIaMGLTkKTnl3+3oXp4/R3ltVFZFhER48CgM4L3AQcCX+izzsDOjWQUEbGUjj7/t43HeO9uWzYeo22D7iM4sPy7U3vpRERE2wYNMbHXoB1tn7X804mIiLYNahp6ffn3OVRjDl1Y5ncCfk71IPuIiFjBDWoaeieApPOAGbbvLPPrAye2kl1ERDSuzg1lGw4VgeJuqquIIiJiHKhzQ9l/l7GFTi3zbwYuaC6liIhoU50bymZL+ltgx7LoWNtnN5tWRES0pc4ZAcDlwMO2L5C0uqRJth9uMrGIiGjHqH0Ekg4EzqQaHA6q5wx8t8GcIiKiRXU6iw8FXkn1ZDJsX0+9YagjImIFUKcQPFoeNQmApJWpNwx1RESsAOoUgoskfQhYTdJuwBnA95pNKyIi2lKnEHwAWARcCRxMNaz0kU0mFRER7Rl41ZCkCcB8288HjmsnpYiIaNPAMwLbTwDX9T6qMiIixpc69xGsC8yXdAnwyNDCPFIyImJ8qFMIPtJ4FhERMWYGPY9gVeAQYHOqjuI5the3lVhERLRjUB/BScBMqiKwO/0fWRkRESu4QU1DM2y/CEDSHOCSdlKKiIg2DTojeHxoIk1CERHj16AzgpdIeqhMi+rO4ofKtG2v1Xh2ERHRuEGPqpzQZiIRETE26gwxERER41gKQUREx6UQRER0XApBRETHpRBERHRcCkFERMelEEREdFwKQUREx6UQRER0XApBRETHNVoIJM2SdJ2kBZKO6LN+R0mXS1osae8mc4mIiP4aKwTlwffHUD3LYAawn6QZwza7FdgfOKWpPCIiYrA6j6pcWtsCC2zfCCDpNGBP4OqhDWzfXNY92WAeTzn6/N82HuO9u23ZeIyIiOWpyaahqcBtPfMLy7IlJukgSfMkzVu0aNFySS4iIiorRGex7WNtz7Q9c8qUKWOdTkTEuNJkIbgd2LBnflpZFhERzyBNFoJLgS0kbSJpIrAvMLfBeBERsRQaKwTlOcezgXOBa4DTbc+XdJSkPQAkvVzSQuBNwDckzW8qn4iI6K/Jq4awfQ5wzrBlH+2ZvpSqySgiIsbICtFZHBERzUkhiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LhGB52LyONBI575UggiIpaDFflHT5qGIiI6LoUgIqLj0jQU41rTp+vpn4jxIIUgYhxKAYwlkaahiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4DEPdkhX5MXYRMb6lEEQ0JM8EiBVFo4VA0izg34AJwPG2PzNs/bOAbwEvA+4D3mz75iZziohm5ex3xdNYH4GkCcAxwO7ADGA/STOGbfYu4AHbmwNHA//aVD4REdFfk53F2wILbN9o+zHgNGDPYdvsCZxUps8EdpGkBnOKiIhhZLuZA0t7A7NsH1Dm3wZsZ3t2zzZXlW0Wlvkbyjb3DjvWQcBBZXYr4LpGku5vMnDvqFsldmIndmI/s2NvbHtKvxUrRGex7WOBY8citqR5tmcmdmIndmKPl9jDNdk0dDuwYc/8tLKs7zaSVgbWpuo0joiIljRZCC4FtpC0iaSJwL7A3GHbzAXeUab3Bi50U21VERHRV2NNQ7YXS5oNnEt1+egJtudLOgqYZ3suMAf4tqQFwP1UxeKZZkyapBI7sRM7sdvSWGdxRESsGDLWUEREx6UQRER0XArBCCTNknSdpAWSjmg59gmS7in3WbQZd0NJP5J0taT5kg5rMfaqki6R9OsS++Ntxe7JYYKkX0n6/hjEvlnSlZKukDSv5djrSDpT0rWSrpH0ipbiblXe79DrIUnvaSN2if/e8t/aVZJOlbRqi7EPK3Hnt/meR2Q7r2Evqs7tG4BNgYnAr4EZLcbfEdgGuKrl970+sE2ZngT8tq33DQhYs0yvAlwMbN/y+z8cOAX4fptxS+ybgcltxy2xTwIOKNMTgXXGIIcJwF1UNz21EW8qcBOwWpk/Hdi/pdgvBK4CVqe6YOcCYPOx+NsPvXJG0F+d4TEaY/t/qK6iapXtO21fXqYfBq6h+h+mjdi2/fsyu0p5tXYlg6RpwN8Ax7cV85lA0tpUPzzmANh+zPaDY5DKLsANtm9pMebKwGrlHqbVgTtaivsXwMW2/2B7MXARsFdLsftKIehvKnBbz/xCWvpCfKaQNB14KdUv87ZiTpB0BXAPcL7t1mIDXwL+CXiyxZi9DJwn6bIypEpbNgEWAd8szWLHS1qjxfhD9gVObSuY7duBzwO3AncCv7N9XkvhrwJeLWk9SasDf83Tb75tXQpB/BlJawLfAd5j+6G24tp+wvbWVHehbyvphW3ElfQ64B7bl7URbwSvsr0N1Wi9h0rasaW4K1M1Q/677ZcCjwBt94lNBPYAzmgx5rpUZ/mbABsAa0h6axuxbV9DNdLyecAPgSuAJ9qIPZIUgv7qDI8xLklahaoI/Ifts8Yih9I08SNgVkshXwnsIelmqmbAnSWd3FJs4KlfqNi+BzibqnmyDQuBhT1nX2dSFYY27Q5cbvvuFmPuCtxke5Htx4GzgB3aCm57ju2X2d4ReICqP27MpBD0V2d4jHGnDAE+B7jG9hdbjj1F0jplejVgN+DaNmLb/qDtabanU/2tL7Tdyq9DAElrSJo0NA38JVXzQeNs3wXcJmmrsmgX4Oo2YvfYjxabhYpbge0lrV7+u9+Fqk+sFZKeU/7diKp/4JS2YvezQow+2jaPMDxGW/ElnQq8F
pgsaSHwMdtzWgj9SuBtwJWlrR7gQ7bPaSH2+sBJ5YFGKwGn2279Ms4x8lzg7PIojpWBU2z/sMX47wb+o/zouRF4Z1uBS+HbDTi4rZgAti+WdCZwObAY+BXtDvnwHUnrAY8Dh45RB/1TMsRERETHpWkoIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLj/hdRB2LXFx7MKAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import numpy as np\n", + "objects = [str(x) for x in range(10)]\n", + "y_pos = np.arange(len(objects))\n", + "plt.bar(y_pos, probabilities, align='center', alpha=0.5)\n", + "plt.xticks(y_pos, objects)\n", + "plt.ylabel('Predicted Probability')\n", + "plt.title('LFC-w1a1 Predictions for Image')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Call Brevitas FINN-ONNX export and visualize with Netron\n", + "\n", + "Brevitas comes with built-in FINN-ONNX export functionality. This is similar to the regular ONNX export capabilities of PyTorch, with a few differences:\n", + "\n", + "1. The weight quantization logic is not exported as part of the graph; rather, the quantized weights themselves are exported.\n", + "2. Special quantization annotations are used to preserve the low-bit quantization information. ONNX (at the time of writing) supports 8-bit quantization as the minimum bitwidth, whereas FINN-ONNX quantization annotations can go down to binary/bipolar quantization.\n", + "3. Low-bit quantized activation functions are exported as MultiThreshold operators.\n", + "\n", + "It's actually quite straightforward to export ONNX from our Brevitas model as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import brevitas.onnx as bo\n", + "export_onnx_path = \"/tmp/LFCW1A1_finn-onnx.onnx\"\n", + "input_shape = (1, 1, 28, 28)\n", + "bo.export_finn_onnx(lfc, input_shape, export_onnx_path);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's examine what the exported ONNX model looks like. For this, we will use the Netron visualizer:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Serving '/tmp/LFCW1A1_finn-onnx.onnx' at http://0.0.0.0:8081\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "showInNetron(export_onnx_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When running this notebook in the FINN Docker container, you should be able to see an interactive visualization of the imported network above, and click on individual nodes to inspect their parameters. If you look at any of the MatMul nodes, you should be able to see that the weights are all {-1, +1} values, and the activations are Sign functions." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Import into FINN and call cleanup transformations\n", + "\n", + "We will now import this ONNX model into FINN using the ModelWrapper, and examine some of the graph attributes from Python." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/hendrik/Dropbox/a_local/Uni/fpga_synth_system_data/finn_deving/finn/deps/qonnx/src/qonnx/core/modelwrapper.py:93: UserWarning: Some old-style domain attributes were automatically converted to new-style,\n", + " i.e. 
domain=finn to domain=qonnx.custom_op.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/plain": [ + "input: \"37\"\n", + "input: \"38\"\n", + "output: \"39\"\n", + "name: \"MatMul_13\"\n", + "op_type: \"MatMul\"\n", + "domain: \"\"" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from qonnx.core.modelwrapper import ModelWrapper\n", + "model = ModelWrapper(export_onnx_path)\n", + "model.graph.node[8]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The ModelWrapper exposes a range of other useful functions as well. For instance, by convention the second input of the MatMul node will be a pre-initialized weight tensor, which we can view using the following:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-1., -1., 1., ..., -1., 1., -1.],\n", + " [ 1., 1., -1., ..., 1., -1., 1.],\n", + " [-1., -1., -1., ..., 1., -1., 1.],\n", + " ...,\n", + " [ 1., -1., -1., ..., -1., -1., 1.],\n", + " [ 1., -1., -1., ..., 1., 1., 1.],\n", + " [ 1., -1., 1., ..., 1., -1., 1.]], dtype=float32)" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.get_initializer(model.graph.node[8].input[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also examine the quantization annotations and shapes of various tensors using the convenience functions provided by ModelWrapper." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'BIPOLAR'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.get_tensor_datatype(model.graph.node[8].input[1]).name" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[784, 1024]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.get_tensor_shape(model.graph.node[8].input[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If we want to operate further on this model in FINN, it is a good idea to execute certain \"cleanup\" transformations on this graph. Here, we will run shape inference and constant folding on this graph, and visualize the resulting graph in Netron again." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "from qonnx.transformation.fold_constants import FoldConstants\n", + "from qonnx.transformation.infer_shapes import InferShapes\n", + "model = model.transform(InferShapes())\n", + "model = model.transform(FoldConstants())\n", + "export_onnx_path_transformed = \"/tmp/LFCW1A1-finn-onnx-clean.onnx\"\n", + "model.save(export_onnx_path_transformed)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping http://0.0.0.0:8081\n", + "Serving '/tmp/LFCW1A1-finn-onnx-clean.onnx' at http://0.0.0.0:8081\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "showInNetron(export_onnx_path_transformed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the resulting graph has become smaller and simpler. Specifically, the input reshaping is now a single Reshape node instead of the Shape -> Gather -> Unsqueeze -> Concat -> Reshape sequence. We can now use the internal ONNX execution capabilities of FINN to ensure that we still get the same output from this model as we did with PyTorch." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-1.3736125, -3.5715756, 0.1768887, -1.9529207, -2.1233053,\n", + " -3.9293835, -2.1914592, -3.9634604, -0.7772659, -1.9869976]],\n", + " dtype=float32)" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import finn.core.onnx_exec as oxe\n", + "input_dict = {\"0\": nph.to_array(input_tensor)}\n", + "output_dict = oxe.execute_onnx(model, input_dict)\n", + "produced_finn = output_dict[list(output_dict.keys())[0]]\n", + "\n", + "produced_finn" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.isclose(produced, produced_finn).all()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have succesfully verified that the transformed and cleaned-up FINN graph still produces the same output, and can now use this model for further processing in FINN." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 6d4e0f04b39bad87d99bcdb1557e90ed23d6d0b7 Mon Sep 17 00:00:00 2001 From: Hendrik Borras Date: Thu, 8 Sep 2022 12:26:21 +0200 Subject: [PATCH 148/628] Remove cell execution --- ...revitas_network_import_via_FINN-ONNX.ipynb | 623 +----------------- 1 file changed, 31 insertions(+), 592 deletions(-) diff --git a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb index 9f28459f77..ed5375fd70 100644 --- a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb +++ b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb @@ -6,6 +6,8 @@ "source": [ "# Importing Brevitas networks into FINN with the FINN-ONNX interchange format\n", "\n", + "**Note: This notebook is very similar to the 1b notebook, in that it shows the same concepts for the FINN-ONNX ingestion as 1b does for QONNX.**\n", + "\n", "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", "\n", "1. Load up the trained PyTorch model\n", @@ -17,7 +19,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -36,121 +38,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# MIT License\n", - "#\n", - "# Copyright (c) 2019 Xilinx\n", - "#\n", - "# Permission is hereby granted, free of charge, to any person obtaining a copy\n", - "# of this software and associated documentation files (the \"Software\"), to deal\n", - "# in the Software without restriction, including without limitation the rights\n", - "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n", - "# copies of the Software, and to permit persons to whom the Software is\n", - "# furnished to do so, subject to the following conditions:\n", - "#\n", - "# The above copyright notice and this permission notice shall be included in all\n", - "# copies or substantial portions of the Software.\n", - "#\n", - "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n", - "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n", - "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n", - "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n", - "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n", - "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n", - "# SOFTWARE.\n", - "\n", - "import ast\n", - "from functools import reduce\n", - "from operator import mul\n", - "\n", - "from torch.nn import Module, ModuleList, BatchNorm1d, Dropout\n", - "import torch\n", - "\n", - "from brevitas.nn import QuantIdentity, QuantLinear\n", - "from .common import CommonWeightQuant, CommonActQuant\n", - "from .tensor_norm import TensorNorm\n", - "\n", - "DROPOUT = 0.2\n", - "\n", - "\n", - "class FC(Module):\n", - "\n", - " def __init__(\n", - " self,\n", - " num_classes,\n", - " weight_bit_width,\n", - " act_bit_width,\n", - " in_bit_width,\n", - " in_channels,\n", - " out_features,\n", - " in_features=(28, 28)):\n", - " super(FC, self).__init__()\n", - "\n", - " self.features = ModuleList()\n", - " self.features.append(QuantIdentity(act_quant=CommonActQuant, bit_width=in_bit_width))\n", - " self.features.append(Dropout(p=DROPOUT))\n", - " in_features = reduce(mul, in_features)\n", - " for out_features in out_features:\n", - " self.features.append(QuantLinear(\n", - " in_features=in_features,\n", - " out_features=out_features,\n", - " bias=False,\n", - " weight_bit_width=weight_bit_width,\n", - " weight_quant=CommonWeightQuant))\n", - " in_features = out_features\n", - " self.features.append(BatchNorm1d(num_features=in_features))\n", - " self.features.append(QuantIdentity(act_quant=CommonActQuant, bit_width=act_bit_width))\n", - " self.features.append(Dropout(p=DROPOUT))\n", - " self.features.append(QuantLinear(\n", - " in_features=in_features,\n", - " out_features=num_classes,\n", - " bias=False,\n", - " weight_bit_width=weight_bit_width,\n", - " weight_quant=CommonWeightQuant))\n", - " self.features.append(TensorNorm())\n", - "\n", - " for m in self.modules():\n", - " if isinstance(m, QuantLinear):\n", - " torch.nn.init.uniform_(m.weight.data, -1, 1)\n", - "\n", - " def clip_weights(self, min_val, max_val):\n", - " for mod in self.features:\n", - " if isinstance(mod, QuantLinear):\n", - " mod.weight.data.clamp_(min_val, max_val)\n", - " \n", - " def forward(self, x):\n", - " x = x.view(x.shape[0], -1)\n", - " x = 2.0 * x - torch.tensor([1.0], device=x.device)\n", - " for mod in self.features:\n", - " x = mod(x)\n", - " return x\n", - "\n", - "\n", - "def fc(cfg):\n", - " weight_bit_width = cfg.getint('QUANT', 'WEIGHT_BIT_WIDTH')\n", - " act_bit_width = cfg.getint('QUANT', 'ACT_BIT_WIDTH')\n", - " in_bit_width = cfg.getint('QUANT', 'IN_BIT_WIDTH')\n", - " num_classes = cfg.getint('MODEL', 'NUM_CLASSES')\n", - " in_channels = cfg.getint('MODEL', 'IN_CHANNELS')\n", - " out_features = ast.literal_eval(cfg.get('MODEL', 'OUT_FEATURES'))\n", - " net = FC(\n", - " weight_bit_width=weight_bit_width,\n", - " act_bit_width=act_bit_width,\n", - " in_bit_width=in_bit_width,\n", - " in_channels=in_channels,\n", - " out_features=out_features,\n", - " num_classes=num_classes)\n", - " return net\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "from brevitas_examples import bnn_pynq\n", "showSrc(bnn_pynq.models.FC)" @@ -165,267 +55,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "FC(\n", - " (features): ModuleList(\n", - " (0): QuantIdentity(\n", - " (input_quant): 
ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (act_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", - " (activation_impl): Identity()\n", - " (tensor_quant): ClampedBinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " (tensor_clamp_impl): TensorClamp()\n", - " )\n", - " )\n", - " )\n", - " )\n", - " (1): Dropout(p=0.2, inplace=False)\n", - " (2): QuantLinear(\n", - " in_features=784, out_features=1024, bias=False\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (output_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (weight_quant): WeightQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (tensor_quant): BinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " )\n", - " )\n", - " (bias_quant): BiasQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " )\n", - " (3): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): QuantIdentity(\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (act_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", - " (activation_impl): Identity()\n", - " (tensor_quant): ClampedBinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " (tensor_clamp_impl): TensorClamp()\n", - " )\n", - " )\n", - " )\n", - " )\n", - " (5): Dropout(p=0.2, inplace=False)\n", - " (6): QuantLinear(\n", - " in_features=1024, out_features=1024, bias=False\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (output_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (weight_quant): WeightQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (tensor_quant): BinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): 
FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " )\n", - " )\n", - " (bias_quant): BiasQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " )\n", - " (7): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (8): QuantIdentity(\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (act_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", - " (activation_impl): Identity()\n", - " (tensor_quant): ClampedBinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " (tensor_clamp_impl): TensorClamp()\n", - " )\n", - " )\n", - " )\n", - " )\n", - " (9): Dropout(p=0.2, inplace=False)\n", - " (10): QuantLinear(\n", - " in_features=1024, out_features=1024, bias=False\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (output_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (weight_quant): WeightQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (tensor_quant): BinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " )\n", - " )\n", - " (bias_quant): BiasQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " )\n", - " (11): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (12): QuantIdentity(\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (act_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (fused_activation_quant_proxy): FusedActivationQuantProxy(\n", - " (activation_impl): Identity()\n", - " (tensor_quant): ClampedBinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " (tensor_clamp_impl): TensorClamp()\n", - " )\n", - " )\n", - " )\n", - " )\n", - " (13): Dropout(p=0.2, inplace=False)\n", - " (14): 
QuantLinear(\n", - " in_features=1024, out_features=10, bias=False\n", - " (input_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (output_quant): ActQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " (weight_quant): WeightQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " (tensor_quant): BinaryQuant(\n", - " (scaling_impl): ConstScaling(\n", - " (restrict_clamp_scaling): _RestrictClampValue(\n", - " (clamp_min_ste): Identity()\n", - " (restrict_value_impl): FloatRestrictValue()\n", - " )\n", - " (value): StatelessBuffer()\n", - " )\n", - " (bit_width): BitWidthConst(\n", - " (bit_width): StatelessBuffer()\n", - " )\n", - " (zero_point): StatelessBuffer()\n", - " (delay_wrapper): DelayWrapper(\n", - " (delay_impl): _NoDelay()\n", - " )\n", - " )\n", - " )\n", - " (bias_quant): BiasQuantProxyFromInjector(\n", - " (_zero_hw_sentinel): StatelessBuffer()\n", - " )\n", - " )\n", - " (15): TensorNorm()\n", - " )\n", - ")" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from finn.util.test import get_test_model\n", "lfc = get_test_model(netname = \"LFC\", wbits = 1, abits = 1, pretrained = True)\n", @@ -441,30 +73,9 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - ":9: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. 
(Triggered internally at /opt/conda/conda-bld/pytorch_1607370172916/work/torch/csrc/utils/tensor_numpy.cpp:141.)\n", - " input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAARYElEQVR4nO3dfYyVZXrH8d/FoDAw8iYRCaisG/5QqmUbgk1KyOKmxlUMbKJm/aPauAmarMmqTVqz/UOSaqJVa/pH3YStL9CsmiWoq0a7a82mWo1GNFQQW1CULGR4E5H3t+HqH/NgZ3We6549z3nOc9z7+0kmM3Ouec65OTM/zsv13Pdt7i4Af/xGNT0AAJ1B2IFMEHYgE4QdyARhBzIxupM3Zma89Z+ZUaPKH09OnTpV23VXvf6enp6wPjAw0PJ1183dbbjLK4XdzK6U9M+SeiT9q7vfV+X6cmU27O/mS6k/6ip/eKNHx38CqcCk6r29vaW1Q4cOhcem9PX1hfUDBw6U1lIt50mTJoX1zz77LKx3o5afxptZj6R/kfR9SRdLusHMLm7XwAC0V5XX7PMlfeTuW9z9uKSnJS1pz7AAtFuVsM+Q9Lsh328rLvs9ZrbMzNaa2doKtwWgotrfoHP3FZJWSLxBBzSpyiP7dknnDfl+ZnEZgC5UJezvSJptZt8yszMl/VDS8+0ZFoB2a/lpvLufNLPbJP1ag623x9z9g7aNLCPjx48P6wcPHmz5useMGRPWjx07FtZTbcFx48aF9ai9lmoppqSOj9prqT76vn37WhlSV6v0mt3dX5L0UpvGAqBGnC4LZIKwA5kg7EAmCDuQCcIOZIKwA5mwTq4um+vpsqled6qXffTo0bA+duzYlo9Nia676vWfffbZYb3qNNLofp06dWp47O7du8N6amrwyZMnw3qdyuaz88gOZIKwA5kg7EAmCDuQCcIOZIKwA5mg9fYNkGrNVfkd1nnddUtNDa6yem1q6m5qanCTS03TegMyR9iBTBB2IBOEHcgEYQcyQdiBTBB2IBP02TvgrLPOCuvRbqOSNHHixLB+4sSJ0lpqN9LUFNbPP/88rC9YsCCs33rrraW1VC/6jjvuCOtbt24N601OM20SfXYgc4QdyARhBzJB2IFMEHYgE4QdyARhBzJBn/0b4JFHHgnrUS871Wuuuox1b29vWI+ktk2+5JJLwvqmTZvC+vHjx0trZ5xxRnhsdO6ClP53HzlyJKzXqazPXmnLZjP7VNIBSQOSTrr7vCrXB6A+lcJeWOTue9pwPQBqxGt2IBNVw+6SfmNm75rZsuF+wMyWmdlaM1tb8bYAVFD1afwCd99uZudIesXM/sfdXxv6A+6+QtIKiTfogCZVemR39+3F512SnpU0vx2DAtB+LYfdzMab2Vmnv5Z0haQN7RoYgPaq8jR+mqRniz7taElPuvu/t2VUf2RSWzYvWrQorF922WVhPeqVHzx4MDw21W/u6+sL66nzNKI566m11x999NGWr1uS7rzzztLaW2+9FR5b93bSTWg57O6+RdKftnEsAGpE6w3IBGEHMkHYgUwQdiAThB3IBFNcu0Bqqubs2bPD+v79+0trEyZMCI+NpoFK6SmwVbZ8TrX9UlJLcO/du7e0tnTp0vDYdevWhfVUSzLV8qwTS0kDmSPsQCYIO5AJwg5kgrADmSDsQCYIO5CJdiw42TFRT7fOfnBK6thU/ZZbbgnrq1atCuszZ85s+bZTffZ77rknrK9evTqsn3nmmaW1K664Ijz2wQcfDOuprbCj2168eHF47LZt28L6nj3fvDVWeWQHMkHYgUwQdiAThB3IBGEHMkHYgUwQdiATHZ/Pnup3Rzo51naqOvd54cKFYf2iiy4qrY0bNy48dvTo+FSLNWvWhPUtW7aE9SpSyz3PmTMnrKfu90jq75T57AC6FmEHMkHYgUwQdiAThB3IBGEHMkHYgUx0vM8+alT5/y9V54XXqcpc+lOnTlW67eg+S9VPnjwZHjt+/PiwfujQobCe2o46+p2l5tJfffXVYf3pp58O61X67Kk17VP3a5Na7rOb2WNmtsvMNgy5bIqZvWJmm4vPk9s5WADtN5Kn8U9IuvIrl90l6VV3ny3p1eJ7AF0sGXZ3f03SV/fRWSJpZfH1SklL2zssAO3W6hp009y9v/h6h6RpZT9oZsskLWvxdgC0SeUFJ93dow0b3X2FpBUSGzsCTWq19bbTzKZLUvF5V/uGBKAOrYb9eUk3FV/fJOlX7RkOgLok++xm9pSk70qaKmmnpLslPSfpl5LOl7RV0vXuXr4Z9v9fV21P46uuG1+1Hkn1ZFN7qEf7r1fV29sb1o8cORLWU+cAVDnH4MILLwzrH3/8ccvXnRpXak36lMOHD1c6voqyPnvyNbu731BS+l6lEQHoKE6XBTJB2IFMEHYgE4QdyARhBzLBls2FVAtyYGAgrEd6enrCetVlh6M2UarFlJrCmpK6/mjb5KgmSYsWLWppTKdFv9MTJ06Ex6amuFb5e2gKj+xAJgg7kAnCDmSCsAOZIOxAJgg7kAnCDmSiq/rsdW7nXHU55yrqvu0DBw6U1lL94lSvO3V8qk8fLRedWsb6uuuuC+tHjx4N62PHji2tpfrsqd9Zk1syt4pHdiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMtHxPns0t7ube+XRksmp5ZRT6txW+dJLLw2PnTNnTlhPLSX93HPPhfVI1AeXpIULF4b1Klt4p5ahjs5dkKovwd0EHtmBTBB2IBOEHcgEYQcyQdiBTBB2IBOEHchEx/vs0Zz1OvvoqbnyqXndUU949Oj4bly6dGlYTx2/ZMmSsD5mzJjS2ty5c8NjJ02aFNZTvezXX3+95eNnz54dHptamz3V616/fn1p7fLLLw+Pje5TqTv76CnJR3Yze8zMdpnZhiGXLTez7Wa2rvi4qt5hAqhqJE/jn5B05TCXP+zuc4uPl9o7LADtlgy7u78maW8HxgKgRlXeoLvNzN4vnuZPLvshM1tmZmvNbG2F2wJQUath/5mkb0uaK6lf0kNlP+juK9x9nrvPa/G2ALRBS2F3953uPuDupyT9XNL89g4LQLu1FHYzmz7k2x9I2lD2swC6g6X6qGb2lKTvSpoqaaeku4vv50pySZ9KusXd+5M3ZhbeWKrfnJr3HZk1a1ZYv+aaa8L64sWLS2upedepedupudPR/utSvIZ5X19feGxK1Xnd0e/0iy++CI+dOHFiWE/ZvHlzaW3VqlXhsQ89VPrKVFJ399ndfdiTSpIn1bj7DcNc/GjlEQHoKE6XBTJB2IFMEHYgE4QdyARhBzKRbL21
9cbMPFp2uc4prnfffXdYX758eVjfs2dPaW3q1KmtDOlLqa2H9+6NpyZE9QsuuCA8NtUWTG3ZnHLs2LHSWmoaaervIdWKjaYtp7Zcfvnll8P6zTffHNab3NK5rPXGIzuQCcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noeJ89qlfZmjg11TLV96yy7fKuXbvC+tatW8P6Aw88ENZXr14d1ufNK18E6OGHHw6PTW3ZPHly6YpjkqRt27aF9eh3+sQTT4THfvLJJ2H92muvDevR1OOq02tffPHFsJ6aMl0n+uxA5gg7kAnCDmSCsAOZIOxAJgg7kAnCDmSio332UaNGeTQ/+vjx4+Hx55xzTmlt9+7d4bGpPntq7nTUL05tB71p06awPmXKlLCeWrY4Wu75/PPPD49NzWdPLe+9b9++sH7jjTeW1l544YXw2JTUOgLRctGLFi0Kj02tMZC6X1LLf9eJPjuQOcIOZIKwA5kg7EAmCDuQCcIOZIKwA5noqvnsVaT6nitXrgzr119/fcvXf/jw4fDYcePGhfXUtsipef4DAwOltdS672+++WZYf/LJJ8P6unXrwvobb7xRWkudX5Dq4ad+59F5G/Pnzw+Pffvtt8P6448/HtZT68rXqeU+u5mdZ2a/NbONZvaBmf2kuHyKmb1iZpuLz/EqBwAaNZKn8Scl/Y27XyzpzyX92MwulnSXpFfdfbakV4vvAXSpZNjdvd/d3yu+PiDpQ0kzJC2RdPq58UpJS2saI4A2iF/0fIWZzZL0HUlvS5rm7v1FaYekaSXHLJO0rMIYAbTBiN+NN7M+SWsk3e7u+4fWfPBdvmHffHP3Fe4+z93LV0UEULsRhd3MztBg0H/h7s8UF+80s+lFfbqkeIlVAI1Ktt5scP7mSkl73f32IZc/IOkzd7/PzO6SNMXd/zZxXeGNnXvuueFYduzYEdYj0fa9kjRz5sywfu+995bWZsyYER6b2nI5tXVxtF20JN1///2ltY0bN4bHpqa4prZFTklNW46k2oYnTpwI69HU49Tf/YQJE8J61SnTdSprvY3kNftfSPorSevNbF1x2U8l3Sfpl2b2I0lbJcWNagCNSobd3f9LUtl/kd9r73AA1IXTZYFMEHYgE4QdyARhBzJB2IFMdHSKa09Pj0d93dRU0aj3uX///tKaJPX19YX1VN806vlW6fdK6Z5v6hyBqJed6uEfO3YsrFcV/b5TyzWnpgan/l6q/M5Sqo6tTiwlDWSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrpqKenUHOKol55aVrjqvOzp06eX1vr7+0trI9Hb2xvWU1s213ndqWWsDx06FNarzClPGTUqfqyqMqe86fMTqqDPDmSOsAOZIOxAJgg7kAnCDmSCsAOZIOxAJrqqzw6gOvrsQOYIO5AJwg5kgrADmSDsQCYIO5AJwg5kIhl2MzvPzH5rZhvN7AMz+0lx+XIz225m64qPq+ofLoBWJU+qMbPpkqa7+3tmdpakdyUt1eB+7Afd/cER3xgn1QC1KzupZiT7s/dL6i++PmBmH0qa0d7hAajbH/Sa3cxmSfqOpLeLi24zs/fN7DEzm1xyzDIzW2tma6sNFUAVIz433sz6JP2npHvd/RkzmyZpjySX9A8afKp/c+I6eBoP1KzsafyIwm5mZ0h6UdKv3f2fhqnPkvSiu/9J4noIO1CzlifC2ODyoI9K+nBo0Is37k77gaQNVQcJoD4jeTd+gaTXJa2XdHpt3p9KukHSXA0+jf9U0i3Fm3nRdfHIDtSs0tP4diHsQP2Yzw5kjrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmSDsQCYIO5AJwg5kgrADmUguONlmeyRtHfL91OKybtStY+vWcUmMrVXtHNsFZYWOzmf/2o2brXX3eY0NINCtY+vWcUmMrVWdGhtP44FMEHYgE02HfUXDtx/p1rF167gkxtaqjoyt0dfsADqn6Ud2AB1C2IFMNBJ2M7vSzP7XzD4ys7uaGEMZM/vUzNYX21A3uj9dsYfeLjPbMOSyKWb2ipltLj4Pu8deQ2Prim28g23GG73vmt7+vOOv2c2sR9ImSX8paZukdyTd4O4bOzqQEmb2qaR57t74CRhmtlDSQUmrTm+tZWb/KGmvu99X/Ec52d3/rkvGtlx/4DbeNY2tbJvxv1aD9107tz9vRROP7PMlfeTuW9z9uKSnJS1pYBxdz91fk7T3KxcvkbSy+HqlBv9YOq5kbF3B3fvd/b3i6wOSTm8z3uh9F4yrI5oI+wxJvxvy/TZ1137vLuk3ZvaumS1rejDDmDZkm60dkqY1OZhhJLfx7qSvbDPeNfddK9ufV8UbdF+3wN3/TNL3Jf24eLralXzwNVg39U5/JunbGtwDsF/SQ00OpthmfI2k2919/9Bak/fdMOPqyP3WRNi3SzpvyPczi8u6grtvLz7vkvSsBl92dJOdp3fQLT7vang8X3L3ne4+4O6nJP1cDd53xTbjayT9wt2fKS5u/L4bblydut+aCPs7kmab2bfM7ExJP5T0fAPj+BozG1+8cSIzGy/pCnXfVtTPS7qp+PomSb9qcCy/p1u28S7bZlwN33eNb3/u7h3/kHSVBt+R/1jS3zcxhpJxXSjpv4uPD5oem6SnNPi07oQG39v4kaSzJb0qabOk/5A0pYvG9m8a3Nr7fQ0Ga3pDY1ugwafo70taV3xc1fR9F4yrI/cbp8sCmeANOiAThB3IBGEHMkHYgUwQdiAThB3IBGEHMvF/rSIwqVQD1iIAAAAASUVORK5CYII=\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import torch\n", "import matplotlib.pyplot as plt\n", @@ -480,21 +91,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "tensor([0.1020, 0.0113, 0.4806, 0.0571, 0.0482, 0.0079, 0.0450, 0.0076, 0.1851,\n", - " 0.0552])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from torch.nn.functional import softmax\n", "# do forward pass in PyTorch/Brevitas\n", @@ -505,22 +104,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEICAYAAABS0fM3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAAbi0lEQVR4nO3debxdZXn28d9FIDKFQRIVEiDMNk6IERAVZWpDq2AREV4nrEwtsSi+VlTUSp3qhFWxCgRBKfACgo0WZShKHYGAKIRBwhzGMAmiLxC4+sd6Dm6O++yzMqx1yFnX9/PZn6z5vvc+sO+9nmetZ8k2ERHRXSuNdQIRETG2UggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgoiZJlrR5mf66pI8s5XF+L2nT5ZvdwHiS9E1JD0i6pK24seJIIRiHJN0sadc+y18r6cnyRTT0+l7P+i0lnSHpXkm/k/QbSYdLmrCM+cyWNE/So5JOXMJ995H0c0l/kPTjUbbtfX8PS7pO0juXJfeR2D7E9r+Mtp2kH0s6YNi+a9q+sYm8RvAqYDdgmu1tl/VgkqaXorjysqcWzwQpBN1zR/kiGnq9HkDSZsDFwG3Ai2yvDbwJmAlMWtaYwCeAE5Zi3/uBLwGfqRvL9prAWsAHgOMkzRi+Uce+xDYGbrb9yJLu2LHPqbNSCGLIx4Gf2z7c9p0Atq+z/X9sPzh8Y0k7SbqyZ/58SZf2zP9E0hvKcc6y/V3gvj7HWVfS9yUtKk0X35c0bWi97Qtsn05VTGpz5bvAA8AMSftL+pmkoyXdB/yzpGdJ+rykWyXdXZp7VuvJ7f2S7pR0h6S/G5b3iZI+0TO/p6QrJD0k6QZJsyR9Eng18NVylvLVsm1vE9Pakr5V3v8tko6UtFJZt7+kn5YcH5B0k6Tde2LuL+nGcvZzk6S39Pl83wUcD7yi5PDxsvxASQsk3S9prqQNevaxpEMlXQ9cP9pnXT6Lr0n6QYnxM0nPk/Slkve1kl7as/0R5TN6WNLVkv62Z90ESV8oZ6U3lbPJp84+yuc1p/xdbpf0iWU9Y40UgviTXYEzl2D7XwJbSJosaRXgxcAGkiaVL9OZwE9qHGcl4JtUv1o3Av4IfHWJMu9D0krlC2YdYKhgbQfcCDwX+CTVWcaWwNbA5sBU4KNl/1nA/6VqUtmC6vMZKda2wLeA95d4O1L9Av8w1Wcwu5x9ze6z+1eAtYFNgdcAbwd6m7O2A64DJgOfBeaosgbwZWB325OAHYArhh/c9hzgEOAXJYePSdoZ+DSwD7A+cAtw2rBd31Bi/9nZ1Aj2AY4seT4K/AK4vMyfCXyxZ9sbqArk2lQ/QE6WtH5ZdyCwO9XfZJuSR68TgcVUf6+XAn8JHEAsG9t5jbMXcDOwa5/lrwWeBB7see1T1j0OzFrCOD8B9gK2B84DTgdmATsBv+mz/SeAE0c55tbAA32WHwD8eJR9e9/f/VRfjPuWdfsDt/ZsK+ARYLOeZa8AbirTJwCf6Vm3JWBg8zJ/IvCJMv0N4OgRcvoxcMCwZab6IpsAPAbM6Fl38ND7LDkv6Fm3etn3ecAa5X2+EVhtlM9lf+CnPfNzgM/2zK9Z/v7Te/LbecDxppdtVu75LI7rWf9u4Jqe+RcBDw443hXAnmX6QuDgnnW7DsWiKuCP9r5fYD/gR23/PzbeXmn/6547bE/rs/w+ql+HfUn6OvDWMvsp258CLqL68l1Yph+g+lX7aJkflaTVgaOpCsi6ZfEkSRNsP1HnGMOM9P6g6v8YMoXqi/UySU+lQ/XlDLABcFnP9rcMiLkhcM6Sp8pkYJVhx76F6sxkyF1DE7b/UHJd0/Zdkt5MddYyR9LPgPfZvrZG3A2ofq0PHff3pblsKtWPCHj6Z1XH3T3Tf+wzv+bQjKS3A4dTFRTKusk9ufXG7p3emOrzurPnb7bSUuQaw6RpKIZcQPXrsi9XV8kMdTB/qiweKgQ7lumLqArBa6hZCID3AVsB29leqxwLqi/l5a13qN17qb6gXmB7nfJa21VHM8CdVF/wQzYacNzbgM1qxBzuXqpf4hsPi3P7gH3+dGD7XNu7URXwa4Hj6uxH1d/yVMzSzLTesLiNDEssaWOqPGcD69leB7iKP/297wR6C3nv3+A2qh8Zk3v+ZmvZfkETuXZJCsH4tYqkVXteo539fQzYQdLnJD0PQNLmkk6WtM4I+/yc6kt8W+AS2/OpvmC2A/5naCNJK0talerX9oRh+Uyi+kJ+UNKzSx707Duh7LsysFLZd5X6H0N/tp+k+kI6WtJzSqypkv6qbHI6sL+kGeWs5WMjHAqqppZ3Stql9E1MlfT8su5uqvb/fjk8UeJ8svStbEz1S/nk0fKX9NzSQb0G1Zfj76maxeo4teS7taRnAZ8CLrZ9c839l8UaVEVmEYCqy3tf2LP+dOCw8hmuQ3XlFwCuLmI4D/iCpLXKZ72ZpNe0kPe4lkIwfp1D9QU79PrnQRvbvoGqjXw6MF/S74DvAPOAh0fY5xGqJob5th8ri38B3GL7np5Njyw5HEHVvPTHsgyqS0NXo/p1/Evgh8PCvK1s/+9UHYx/pP4v39F8AFgA/FLSQ1RnRVuV9/aDktuFZZsLRzqI7UuoOniPBn5HdTY09Iv734C9y9UzX+6z+7up+ipuBH4KnEK9y2xXoioad1D1h7wG+Psa+2H7AuAjVH/fO6nOZvats++ysn018AWq/07upuo/+
FnPJsdRfdn/BvgV1X/Hi4GhZsK3AxOBq6maIs9kQJNm1KPS4RIR8YxTLpf9uu2NR904llrOCCLiGUPSapL+ujQnTqVqkjt7rPMa73JGEBHPGKU/5iLg+VTNgP8FHGb7oTFNbJxLIYiI6LhGm4ZU3WZ/XbmV/Yg+6/dXdWv9FeWVOwQjIlrW2A1lZfyPY6hu0V8IXCppbrlqoNf/c/9b7/uaPHmyp0+fvvwSjYjogMsuu+xe21P6rWvyzuJtqW6PvxFA0mnAnlSXfS216dOnM2/evOWQXkREd0ga8e74JpuGpvL0W78X8vRb54e8UdW492dK2rDPeiQdpGo8+3mLFi1qIteIiM4a68tHv0c10NWLgfOBk/ptZPtY2zNtz5wype+ZTURELKUmC8HtPH2ckGkMG0PF9n22Hy2zxwMvazCfiIjoo8lCcCnVePWbSJpIdQv73N4NesYgB9gDuKbBfCIioo/GOottL5Y0GziXarCxE2zPl3QUMM/2XOAfJe1BNZbI/VTjpkdERItWuBvKZs6c6Vw1FBGxZCRdZntmv3Vj3VkcERFjLIUgIqLjUggiIjouzyzugKPP/23jMd6725aNx4iIZuSMICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLjUggiIjouhSAiouNSCCIiOq7RQiBplqTrJC2QdMSA7d4oyZJmNplPRET8ucYKgaQJwDHA7sAMYD9JM/psNwk4DLi4qVwiImJkTZ4RbAsssH2j7ceA04A9+2z3L8C/Av+/wVwiImIETRaCqcBtPfMLy7KnSNoG2ND2fw06kKSDJM2TNG/RokXLP9OIiA4bs85iSSsBXwTeN9q2to+1PdP2zClTpjSfXEREhzRZCG4HNuyZn1aWDZkEvBD4saSbge2BuekwjohoV5OF4FJgC0mbSJoI7AvMHVpp+3e2J9uebns68EtgD9vzGswpIiKGaawQ2F4MzAbOBa4BTrc9X9JRkvZoKm5ERCyZlZs8uO1zgHOGLfvoCNu+tslcIiKiv9xZHBHRcSkEEREdl0IQEdFxKQQRER2XQhAR0XEpBBERHZdCEBHRcSkEEREdN2ohkPRuSeu2kUxERLSvzhnBc4FLJZ1enjimppOKiIj2jFoIbB8JbAHMAfYHrpf0KUmbNZxbRES0oFYfgW0Dd5XXYmBd4ExJn20wt4iIaMGog85JOgx4O3AvcDzwftuPlwfLXA/8U7MpRkREk+qMPvpsYC/bt/QutP2kpNc1k1ZERLSlTtPQpsOLgKRvA9i+ppGsIiKiNXUKwQt6ZyRNAF7WTDoREdG2EQuBpA9Kehh4saSHyuth4B7gP1vLMCIiGjViIbD9aduTgM/ZXqu8Jtlez/YHW8wxIiIaNGJnsaTn274WOEPSNsPX27680cwiIqIVg64aeh9wIPCFPusM7NxIRhER0aoRC4HtA8u/O7WXTkREtG1Q09Beg3a0fdbyTyciIto2qGno9QPWGUghiIgYBwY1Db2zzUQiImJsDGoaeqvtkyUd3m+97S82l1ZERLRlUNPQGuXfSW0kEhERY2NQ09A3yr8fby+diIhoW51HVW4q6XuSFkm6R9J/Stq0jeQiIqJ5dQadOwU4HVgf2AA4Azi1yaQiIqI9dQrB6ra/bXtxeZ0MrNp0YhER0Y5BVw09u0z+QNIRwGlU9w+8GTinhdwiIqIFg64auozqi19l/uCedQYyAmlExDgw6KqhTdpMJCIixkadZxYj6YXADHr6Bmx/q6mkIiKiPXUuH/0Y8JXy2gn4LLBHnYNLmiXpOkkLSj/D8PWHSLpS0hWSfippxhLmHxERy6jOVUN7A7sAd5Xxh14CrD3aTuXZxscAu1OdTezX54v+FNsvsr01VYHJsBURES2rUwj+aPtJYLGktaieWbxhjf22BRbYvtH2Y1RXHe3Zu4Hth3pm16DqhI6IiBbV6SOYJ2kd4DiqK4l+D/yixn5Tgdt65hcC2w3fSNKhwOHARPLUs4iI1o16RmD7H2w/aPvrwG7AO5bnENW2j7G9GfAB4Mh+20g6SNI8SfMWLVq0vEJHRAT1moaQtJekLwLvBjareezbeXoT0rSybCSnAW/ot8L2sbZn2p45ZcqUmuEjIqKOOlcNfQ04BLgSuAo4WNIxNY59KbCFpE0kTQT2BeYOO/YWPbN/A1xfN/GIiFg+6vQR7Az8hW0DSDoJmD/aTrYXS5oNnAtMAE6wPV/SUcA823OB2ZJ2BR4HHgDesZTvIyIillKdQrAA2Ai4pcxvWJaNyvY5DBuXyPZHe6YPq5dmREQ0ZdCgc9+jupxzEnCNpEvKqm2BS0baLyIiViyDzgg+31oWERExZgYNOnfR0LSk5wIvL7OX2L6n6cQiIqIdda4a2oeqKehNwD7AxZL2bjqxiIhoR53O4g8DLx86C5A0BbgAOLPJxCIioh11bihbaVhT0H0194uIiBVAnTOCH0o6lz89sD6PqoyIGEcGFgJJAr5M1VH8qrL4WNtnN51YRES0Y2AhsG1J59h+EXBWSzlFRESL6rT1Xy7p5aNvFhERK6I6fQTbAW+VdDPwCCCqk4UXN5lYRES0o04h+KvGs4iIiDEzaKyh5wAfAjanGoL608MeLRkREePAoD6Cb1E1BX0FWJPq6qGIiBhnBjUNrW/7w2X6XEmXt5FQRES0a7T7CNal6hwGmNA7b/v+hnOLiIgWDCoEawOX8adCADB0VmBg06aSioiI9gwahnp6i3lERMQYyeBxEREdl0IQEdFxKQQRER036IayZw/aMVcNRUSMD4OuGrqM6uogARsBD5TpdYBbgU2aTi4iIpo3YtOQ7U1sb0r1WMrX255sez3gdcB5bSUYERHNqtNHsL3tp55IZvsHwA7NpRQREW2qM/roHZKOBE4u828B7mgupYiIaFOdM4L9gCnA2VRPKZtSlkVExDgw6hlBuTroMElr2H6khZwiIqJFo54RSNpB0tXANWX+JZK+1nhmERHRijpNQ0dTPaXsPgDbvwZ2bDKpiIhoT607i23fNmzREw3kEhERY6DOVUO3SdoBsKRVgMMozUQREbHiq3NGcAhwKDAVuB3YGviHBnOKiIgW1Tkj2Mr2W3oXSHol8LNmUoqIiDbVOSP4Ss1lf0bSLEnXSVog6Yg+6w+XdLWk30j6b0kb1zluREQsP4NGH30F1VASUyQd3rNqLWDCaAeWNAE4BtgNWAhcKmmu7at7NvsVMNP2HyT9PfBZ4M1L/jYiImJpDTojmAisSVUsJvW8HgL2rnHsbYEFtm+0/RhwGrBn7wa2f2T7D2X2l8C0JUs/IiKW1aBnFl8EXCTpRNu3LMWxpwK9l50uBLYbsP27gB8sRZyIiFgGdfoIjpe0ztCMpHUlnbs8k5D0VmAm8LkR1h8kaZ6keYsW
LVqeoSMiOq9OIZhs+8GhGdsPAM+psd/twIY989PKsqeRtCvwYWAP24/2O5DtY23PtD1zypQpNUJHRERddQrBk5I2GpopV/a4xn6XAltI2kTSRGBfYG7vBpJeCnyDqgjcUz/tiIhYXurcR/Bh4KeSLqJ6VOWrgYNG28n2YkmzgXOprjI6wfZ8SUcB82zPpWoKWhM4QxLArbb3WLq3EhERS6POMNQ/lLQNsH1Z9B7b99Y5eHmy2TnDln20Z3rXJcg1IiIaMGLTkKTnl3+3oXp4/R3ltVFZFhER48CgM4L3AQcCX+izzsDOjWQUEbGUjj7/t43HeO9uWzYeo22D7iM4sPy7U3vpRERE2wYNMbHXoB1tn7X804mIiLYNahp6ffn3OVRjDl1Y5ncCfk71IPuIiFjBDWoaeieApPOAGbbvLPPrAye2kl1ERDSuzg1lGw4VgeJuqquIIiJiHKhzQ9l/l7GFTi3zbwYuaC6liIhoU50bymZL+ltgx7LoWNtnN5tWRES0pc4ZAcDlwMO2L5C0uqRJth9uMrGIiGjHqH0Ekg4EzqQaHA6q5wx8t8GcIiKiRXU6iw8FXkn1ZDJsX0+9YagjImIFUKcQPFoeNQmApJWpNwx1RESsAOoUgoskfQhYTdJuwBnA95pNKyIi2lKnEHwAWARcCRxMNaz0kU0mFRER7Rl41ZCkCcB8288HjmsnpYiIaNPAMwLbTwDX9T6qMiIixpc69xGsC8yXdAnwyNDCPFIyImJ8qFMIPtJ4FhERMWYGPY9gVeAQYHOqjuI5the3lVhERLRjUB/BScBMqiKwO/0fWRkRESu4QU1DM2y/CEDSHOCSdlKKiIg2DTojeHxoIk1CERHj16AzgpdIeqhMi+rO4ofKtG2v1Xh2ERHRuEGPqpzQZiIRETE26gwxERER41gKQUREx6UQRER0XApBRETHpRBERHRcCkFERMelEEREdFwKQUREx6UQRER0XApBRETHNVoIJM2SdJ2kBZKO6LN+R0mXS1osae8mc4mIiP4aKwTlwffHUD3LYAawn6QZwza7FdgfOKWpPCIiYrA6j6pcWtsCC2zfCCDpNGBP4OqhDWzfXNY92WAeTzn6/N82HuO9u23ZeIyIiOWpyaahqcBtPfMLy7IlJukgSfMkzVu0aNFySS4iIiorRGex7WNtz7Q9c8qUKWOdTkTEuNJkIbgd2LBnflpZFhERzyBNFoJLgS0kbSJpIrAvMLfBeBERsRQaKwTlOcezgXOBa4DTbc+XdJSkPQAkvVzSQuBNwDckzW8qn4iI6K/Jq4awfQ5wzrBlH+2ZvpSqySgiIsbICtFZHBERzUkhiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LhGB52LyONBI575UggiIpaDFflHT5qGIiI6LoUgIqLj0jQU41rTp+vpn4jxIIUgYhxKAYwlkaahiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4FIKIiI5LIYiI6LgUgoiIjkshiIjouBSCiIiOSyGIiOi4DEPdkhX5MXYRMb6lEEQ0JM8EiBVFo4VA0izg34AJwPG2PzNs/bOAbwEvA+4D3mz75iZziohm5ex3xdNYH4GkCcAxwO7ADGA/STOGbfYu4AHbmwNHA//aVD4REdFfk53F2wILbN9o+zHgNGDPYdvsCZxUps8EdpGkBnOKiIhhZLuZA0t7A7NsH1Dm3wZsZ3t2zzZXlW0Wlvkbyjb3DjvWQcBBZXYr4LpGku5vMnDvqFsldmIndmI/s2NvbHtKvxUrRGex7WOBY8citqR5tmcmdmIndmKPl9jDNdk0dDuwYc/8tLKs7zaSVgbWpuo0joiIljRZCC4FtpC0iaSJwL7A3GHbzAXeUab3Bi50U21VERHRV2NNQ7YXS5oNnEt1+egJtudLOgqYZ3suMAf4tqQFwP1UxeKZZkyapBI7sRM7sdvSWGdxRESsGDLWUEREx6UQRER0XArBCCTNknSdpAWSjmg59gmS7in3WbQZd0NJP5J0taT5kg5rMfaqki6R9OsS++Ntxe7JYYKkX0n6/hjEvlnSlZKukDSv5djrSDpT0rWSrpH0ipbiblXe79DrIUnvaSN2if/e8t/aVZJOlbRqi7EPK3Hnt/meR2Q7r2Evqs7tG4BNgYnAr4EZLcbfEdgGuKrl970+sE2ZngT8tq33DQhYs0yvAlwMbN/y+z8cOAX4fptxS+ybgcltxy2xTwIOKNMTgXXGIIcJwF1UNz21EW8qcBOwWpk/Hdi/pdgvBK4CVqe6YOcCYPOx+NsPvXJG0F+d4TEaY/t/qK6iapXtO21fXqYfBq6h+h+mjdi2/fsyu0p5tXYlg6RpwN8Ax7cV85lA0tpUPzzmANh+zPaDY5DKLsANtm9pMebKwGrlHqbVgTtaivsXwMW2/2B7MXARsFdLsftKIehvKnBbz/xCWvpCfKaQNB14KdUv87ZiTpB0BXAPcL7t1mIDXwL+CXiyxZi9DJwn6bIypEpbNgEWAd8szWLHS1qjxfhD9gVObSuY7duBzwO3AncCv7N9XkvhrwJeLWk9SasDf83Tb75tXQpB/BlJawLfAd5j+6G24tp+wvbWVHehbyvphW3ElfQ64B7bl7URbwSvsr0N1Wi9h0rasaW4K1M1Q/677ZcCjwBt94lNBPYAzmgx5rpUZ/mbABsAa0h6axuxbV9DNdLyecAPgSuAJ9qIPZIUgv7qDI8xLklahaoI/Ifts8Yih9I08SNgVkshXwnsIelmqmbAnSWd3FJs4KlfqNi+BzibqnmyDQuBhT1nX2dSFYY27Q5cbvvuFmPuCtxke5Htx4GzgB3aCm57ju2X2d4ReICqP27MpBD0V2d4jHGnDAE+B7jG9hdbjj1F0jplejVgN+DaNmLb/qDtabanU/2tL7Tdyq9DAElrSJo0NA38JVXzQeNs3wXcJmmrsmgX4Oo2YvfYjxabhYpbge0lrV7+u9+Fqk+sFZKeU/7diKp/4JS2YvezQow+2jaPMDxGW/ElnQq8FpgsaSHwMdtzWgj9SuBtwJWlrR7gQ7bPaSH2+sBJ5YFGKwGn2279Ms4x8lzg7PIojpWBU2z/sMX47wb+o/zouRF4Z1uBS+HbDTi4rZgAti+WdCZwObAY+BXtDvnwHUnrAY8Dh45RB/1TMsRERETHpWkoIqLjUggiIjouhSAiouNSCCIiOi6FICKi41IIIiI6LoUgIqLj/hdRB2LXFx7MKAAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import numpy as np\n", "objects = [str(x) for x in range(10)]\n", @@ -549,7 +135,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -568,38 +154,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving '/tmp/LFCW1A1_finn-onnx.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "showInNetron(export_onnx_path)" ] @@ -622,34 +179,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/hendrik/Dropbox/a_local/Uni/fpga_synth_system_data/finn_deving/finn/deps/qonnx/src/qonnx/core/modelwrapper.py:93: UserWarning: Some old-style domain attributes were automatically converted to new-style,\n", - " i.e. domain=finn to domain=qonnx.custom_op.\n", - " warnings.warn(\n" - ] - }, - { - "data": { - "text/plain": [ - "input: \"37\"\n", - "input: \"38\"\n", - "output: \"39\"\n", - "name: \"MatMul_13\"\n", - "op_type: \"MatMul\"\n", - "domain: \"\"" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", "model = ModelWrapper(export_onnx_path)\n", @@ -665,26 +197,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[-1., -1., 1., ..., -1., 1., -1.],\n", - " [ 1., 1., -1., ..., 1., -1., 1.],\n", - " [-1., -1., -1., ..., 1., -1., 1.],\n", - " ...,\n", - " [ 1., -1., -1., ..., -1., -1., 1.],\n", - " [ 1., -1., -1., ..., 1., 1., 1.],\n", - " [ 1., -1., 1., ..., 1., -1., 1.]], dtype=float32)" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model.get_initializer(model.graph.node[8].input[1])" ] @@ -698,40 +213,18 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'BIPOLAR'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model.get_tensor_datatype(model.graph.node[8].input[1]).name" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[784, 1024]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model.get_tensor_shape(model.graph.node[8].input[1])" ] @@ -745,7 +238,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -759,39 +252,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:8081\n", - "Serving '/tmp/LFCW1A1-finn-onnx-clean.onnx' at http://0.0.0.0:8081\n" - ] - }, - { - "data": { - "text/html": [ 
- "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "showInNetron(export_onnx_path_transformed)" ] @@ -805,22 +268,9 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[-1.3736125, -3.5715756, 0.1768887, -1.9529207, -2.1233053,\n", - " -3.9293835, -2.1914592, -3.9634604, -0.7772659, -1.9869976]],\n", - " dtype=float32)" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import finn.core.onnx_exec as oxe\n", "input_dict = {\"0\": nph.to_array(input_tensor)}\n", @@ -832,20 +282,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "np.isclose(produced, produced_finn).all()" ] From 14801b8e98094b59174d4a206398ed8033076065 Mon Sep 17 00:00:00 2001 From: Hendrik Borras Date: Thu, 8 Sep 2022 12:27:19 +0200 Subject: [PATCH 149/628] Initial QONNX ingestion notebook --- ...1b_brevitas_network_import_via_QONNX.ipynb | 328 ++++++++++++++++++ 1 file changed, 328 insertions(+) create mode 100644 notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb diff --git a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb new file mode 100644 index 0000000000..2d8447ad3a --- /dev/null +++ b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb @@ -0,0 +1,328 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Importing Brevitas networks into FINN with the QONNX interchange format\n", + "\n", + "**Note: This notebook is very similar to the 1a notebook, in that it shows the same concepts for the QONNX ingestion as 1a does for FINN-ONNX.**\n", + "\n", + "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", + "\n", + "1. Load up the trained PyTorch model\n", + "2. Call Brevitas QONNX export and visualize with Netron\n", + "3. Import into FINN and converting QONNX to FINN-ONNX\n", + "\n", + "We'll use the following utility functions to print the source code for function calls (`showSrc()`) and to visualize a network using netron (`showInNetron()`) in the Jupyter notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import onnx\n", + "from finn.util.visualization import showSrc, showInNetron" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Load up the trained PyTorch model\n", + "\n", + "The FINN Docker image comes with several [example Brevitas networks](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq), and we'll use the LFC-w1a1 model as the example network here. This is a binarized fully connected network trained on the MNIST dataset. 
Let's start by looking at what the PyTorch network definition looks like:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from brevitas_examples import bnn_pynq\n", + "showSrc(bnn_pynq.models.FC)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that the network topology is constructed using a few helper functions that generate the quantized linear layers and quantized activations. The bitwidth of the layers is actually parametrized in the constructor, so let's instantiate a 1-bit weights and activations version of this network. We also have pretrained weights for this network, which we will load into the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.util.test import get_test_model\n", + "lfc = get_test_model(netname = \"LFC\", wbits = 1, abits = 1, pretrained = True)\n", + "lfc" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have now instantiated our trained PyTorch network. Let's try to run an example MNIST image through the network using PyTorch." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from pkgutil import get_data\n", + "import onnx\n", + "import onnx.numpy_helper as nph\n", + "raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n", + "input_tensor = onnx.load_tensor_from_string(raw_i)\n", + "input_tensor_npy = nph.to_array(input_tensor)\n", + "input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n", + "imgplot = plt.imshow(input_tensor_npy.reshape(28,28), cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from torch.nn.functional import softmax\n", + "# do forward pass in PyTorch/Brevitas\n", + "produced = lfc.forward(input_tensor_pyt).detach()\n", + "probabilities = softmax(produced, dim=-1).flatten()\n", + "probabilities" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "objects = [str(x) for x in range(10)]\n", + "y_pos = np.arange(len(objects))\n", + "plt.bar(y_pos, probabilities, align='center', alpha=0.5)\n", + "plt.xticks(y_pos, objects)\n", + "plt.ylabel('Predicted Probability')\n", + "plt.title('LFC-w1a1 Predictions for Image')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Call Brevitas QONNX export and visualize with Netron\n", + "\n", + "Brevitas comes with built-in QONNX export functionality. This is similar to the regular ONNX export capabilities of PyTorch, with a few differences:\n", + "\n", + "1. Weight and activation quantization is represented as a 'fake-quantization' with Quant and BipolarQuant nodes.\n", + "2. Truncation operations as required by average pooling are represented with a Trunc node.\n", + "\n", + "One can read more about how QONNX works and why it was developed here: https://xilinx.github.io/finn//2021/11/03/qonnx-and-finn.html\n", + "\n", + "Additionally QONNX comes with a set of tools for working with the format. 
These are maintained together with the Fast Machine Learning collaboration as an open-source project here: https://github.com/fastmachinelearning/qonnx\n",
+    "\n",
+    "It's actually quite straightforward to export QONNX from our Brevitas model as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from brevitas.export.onnx.generic.manager import BrevitasONNXManager\n",
+    "export_onnx_path = \"/tmp/LFCW1A1_qonnx.onnx\"\n",
+    "input_shape = (1, 1, 28, 28)\n",
+    "BrevitasONNXManager.export(lfc, input_shape, export_onnx_path);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's examine what the exported ONNX model looks like. For this, we will use the Netron visualizer:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "showInNetron(export_onnx_path)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When running this notebook in the FINN Docker container, you should be able to see an interactive visualization of the imported network above, and click on individual nodes to inspect their parameters. If you look at any of the MatMul nodes, you should be able to see that the weights are all {-1, +1} values."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 3. Import into FINN and converting QONNX to FINN-ONNX\n",
+    "\n",
+    "Similarly to the 1a notebook we will first run a cleanup transformation on the exported QONNX model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from qonnx.util.cleanup import cleanup\n",
+    "\n",
+    "export_onnx_path_cleaned = \"/tmp/LFCW1A1-qonnx-clean.onnx\"\n",
+    "cleanup(export_onnx_path, out_file=export_onnx_path_cleaned)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "showInNetron(export_onnx_path_cleaned)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will now import this QONNX model into FINN using the ModelWrapper. Here we can immediately execute the model to verify correctness."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from qonnx.core.modelwrapper import ModelWrapper\n",
+    "import qonnx.core.onnx_exec as oxe\n",
+    "model = ModelWrapper(export_onnx_path_cleaned)\n",
+    "input_dict = {\"0\": nph.to_array(input_tensor)}\n",
+    "output_dict = oxe.execute_onnx(model, input_dict)\n",
+    "produced_qonnx = output_dict[list(output_dict.keys())[0]]\n",
+    "\n",
+    "produced_qonnx"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "np.isclose(produced, produced_qonnx).all()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Using the `QONNXtoFINN` transformation we can convert the model to the FINN internal FINN-ONNX representation. Notably all Quant and BipolarQuant nodes will have disappeared and are converted into MultiThreshold nodes."
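The effect described above can be checked quickly by counting node op_types before and after the conversion. The snippet below is only an illustrative sketch reusing the ModelWrapper and ConvertQONNXtoFINN imports that appear in this notebook; it is not one of the notebook's cells.

    from collections import Counter
    from qonnx.core.modelwrapper import ModelWrapper
    from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN

    model = ModelWrapper("/tmp/LFCW1A1-qonnx-clean.onnx")
    print("before:", Counter(n.op_type for n in model.graph.node))
    model = model.transform(ConvertQONNXtoFINN())
    print("after: ", Counter(n.op_type for n in model.graph.node))
    # Quant/BipolarQuant entries should be gone; MultiThreshold nodes appear instead.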
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN\n", + "model = ModelWrapper(export_onnx_path_cleaned)\n", + "\n", + "model = model.transform(ConvertQONNXtoFINN())\n", + "\n", + "export_onnx_path_converted = \"/tmp/LFCW1A1-qonnx-converted.onnx\"\n", + "model.save(export_onnx_path_converted)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "showInNetron(export_onnx_path_converted)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And once again we can execute the model with the FINN/QONNX execution engine." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = ModelWrapper(export_onnx_path_cleaned)\n", + "input_dict = {\"0\": nph.to_array(input_tensor)}\n", + "output_dict = oxe.execute_onnx(model, input_dict)\n", + "produced_finn = output_dict[list(output_dict.keys())[0]]\n", + "\n", + "produced_finn" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.isclose(produced, produced_finn).all()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have succesfully verified that the transformed and cleaned-up FINN graph still produces the same output, and can now use this model for further processing in FINN." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From ae2f76c38a7922c655f48bf0a97567d12073d919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Thu, 8 Sep 2022 13:37:39 +0100 Subject: [PATCH 150/628] Maintain static OOB count internally requiring explicit acknowledging reset. --- src/finn/custom_op/fpgadataflow/lookup.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index 31344c9f1b..98521a9264 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -259,8 +259,20 @@ def docompute(self): ] elif mem_mode == "external": self.code_gen_dict["$DOCOMPUTE$"] = [ - """StreamingLookup_ext(in0, out, mem, size, oob_count); - oob_irq = oob_count != 0;""" + """ + static unsigned oob_count_li; + static unsigned oob_count_int; +#pragma HLS reset variable=oob_count_li +#pragma HLS reset variable=oob_count_int + + if(oob_count != oob_count_li) { + oob_count_int -= oob_count_li; + oob_count_li = oob_count; + } + StreamingLookup_ext(in0, out, mem, size, oob_count_int); + oob_count = oob_count_int; + oob_irq = (oob_count_int != 0); + """ ] def blackboxfunction(self): From 8ac23b16582d9b01189b7445b26422f81f22dbdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 9 Sep 2022 08:42:02 +0100 Subject: [PATCH 151/628] Match requested II with achievable one. 
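A note on the out-of-bounds (OOB) counter added to the lookup layer in the surrounding patches: the kernel keeps oob_irq asserted while its internal count is nonzero, and, per the commit message, the count is only reset after an explicit acknowledge from the host. The sketch below is one plausible host-side view of that handshake; read_reg, write_reg and OOB_COUNT_ADDR are placeholders rather than part of these patches, and the exact register semantics depend on the generated AXI-Lite interface.

    def acknowledge_oob(read_reg, write_reg, OOB_COUNT_ADDR):
        # read how many out-of-bounds indices the kernel has accumulated so far
        count = read_reg(OOB_COUNT_ADDR)
        if count != 0:
            # write the observed value back as an explicit acknowledge, so the
            # kernel can clear that amount from its internal counter and
            # eventually deassert oob_irq (interpretation of the commit message)
            write_reg(OOB_COUNT_ADDR, count)
        return count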
--- custom_hls/lookup.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/custom_hls/lookup.hpp b/custom_hls/lookup.hpp index dac586c38c..79b350bb70 100644 --- a/custom_hls/lookup.hpp +++ b/custom_hls/lookup.hpp @@ -74,7 +74,7 @@ void StreamingLookup_ext( unsigned const size, unsigned &oob_count ) { -#pragma HLS pipeline II=EmbeddingSize+8 style=flp +#pragma HLS pipeline II=EmbeddingSize+9 style=flp if(!in0.empty()) { T_SRC const x = in0.read(); From 8408b45ae88cfe85bf6f58fa0f60d4d221228f81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 9 Sep 2022 11:25:24 +0100 Subject: [PATCH 152/628] Rebalanced lookup implementation code to go mostly into header file. --- custom_hls/lookup.hpp | 17 +++++++++++++++-- src/finn/custom_op/fpgadataflow/lookup.py | 15 +-------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/custom_hls/lookup.hpp b/custom_hls/lookup.hpp index 79b350bb70..037b038a09 100644 --- a/custom_hls/lookup.hpp +++ b/custom_hls/lookup.hpp @@ -72,9 +72,20 @@ void StreamingLookup_ext( hls::stream &out, T_DST const *const mem, unsigned const size, - unsigned &oob_count + unsigned &oob_count, + bool &oob_irq ) { #pragma HLS pipeline II=EmbeddingSize+9 style=flp + + static unsigned oob_count_li; + static unsigned oob_count_int; +#pragma HLS reset variable=oob_count_li +#pragma HLS reset variable=oob_count_int + + if(oob_count != oob_count_li) { + oob_count_int -= oob_count_li; + oob_count_li = oob_count; + } if(!in0.empty()) { T_SRC const x = in0.read(); @@ -82,7 +93,7 @@ void StreamingLookup_ext( bool const oob = x >= T_SRC(size); ap_uint const ofs = ((oob? T_SRC(0) : x), ap_uint(0)); - oob_count += oob; + oob_count_int += oob; // Stream lookup data (burst inferred) for(unsigned i = 0; i < EmbeddingSize; i++) { @@ -90,5 +101,7 @@ void StreamingLookup_ext( out.write(mem[ofs+i]); } } + oob_count = oob_count_int; + oob_irq = (oob_count_int != 0); } #endif diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index 98521a9264..e87072f809 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -259,20 +259,7 @@ def docompute(self): ] elif mem_mode == "external": self.code_gen_dict["$DOCOMPUTE$"] = [ - """ - static unsigned oob_count_li; - static unsigned oob_count_int; -#pragma HLS reset variable=oob_count_li -#pragma HLS reset variable=oob_count_int - - if(oob_count != oob_count_li) { - oob_count_int -= oob_count_li; - oob_count_li = oob_count; - } - StreamingLookup_ext(in0, out, mem, size, oob_count_int); - oob_count = oob_count_int; - oob_irq = (oob_count_int != 0); - """ + "StreamingLookup_ext(in0, out, mem, size, oob_count, oob_irq);" ] def blackboxfunction(self): From 91e3df89afeb2cbebdd8fe50647b1bacf0d76856 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 9 Sep 2022 12:13:30 +0100 Subject: [PATCH 153/628] [pre-commit] Run pre-commit on lookup layer file --- src/finn/custom_op/fpgadataflow/lookup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index e87072f809..613a91b628 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -259,7 +259,8 @@ def docompute(self): ] elif mem_mode == "external": self.code_gen_dict["$DOCOMPUTE$"] = [ - "StreamingLookup_ext(in0, out, mem, size, oob_count, oob_irq);" + """StreamingLookup_ext(in0, out, mem, size, oob_count, + 
oob_irq);""" ] def blackboxfunction(self): From 57f9fecdcd8835ef57b845f96e507536a04d2c1e Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 9 Sep 2022 21:05:03 +0200 Subject: [PATCH 154/628] Move parallel impl. to different branch, cleanup --- finn-rtllib/swg/swg_template_default.sv | 229 +++--- finn-rtllib/swg/swg_template_parallel.sv | 409 ---------- finn-rtllib/swg/swg_template_wrapper.v | 10 +- .../convolutioninputgenerator_rtl.py | 746 ++++-------------- .../fpgadataflow/set_folding.py | 2 + .../test_convert_to_hls_conv_layer.py | 3 + ...est_fpgadataflow_convinputgenerator_rtl.py | 57 +- 7 files changed, 298 insertions(+), 1158 deletions(-) delete mode 100755 finn-rtllib/swg/swg_template_parallel.sv diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index fc4c96d1c3..2d255a35ed 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -6,7 +6,9 @@ module $TOP_MODULE_NAME$_controller #( int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, - bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = $ADDR_INCREMENT_MAP$ + bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = $ADDR_INCREMENT_MAP$, + + bit IS_DEPTHWISE = $IS_DEPTHWISE$ )( input logic clk, input logic rst_n, @@ -16,7 +18,7 @@ module $TOP_MODULE_NAME$_controller #( output logic [INCR_BITWIDTH-1:0] tail_incr ); - //State and counters + // state and counters typedef enum logic [2:0] { STATE_START, STATE_LOOP_SIMD, @@ -28,66 +30,83 @@ module $TOP_MODULE_NAME$_controller #( state_e State = $INNERMOST_STATE$; state_e state_next; - logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] counter_loop_h = LOOP_H_ITERATIONS-1; - logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] counter_loop_w = LOOP_W_ITERATIONS-1; - logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] counter_loop_kh = LOOP_KH_ITERATIONS-1; - logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] counter_loop_kw = LOOP_KW_ITERATIONS-1; - logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] counter_loop_simd = LOOP_SIMD_ITERATIONS-1; + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS-1; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS-1; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS-1; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS-1; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS-1; logic [INCR_BITWIDTH-1:0] tail_incr_reg = 'x; assign addr_incr = ADDR_INCREMENT_MAP[State]; assign tail_incr = tail_incr_reg; - //combinational logic for tail_incr generation - $TAIL_INCR_GENERATION$ + // combinational logic for tail_incr generation + uwire tail_incr_inner_condition; + generate + if (IS_DEPTHWISE) + assign tail_incr_inner_condition = (Counter_loop_kh >= 0); + else + assign tail_incr_inner_condition = 0; + endgenerate + + always @ (tail_incr_inner_condition, Counter_loop_w, Counter_loop_h) begin + if (tail_incr_inner_condition) + tail_incr_reg = 1; + else if (Counter_loop_w >= 0) + tail_incr_reg = $TAIL_INCR_W$; + else if (Counter_loop_h >= 0) + tail_incr_reg = $TAIL_INCR_H$; + else + tail_incr_reg = $TAIL_INCR_LAST$; + end - //combinational next state logic + // combinational next state logic always_comb begin : blkState state_next = State; if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; else begin - if(counter_loop_simd < 0) begin + 
if(Counter_loop_simd < 0) begin state_next = - (counter_loop_kw >= 0)? STATE_LOOP_KW : - (counter_loop_kh >= 0)? STATE_LOOP_KH : - (counter_loop_w >= 0)? STATE_LOOP_W : - (counter_loop_h >= 0)? STATE_LOOP_H : + (Counter_loop_kw >= 0)? STATE_LOOP_KW : + (Counter_loop_kh >= 0)? STATE_LOOP_KH : + (Counter_loop_w >= 0)? STATE_LOOP_W : + (Counter_loop_h >= 0)? STATE_LOOP_H : /* else */ STATE_START; end end end : blkState - //sequential logic + // sequential logic always_ff @ (posedge clk) begin if(!rst_n) begin State <= $INNERMOST_STATE$; - counter_loop_h <= LOOP_H_ITERATIONS-1; - counter_loop_w <= LOOP_W_ITERATIONS-1; - counter_loop_kh <= LOOP_KH_ITERATIONS-1; - counter_loop_kw <= LOOP_KW_ITERATIONS-1; - counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + Counter_loop_h <= LOOP_H_ITERATIONS-1; + Counter_loop_w <= LOOP_W_ITERATIONS-1; + Counter_loop_kh <= LOOP_KH_ITERATIONS-1; + Counter_loop_kw <= LOOP_KW_ITERATIONS-1; + Counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; end else if(advance) begin State <= state_next; if (State == $INNERMOST_STATE$) begin - if(counter_loop_simd >= 0) counter_loop_simd <= counter_loop_simd-1; + if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; else begin - counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; - if(counter_loop_kw >= 0) counter_loop_kw <= counter_loop_kw-1; + Counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; else begin - counter_loop_kw <= LOOP_KW_ITERATIONS-1; - if(counter_loop_kh >= 0) counter_loop_kh <= counter_loop_kh-1; + Counter_loop_kw <= LOOP_KW_ITERATIONS-1; + if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; else begin - counter_loop_kh <= LOOP_KH_ITERATIONS-1; - if(counter_loop_w >= 0) counter_loop_w <= counter_loop_w-1; + Counter_loop_kh <= LOOP_KH_ITERATIONS-1; + if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; else begin - counter_loop_w <= LOOP_W_ITERATIONS-1; - if(counter_loop_h >= 0) counter_loop_h <= counter_loop_h-1; - else counter_loop_h <= LOOP_H_ITERATIONS-1; - end - end + Counter_loop_w <= LOOP_W_ITERATIONS-1; + if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; + else Counter_loop_h <= LOOP_H_ITERATIONS-1; + end + end end - end + end end end end @@ -112,7 +131,7 @@ module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; logic [WIDTH-1:0] Out = 'x; - always_ff @(posedge clk) begin + always_ff @(posedge clk) begin if (!rst_n) begin Out <= 'x; end @@ -126,10 +145,10 @@ module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( endmodule : $TOP_MODULE_NAME$_cyclic_buffer_addressable module $TOP_MODULE_NAME$_impl #( - int BIT_WIDTH = $BIT_WIDTH$, - int SIMD = $SIMD$, - int MMV_IN = $MMV_IN$, - int MMV_OUT = $MMV_OUT$, + int BIT_WIDTH, + int SIMD, + int MMV_IN, + int MMV_OUT, int LAST_READ_ELEM = $LAST_READ_ELEM$, int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, @@ -147,12 +166,12 @@ module $TOP_MODULE_NAME$_impl #( input logic out_V_V_TREADY, output logic [BIT_WIDTH * SIMD * MMV_OUT-1:0] out_V_V_TDATA ); - // Derived Constants + // derived Constants localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; - //main buffer instantiation + // main buffer instantiation uwire [BUF_IN_WIDTH -1:0] window_buffer_in; uwire [BUF_OUT_WIDTH-1:0] window_buffer_out; uwire window_buffer_write_enable; @@ -188,38 +207,38 @@ module 
$TOP_MODULE_NAME$_impl #( ); // Counters/address registers - // Add a sign bit even to (most) unsigned counters and window_buffer_read_addr_reg, + // Add a sign bit even to (most) unsigned counters and Window_buffer_read_addr_reg, // so we can use automatic sign extension and simplify calculations w/ signed increment. // Alternatively, we could manually sign-extend and shave off a bit here or there. - logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] newest_buffered_elem = -1; - logic [$clog2(LAST_READ_ELEM+1)+1-1:0] current_elem = 0; - logic [$clog2(LAST_READ_ELEM+1)+1-1:0] first_elem_next_window = 0; - logic [$clog2(ELEM_PER_WINDOW) -1:0] k = 0; - logic [$clog2(BUF_ELEM_TOTAL)+1 -1:0] window_buffer_read_addr_reg = 0; - logic [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr_reg = 0; + logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] Newest_buffered_elem = -1; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] Current_elem = 0; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] First_elem_next_window = 0; + logic [$clog2(ELEM_PER_WINDOW) -1:0] K = 0; + logic [$clog2(BUF_ELEM_TOTAL)+1 -1:0] Window_buffer_read_addr_reg = 0; + logic [$clog2(BUF_ELEM_TOTAL)-1:0] Window_buffer_write_addr_reg = 0; // Control signals/registers uwire read_cmd = !reading_done && ( // if there is still an input element left to read - fetching_done || ( // if fetching is done (e.g. for skipped rows at FM end due to stride) - $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(first_elem_next_window) && - $signed(((newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(current_elem) - ) // (over-)write to buffer if oldest buffered element will no longer be needed - ); + Fetching_done || ( // if fetching is done (e.g. for skipped rows at FM end due to stride) + $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(First_elem_next_window) && + $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(Current_elem) + ) // (over-)write to buffer if oldest buffered element will no longer be needed + ); uwire read_ok = read_cmd && in0_V_V_TVALID; - uwire reading_done = newest_buffered_elem == LAST_READ_ELEM; + uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; - uwire fetch_cmd = !($signed(current_elem) > newest_buffered_elem) && !write_blocked && !fetching_done; - logic fetching_done = 0; + uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done; + logic Fetching_done = 0; - logic write_cmd = 0; - logic writing_done = 0; - uwire write_ok = write_cmd && out_V_V_TREADY; - uwire write_blocked = write_cmd && !out_V_V_TREADY;; + logic Write_cmd = 0; + logic Writing_done = 0; + uwire write_ok = Write_cmd && out_V_V_TREADY; + uwire write_blocked = Write_cmd && !out_V_V_TREADY;; //assign buffer control - assign window_buffer_write_addr = window_buffer_write_addr_reg; - assign window_buffer_read_addr = window_buffer_read_addr_reg; + assign window_buffer_write_addr = Window_buffer_write_addr_reg; + assign window_buffer_read_addr = Window_buffer_read_addr_reg; assign window_buffer_write_enable = read_ok; assign window_buffer_read_enable = fetch_cmd; assign advance_controller = fetch_cmd; @@ -228,87 +247,87 @@ module $TOP_MODULE_NAME$_impl #( assign window_buffer_in = in0_V_V_TDATA; assign out_V_V_TDATA = window_buffer_out; assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) - assign out_V_V_TVALID = ap_rst_n && write_cmd; //only asserted if we have data available and it has not been read yet (don't wait 
for READY from sink) + assign out_V_V_TVALID = ap_rst_n && Write_cmd; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) //main process for advancing counters always_ff @(posedge ap_clk) begin if(!ap_rst_n) begin - newest_buffered_elem <= -1; - current_elem <= 0; - first_elem_next_window <= 0; - k <= 0; - window_buffer_read_addr_reg <= 0; - window_buffer_write_addr_reg <= 0; - fetching_done <= 0; - write_cmd <= 0; - writing_done <= 0; + Newest_buffered_elem <= -1; + Current_elem <= 0; + First_elem_next_window <= 0; + K <= 0; + Window_buffer_read_addr_reg <= 0; + Window_buffer_write_addr_reg <= 0; + Fetching_done <= 0; + Write_cmd <= 0; + Writing_done <= 0; end else begin if (read_ok) begin - window_buffer_write_addr_reg <= (window_buffer_write_addr_reg == BUF_ELEM_TOTAL-1)? 0 : window_buffer_write_addr_reg + 1; - newest_buffered_elem <= newest_buffered_elem+1; + Window_buffer_write_addr_reg <= (Window_buffer_write_addr_reg == BUF_ELEM_TOTAL-1)? 0 : Window_buffer_write_addr_reg + 1; + Newest_buffered_elem <= Newest_buffered_elem+1; - if (newest_buffered_elem == LAST_READ_ELEM-1) begin - window_buffer_write_addr_reg <= 0; + if (Newest_buffered_elem == LAST_READ_ELEM-1) begin + Window_buffer_write_addr_reg <= 0; end //check if this is the last read cycle (reading_done will be true afterwards) - if ((newest_buffered_elem == LAST_READ_ELEM-1) && writing_done) begin + if ((Newest_buffered_elem == LAST_READ_ELEM-1) && Writing_done) begin //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) - newest_buffered_elem <= -1; - current_elem <= 0; - window_buffer_read_addr_reg <= 0; - first_elem_next_window <= 0; - writing_done <= 0; - fetching_done <= 0; + Newest_buffered_elem <= -1; + Current_elem <= 0; + Window_buffer_read_addr_reg <= 0; + First_elem_next_window <= 0; + Writing_done <= 0; + Fetching_done <= 0; end end - + if (fetch_cmd) begin //count up to track which element index is about to be read from the buffer, and where it is located within the buffer //use increment value calculated by controller // absolute buffer address wrap-around - automatic logic signed [$clog2(BUF_ELEM_TOTAL)+1:0] ra = $signed(window_buffer_read_addr_reg) + $signed(addr_incr); + automatic logic signed [$clog2(BUF_ELEM_TOTAL)+1:0] ra = $signed(Window_buffer_read_addr_reg) + $signed(addr_incr); automatic logic signed [$clog2(BUF_ELEM_TOTAL+1):0] ra_correct = (ra >= BUF_ELEM_TOTAL)? -BUF_ELEM_TOTAL : (ra < 0)? BUF_ELEM_TOTAL : 0; - window_buffer_read_addr_reg <= ra + ra_correct; + Window_buffer_read_addr_reg <= ra + ra_correct; //keep track where we are within a window - k <= (k != ELEM_PER_WINDOW - 1)? k+1 : 0; + K <= (K != ELEM_PER_WINDOW - 1)? 
K+1 : 0; //update first element of next window to allow buffer overwrite up until that point - if (k == 0) - first_elem_next_window <= first_elem_next_window + tail_incr; + if (K == 0) + First_elem_next_window <= First_elem_next_window + tail_incr; - //check if this is the last write cycle (writing_done will be true afterwards) - if (current_elem == LAST_WRITE_ELEM) - fetching_done <= 1; + //check if this is the last write cycle (Writing_done will be true afterwards) + if (Current_elem == LAST_WRITE_ELEM) + Fetching_done <= 1; else - current_elem <= $signed(current_elem) + addr_incr; + Current_elem <= $signed(Current_elem) + addr_incr; // determine if prefetched data will be outstanding in the next cycle // if we fetch in this cycle -> yes // if we do not fetch nor write -> do not change // if we do not fetch but write successfully-> clear outstanding data - write_cmd <= fetch_cmd; - end + Write_cmd <= fetch_cmd; + end if (write_ok) - write_cmd <= fetch_cmd; + Write_cmd <= fetch_cmd; - if (write_ok && fetching_done) begin - //check if this is the last write cycle (writing_done will be true afterwards) - if (reading_done || (read_ok && (newest_buffered_elem == LAST_READ_ELEM - 1))) begin + if (write_ok && Fetching_done) begin + //check if this is the last write cycle (Writing_done will be true afterwards) + if (reading_done || (read_ok && (Newest_buffered_elem == LAST_READ_ELEM - 1))) begin //start processing of next FM if reading is done already, or completes in the same cycle - newest_buffered_elem <= -1; - current_elem <= 0; - window_buffer_read_addr_reg <= 0; - first_elem_next_window <= 0; - fetching_done <= 0; + Newest_buffered_elem <= -1; + Current_elem <= 0; + Window_buffer_read_addr_reg <= 0; + First_elem_next_window <= 0; + Fetching_done <= 0; end else - writing_done <= 1; + Writing_done <= 1; end end end diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv deleted file mode 100755 index 19638d8a1d..0000000000 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ /dev/null @@ -1,409 +0,0 @@ -`timescale 1 ns / 1 ps - -module $TOP_MODULE_NAME$_controller -( - CLK, - RST, - advance, - cmd_read, - cmd_write -); - -input CLK; -input RST; -input advance; -output cmd_read; -output cmd_write; - -////code generation part: -//mapping of R/W command values to each state (START, MAIN_1, MAIN_2, INTER_1, INTER_2, END_1, END_2) -localparam [0:6] READ_CMD_MAP = $READ_CMD_MAP$; -localparam [0:6] WRITE_CMD_MAP = $WRITE_CMD_MAP$; - -localparam START_COUNTER = $START_COUNTER$; -localparam LOOP_MAIN_COUNTER = $LOOP_MAIN_COUNTER$; -localparam LOOP_MAIN_1_COUNTER = $LOOP_MAIN_1_COUNTER$; -localparam LOOP_MAIN_2_COUNTER = $LOOP_MAIN_2_COUNTER$; -localparam LOOP_INTER_COUNTER = $LOOP_INTER_COUNTER$; -localparam LOOP_INTER_1_COUNTER = $LOOP_INTER_1_COUNTER$; -localparam LOOP_INTER_2_COUNTER = $LOOP_INTER_2_COUNTER$; -localparam LOOP_END_1_COUNTER = $LOOP_END_1_COUNTER$; -localparam LOOP_END_2_COUNTER = $LOOP_END_2_COUNTER$; -//// - -//state and counters -reg [2:0] state, state_next; -parameter STATE_START = 0, STATE_LOOP_MAIN_1 = 1, STATE_LOOP_MAIN_2 = 2, STATE_LOOP_INTER_1 = 3, STATE_LOOP_INTER_2 = 4, STATE_END_1 = 5, STATE_END_2 = 6; -integer counter_current; //todo: minimize width -integer counter_loop_main; -integer counter_loop_inter; - -assign cmd_read = READ_CMD_MAP[state_next]; //read command indicates read in *upcoming* cycle, due to how schedule is constructed -assign cmd_write = WRITE_CMD_MAP[state]; - -//combinational next state logic -always @ (state, 
counter_current, counter_loop_main, counter_loop_inter) begin - state_next = state; //default - case (state) - STATE_START: - if (counter_current == START_COUNTER-1) - state_next = STATE_LOOP_MAIN_1; - - STATE_LOOP_MAIN_1: - if (counter_current == LOOP_MAIN_1_COUNTER-1) - state_next = STATE_LOOP_MAIN_2; - - STATE_LOOP_MAIN_2: begin - if (counter_current == LOOP_MAIN_2_COUNTER-1) begin - state_next = STATE_LOOP_MAIN_1; - if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin - //no -1 because this counter marks the currently active iteration, not finished iterations - if ((LOOP_INTER_COUNTER != 0) && (counter_loop_inter != LOOP_INTER_COUNTER)) - state_next = STATE_LOOP_INTER_1; - else begin - //there might not be an end sequence -> restart immediately - if (LOOP_END_1_COUNTER != 0) - state_next = STATE_END_1; - else - state_next = STATE_LOOP_MAIN_2; //wait in current state until reset - end - end - end - end - - STATE_LOOP_INTER_1: begin - if (counter_current == LOOP_INTER_1_COUNTER-1) begin - if (LOOP_INTER_2_COUNTER != 0) - state_next = STATE_LOOP_INTER_2; - else - state_next = STATE_LOOP_MAIN_1; - end - end - - STATE_LOOP_INTER_2: - if (counter_current == LOOP_INTER_2_COUNTER-1) - state_next = STATE_LOOP_MAIN_1; - - STATE_END_1: begin - if (counter_current == LOOP_END_1_COUNTER-1) begin - if (LOOP_END_2_COUNTER != 0) - state_next = STATE_END_2; - else - state_next = STATE_END_1; //wait in current state until reset - end - end - - STATE_END_2: - if (counter_current == LOOP_END_2_COUNTER-1) - state_next = STATE_END_2; //wait in current state until reset - endcase -end - -//sequential logic -always @ (posedge CLK) begin - if (RST) begin - counter_current <= -1; - counter_loop_main <= 0; - counter_loop_inter <= 0; - state <= STATE_START; - end else begin - if (advance) begin - counter_current <= counter_current+1; - state <= state_next; - - if (state != state_next) begin - counter_current <= 0; - - //count up main loop upon re-entering this loop (not on first enter from start) - if ((state_next == STATE_LOOP_MAIN_1) && (state != STATE_START)) begin - if (counter_loop_main == LOOP_MAIN_COUNTER-1) begin - counter_loop_main <= 0; - end else begin - counter_loop_main <= counter_loop_main+1; - end - end - - if (state_next == STATE_LOOP_INTER_1) begin - if (counter_loop_inter == LOOP_INTER_COUNTER) begin //no -1 because this counter marks the currently active iteration, not finished iterations - counter_loop_inter <= 0; - end else begin - counter_loop_inter <= counter_loop_inter+1; - end - end - end - end - end -end -endmodule //controller - -module $TOP_MODULE_NAME$_reg_buffer -#( - parameter WIDTH = 1, - parameter DEPTH = 1 -) -( - CLK, - shift_enable, - shift_in, - shift_out, - data_out -); - -input CLK, shift_enable; -input [WIDTH-1:0] shift_in; -output [WIDTH-1:0] shift_out; -output [WIDTH*DEPTH-1:0] data_out; - -// ToDo: experiment with SRL instead of FF-based shift register -// by force or by achieving automatic SRL inference -//UG901 template for SRL inference: -// 32-bit Shift Register -// Rising edge clock -// Active high clock enable -// For-loop based template -// File: shift_registers_1.v -// -//module shift_registers_1 (clk, clken, SI, SO); -//parameter WIDTH = 32; -//input clk, clken, SI; -//output SO; -//reg [WIDTH-1:0] shreg; -// -//integer i; -//always @(posedge clk) -//begin -// if (clken) -// begin -// for (i = 0; i < WIDTH-1; i = i+1) -// shreg[i+1] <= shreg[i]; -// shreg[0] <= SI; -// end -//end -//assign SO = shreg[WIDTH-1]; -//endmodule - -reg [WIDTH-1:0] data [DEPTH-1:0]; - 
-assign shift_out = data[DEPTH-1]; - -for (genvar e=0; e0; i=i-1) - data[i] <= data[i-1]; - data[0] <= shift_in; - end -end -endmodule //reg_buffer - -module $TOP_MODULE_NAME$_ram_buffer -#( - parameter WIDTH = 1, - parameter DEPTH = 1 -) -( - CLK, - RST, - shift_enable, - shift_in, - shift_out -); - -input CLK, RST, shift_enable; -input [WIDTH-1:0] shift_in; -output [WIDTH-1:0] shift_out; - -reg [WIDTH-1:0] out_reg; -assign shift_out = out_reg; - -integer addr_w, addr_r; //todo: minimize width (as reg), make r addr depend on w - -$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; - -always @(posedge CLK) begin - if (RST == 1'b0) begin - addr_w <= 0; - addr_r <= 1; - end else begin - if (shift_enable) begin - ram[addr_w] <= shift_in; - out_reg <= ram[addr_r]; - - if (addr_w == DEPTH-1) - addr_w <= 0; - else - addr_w <= addr_w + 1; - - if (addr_r == DEPTH-1) - addr_r <= 0; - else - addr_r <= addr_r + 1; - end - end -end -endmodule //ram_buffer - -module $TOP_MODULE_NAME$_wb -#( - parameter IN_WIDTH = 1, //bit-width*C*MMV_in - parameter OUT_ELEM_WIDTH = 1, //bit-width*C - parameter OUT_WIDTH = 1, //bit-width*C*MMV_out - parameter BUFFER_ELEM_TOTAL = 1 -) -( - CLK, - RST, - data_in, - shift_enable, - data_out -); - -input CLK, RST; -input [IN_WIDTH-1:0] data_in; -input shift_enable; -output [OUT_WIDTH-1:0] data_out; - -//Input REG to enable simultaneous R/W -reg [IN_WIDTH-1:0] reg_input; - -$GENERATE_REG_FIFOS$ - -$GENERATE_BRAM_FIFOS$ - -//Fixed interconnect between linear buffers -$GENERATE_BUFFER_CONNECTION$ - -//Fixed REG FIFO <-> output mapping -$GENERATE_OUTPUT_MAPPING$ - -//input register logic -integer i; -always @ (posedge CLK) begin - if (shift_enable) begin - reg_input <= data_in; - end -end - -endmodule //window_buffer - -module $TOP_MODULE_NAME$_impl ( - ap_clk, - ap_rst_n, - in0_V_V_TDATA, - in0_V_V_TVALID, - in0_V_V_TREADY, - out_V_V_TDATA, - out_V_V_TVALID, - out_V_V_TREADY -); - -parameter BIT_WIDTH = $BIT_WIDTH$; -parameter SIMD = $SIMD$; //assuming SIMD = C for this implementation style -parameter MMV_IN = $MMV_IN$; -parameter MMV_OUT = $MMV_OUT$; -parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; -parameter BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; -parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; -parameter CYCLES_TOTAL = $CYCLES_TOTAL$; -parameter BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$; - -//IO ports -input ap_clk; -input ap_rst_n; -input [BUF_IN_WIDTH-1:0] in0_V_V_TDATA; -input in0_V_V_TVALID; -output in0_V_V_TREADY; -output [BUF_OUT_WIDTH-1:0] out_V_V_TDATA; -output out_V_V_TVALID; -input out_V_V_TREADY; - -//main buffer instantiation -wire [BUF_IN_WIDTH-1:0] window_buffer_in; -wire [BUF_OUT_WIDTH-1:0] window_buffer_out; -wire window_buffer_shift_enable; -$TOP_MODULE_NAME$_wb -#( - .IN_WIDTH(BUF_IN_WIDTH), - .OUT_ELEM_WIDTH(BUF_OUT_ELEM_WIDTH), - .OUT_WIDTH(BUF_OUT_WIDTH), - .BUFFER_ELEM_TOTAL(BUF_ELEM_TOTAL) -) -window_buffer_inst -( - .CLK(ap_clk), - .RST(ap_rst_n), - .data_in(window_buffer_in), - .shift_enable(window_buffer_shift_enable), - .data_out(window_buffer_out) -); - -integer cycle; //main cycle counter (where either read/write/both happen, resets for each image) -wire read_cmd; -wire write_cmd; -reg write_done; //keep track if W of current cycle was already completed, but we still wait on a R in the same cycle - -wire controller_reset; -wire controller_advance; - -$TOP_MODULE_NAME$_controller -controller_inst -( - .CLK(ap_clk), - .RST(controller_reset), - .advance(controller_advance), - .cmd_read(read_cmd), - .cmd_write(write_cmd) -); - -wire write_blocked; 
-assign write_blocked = write_cmd && !out_V_V_TREADY && !write_done; - -wire read_ok; -// with transition to next cycle: -// want to read can read source is ready (waiting on VALID allowed) -assign read_ok = read_cmd && !write_blocked && in0_V_V_TVALID; - -wire write_ok; -// with transition to next cycle: -// output is VALID sink is ready sink has already read (we are waiting on source) -assign write_ok = write_cmd && (out_V_V_TREADY || write_done); - -wire advance; -// includes waiting on W if W-only cycle: wait only on W no R/W to wait for -assign advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); - -//assign buffer control -//todo: if mmv_out < k: might not shift and/or write for multiple read_cmd cycles -assign window_buffer_shift_enable = advance; - -assign controller_reset = !ap_rst_n || ((cycle == CYCLES_TOTAL-1) && advance); -assign controller_advance = advance; - -//assign I/O ports -assign window_buffer_in = in0_V_V_TDATA; -assign out_V_V_TDATA = window_buffer_out; -assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) -assign out_V_V_TVALID = ap_rst_n && write_cmd && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) - -//main process for advancing cycle count -always @ (posedge ap_clk) begin - if (ap_rst_n == 1'b0) begin - cycle <= 0; - end else begin - if (advance) begin - write_done <= 1'b0; //reset flag - - //count cycle (completed R or W or both (depending on current cycle)) - if (cycle == CYCLES_TOTAL-1) - cycle <= 0; - else - cycle <= cycle+1; - - end else if (write_ok) // successful W in this cycle, but R still outstanding - write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! - end -end - -endmodule //TOP_MODULE_NAME_impl diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index be5a93b9e6..1b470817d6 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -11,10 +11,13 @@ module $TOP_MODULE_NAME$ ( out_V_TREADY ); +// top-level parameters (set via code-generation) parameter BIT_WIDTH = $BIT_WIDTH$; parameter SIMD = $SIMD$; parameter MMV_IN = $MMV_IN$; parameter MMV_OUT = $MMV_OUT$; + +// derived constants parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; @@ -30,7 +33,12 @@ output out_V_TVALID; input out_V_TREADY; $TOP_MODULE_NAME$_impl -#() +#( + .BIT_WIDTH(BIT_WIDTH), + .SIMD(SIMD), + .MMV_IN(MMV_IN), + .MMV_OUT(MMV_OUT) +) impl ( .ap_clk(ap_clk), diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index f1e0f53a7a..98351942b9 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -49,128 +49,17 @@ # - Addressable cyclic buffer: to be used when out_width <= in_width # - Parallel registers + line buffers: to be used when out_width > in_width # Supports non-square, 1D, strided, dilated, and depthwise convolutions. 
-# Note: the actual data layout produced is different for depthwise and non-depthwise ops: +# Note: the actual data layout produced is different for depthwise and non-depthwise: # * non-depthwise SWG: (1, OFMDim_H, OFMDim_W, K_H, K_W, IFMChannels/SIMD, SIMD) # * depthwise SWG: (1, OFMDim_H, OFMDim_W, IFMChannels/SIMD, K_H, K_W, SIMD) - -# helper functions for parallel mode buffer scheduling (to be superseded by improved implementation): - - -def schedule_append(schedule, op): - if len(schedule) > 0 and schedule[-1][1] == op: - count, op_ = schedule[-1] - schedule[-1] = (count + 1, op_) - else: - schedule.append((1, op)) - return schedule - - -def schedule_map_cmds(seq): - mapping = { - "w": ("1'b1", "1'b0"), - "r": ("1'b0", "1'b1"), - "wr": ("1'b1", "1'b1"), - "n": ("1'b0", "1'b0"), - } - if seq: - if len(seq) == 2: - return (seq[0], mapping[seq[1]], 0, mapping["n"]) - if len(seq) == 4: - return (seq[0], mapping[seq[1]], seq[2], mapping[seq[3]]) - else: - return (0, mapping["n"], 0, mapping["n"]) - - -def schedule_map_controller(schedule): - # Experimental implementation to map fixed controller loop structure to R/W schedule by analyzing - # the access pattern given by Im2Col, rather than direct computation. - # TODO: Probably replace this with a directly-computed schedule, similar to the default implementation style. - - # leave first sequence (pre-load) as is - start_sequence = schedule[0] - loop_sequence_1_counter = 1 - loop_sequence_1 = schedule[1] - loop_counter = 0 - loop_sequence_2 = None - end_sequence = None - - i = 2 - if i < len(schedule): - loop_sequence_1 += schedule[i] - i += 1 - while i + 1 < len(schedule): - candidate = schedule[i] + schedule[i + 1] - if candidate == loop_sequence_1: - loop_sequence_1_counter += 1 - i += 2 - else: - break - - if i < len(schedule): - loop_sequence_2 = schedule[i] - i += 1 - if i + 1 < len(schedule): - candidate = schedule[i] + schedule[i + 1] - if candidate != loop_sequence_1: - loop_sequence_2 += schedule[i] - i -= 1 - loop_sequence_total_len = ( - int(len(loop_sequence_2) / 2) - ) + loop_sequence_1_counter * (int(len(loop_sequence_1) / 2)) - loop_sequence_total = ( - loop_sequence_2 + loop_sequence_1_counter * loop_sequence_1 - ) - while i + loop_sequence_total_len < len(schedule): - candidate = schedule[i] - for x in range(i + 1, i + loop_sequence_total_len): - candidate += schedule[x] - - if candidate == loop_sequence_total: - loop_counter += 1 - i += loop_sequence_total_len - else: - break - else: - if i < len(schedule): - end_sequence = loop_sequence_2 + schedule[i] - i += 1 - loop_sequence_2 = None - else: - end_sequence = loop_sequence_2 - loop_sequence_2 = None - - if i < len(schedule): - end_sequence = schedule[i] - i += 1 - if i < len(schedule): - end_sequence = end_sequence + schedule[i] - i += 1 - - assert len(start_sequence) == 1 * 2, "ERROR: invalid start sequence" - assert len(loop_sequence_1) == 2 * 2, "ERROR: invalid loop 1 sequence" - if loop_sequence_2: - assert len(loop_sequence_2) <= 2 * 2, "ERROR: invalid loop 2 sequence" - if end_sequence: - assert len(end_sequence) <= 2 * 2, "ERROR: invalid end sequence" - assert i == len(schedule), "ERROR: schedule could not be compacted %d / %d" % ( - i, - len(schedule), - ) - - return ( - start_sequence, - loop_counter, - loop_sequence_1_counter, - loop_sequence_1, - loop_sequence_2, - end_sequence, - ) +# NOTE: "Parallel" implementation style not yet implemented in this version! 
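Editor's note: the two layouts above differ only in the order of the kernel and channel-fold axes. As a rough illustration (shapes are made up, and SIMD is assumed to divide IFMChannels), the permutation below is the same one the RTL SWG test later in this patch set applies to convert a non-depthwise Im2Col reference into the depthwise layout:

import numpy as np

# Illustrative parameters only: OFMDim=(11, 11), K=(3, 3), IFMChannels=6, SIMD=2
ofm_h, ofm_w, k_h, k_w, ifm_ch, simd = 11, 11, 3, 3, 6, 2

# non-depthwise SWG output, flattened per output pixel:
# (1, OFMDim_H, OFMDim_W, K_H * K_W * IFMChannels)
y = np.arange(ofm_h * ofm_w * k_h * k_w * ifm_ch).reshape(1, ofm_h, ofm_w, -1)

# swap the (K_H, K_W) and (IFMChannels/SIMD) axes to obtain the depthwise layout
y_dw = (
    y.reshape(1, ofm_h, ofm_w, k_h * k_w, ifm_ch // simd, simd)
    .transpose(0, 1, 2, 4, 3, 5)
    .reshape(1, ofm_h, ofm_w, ifm_ch * k_h * k_w)
)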
class ConvolutionInputGenerator_rtl(HLSCustomOp): """Class that does not correspond to one of the finn-hlslib ConvolutionInputGenerator - (sliding window) function variants! ...""" + (sliding window) function variants. Generates an RTL ConvolutionInputGenerator + implementation based on (System-)Verilog templates.""" def __init__(self, onnx_node): super().__init__(onnx_node) @@ -216,15 +105,9 @@ def get_folded_input_shape(self): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" wf = int(ifm_ch / simd) - # folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) - # round up to support ifm_dim % M != 0 - if ifm_dim_w == 1: - folded_ishape = (1, math.ceil(ifm_dim_h / M), ifm_dim_w, wf, int(simd * M)) - else: - folded_ishape = (1, ifm_dim_h, math.ceil(ifm_dim_w / M), wf, int(simd * M)) + folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) return folded_ishape def get_normal_output_shape(self): @@ -246,30 +129,13 @@ def get_folded_output_shape(self): stride_h, stride_w = self.get_nodeattr("Stride") dilation_h, dilation_w = self.get_nodeattr("Dilation") simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") pad = 0 ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, pad, dilation_h) ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, pad, dilation_w) assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" if self.get_nodeattr("parallel_window"): wf = int((ifm_ch) // simd) - # folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) - if ofm_dim_w == 1: - folded_oshape = ( - 1, - int(ofm_dim_h / M), - ofm_dim_w, - wf, - k_h * k_w * int(simd * M), - ) - else: - folded_oshape = ( - 1, - ofm_dim_h, - int(ofm_dim_w / M), - wf, - k_h * k_w * int(simd * M), - ) + folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, k_h * k_w * simd) else: wf = int((k_h * k_w * ifm_ch) // simd) folded_oshape = (1, ofm_dim_h, ofm_dim_w, wf, simd) @@ -303,9 +169,8 @@ def get_instream_width(self): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") - M = self.get_nodeattr("M") assert ifm_ch % simd == 0, "SIMD must divide IFMChannels" - in_width = simd * ibits * M + in_width = simd * ibits return in_width def get_outstream_width(self): @@ -327,9 +192,28 @@ def get_number_output_values(self): num_output_elems = np.prod(folded_oshape[:-1]) return num_output_elems + def get_1d_conv_attrs_normalized(self): + # normalize FM dimensions so that: + # [H, W] = [Y, X] = [1, D] or [D, 1] are always mapped to [1, D]. + # The dummy ('1') dimension is the Y-dimension. 
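Editor's note: for example (hypothetical attribute values, purely for illustration), a 1D configuration with the dummy dimension in W is flipped into the canonical [1, D] form as follows:

# Hypothetical 1D conv attributes, dummy dimension in W
ifm_dim, ofm_dim = [14, 1], [12, 1]
k, stride, dilation = [3, 1], [1, 1], [1, 1]

if ifm_dim[1] == 1:
    ifm_dim, ofm_dim = ifm_dim[::-1], ofm_dim[::-1]
    k, stride, dilation = k[::-1], stride[::-1], dilation[::-1]

assert ifm_dim == [1, 14] and ofm_dim == [1, 12] and k == [1, 3]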
+ ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + ofm_dim = self.get_nodeattr("OFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + + if ifm_dim[1] == 1: + ifm_dim = ifm_dim[::-1] + ofm_dim = ofm_dim[::-1] + k = k[::-1] + stride = stride[::-1] + dilation = dilation[::-1] + + return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) + def get_exp_cycles(self): simd = self.get_nodeattr("SIMD") - m = self.get_nodeattr("M") ifm_ch = self.get_nodeattr("IFMChannels") k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") @@ -342,84 +226,137 @@ def get_exp_cycles(self): k_h, k_w = k stride_h, stride_w = stride dilation_h, dilation_w = dilation - k_h, k_w = k - stride_h, stride_w = stride - dilation_h, dilation_w = dilation - impl_style = self.select_impl_style() - if impl_style == "parallel": - exp_cycles = self.get_number_input_values() + 2 + channel_factor = int(ifm_ch / simd) + + if ifm_dim_h == 1 or ifm_dim_w == 1: + # 1D case + ( + ifm_ch, + [ifm_dim_h, ifm_dim_w], + [ofm_dim_h, ofm_dim_w], + [k_h, k_w], + [stride_h, stride_w], + [dilation_h, dilation_w], + ) = self.get_1d_conv_attrs_normalized() + + if depthwise: + exp_cycles = ( + +ofm_dim_w * k_w * channel_factor + + channel_factor * (k_w - 1) * (stride_w - 1) + - (k_w - 1) + + 2 + ) + else: + exp_cycles = ofm_dim_w * k_w * channel_factor + 2 else: - # based on 2D HLS SWG estimate - # FIXME: increase accuracy for newly supported parameter scenarios - cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / 1 - cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) + # 2D case + buffer_min_size = ( + (k_h - 1) * dilation_h * ifm_dim_w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + cycles_write_block = ofm_dim_w * k_w * k_h * channel_factor + cycles_read_block = stride_w * ifm_dim_w * channel_factor max_cycles = max(cycles_write_block, cycles_read_block) - exp_cycles = ( - ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles - ) + if depthwise: + max_cycles += ofm_dim_w * (stride_w - 1) * (channel_factor - 1) + exp_cycles = buffer_min_size + ofm_dim_h * max_cycles # initial buffering + if depthwise: + exp_cycles += (stride_h - 1) * ifm_dim_w * channel_factor return int(exp_cycles) def bram_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - impl_style = self.select_impl_style() # call codegen preparation to populate self.buffer_depth if impl_style == "default": - template_path, code_gen_dict = self.prepare_codegen_default() - elif impl_style == "parallel": - template_path, code_gen_dict = self.prepare_codegen_parallel() + self.prepare_codegen_default() + else: + raise Exception("Requested impl. style not implemented") + # NOTE: Actual BRAM usage might be lower in some cases. + # This does not account for the exact Vivado behavior yet. 
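Editor's note: as a quick sanity check of the 2D cycle estimate above, one non-depthwise configuration from the test sweep below (IFMDim=13x13, K=3x3, stride=1, dilation=1, IFMChannels=6, SIMD=2) works out as follows; the numbers in the comments are for illustration only:

k_h = k_w = 3
ifm_dim_w = 13
ofm_dim_h = ofm_dim_w = 11  # compute_conv_output_dim(13, 3, 1, 0, 1)
stride_w = dilation_h = dilation_w = 1
channel_factor = 6 // 2  # IFMChannels / SIMD

buffer_min_size = (
    (k_h - 1) * dilation_h * ifm_dim_w + (k_w - 1) * dilation_w + 1
) * channel_factor                                             # 87
cycles_write_block = ofm_dim_w * k_w * k_h * channel_factor    # 297
cycles_read_block = stride_w * ifm_dim_w * channel_factor      # 39
exp_cycles = buffer_min_size + ofm_dim_h * max(cycles_write_block, cycles_read_block)
print(exp_cycles)  # 87 + 11 * 297 = 3354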
buffer_width = simd * self.get_input_datatype().bitwidth() buffer_depth = self.buffer_depth - if ram_style == "block" or ram_style == "auto": - ram_depth = buffer_depth - if ram_depth <= 512: + if buffer_depth <= 512: ram_width = 36 - elif ram_depth <= 1024: + elif buffer_depth <= 1024: ram_width = 18 - elif ram_depth <= 2048: + elif buffer_depth <= 2048: ram_width = 9 - elif ram_depth <= 4096: + elif buffer_depth <= 4096: ram_width = 4 - elif ram_depth <= 8192: + elif buffer_depth <= 8192: ram_width = 2 else: ram_width = 1 ram_cascade_depth = math.ceil(buffer_depth / 16384) ram_cascade_width = math.ceil(buffer_width / ram_width) + cascade_savings = 0 + if buffer_depth > 16384: + remainder_depth = buffer_depth % 16384 + if remainder_depth <= 512: + remainder_width = 36 + elif remainder_depth <= 1024: + remainder_width = 18 + elif remainder_depth <= 2048: + remainder_width = 9 + elif remainder_depth <= 4096: + remainder_width = 4 + elif remainder_depth <= 8192: + remainder_width = 2 + else: + remainder_width = 1 - return int(ram_cascade_depth * ram_cascade_width) + remainder_cascade_width = math.ceil(buffer_width / remainder_width) + cascade_savings = ram_cascade_width - remainder_cascade_width + + return int(ram_cascade_depth * ram_cascade_width - cascade_savings) else: return 0 def lut_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - impl_style = self.select_impl_style() # call codegen preparation to populate self.buffer_depth if impl_style == "default": - template_path, code_gen_dict = self.prepare_codegen_default() - elif impl_style == "parallel": - template_path, code_gen_dict = self.prepare_codegen_parallel() + self.prepare_codegen_default() + else: + raise Exception("Requested impl. style not implemented") buffer_width = simd * self.get_input_datatype().bitwidth() buffer_depth = self.buffer_depth - if ram_style == "distributed": - ram_luts = int(buffer_width * math.ceil(buffer_depth / 32)) + ram_luts = int(buffer_width * math.ceil(buffer_depth / 38)) else: ram_luts = 0 return 300 + ram_luts def uram_estimation(self): - # TODO: implement URAM estimation - return 0 + simd = self.get_nodeattr("SIMD") + ram_style = self.get_nodeattr("ram_style") + impl_style = self.select_impl_style() + # call codegen preparation to populate self.buffer_depth + if impl_style == "default": + self.prepare_codegen_default() + else: + raise Exception("Requested impl. 
style not implemented") + + buffer_width = simd * self.get_input_datatype().bitwidth() + buffer_depth = self.buffer_depth + + if ram_style == "ultra": + ram_depth = 4096 + ram_width = 72 + ram_cascade_depth = math.ceil(buffer_depth / ram_depth) + ram_cascade_width = math.ceil(buffer_width / ram_width) + return int(ram_cascade_depth * ram_cascade_width) + else: + return 0 def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") @@ -427,10 +364,9 @@ def execute_node(self, context, graph): exp_ishape = self.get_normal_input_shape() exp_oshape = self.get_normal_output_shape() folded_ishape = self.get_folded_input_shape() - folded_oshape = self.get_folded_output_shape() if mode == "cppsim": - raise Exception("""cppsim not possible for RTL SWG""".format(mode)) + raise Exception("cppsim not possible for RTL SWG") elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: @@ -443,11 +379,9 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - # disable this check to allow for IFMdim % M != 0 case (see below) where input comes from MMV-output capable node - # assert ( - # inp.shape == exp_ishape - # ), """Input shape doesn't - # match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" + assert ( + inp.shape == exp_ishape + ), """Input shape doesn't match expected shape (1, ifm_dim, ifm_dim, ifm_ch).""" if self.get_input_datatype() == DataType["BIPOLAR"]: # store bipolar activations as binary inp = (inp + 1) / 2 @@ -455,20 +389,6 @@ def execute_node(self, context, graph): else: export_idt = self.get_input_datatype() - # pad test input stream to work when IFMdim % M != 0 - # during normal operation, the AXI Stream should not care, in the last cycle garbage elements are read but not used - # TODO: only works for 1D case - mmv_stream_padding_px = int( - (np.prod(folded_ishape) - np.prod(inp.shape)) / exp_ishape[-1] - ) - if exp_ishape[2] == 1: - inp = np.pad( - inp, ((0, 0), (0, mmv_stream_padding_px), (0, 0), (0, 0)), "constant" - ) - else: - inp = np.pad( - inp, ((0, 0), (0, 0), (0, mmv_stream_padding_px), (0, 0)), "constant" - ) # reshape input into folded form inp = inp.reshape(folded_ishape) # make copy before saving array @@ -521,7 +441,6 @@ def prepare_codegen_default(self): dilation = self.get_nodeattr("Dilation") depthwise = self.get_nodeattr("depthwise") simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") k_h, k_w = k h, w = ifm_dim @@ -532,15 +451,8 @@ def prepare_codegen_default(self): pad_w = pad[1] + pad[3] out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) - - if self.get_nodeattr("parallel_window"): - mmv_in = M * 1 - mmv_out = M * k_h * k_w - else: - mmv_in = 1 - mmv_out = 1 - - # compute index/address increments for each nested loop + mmv_in = 1 + mmv_out = 1 channel_factor = int(ifm_ch / simd) # compute minimal buffer length (assuming it holds 1 complete window) @@ -549,7 +461,7 @@ def prepare_codegen_default(self): ) * channel_factor # add additional buffer space in case of stride > 1 - # this minimizes cycle count, as it allows an earlier pre-load of skipped input elements + # this minimizes cycle count as it allows an earlier pre-load of input elements buffer_actual_size = ( buffer_min_size + max( @@ -588,7 +500,7 @@ def prepare_codegen_default(self): + 1 ) - # re-use same controller structure -> re-assign address increments for the 
dw case + # re-use same controller structure -> re-assign address increments if depthwise: addr_incr_end_window_elem = dilation_w * channel_factor addr_incr_end_window_row = ( @@ -633,22 +545,7 @@ def prepare_codegen_default(self): tail_incr_w = addr_incr_end_window + buffer_min_size - channel_factor tail_incr_h = addr_incr_end_row + buffer_min_size - channel_factor tail_incr_last_window = buffer_min_size - 1 - code_gen_dict["$TAIL_INCR_GENERATION$"] = [ - """ - always @ (counter_loop_kh, counter_loop_w, counter_loop_h) begin - if (counter_loop_kh >= 0) - tail_incr_reg = 1; - else if (counter_loop_w >= 0) - tail_incr_reg = {}; - else if (counter_loop_h >= 0) - tail_incr_reg = {}; - else - tail_incr_reg = {}; - end - """.format( - tail_incr_w, tail_incr_h, tail_incr_last_window - ) - ] + code_gen_dict["$IS_DEPTHWISE$"] = ["1"] else: # depthwise output format is equivalent to non-depthwise if SIMD=C elem_per_window = k_h * k_w * channel_factor @@ -656,20 +553,11 @@ def prepare_codegen_default(self): tail_incr_w = addr_incr_end_window + buffer_min_size - 1 tail_incr_h = addr_incr_end_row + buffer_min_size - 1 tail_incr_last_window = buffer_min_size - 1 - code_gen_dict["$TAIL_INCR_GENERATION$"] = [ - """ - always @ (counter_loop_w, counter_loop_h) begin - if (counter_loop_w >= 0) - tail_incr_reg = {}; - else if (counter_loop_h >= 0) - tail_incr_reg = {}; - else - tail_incr_reg = {}; - end - """.format( - tail_incr_w, tail_incr_h, tail_incr_last_window - ) - ] + code_gen_dict["$IS_DEPTHWISE$"] = ["0"] + + code_gen_dict["$TAIL_INCR_W$"] = [str(tail_incr_w)] + code_gen_dict["$TAIL_INCR_H$"] = [str(tail_incr_h)] + code_gen_dict["$TAIL_INCR_LAST$"] = [str(tail_incr_last_window)] # support SIMD = C and k_w = 1 cases # for k = [k_h, k_w] = [1, k_w], no adjustment is needed @@ -732,373 +620,42 @@ def prepare_codegen_default(self): return template_path, code_gen_dict - def prepare_codegen_parallel(self): - # Parallel implementation style for MMV_out = K: - # mix of shift-registers (for parallel read) and line buffers (BRAM or LUTRAM) - # compute a static schedule by analyzing access pattern (from im2col function) - template_path = ( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" - ) - code_gen_dict = {} - + def select_impl_style(self): + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") ifm_ch = self.get_nodeattr("IFMChannels") - k = self.get_nodeattr("ConvKernelDim") ifm_dim = self.get_nodeattr("IFMDim") stride = self.get_nodeattr("Stride") dilation = self.get_nodeattr("Dilation") - simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") - - k_h, k_w = k - h, w = ifm_dim - n = c = 1 # no need to consider fully-parallel C dimension - in_shape = (n, c, h, w) - pad = [0, 0, 0, 0] + k = self.get_nodeattr("ConvKernelDim") + ifm_dim_h, ifm_dim_w = ifm_dim stride_h, stride_w = stride dilation_h, dilation_w = dilation - in_image = np.empty(in_shape, dtype=int) - in_image_padded = np.pad( - in_image, - ((0, 0), (0, 0), (pad[0], pad[2]), (pad[1], pad[3])), - mode="constant", - constant_values=0, - ) - in_shape_padded = in_image_padded.shape - h_padded = in_shape_padded[2] - w_padded = in_shape_padded[3] - pad_h = pad[0] + pad[2] - pad_w = pad[1] + pad[3] - out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) - out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) - - if self.get_nodeattr("parallel_window"): - mmv_in = M * 1 - mmv_out = M * k_h * k_w - assert ifm_ch == simd, "Constraint violated: SIMD must be equal to 
C" - else: - mmv_in = 1 - mmv_out = 1 - assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide C" - - # Out width > In width: Parallel implementation style using registers + line buffers - idx_c, idx_h, idx_w = im2col.get_im2col_indices_nchw( - in_shape, k_h, k_w, pad, stride_h, stride_w, dilation_h, dilation_w - ) - - cols = in_image_padded[:, idx_c, idx_h, idx_w] - cols = cols.transpose(1, 2, 0).reshape(k_h * k_w * c, -1) - # result shape is (k_H*k_W*N, out_dim_H*out_dim_W), convert to NCHW - out_image = cols.reshape(n, c, k_h, k_w, out_dim_h, out_dim_w) - # (N=0,C=1,kh=2,kw=3,H=4,W=5) -> (N=0,H=4,W=5,kh=2,kw=3,C=1) - out_image = out_image.transpose(0, 4, 5, 2, 3, 1) - out_image = out_image.reshape(n, out_dim_h, out_dim_w, k_h * k_w * c) - idx_px = idx_h * w + idx_w # sequential pixel indices - k, cycles = idx_px.shape - output_elements = mmv_out - output_cycles = int(cycles / (mmv_out / k)) - - idx_px = idx_px.transpose() - idx_px = idx_px.reshape(output_cycles, output_elements) - idx_px = idx_px.transpose() - # result: first dim is number of parallel output elements, - # second dim is the input element (pixel in case of SIMD=C) index that each output element outputs per cycle - - buffer = [] - buffer_max_size = 0 - schedule = [] - next_in_px = 0 - oldest_px = 0 - - # compute schedule and buffer read pattern (output driven) - idx_px_relative = idx_px.copy() - output_elem, output_cycles = idx_px_relative.shape - - for x in range(output_cycles): - # load missing inputs into buffer - for y in range(output_elem): - while int(idx_px_relative[y, x]) >= next_in_px: - # load M inputs at once (keep "buffer" list 1D for now, handle actual 2D buffer generation later) - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 - schedule = schedule_append(schedule, "w") - - # discard unused buffer elements - # FIXME: this is very slow for large feature maps (e.g., 4096x4096) - oldest_px = np.min(idx_px_relative[:, x:]) - # check whether M elements can be shifted out, not just the single oldest one - # while all([buffer[i] < oldest_px for i in range(M)]): - if all([buffer[i] < oldest_px for i in range(M)]): - # M buffer elements are shifted out at once - for m in range(M): - buffer.pop(0) - - # adjust relative buffer index of current x (according to last discarded buffer elements) - for y in range(output_elem): - idx_px_relative[y, x] -= oldest_px - - # read from buffer - # + simultaneously load next pixel(s) into buffer if there are any left - if next_in_px > (h_padded * w_padded - 1): - # read only (append above) - schedule = schedule_append(schedule, "r") - else: - # load M inputs at once - for m in range(M): - buffer.append(next_in_px) - next_in_px += 1 - schedule = schedule_append(schedule, "wr") - - # record max needed buffer depth - if len(buffer) > buffer_max_size: - buffer_max_size = len(buffer) - - # insert dummy write operations for data at the input FM tail-end that is never read (e.g. 
in case of stride > 1) - while next_in_px <= (h_padded * w_padded - 1): - next_in_px += 1 - schedule = schedule_append(schedule, "w") - - # add 1 extra cycle after final READ+WRITE cycle for transition b/w feature maps - if schedule[-1][1] == "wr": - schedule_append(schedule, "n") - - # find buffer access patterns - buffer_access_patterns = [] - for x in range(output_cycles): - if idx_px_relative[:, x].tolist() not in buffer_access_patterns: - buffer_access_patterns.append(idx_px_relative[:, x].tolist()) - - ### determine buffer partitioning into REG FIFOs (parallel access) and BRAM FIFOs (line buffers) - # TODO: this part doesn't fully account for M>1 for 2D buffers yet - REG_BRAM_THRESHOLD = 8 - # how many "unused" registers are allowed between buffer positions that will be accessed in parallel - # example: - # 0: only consecutive access patterns will be implemented in regs, rest in (LUTRAM/BRAM) line buffers - # 2: [0, 3, 6] access pattern is still allowed and will be implemented with one 7-position shift reg - - code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_max_size)] - self.buffer_depth = buffer_max_size # for resource estimation + k_h, k_w = k + kernel_width = (k_w - 1) * dilation_w + 1 # incl. dilation + kernel_height = (k_h - 1) * dilation_h + 1 # incl. dilation + # check for valid configuration assert ( - len(buffer_access_patterns) == 1 - ), "ERROR: Buffer access pattern is not static" - buf_static_access_pattern = buffer_access_patterns[0] - reg_fifos = [] - reg_fifos_depth = [] - bram_fifos = [] - bram_fifos_depth = [] - current = [] - for i in range(len(buf_static_access_pattern)): - access_idx = buf_static_access_pattern[i] - if len(current) == 0: - current.append(access_idx) - else: - # assume non-decreasing index order in access pattern - # TODO: this assumption does not hold for M>1 for the 2D case - distance = access_idx - max(current) - if not (distance - 1 > REG_BRAM_THRESHOLD): - for i in range(distance - 1): - # insert dummy into REG FIFO (not read as part of window) - current.append(-1) - # assign this access to same REG FIFO as previous one - current.append(access_idx) - else: - # assign skipped accesses to new BRAM FIFO - bram_fifos.append([-1] * (distance - 1)) - bram_fifos_depth.append( - math.ceil((distance - 1) / M) - ) # really ceil? 
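Editor's note: the REG_BRAM_THRESHOLD heuristic being removed here split the static access pattern into parallel-readable shift registers and line buffers for the long gaps in between. A simplified sketch of that partitioning for the M=1, SIMD=C case (names and the example pattern are illustrative, not part of the FINN API):

REG_BRAM_THRESHOLD = 8  # max. unused positions tolerated inside one shift register

def partition_accesses(access_pattern, threshold=REG_BRAM_THRESHOLD):
    """Split a non-decreasing buffer access pattern into REG FIFOs and line buffers."""
    reg_fifos, bram_fifo_depths = [], []
    current = [access_pattern[0]]
    for idx in access_pattern[1:]:
        distance = idx - max(current)
        if distance - 1 <= threshold:
            current += [-1] * (distance - 1)  # dummy positions, shifted but never read
            current.append(idx)
        else:
            bram_fifo_depths.append(distance - 1)  # gap becomes a line buffer
            reg_fifos.append(current)
            current = [idx]
    reg_fifos.append(current)
    return reg_fifos, bram_fifo_depths

# e.g. a 3x3 window sliding over a 16-pixel-wide row:
print(partition_accesses([0, 1, 2, 16, 17, 18, 32, 33, 34]))
# -> ([[0, 1, 2], [16, 17, 18], [32, 33, 34]], [13, 13])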
- # start with new REG FIFO - reg_fifos.append(current) - # reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # allows for MMV in the 1D case - reg_fifos_depth.append(len(current)) - current = [] - current.append(access_idx) - reg_fifos.append(current) - # reg_fifos_depth.append(math.ceil((max(current)+1)/M)) # allows for MMV in the 1D case - reg_fifos_depth.append(len(current)) - - code_gen_dict["$GENERATE_REG_FIFOS$"] = [] - for i in range(len(reg_fifos)): - code_gen_dict["$GENERATE_REG_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] reg_fifo_{id}_in; - wire [IN_WIDTH-1:0] reg_fifo_{id}_out; - wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; - {name}_reg_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) - ) - reg_buffer_inst_{id} - ( - .CLK(CLK), - .shift_enable(shift_enable), - .shift_in(reg_fifo_{id}_in), - .shift_out(reg_fifo_{id}_out), - .data_out(reg_fifo_{id}) - );""".format( - name=self.get_verilog_top_module_name(), - id=i, - len=reg_fifos_depth[i], - ) - ) - - code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] - for i in range(len(bram_fifos)): - code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( - """ - wire [IN_WIDTH-1:0] bram_fifo_{id}_in; - wire [IN_WIDTH-1:0] bram_fifo_{id}_out; - {name}_ram_buffer - #( - .WIDTH(IN_WIDTH), - .DEPTH({len}) - ) - ram_buffer_inst_{id} - ( - .CLK(CLK), - .RST(RST), - .shift_enable(shift_enable), - .shift_in(bram_fifo_{id}_in), - .shift_out(bram_fifo_{id}_out) - );""".format( - name=self.get_verilog_top_module_name(), - id=i, - len=bram_fifos_depth[i], - ) - ) - - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] - out_idx = mmv_out - 1 - for fifo_id, reg_fifo in enumerate(reg_fifos): - for fifo_idx, access_idx in enumerate(reg_fifo): - if access_idx != -1: - code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( - "assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];".format( - out_idx=out_idx, - fifo_id=fifo_id, - access_idx=reg_fifos_depth[fifo_id] - - 1 - - int((max(reg_fifo) - access_idx) / M), - mmv_idx=(max(reg_fifo) - access_idx) % M, - mmv=M, - ) - ) - # reversal: out_idx=0 -> oldest buffer element -> highest access_idx - out_idx = out_idx - 1 - assert out_idx == -1, "ERROR: Not all output vector elements connected" - - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] - for i in range(len(reg_fifos)): - if i == 0: - # first FIFO containing newest elements -> input comes from input reg - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = reg_input;""".format( - fifo_id=i, - ) - ) - else: - # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) - input_fifo_id = i - 1 - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out;""".format( - fifo_id=i, input_fifo_id=input_fifo_id - ) - ) - for i in range(len(bram_fifos)): - input_fifo_id = i - code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( - """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out;""".format( - fifo_id=i, input_fifo_id=input_fifo_id - ) - ) - - ( - start_sequence, - loop_counter, - loop_sequence_1_counter, - loop_sequence_1, - loop_sequence_2, - end_sequence, - ) = schedule_map_controller(schedule) - - start_sequence = schedule_map_cmds(start_sequence) - loop_sequence_1 = schedule_map_cmds(loop_sequence_1) - loop_sequence_2 = schedule_map_cmds(loop_sequence_2) - end_sequence = schedule_map_cmds(end_sequence) - - cycles_total = 0 - for t in schedule: - cycles_total 
+= t[0] - # add extra cycle if schedule ends on READ - if schedule[-1][1] == "r": - cycles_total += 1 - code_gen_dict["$CYCLES_TOTAL$"] = [str(cycles_total)] - - code_gen_dict["$START_COUNTER$"] = [str(start_sequence[0])] - code_gen_dict["$LOOP_MAIN_COUNTER$"] = [str(loop_sequence_1_counter)] - code_gen_dict["$LOOP_INTER_COUNTER$"] = [str(loop_counter)] - - code_gen_dict["$LOOP_MAIN_1_COUNTER$"] = [str(loop_sequence_1[0])] - code_gen_dict["$LOOP_MAIN_2_COUNTER$"] = [str(loop_sequence_1[2])] - - code_gen_dict["$LOOP_INTER_1_COUNTER$"] = [str(loop_sequence_2[0])] - code_gen_dict["$LOOP_INTER_2_COUNTER$"] = [str(loop_sequence_2[2])] - - code_gen_dict["$LOOP_END_1_COUNTER$"] = [str(end_sequence[0])] - code_gen_dict["$LOOP_END_2_COUNTER$"] = [str(end_sequence[2])] - - code_gen_dict["$READ_CMD_MAP$"] = [ - "{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][0], - loop_sequence_1[1][0], - loop_sequence_1[3][0], - loop_sequence_2[1][0], - loop_sequence_2[3][0], - end_sequence[1][0], - end_sequence[3][0], - ) - ] - code_gen_dict["$WRITE_CMD_MAP$"] = [ - "{{ {}, {}, {}, {}, {}, {}, {} }}".format( - start_sequence[1][1], - loop_sequence_1[1][1], - loop_sequence_1[3][1], - loop_sequence_2[1][1], - loop_sequence_2[3][1], - end_sequence[1][1], - end_sequence[3][1], - ) - ] + kernel_height <= ifm_dim_h + and kernel_width <= ifm_dim_w + and stride_h <= ifm_dim_h + and stride_w <= ifm_dim_w + ), "Illegal conv configuration: kernel or stride > FM dimension" - code_gen_dict["$SIMD$"] = [str(simd)] - code_gen_dict["$MMV_IN$"] = [str(mmv_in)] - code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] + if k_h == 1 and k_w == 1: + assert simd == ifm_ch, "1x1 Kernel only supported in parallel mode (SIMD=C)" - return template_path, code_gen_dict - - def select_impl_style(self): - ifm_ch = self.get_nodeattr("IFMChannels") - k = self.get_nodeattr("ConvKernelDim") - simd = self.get_nodeattr("SIMD") - M = self.get_nodeattr("M") - - k_h, k_w = k # init folding config if self.get_nodeattr("parallel_window"): - mmv_in = M * 1 + # mmv_in = M * 1 mmv_out = M * k_h * k_w assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" else: - mmv_in = 1 + # mmv_in = 1 mmv_out = 1 assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide C" - # TODO: check allowed hyperparams - # for 1D case: it does not matter if dummy dim is x or y - # TODO: move/duplicate these checks in corresponding convert_to_hls transformation (?) - # choose implementation style if mmv_out > 1 or (k_h == 1 and k_w == 1): impl_style = "parallel" @@ -1106,6 +663,9 @@ def select_impl_style(self): else: impl_style = "default" + assert ( + impl_style == "default" + ), "ERROR: Parallel window mode not yet implemented" return impl_style def generate_hdl(self): @@ -1114,12 +674,12 @@ def generate_hdl(self): # prepare code generation by filling out dictionaries if impl_style == "default": template_path, code_gen_dict = self.prepare_codegen_default() - elif impl_style == "parallel": - template_path, code_gen_dict = self.prepare_codegen_parallel() + else: + raise Exception("Requested impl. style not implemented") # add general parameters to dictionary code_gen_dict["$TOP_MODULE_NAME$"] = [self.get_verilog_top_module_name()] - # save top module name so we can refer to it even after this node has been renamed + # save top module name so we can refer to it after this node has been renamed # (e.g. 
by GiveUniqueNodeNames(prefix) during MakeZynqProject) self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] @@ -1157,7 +717,8 @@ def generate_hdl(self): ) as f: f.write(template_wrapper) - # set ipgen_path and ip_path so that HLS-Synth transformation and stich_ip transformation do not complain + # set ipgen_path and ip_path so that HLS-Synth transformation + # and stich_ip transformation do not complain self.set_nodeattr("ipgen_path", code_gen_dir) self.set_nodeattr("ip_path", code_gen_dir) @@ -1191,7 +752,6 @@ def prepare_rtlsim(self): def code_generation_ipi(self): """Constructs and returns the TCL for node instantiation in Vivado IPI.""" - vlnv = self.get_nodeattr("ip_vlnv") code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") cmd = [ diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index 23943084ab..5c94272bad 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -109,6 +109,7 @@ def apply(self, model): "FMPadding_Batch", "ConvolutionInputGenerator", "ConvolutionInputGenerator1D", + "ConvolutionInputGenerator_rtl", ] # these ops are preceded by depthwise SWG and have special behavior, # as explained in the SetFolding docstring @@ -174,6 +175,7 @@ def apply(self, model): if op_type in [ "ConvolutionInputGenerator", "ConvolutionInputGenerator1D", + "ConvolutionInputGenerator_rtl", ]: depthwise = node_inst.get_nodeattr("depthwise") if depthwise == 0: diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 7dcae82afe..56438ac6b6 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -73,6 +73,9 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod if use_rtl_swg and exec_mode == "cppsim": pytest.skip("cppsim not supported for RTL SWG") + if use_rtl_swg and kernel_size == 1: + pytest.skip("1x1 kernel not supported by current RTL SWG") + if depthwise is True: group = out_chn = in_chn conv_param_shape = [out_chn, 1, kernel_size, kernel_size] diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index d3ea9d117c..eeeb093294 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -28,17 +28,14 @@ import pytest -import numpy as np from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.im2col import compute_conv_output_dim -from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.util.basic import gen_finn_dt_tensor import finn.core.onnx_exec as oxe -from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode @@ -133,11 +130,6 @@ def make_single_slidingwindow_modelwrapper( model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", odt) - # DEBUG - # swg_node = 
model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] - # swg_inst = getCustomOp(swg_node) - # swg_inst.set_nodeattr("rtlsim_trace", "/home/felixj/WD/finn/finn-rtllib/swg/swg_test_trace.vcd") - return model @@ -147,38 +139,24 @@ def prepare_inputs(input_tensor): # input datatype @pytest.mark.parametrize("idt", [DataType["UINT4"]]) - -# @pytest.mark.parametrize( -# "conv_config", -# [ -# [[12,12], [3, 3], [1, 1], [1, 1]], -# [[13,13], [3, 3], [1, 1], [1, 1]], -# [[12,12], [3, 3], [2, 2], [1, 1]], -# [[13,13], [3, 3], [2, 2], [1, 1]], -# ], -# ) # kernel size -@pytest.mark.parametrize("k", [[1, 1], [2, 2], [3, 3], [1, 2], [1, 3]]) +@pytest.mark.parametrize("k", [[2, 2], [3, 3], [1, 3]]) # input dimension -@pytest.mark.parametrize( - "ifm_dim", [[8, 8], [13, 13], [1, 11], [1, 12], [1, 13], [1, 14]] -) +@pytest.mark.parametrize("ifm_dim", [[24, 24], [13, 13], [1, 14]]) # input channels @pytest.mark.parametrize("ifm_ch", [6]) # Stride -@pytest.mark.parametrize("stride", [[1, 1], [2, 2], [1, 2]]) +@pytest.mark.parametrize("stride", [[1, 1], [2, 2]]) # Dilation -@pytest.mark.parametrize("dilation", [[1, 1], [2, 2], [1, 3]]) +@pytest.mark.parametrize("dilation", [[1, 1], [2, 2]]) # depthwise @pytest.mark.parametrize("dw", [0, 1]) - # input channel parallelism ("SIMD") @pytest.mark.parametrize("simd", [1, 2, 3, 6]) # parallel_window enable (MMV_out = M*K) -@pytest.mark.parametrize("parallel_window", [0, 1]) +@pytest.mark.parametrize("parallel_window", [0]) # in/out MMV ("M") @pytest.mark.parametrize("m", [1]) - # Flip dimensions @pytest.mark.parametrize("flip", [False]) @pytest.mark.slow @@ -186,11 +164,6 @@ def prepare_inputs(input_tensor): def test_fpgadataflow_slidingwindow_rtl( idt, k, ifm_dim, ifm_ch, stride, dilation, dw, simd, m, parallel_window, flip ): - # ifm_dim = conv_config[0] - # k = conv_config[1] - # stride = conv_config[2] - # dilation= conv_config[3] - if flip: if ( ifm_dim[0] == ifm_dim[1] @@ -228,7 +201,8 @@ def test_fpgadataflow_slidingwindow_rtl( k_w == 1 and (stride_w != 1 or dilation_w != 1) ): pytest.skip( - "Illegal convolution configuration: stride or dilation defined for unitary kernel dim" + """Illegal convolution configuration: + stride or dilation defined for unitary kernel dim""" ) if k_h == 1 and k_w == 1 and simd != ifm_ch: pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") @@ -274,17 +248,6 @@ def test_fpgadataflow_slidingwindow_rtl( ) y_expected = oxe.execute_onnx(golden, input_dict)["outp"] - # DEBUG - print("-------expected:") - print(y_expected) - print("--------produced:") - print(y_produced) - - node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] - inst = getCustomOp(node) - cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") - print("RTLSIM cycles: %d" % cycles_rtlsim) - if dw == 0: assert (y_produced == y_expected).all() else: @@ -294,9 +257,3 @@ def test_fpgadataflow_slidingwindow_rtl( y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) assert (y_produced == y_expected).all() - - -# exp_cycles_dict = model.analysis(exp_cycles_per_layer) -# exp_cycles = exp_cycles_dict[node.name] -# assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) -# assert exp_cycles != 0 From dfb428541d5d2832a4c3b4dd31a66b1c86ea45e1 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 9 Sep 2022 21:11:22 +0200 Subject: [PATCH 155/628] Lint fix --- .../fpgadataflow/convert_to_hls_layers.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff 
--git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 8306024eaa..850bcf6616 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -132,7 +132,7 @@ def apply(self, model): ) graph.node.insert(node_ind, padding_node) - if (self.use_rtl_variant): + if self.use_rtl_variant: ConvInpGen_node = helper.make_node( "ConvolutionInputGenerator_rtl", [ConvInpGen_input], @@ -166,12 +166,13 @@ def apply(self, model): if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: assert is_square_image, ( - "%s : DownSampler currently only supports square input images." + """%s : DownSampler currently only supports square + input images.""" % n.name ) assert is_equal_stride, ( - """%s : DownSampler currently only supports equal stride value - along different axes.""" + """%s : DownSampler currently only supports equal stride + value along different axes.""" % n.name ) ConvInpGen_idim = ConvInpGen_idim_h @@ -226,7 +227,8 @@ def apply(self, model): ) else: # 1D images and/or kernels assert is_1d_convolution, ( - "%s: ConvolutionInputGenerator1D works only for 1D convs" + """%s: ConvolutionInputGenerator1D works only + for 1D convs""" % n.name ) if dilation_h > 1 or dilation_w > 1: @@ -1697,4 +1699,4 @@ def apply(self, model): if graph_modified: model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) - return (model, graph_modified) \ No newline at end of file + return (model, graph_modified) From 38d6543502af2475e0b840fad0a8d5e87a6f550a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 13 Sep 2022 23:35:18 +0200 Subject: [PATCH 156/628] [Build] support batched verification i/o, one at a time --- src/finn/builder/build_dataflow_steps.py | 110 +++++++++++++++-------- 1 file changed, 73 insertions(+), 37 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 59f77650da..0aae7c9e6e 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -121,44 +121,80 @@ def verify_step( verify_out_dir = cfg.output_dir + "/verification_output" intermediate_models_dir = cfg.output_dir + "/intermediate_models" os.makedirs(verify_out_dir, exist_ok=True) - (in_npy, exp_out_npy) = cfg._resolve_verification_io_pair() - if need_parent: - assert ( - cfg.save_intermediate_models - ), "Enable save_intermediate_models for verification" - parent_model_fn = intermediate_models_dir + "/dataflow_parent.onnx" - child_model_fn = intermediate_models_dir + "/verify_%s.onnx" % step_name - model.save(child_model_fn) - out_tensor_name = ModelWrapper(parent_model_fn).graph.output[0].name - out_dict = execute_parent( - parent_model_fn, child_model_fn, in_npy, return_full_ctx=True - ) - out_npy = out_dict[out_tensor_name] - else: - inp_tensor_name = model.graph.input[0].name - out_tensor_name = model.graph.output[0].name - inp_dict = {inp_tensor_name: in_npy} - if rtlsim_pre_hook is not None: - out_dict = rtlsim_exec(model, inp_dict, pre_hook=rtlsim_pre_hook) + (in_npy_all, exp_out_npy_all) = cfg._resolve_verification_io_pair() + bsize_in = in_npy_all.shape[0] + bsize_out = exp_out_npy_all.shape[0] + assert bsize_in == bsize_out, "Batch sizes don't match for verification IO pair" + all_res = True + for b in range(bsize_in): + in_npy = np.expand_dims(in_npy_all[b], axis=0) + exp_out_npy = np.expand_dims(exp_out_npy_all[b], axis=0) + if 
need_parent: + assert ( + cfg.save_intermediate_models + ), "Enable save_intermediate_models for verification" + parent_model_fn = intermediate_models_dir + "/dataflow_parent.onnx" + child_model_fn = intermediate_models_dir + "/verify_%s.onnx" % step_name + model.save(child_model_fn) + parent_model = ModelWrapper(parent_model_fn) + out_tensor_name = parent_model.graph.output[0].name + exp_ishape = parent_model.get_tensor_shape(parent_model.graph.input[0].name) + if in_npy.shape != exp_ishape: + print( + "Verification input has shape %s while model expects %s" + % (str(in_npy.shape), str(exp_ishape)) + ) + print("Attempting to force model shape on verification input") + in_npy = in_npy.reshape(exp_ishape) + out_dict = execute_parent( + parent_model_fn, child_model_fn, in_npy, return_full_ctx=True + ) + out_npy = out_dict[out_tensor_name] else: - out_dict = execute_onnx(model, inp_dict, True) - out_npy = out_dict[out_tensor_name] - res = np.isclose(exp_out_npy, out_npy, atol=1e-3).all() - res_to_str = {True: "SUCCESS", False: "FAIL"} - res_str = res_to_str[res] - if cfg.verify_save_full_context: - verification_output_fn = verify_out_dir + "/verify_%s_%s.npz" % ( - step_name, - res_str, - ) - np.savez(verification_output_fn, **out_dict) - else: - verification_output_fn = verify_out_dir + "/verify_%s_%s.npy" % ( - step_name, - res_str, - ) - np.save(verification_output_fn, out_npy) - print("Verification for %s : %s" % (step_name, res_str)) + inp_tensor_name = model.graph.input[0].name + out_tensor_name = model.graph.output[0].name + exp_ishape = model.get_tensor_shape(inp_tensor_name) + if in_npy.shape != exp_ishape: + print( + "Verification input has shape %s while model expects %s" + % (str(in_npy.shape), str(exp_ishape)) + ) + print("Attempting to force model shape on verification input") + in_npy = in_npy.reshape(exp_ishape) + inp_dict = {inp_tensor_name: in_npy} + if rtlsim_pre_hook is not None: + out_dict = rtlsim_exec(model, inp_dict, pre_hook=rtlsim_pre_hook) + else: + out_dict = execute_onnx(model, inp_dict, True) + out_npy = out_dict[out_tensor_name] + exp_oshape = exp_out_npy.shape + if out_npy.shape != exp_oshape: + print( + "Verification output has shape %s while model produces %s" + % (str(exp_oshape), str(out_npy.shape)) + ) + print("Attempting to force model shape on verification output") + out_npy = out_npy.reshape(exp_oshape) + + res = np.isclose(exp_out_npy, out_npy, atol=1e-3).all() + all_res = all_res and res + res_to_str = {True: "SUCCESS", False: "FAIL"} + res_str = res_to_str[res] + if cfg.verify_save_full_context: + verification_output_fn = verify_out_dir + "/verify_%s_%d_%s.npz" % ( + step_name, + b, + res_str, + ) + np.savez(verification_output_fn, **out_dict) + else: + verification_output_fn = verify_out_dir + "/verify_%s_%d_%s.npy" % ( + step_name, + b, + res_str, + ) + np.save(verification_output_fn, out_npy) + print("Verification for %s : %s" % (step_name, res_to_str[all_res])) def prepare_for_stitched_ip_rtlsim(verify_model, cfg): From 25499411efb47adcf242329b81e4dd59a3916597 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 13 Sep 2022 23:35:47 +0200 Subject: [PATCH 157/628] [Util] set property on SDP nodes to get full context --- src/finn/util/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/util/test.py b/src/finn/util/test.py index f5d3b1c30b..bfe4aa0bb8 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -180,6 +180,7 @@ def execute_parent(parent_path, child_path, input_tensor_npy, return_full_ctx=Fa sdp_node = 
parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) sdp_node.set_nodeattr("model", child_path) + sdp_node.set_nodeattr("return_full_exec_context", 1 if return_full_ctx else 0) ret = execute_onnx(parent_model, {iname: input_tensor_npy}, True) if return_full_ctx: return ret From 0813455822eb317cc5dca4ba9f41cd7b74f81479 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 14 Sep 2022 13:17:45 +0200 Subject: [PATCH 158/628] [Build] keep different vcd's from batch inputs --- src/finn/builder/build_dataflow_steps.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 0aae7c9e6e..6e1fb16a53 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -29,6 +29,7 @@ import json import numpy as np import os +import shutil from copy import deepcopy from distutils.dir_util import copy_tree from qonnx.core.modelwrapper import ModelWrapper @@ -194,6 +195,11 @@ def verify_step( res_str, ) np.save(verification_output_fn, out_npy) + if cfg.verify_save_rtlsim_waveforms: + vcd_path = model.get_metadata_prop("rtlsim_trace") + if vcd_path is not None and os.path.isfile(vcd_path): + new_vcd_path = vcd_path.replace(".vcd", "_%d.vcd" % b) + shutil.move(vcd_path, new_vcd_path) print("Verification for %s : %s" % (step_name, res_to_str[all_res])) From 10329d791c271a0aaf5dd787c27284fb8ab6cf7d Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 14 Sep 2022 13:18:18 +0200 Subject: [PATCH 159/628] [Test] batch-4 inputs and result checking for test_build_dataflow --- .../build_dataflow/expected_output.npy | Bin 136 -> 160 bytes src/finn/qnn-data/build_dataflow/input.npy | Bin 3264 -> 12672 bytes tests/util/test_build_dataflow.py | 23 +++++++++++++----- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/finn/qnn-data/build_dataflow/expected_output.npy b/src/finn/qnn-data/build_dataflow/expected_output.npy index a8d09384633791b7e3760dc8a2d1ba88a05d526d..98037351bb4ee49985a98631750f18e9b86965b1 100644 GIT binary patch delta 44 dcmeBRT);TNmeFLQos$AP0|YQZX+|gwqX2|=184vM delta 20 XcmZ3$*ugl#meFvcof8KW0|Wp7F~|c& diff --git a/src/finn/qnn-data/build_dataflow/input.npy b/src/finn/qnn-data/build_dataflow/input.npy index edd24de05a33a15ebc330cdab31f3d77d2c47196..8bece67b7daf5b7668ff5e7515f15a891146b00b 100644 GIT binary patch literal 12672 zcmeHNNoW*76zv>T6qkTv1P?YC9hXs}1aV2URTS5_fVf68Bom{=7$+e{lc-1#PZAVF z#KRyWIf-Wv;?c8+2k|5#A|6CUL_GMO*2l7$I5R!{OAr0wQ_FPMtN-fPuchZk)4rX% z4_0}7-ax7?+tZp$t@cw5?e(d&pK3py>&<1lj-Af6Wm&&D)7g_neNRWGJB#vy`n10+ z?Jr+RU6}TV{IX?|mq@}u!axZOjBfC}RU18T4zvc^jdC?qOO68LLP*b`yt#-j!tc8y z^y>=Vc>^6W>0Gqa3S}QMCYAo2M>%WNO-4Qrxw_u-hJoLLl>HIyM1B-?od)wc%Gz=t z7(>9@104ajraVOZCxCMiN?dKJ0uINr3tFd)$FVzS@cNap$lExBwFYTE&}Fh32vwtWW%$87~vTW$ht065*yHf7sxfyZXx(6;$dO$h?e+itK9Bb}^l+bpy%ar3q^ z)>rV@Vr+X0tV`=OnfyK1XkU~yr<~sP!k(P5%>~{kYuxB3zVg@!-Y#HiuQ%oJo;>@1 zGVl)m_zk)k{=$6&btd$Yf}bu#yW9&E(nF~Kh`1A%gt7La{i)D83;%OWHZo>$FH5`j zLOqlNW6P7k^P0?WaqVvz|GB0T>uHEH+QfYX`*IoixP)s$p?Jc5t&C@!c!m1?9`>9y zcs#%OA=?YR6SeM^g>@WQgE4Yhuum3`)yQiK=Rlvq<2h7(RRfd$dD39Y$IyoI$y**1=m@W`7Vqs{NIh)o1s2yi z7s78Myo*uaV;f_(0qYpFuEe#wA`Xmg2OM$v*x+q+gvT>2$Bt)GZQ)+z6tH+#?l~B{ zufXGd$UNdX5Z=L%%exM^d;gKa-sLw9wk6z=w~0`D4dk3? 
zH_!DTeU10>LHU-3eZK*1<;R$B^clNfQN9=0cw2$>x9^IyQSQ}=8x>FdqbNJDkMTzQ zsuuZjg=a4DgJSQ-!RNse@ECLG!?s`1cmBrxSbZ-AkKf3I1E!oi+!sfYVt9-LY~RMK zM>(qSTetYjb@Z9a^MllhZ%CPGCBk_teP>&emo!|*m{W<^aU3`j@BM2AegK}tl;HpX literal 3264 zcmeH|v1?R85XPUKh)60+c}3(@Qk)@b6g8U|gNl(u5->Ji%w157$$1xJG)5m*mH|P; z>PoTl53smOti;MfEDlQxYb){ly|G^95)lwPz*z{bjEPL(7bfv8Mfx~^V{`v>O!M=Dk z-m>h9HYLeW6gk&&P0;;3KiskxrQ?gd2<(|<^S>^LZMrOuys_D_35lOO>0mpO=C`~y zk|@x_TG+=czm~Y|FlI53fy)k?=rR5o*FikH;DZ#MkhAbwBoKPg|9yV%L;wTsg@*FTjNkKitd0d0|W_ z$rlOFG_!2}_p0)c|BfH;YqUBUSp!v}D2#eew?TdB9dQ1PHAaHnizkXN`JB|WD14WV zbLyA7ZKXn_Q`KAF~evHE@5B?TYZ0pVXy_1g#^%~czcw@zYZW3}uuI1&>{k))f ztIa$bTBC38MlwFmmou!$hP{)(WoTb-x7S_P!;qJ^KPZlS0yXkp*jp!YijP3MxZitf z`7f%Dx58_oE+6n-xtGKN;4277PY2G#WwoUa`k|htY!&(PX6Y06@~Cp05^f8e58sv{ z;j}O#*!SkL^fkqiqnjk*nsR>@jw{ak!#if*;r+wLw*`B}kLY|Gcz>O0OZh`C8Gg!_ zxLZLEc+0|s0FS+G{J0y8)XaAWomvkG-R?`?_nAErM+~?rTo5=bWJ2Sp=6pk0r;q4+ z1bQ;85xdnpAiZ50F0;H57iV`@KJ1TsP3)9Ft=EM~fu33)7o_t)$)v6voHPAkjXWDk z;-YB3d@LW_aMDBSwmzPf{z*J@o@g*R-% aJu)#p?`>93y6((t Date: Wed, 14 Sep 2022 17:12:09 +0200 Subject: [PATCH 160/628] Changes to make --- fetch-repos.sh | 3 +++ src/finn/transformation/fpgadataflow/templates.py | 3 +++ src/finn/util/basic.py | 4 ++-- src/finn/util/platforms.py | 2 ++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 2dd5e51934..9130c183aa 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -37,6 +37,7 @@ OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" +# TODO: KV260 ADD commit version QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" @@ -47,6 +48,8 @@ HLSLIB_URL="https://github.com/Xilinx/finn-hlslib.git" OMX_URL="https://github.com/maltanar/oh-my-xilinx.git" AVNET_BDF_URL="https://github.com/Avnet/bdf.git" XIL_BDF_URL="https://github.com/Xilinx/XilinxBoardStore.git" +# TODO: KV260 ADD KV260 board downloads + QONNX_DIR="qonnx" FINN_EXP_DIR="finn-experimental" diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index 78bcdea0d7..0554a9cc8e 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -126,6 +126,9 @@ } elseif {$BOARD == "Pynq-Z1"} { set ZYNQ_TYPE "zynq_7000" set_property board_part www.digilentinc.com:pynq-z1:part0:1.0 [current_project] +} elseif {$BOARD == "kv260_som"} { + set ZYNQ_TYPE "zynq_us+" + set_property board_part xilinx.com:kv260_som:part0:1.3 [current_project] } else { puts "Unrecognized board" } diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 960b7f7c82..3bc5b803db 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -40,7 +40,7 @@ pynq_part_map["ZCU104"] = "xczu7ev-ffvc1156-2-e" pynq_part_map["ZCU111"] = "xczu28dr-ffvg1517-2-e" pynq_part_map["RFSoC2x2"] = "xczu28dr-ffvg1517-2-e" -pynq_part_map["kv260_som"] = "SK-KV260-G" +pynq_part_map["KV260_SOM"] = "xck26-sfvc784-2LV-c" # native AXI HP port width (in bits) for PYNQ boards @@ -52,7 +52,7 @@ pynq_native_port_width["ZCU104"] = 128 pynq_native_port_width["ZCU111"] = 128 pynq_native_port_width["RFSoC2x2"] = 128 -pynq_native_port_width["kv260_som"] = 128 +pynq_native_port_width["KV260_SOM"] = 128 # Alveo device and platform mappings alveo_part_map = dict() diff --git a/src/finn/util/platforms.py 
b/src/finn/util/platforms.py index 8212cb5712..ad8604f46e 100644 --- a/src/finn/util/platforms.py +++ b/src/finn/util/platforms.py @@ -467,6 +467,7 @@ def compute_resources(self): ] +# TODO: ADD KV260 to platform list platforms = dict() platforms["U50"] = Alveo_NxU50_Platform platforms["U200"] = Alveo_NxU200_Platform @@ -478,3 +479,4 @@ def compute_resources(self): platforms["ZCU104"] = ZU7EV_Platform platforms["ZCU102"] = ZU9EG_Platform platforms["ZCU111"] = ZU28DR_Platform +# platforms["kv260_som"] = # TODO kv260 platform... xck26_ \ No newline at end of file From 4f69f4fcaa854567f0f3d0e067da0423417227c6 Mon Sep 17 00:00:00 2001 From: patrickg <44997541+patrickgeel@users.noreply.github.com> Date: Thu, 15 Sep 2022 10:38:31 +0200 Subject: [PATCH 161/628] update fetch-repos.sh --- fetch-repos.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 9130c183aa..c29805c01e 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -36,8 +36,8 @@ HLSLIB_COMMIT="e9946e5e56acd85837e8e79224d2bb60764bed69" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" +KV260_BDF_COMMIT="98e0d3efc901f0b974006bc4370c2a7ad8856c79" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" -# TODO: KV260 ADD commit version QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" @@ -48,8 +48,7 @@ HLSLIB_URL="https://github.com/Xilinx/finn-hlslib.git" OMX_URL="https://github.com/maltanar/oh-my-xilinx.git" AVNET_BDF_URL="https://github.com/Avnet/bdf.git" XIL_BDF_URL="https://github.com/Xilinx/XilinxBoardStore.git" -# TODO: KV260 ADD KV260 board downloads - +KV260_BDF_URL="https://github.com/Xilinx/XilinxBoardStore.git" QONNX_DIR="qonnx" FINN_EXP_DIR="finn-experimental" @@ -60,6 +59,7 @@ HLSLIB_DIR="finn-hlslib" OMX_DIR="oh-my-xilinx" AVNET_BDF_DIR="avnet-bdf" XIL_BDF_DIR="xil-bdf" +KV260_SOM_BDF_DIR="KV260-SOM-bdf" # absolute path to this script, e.g. /home/user/bin/foo.sh SCRIPT=$(readlink -f "$0") @@ -107,6 +107,7 @@ fetch_board_files() { unzip -q pynq-z2.zip cp -r $SCRIPTPATH/deps/$AVNET_BDF_DIR/* $SCRIPTPATH/deps/board_files/ cp -r $SCRIPTPATH/deps/$XIL_BDF_DIR/boards/Xilinx/rfsoc2x2 $SCRIPTPATH/deps/board_files/; + cp -r $SCRIPTPATH/deps/$KV260_SOM_BDF_DIR/boards/Xilinx/kv260_som $SCRIPTPATH/deps/board_files/; cd $OLD_PWD } @@ -119,6 +120,7 @@ fetch_repo $HLSLIB_URL $HLSLIB_COMMIT $HLSLIB_DIR fetch_repo $OMX_URL $OMX_COMMIT $OMX_DIR fetch_repo $AVNET_BDF_URL $AVNET_BDF_COMMIT $AVNET_BDF_DIR fetch_repo $XIL_BDF_URL $XIL_BDF_COMMIT $XIL_BDF_DIR +fetch_repo $KV260_BDF_URL $KV260_BDF_COMMIT $KV260_SOM_BDF_DIR # download extra Pynq board files and extract if needed if [ ! 
-d "$SCRIPTPATH/deps/board_files" ]; then From 5904e5cbcb08af5a0aeedf66cfbba162f7fec888 Mon Sep 17 00:00:00 2001 From: patrickg <44997541+patrickgeel@users.noreply.github.com> Date: Thu, 15 Sep 2022 10:43:51 +0200 Subject: [PATCH 162/628] fix name in fetch repos and capitalize kv260 in templates --- fetch-repos.sh | 2 +- src/finn/transformation/fpgadataflow/templates.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index c29805c01e..5288186241 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -59,7 +59,7 @@ HLSLIB_DIR="finn-hlslib" OMX_DIR="oh-my-xilinx" AVNET_BDF_DIR="avnet-bdf" XIL_BDF_DIR="xil-bdf" -KV260_SOM_BDF_DIR="KV260-SOM-bdf" +KV260_SOM_BDF_DIR="kv260-som-bdf" # absolute path to this script, e.g. /home/user/bin/foo.sh SCRIPT=$(readlink -f "$0") diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index 0554a9cc8e..0870fa40c8 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -126,7 +126,7 @@ } elseif {$BOARD == "Pynq-Z1"} { set ZYNQ_TYPE "zynq_7000" set_property board_part www.digilentinc.com:pynq-z1:part0:1.0 [current_project] -} elseif {$BOARD == "kv260_som"} { +} elseif {$BOARD == "KV260_SOM"} { set ZYNQ_TYPE "zynq_us+" set_property board_part xilinx.com:kv260_som:part0:1.3 [current_project] } else { From 5e0e89895a1a0c8ce246f7d17c975ff834cbc6e7 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 16 Sep 2022 14:56:52 +0200 Subject: [PATCH 163/628] Address reviewer comments --- finn-rtllib/swg/swg_template_default.sv | 38 +++----- finn-rtllib/swg/swg_template_wrapper.v | 29 ++---- .../convolutioninputgenerator_rtl.py | 92 ++++++++++--------- .../fpgadataflow/convert_to_hls_layers.py | 31 +++++-- .../fpgadataflow/set_folding.py | 6 +- .../test_convert_to_hls_conv_layer.py | 8 +- ...est_fpgadataflow_convinputgenerator_rtl.py | 2 +- 7 files changed, 96 insertions(+), 110 deletions(-) diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 2d255a35ed..0aa309f890 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -36,28 +36,19 @@ module $TOP_MODULE_NAME$_controller #( logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS-1; logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS-1; - logic [INCR_BITWIDTH-1:0] tail_incr_reg = 'x; assign addr_incr = ADDR_INCREMENT_MAP[State]; - assign tail_incr = tail_incr_reg; // combinational logic for tail_incr generation - uwire tail_incr_inner_condition; - generate - if (IS_DEPTHWISE) - assign tail_incr_inner_condition = (Counter_loop_kh >= 0); - else - assign tail_incr_inner_condition = 0; - endgenerate - - always @ (tail_incr_inner_condition, Counter_loop_w, Counter_loop_h) begin + uwire tail_incr_inner_condition = IS_DEPTHWISE? 
(Counter_loop_kh >= 0) : 0; + always_comb begin : blkTail if (tail_incr_inner_condition) - tail_incr_reg = 1; + tail_incr = 1; else if (Counter_loop_w >= 0) - tail_incr_reg = $TAIL_INCR_W$; + tail_incr = $TAIL_INCR_W$; else if (Counter_loop_h >= 0) - tail_incr_reg = $TAIL_INCR_H$; + tail_incr = $TAIL_INCR_H$; else - tail_incr_reg = $TAIL_INCR_LAST$; + tail_incr = $TAIL_INCR_LAST$; end // combinational next state logic @@ -132,13 +123,8 @@ module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; logic [WIDTH-1:0] Out = 'x; always_ff @(posedge clk) begin - if (!rst_n) begin - Out <= 'x; - end - else begin - if (read_enable) Out <= Ram[read_addr]; - if (write_enable) Ram[write_addr] <= data_in; - end + if (read_enable) Out <= Ram[read_addr]; + if (write_enable) Ram[write_addr] <= data_in; end assign data_out = Out; @@ -213,7 +199,7 @@ module $TOP_MODULE_NAME$_impl #( logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] Newest_buffered_elem = -1; logic [$clog2(LAST_READ_ELEM+1)+1-1:0] Current_elem = 0; logic [$clog2(LAST_READ_ELEM+1)+1-1:0] First_elem_next_window = 0; - logic [$clog2(ELEM_PER_WINDOW) -1:0] K = 0; + logic [$clog2(ELEM_PER_WINDOW) -1:0] Position_in_window = 0; logic [$clog2(BUF_ELEM_TOTAL)+1 -1:0] Window_buffer_read_addr_reg = 0; logic [$clog2(BUF_ELEM_TOTAL)-1:0] Window_buffer_write_addr_reg = 0; @@ -255,7 +241,7 @@ module $TOP_MODULE_NAME$_impl #( Newest_buffered_elem <= -1; Current_elem <= 0; First_elem_next_window <= 0; - K <= 0; + Position_in_window <= 0; Window_buffer_read_addr_reg <= 0; Window_buffer_write_addr_reg <= 0; Fetching_done <= 0; @@ -295,10 +281,10 @@ module $TOP_MODULE_NAME$_impl #( Window_buffer_read_addr_reg <= ra + ra_correct; //keep track where we are within a window - K <= (K != ELEM_PER_WINDOW - 1)? K+1 : 0; + Position_in_window <= (Position_in_window != ELEM_PER_WINDOW - 1)? 
Position_in_window+1 : 0; //update first element of next window to allow buffer overwrite up until that point - if (K == 0) + if (Position_in_window == 0) First_elem_next_window <= First_elem_next_window + tail_incr; //check if this is the last write cycle (Writing_done will be true afterwards) diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index 1b470817d6..4411348beb 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -1,14 +1,16 @@ `timescale 1 ns / 1 ps module $TOP_MODULE_NAME$ ( - ap_clk, - ap_rst_n, - in0_V_TDATA, - in0_V_TVALID, - in0_V_TREADY, - out_V_TDATA, - out_V_TVALID, - out_V_TREADY +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) +input ap_clk, +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) +input ap_rst_n, +input [BUF_IN_WIDTH-1:0] in0_V_TDATA, +input in0_V_TVALID, +output in0_V_TREADY, +output [BUF_OUT_WIDTH-1:0] out_V_TDATA, +output out_V_TVALID, +input out_V_TREADY ); // top-level parameters (set via code-generation) @@ -21,17 +23,6 @@ parameter MMV_OUT = $MMV_OUT$; parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; -(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) -input ap_clk; -(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) -input ap_rst_n; -input [BUF_IN_WIDTH-1:0] in0_V_TDATA; -input in0_V_TVALID; -output in0_V_TREADY; -output [BUF_OUT_WIDTH-1:0] out_V_TDATA; -output out_V_TVALID; -input out_V_TREADY; - $TOP_MODULE_NAME$_impl #( .BIT_WIDTH(BIT_WIDTH), diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 98351942b9..366dd396d1 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020, Xilinx +# Copyright (c) 2022, Xilinx # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -72,7 +72,7 @@ def get_nodeattr_types(self): "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), "M": ("i", False, 1), - "parallel_window": ("i", False, 0, {0, 1}), + "parallel_window": ("i", False, 0, {0}), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs @@ -212,6 +212,49 @@ def get_1d_conv_attrs_normalized(self): return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation) + def get_buffer_depth(self): + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + simd = self.get_nodeattr("SIMD") + + k_h, k_w = k + h, w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + mmv_in = 1 + mmv_out = 1 + channel_factor = int(ifm_ch / simd) + + impl_style = self.select_impl_style() + if impl_style == "default": + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_min_size = ( + (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + + # add additional buffer space in case of stride > 1 + # this minimizes cycle count as it allows an earlier pre-load of inputs + buffer_depth = ( + buffer_min_size + + max( + 0, + ((stride_w - 1) - (int(mmv_out * k_h * k_w / mmv_in))) + * channel_factor, + ) + + max( + 0, + ((stride_h - 1) * w - (int(mmv_out * k_h * k_w / mmv_in))) + * channel_factor, + ) + ) + else: + buffer_depth = 0 + raise Exception("Requested impl. style not implemented") + return buffer_depth + def get_exp_cycles(self): simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") @@ -268,17 +311,11 @@ def get_exp_cycles(self): def bram_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - impl_style = self.select_impl_style() - # call codegen preparation to populate self.buffer_depth - if impl_style == "default": - self.prepare_codegen_default() - else: - raise Exception("Requested impl. style not implemented") # NOTE: Actual BRAM usage might be lower in some cases. # This does not account for the exact Vivado behavior yet. buffer_width = simd * self.get_input_datatype().bitwidth() - buffer_depth = self.buffer_depth + buffer_depth = self.get_buffer_depth() if ram_style == "block" or ram_style == "auto": if buffer_depth <= 512: ram_width = 36 @@ -321,15 +358,8 @@ def bram_estimation(self): def lut_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - impl_style = self.select_impl_style() - # call codegen preparation to populate self.buffer_depth - if impl_style == "default": - self.prepare_codegen_default() - else: - raise Exception("Requested impl. style not implemented") - buffer_width = simd * self.get_input_datatype().bitwidth() - buffer_depth = self.buffer_depth + buffer_depth = self.get_buffer_depth() if ram_style == "distributed": ram_luts = int(buffer_width * math.ceil(buffer_depth / 38)) else: @@ -339,15 +369,8 @@ def lut_estimation(self): def uram_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - impl_style = self.select_impl_style() - # call codegen preparation to populate self.buffer_depth - if impl_style == "default": - self.prepare_codegen_default() - else: - raise Exception("Requested impl. 
style not implemented") - buffer_width = simd * self.get_input_datatype().bitwidth() - buffer_depth = self.buffer_depth + buffer_depth = self.get_buffer_depth() if ram_style == "ultra": ram_depth = 4096 @@ -460,21 +483,7 @@ def prepare_codegen_default(self): (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 ) * channel_factor - # add additional buffer space in case of stride > 1 - # this minimizes cycle count as it allows an earlier pre-load of input elements - buffer_actual_size = ( - buffer_min_size - + max( - 0, - ((stride_w - 1) - (int(mmv_out * k_h * k_w / mmv_in))) * channel_factor, - ) - + max( - 0, - ((stride_h - 1) * w - (int(mmv_out * k_h * k_w / mmv_in))) - * channel_factor, - ) - ) - self.buffer_depth = buffer_actual_size # for resource estimation + buffer_actual_size = self.get_buffer_depth() code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] # compute some intermediate values, e.g., kernel "width" = k_w incl. dilation @@ -643,9 +652,6 @@ def select_impl_style(self): and stride_w <= ifm_dim_w ), "Illegal conv configuration: kernel or stride > FM dimension" - if k_h == 1 and k_w == 1: - assert simd == ifm_ch, "1x1 Kernel only supported in parallel mode (SIMD=C)" - # init folding config if self.get_nodeattr("parallel_window"): # mmv_in = M * 1 diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 850bcf6616..540c217cbc 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -132,7 +132,26 @@ def apply(self, model): ) graph.node.insert(node_ind, padding_node) - if self.use_rtl_variant: + is_kernel_pointwise = k_h == 1 and k_w == 1 + is_square_image = ConvInpGen_idim_h == ConvInpGen_idim_w + is_square_kernel = k_h == k_w + is_equal_stride = stride_h == stride_w + is_1d_convolution = (k_h == 1 and k_w > 1 and ifm_dim_h == 1) or ( + k_h > 1 and k_w == 1 and ifm_dim_w == 1 + ) + + # Ensure that RTL variant is not inserted for unsupported configuration + is_rtl_variant_compatible = True + if is_kernel_pointwise: + is_rtl_variant_compatible = False + if self.use_rtl_variant: + warnings.warn( + """%s : RTL ConvInpGen requested for unsupported + configuration. 
Falling back to HLS implementation.""" + % n.name + ) + + if self.use_rtl_variant and is_rtl_variant_compatible: ConvInpGen_node = helper.make_node( "ConvolutionInputGenerator_rtl", [ConvInpGen_input], @@ -151,19 +170,11 @@ def apply(self, model): inputDataType=dt.name, outputDataType=dt.name, depthwise=depthwise, - name="ConvolutionInputGenerator_rtl" + n.name, + name="ConvolutionInputGenerator_rtl_" + n.name, ) graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) else: # Ensure that only supported HLS nodes are inserted - is_square_image = ConvInpGen_idim_h == ConvInpGen_idim_w - is_square_kernel = k_h == k_w - is_kernel_pointwise = k_h == 1 and k_w == 1 - is_equal_stride = stride_h == stride_w - is_1d_convolution = (k_h == 1 and k_w > 1 and ifm_dim_h == 1) or ( - k_h > 1 and k_w == 1 and ifm_dim_w == 1 - ) - if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: assert is_square_image, ( """%s : DownSampler currently only supports square diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index 5c94272bad..e24e24f1f8 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -172,11 +172,7 @@ def apply(self, model): "Expected SWU on DW op input, found " + swu_node.op_type ) elif op_type in simd_ops: - if op_type in [ - "ConvolutionInputGenerator", - "ConvolutionInputGenerator1D", - "ConvolutionInputGenerator_rtl", - ]: + if op_type.startswith("ConvolutionInputGenerator"): depthwise = node_inst.get_nodeattr("depthwise") if depthwise == 0: max_simd = node_inst.get_nodeattr("IFMChannels") diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 56438ac6b6..8c9f110c31 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -164,14 +164,10 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod inp_dict = {model.graph.input[0].name: x} assert oxe.compare_execution(model, new_model, inp_dict) - if use_rtl_swg: - downsampler_op_type = "ConvolutionInputGenerator_rtl" - else: - downsampler_op_type = "DownSampler" if kernel_size == 1 and stride > 1 and pad == 0: - assert new_model.graph.node[1].op_type == downsampler_op_type + assert new_model.graph.node[1].op_type == "DownSampler" if exec_mode == "rtlsim": - node = new_model.get_nodes_by_op_type(downsampler_op_type)[0] + node = new_model.get_nodes_by_op_type("DownSampler")[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = new_model.analysis(exp_cycles_per_layer) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index eeeb093294..5da1fa6eb1 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -142,7 +142,7 @@ def prepare_inputs(input_tensor): # kernel size @pytest.mark.parametrize("k", [[2, 2], [3, 3], [1, 3]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[24, 24], [13, 13], [1, 14]]) +@pytest.mark.parametrize("ifm_dim", [[24, 24], [15, 6], [13, 13], [1, 14]]) # input channels @pytest.mark.parametrize("ifm_ch", [6]) # Stride From dd95873c76beb3db8244afb7f8a56635cd404b18 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 19 Sep 2022 09:50:49 +0200 Subject: [PATCH 164/628] [Deps] update QONNX --- 
fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index e105f4cbfb..0026e750b5 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="92184fea2dd417bc7a53c82811fef271e4833c4c" +QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From e8f7c756929a07b123257f97a95ab05a151cfb2e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 20 Sep 2022 00:38:20 +0200 Subject: [PATCH 165/628] [FIFO] add hw maxcount tracking to rtl FIFOs with opt attribute --- finn-rtllib/memstream/hdl/Q_srl.v | 7 ++++++- src/finn/custom_op/fpgadataflow/streamingfifo.py | 10 ++++++++++ src/finn/custom_op/fpgadataflow/templates.py | 3 +++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v index b4e89628a4..3c884770e0 100644 --- a/finn-rtllib/memstream/hdl/Q_srl.v +++ b/finn-rtllib/memstream/hdl/Q_srl.v @@ -69,7 +69,7 @@ `define Q_srl -module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); +module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); parameter depth = 16; // - greatest #items in queue (2 <= depth <= 256) parameter width = 16; // - width of data (i_d, o_d) @@ -90,7 +90,9 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); wire o_b; // - output stream back-pressure output [addrwidth:0] count; // - output number of elems in queue + output [addrwidth:0] maxcount; // - maximum observed count since reset + reg [addrwidth:0] maxcount_reg; // - maximum count seen until now reg [addrwidth-1:0] addr, addr_, a_; // - SRL16 address // for data output reg shift_en_; // - SRL16 shift enable @@ -124,6 +126,7 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); assign o_d = srlo; // - output data from queue assign o_v = o_v_reg; // - output valid if non-empty assign i_b = i_b_reg; // - input bp if full + assign maxcount = maxcount_reg; assign i_r = !i_b; assign o_b = !o_r; @@ -140,6 +143,7 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); addr_full <= 0; o_v_reg <= 0; i_b_reg <= 1; + maxcount_reg <= '0; end else begin state <= state_; @@ -147,6 +151,7 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); addr_full <= addr_full_; o_v_reg <= o_v_reg_; i_b_reg <= i_b_reg_; + maxcount_reg <= (count > maxcount_reg ? 
count : maxcount_reg); end end // always @ (posedge clock) diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index a7c3cd0be5..a0346f50bf 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -68,6 +68,8 @@ def get_nodeattr_types(self): "auto", {"auto", "block", "distributed", "ultra"}, ), + # whether depth monitoring is enabled (impl_style=rtl only) + "depth_monitor": ("i", False, 0), } my_attrs.update(super().get_nodeattr_types()) @@ -97,6 +99,14 @@ def infer_node_datatype(self, model): def verify_node(self): pass + def get_verilog_top_module_intf_names(self): + ret = super().get_verilog_top_module_intf_names() + is_rtl = self.get_nodeattr("impl_style") == "rtl" + is_depth_monitor = self.get_nodeattr("depth_monitor") == 1 + if is_rtl and is_depth_monitor: + ret["ap_none"] = ["maxcount"] + return ret + def get_verilog_top_module_name(self): "Return the Verilog top module name for this node." diff --git a/src/finn/custom_op/fpgadataflow/templates.py b/src/finn/custom_op/fpgadataflow/templates.py index e73fa9bb28..c7bbc3f139 100644 --- a/src/finn/custom_op/fpgadataflow/templates.py +++ b/src/finn/custom_op/fpgadataflow/templates.py @@ -319,6 +319,7 @@ ap_clk, ap_rst_n, count, +maxcount, in0_$HLS_SNAME$_TDATA, in0_$HLS_SNAME$_TVALID, in0_$HLS_SNAME$_TREADY, @@ -330,6 +331,7 @@ input ap_clk; input ap_rst_n; output $COUNT_RANGE$ count; +output $COUNT_RANGE$ maxcount; input $IN_RANGE$ in0_$HLS_SNAME$_TDATA; input in0_$HLS_SNAME$_TVALID; output in0_$HLS_SNAME$_TREADY; @@ -346,6 +348,7 @@ .clock(ap_clk), .reset(!ap_rst_n), .count(count), + .maxcount(maxcount), .i_d(in0_$HLS_SNAME$_TDATA), .i_v(in0_$HLS_SNAME$_TVALID), .i_r(in0_$HLS_SNAME$_TREADY), From 81f010b81cbc9708facf3f501791a8db1ee4d83c Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 20 Sep 2022 00:38:49 +0200 Subject: [PATCH 166/628] [FIFO] use hw maxcount monitoring in InsertAndSetFIFODepths --- .../fpgadataflow/set_fifo_depths.py | 38 ++++++++----------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 0139c71666..495b7460c8 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -267,13 +267,15 @@ def apply(self, model): # gather FIFO names, check they are of expected depth fifos = {} - for node in model.graph.node: - if node.op_type == "StreamingFIFO": - fifos[node.name] = 0 - node = getCustomOp(node) - # check depths and fix as necessary - if node.get_nodeattr("depth") != self.max_depth: - node.set_nodeattr("depth", self.max_depth) + fifo_nodes = model.get_nodes_by_op_type("StreamingFIFO") + for node in fifo_nodes: + fifos[node.name] = 0 + node = getCustomOp(node) + node.set_nodeattr("depth_monitor", 1) + node.set_nodeattr("impl_style", "rtl") + # check depths and fix as necessary + if node.get_nodeattr("depth") != self.max_depth: + node.set_nodeattr("depth", self.max_depth) # insert FIFOs and do all transformations for RTLsim model = model.transform(AnnotateCycles()) @@ -324,21 +326,6 @@ def apply(self, model): else: set_signal(sim, "tvalid", 0) - # check/update all fifo counts - for key in fifos: - current_state = sim.internals["finn_design_i"][key]["inst"][ - key + "_" + key - ]["state"] - current_addr = sim.internals["finn_design_i"][key]["inst"][ - key + "_" + key - ]["addr"] - if 
current_state == 2: - current_count = current_addr + 2 - else: - current_count = current_state - if current_count > fifos[key]: - fifos[key] = current_count - # since latency estimation is very pessimistic, detect first output # and fast-forward the sim if get_signal(sim, "tvalid") != 0 and not output_detected: @@ -352,6 +339,12 @@ def apply(self, model): "No output detected, calculated FIFO depths may not be correct" ) + for ind, node in enumerate(fifo_nodes): + maxcount_name = "maxcount_%d" % ind + if ind == 0: + maxcount_name = "maxcount" + fifos[node.name] = sim[maxcount_name] + # Apply depths back into the model; # also set in/outFIFODepth to zero for non-FIFO # nodes, preventing further FIFO insertion @@ -364,6 +357,7 @@ def apply(self, model): depth = optimize_depth(fifos[node.name]) node_inst = getCustomOp(node) node_inst.set_nodeattr("depth", depth) + node_inst.set_nodeattr("depth_monitor", 0) # Set FIFO implementation/ram styles if depth > self.max_qsrl_depth: node_inst.set_nodeattr("impl_style", "vivado") From 20e4613f577e14b79a275e4f8dcae405e2df82b1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 20 Sep 2022 14:55:30 +0200 Subject: [PATCH 167/628] [FIFO] instead of fixed-depth large FIFO in sim, use tensor size --- .../fpgadataflow/set_fifo_depths.py | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 495b7460c8..a500a9a756 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -192,10 +192,11 @@ class InsertAndSetFIFODepths(Transformation): - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of Verilog FIFOs (Q_srl.v) - max_depth : how deep the "max"-sized FIFOs initially inserted will be + if set to None, use the tensor size as the depth - swg_exception : call CapConvolutionFIFODepths to make convolution FIFOs smaller where appropriate - vivado_ram_style : the StreamingFIFO.ram_style attribute to be used for - large FIFOs implemented by Vivado + large FIFOs implemented by Vivado afterwards Assumed input graph properties: - all nodes are fpgadataflow nodes @@ -210,7 +211,7 @@ class InsertAndSetFIFODepths(Transformation): necessary to insert FIFOs between them to prevent stalls due to bursty behavior. 
The sizes of those FIFOs are hard to predict analytically, so we do the following: - - insert very deep (default 16k deep) FIFOs between all fpgadataflow nodes + - insert deep (=tensor size) FIFOs between all fpgadataflow nodes - create stitched design - run through rtlsim with stream of multiple random input images (to fill pipeline) - keep track of observed maximum occupancy for each FIFO during rtlsim @@ -223,7 +224,7 @@ def __init__( fpgapart, clk_ns=10.0, max_qsrl_depth=256, - max_depth=2**14, + max_depth=None, swg_exception=True, vivado_ram_style="auto", ): @@ -236,6 +237,9 @@ def __init__( self.vivado_ram_style = vivado_ram_style def apply(self, model): + # these optypes may potentially use external weights + # we'll temporarily change them to use decoupled mode for FIFO sizing + extw_optypes = ["MatrixVectorActivation", "VectorVectorActivation"] # change external to decoupled and warn user # this way we are sure we have exactly one input/output modified_fc_nodes = [] @@ -246,9 +250,15 @@ def apply(self, model): ) assert node.op_type != "StreamingFIFO", "Found existing StreamingFIFO node" node = getCustomOp(node) - node.set_nodeattr("inFIFODepth", self.max_depth) - node.set_nodeattr("outFIFODepth", self.max_depth) - if node.onnx_node.op_type == "MatrixVectorActivation": + if self.max_depth is not None: + node.set_nodeattr("inFIFODepth", self.max_depth) + node.set_nodeattr("outFIFODepth", self.max_depth) + else: + i_depth = np.prod(node.get_folded_input_shape()[:-1]) + o_depth = np.prod(node.get_folded_output_shape()[:-1]) + node.set_nodeattr("inFIFODepth", i_depth) + node.set_nodeattr("outFIFODepth", o_depth) + if node.onnx_node.op_type in extw_optypes: mmode = node.get_nodeattr("mem_mode") if mmode == "external": modified_fc_nodes.append(node.onnx_node.name) @@ -370,9 +380,9 @@ def apply(self, model): else: getCustomOp(node).set_nodeattr("inFIFODepth", 0) getCustomOp(node).set_nodeattr("outFIFODepth", 0) - # for every FC node we changed from external to decoupled, + # for every extw node we changed from external to decoupled, # change back and reset implementation - if node.op_type == "MatrixVectorActivation": + if node.op_type in extw_optypes: if node.name in modified_fc_nodes: node_inst = getCustomOp(node) node_inst.set_nodeattr("mem_mode", "external") From 6ccc9499276c8f1958008e923f9d4fdb09480531 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 20 Sep 2022 14:55:30 +0200 Subject: [PATCH 168/628] [FIFO] instead of fixed-depth large FIFO in sim, use tensor size --- .../fpgadataflow/set_fifo_depths.py | 30 +++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 495b7460c8..90ea853b60 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -192,10 +192,11 @@ class InsertAndSetFIFODepths(Transformation): - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of Verilog FIFOs (Q_srl.v) - max_depth : how deep the "max"-sized FIFOs initially inserted will be + if set to None, use the tensor size as the depth - swg_exception : call CapConvolutionFIFODepths to make convolution FIFOs smaller where appropriate - vivado_ram_style : the StreamingFIFO.ram_style attribute to be used for - large FIFOs implemented by Vivado + large FIFOs implemented by Vivado afterwards Assumed input graph properties: - all nodes are fpgadataflow nodes @@ -210,7 +211,7 @@ 
class InsertAndSetFIFODepths(Transformation): necessary to insert FIFOs between them to prevent stalls due to bursty behavior. The sizes of those FIFOs are hard to predict analytically, so we do the following: - - insert very deep (default 16k deep) FIFOs between all fpgadataflow nodes + - insert deep (=tensor size) FIFOs between all fpgadataflow nodes - create stitched design - run through rtlsim with stream of multiple random input images (to fill pipeline) - keep track of observed maximum occupancy for each FIFO during rtlsim @@ -223,7 +224,7 @@ def __init__( fpgapart, clk_ns=10.0, max_qsrl_depth=256, - max_depth=2**14, + max_depth=None, swg_exception=True, vivado_ram_style="auto", ): @@ -236,6 +237,9 @@ def __init__( self.vivado_ram_style = vivado_ram_style def apply(self, model): + # these optypes may potentially use external weights + # we'll temporarily change them to use decoupled mode for FIFO sizing + extw_optypes = ["MatrixVectorActivation", "VectorVectorActivation"] # change external to decoupled and warn user # this way we are sure we have exactly one input/output modified_fc_nodes = [] @@ -246,9 +250,15 @@ def apply(self, model): ) assert node.op_type != "StreamingFIFO", "Found existing StreamingFIFO node" node = getCustomOp(node) - node.set_nodeattr("inFIFODepth", self.max_depth) - node.set_nodeattr("outFIFODepth", self.max_depth) - if node.onnx_node.op_type == "MatrixVectorActivation": + if self.max_depth is not None: + node.set_nodeattr("inFIFODepth", self.max_depth) + node.set_nodeattr("outFIFODepth", self.max_depth) + else: + i_depth = np.prod(node.get_folded_input_shape()[:-1]) + o_depth = np.prod(node.get_folded_output_shape()[:-1]) + node.set_nodeattr("inFIFODepth", i_depth) + node.set_nodeattr("outFIFODepth", o_depth) + if node.onnx_node.op_type in extw_optypes: mmode = node.get_nodeattr("mem_mode") if mmode == "external": modified_fc_nodes.append(node.onnx_node.name) @@ -274,7 +284,9 @@ def apply(self, model): node.set_nodeattr("depth_monitor", 1) node.set_nodeattr("impl_style", "rtl") # check depths and fix as necessary - if node.get_nodeattr("depth") != self.max_depth: + if (self.max_depth is not None) and ( + node.get_nodeattr("depth") != self.max_depth + ): node.set_nodeattr("depth", self.max_depth) # insert FIFOs and do all transformations for RTLsim @@ -370,9 +382,9 @@ def apply(self, model): else: getCustomOp(node).set_nodeattr("inFIFODepth", 0) getCustomOp(node).set_nodeattr("outFIFODepth", 0) - # for every FC node we changed from external to decoupled, + # for every extw node we changed from external to decoupled, # change back and reset implementation - if node.op_type == "MatrixVectorActivation": + if node.op_type in extw_optypes: if node.name in modified_fc_nodes: node_inst = getCustomOp(node) node_inst.set_nodeattr("mem_mode", "external") From d4e91a1987fb8c3bbb0af38749a98d4a30b7ef57 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 20 Sep 2022 16:55:17 +0200 Subject: [PATCH 169/628] [InsertDWC] use impl_style=vivado if widths not divisible --- src/finn/transformation/fpgadataflow/insert_dwc.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py index 9817f2e3d2..efc1799235 100644 --- a/src/finn/transformation/fpgadataflow/insert_dwc.py +++ b/src/finn/transformation/fpgadataflow/insert_dwc.py @@ -81,6 +81,12 @@ def apply(self, model): dwc_in_width = n0.get_outstream_width() # determine dwc outwidth dwc_out_width = n1.get_instream_width() + 
larger_width = max(dwc_in_width, dwc_out_width) + smaller_width = min(dwc_in_width, dwc_out_width) + if larger_width % smaller_width == 0: + impl_style = "hls" + else: + impl_style = "vivado" # determine shape for dwc dwc_shape = n0.get_normal_output_shape() @@ -105,6 +111,7 @@ def apply(self, model): inWidth=dwc_in_width, outWidth=dwc_out_width, dataType=str(dtype.name), + impl_style=impl_style, ) # insert dwc graph.node.insert(node_ind + 1, dwc_node) From ecb5a7e377805df7013aec65878583c1984a271a Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 22 Sep 2022 09:37:02 +0200 Subject: [PATCH 170/628] Working initial implementation --- finn-rtllib/swg/swg_template_axilite.v | 567 ++++++++++++++++++ finn-rtllib/swg/swg_template_default.sv | 30 +- .../swg/swg_template_default_dynamic.sv | 431 +++++++++++++ .../swg/swg_template_wrapper_dynamic.v | 156 +++++ .../convolutioninputgenerator_rtl.py | 124 +++- ...dataflow_convinputgenerator_rtl_dynamic.py | 322 ++++++++++ 6 files changed, 1608 insertions(+), 22 deletions(-) create mode 100644 finn-rtllib/swg/swg_template_axilite.v create mode 100644 finn-rtllib/swg/swg_template_default_dynamic.sv create mode 100644 finn-rtllib/swg/swg_template_wrapper_dynamic.v create mode 100644 tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py diff --git a/finn-rtllib/swg/swg_template_axilite.v b/finn-rtllib/swg/swg_template_axilite.v new file mode 100644 index 0000000000..9479c7f80d --- /dev/null +++ b/finn-rtllib/swg/swg_template_axilite.v @@ -0,0 +1,567 @@ + +`timescale 1 ns / 1 ps + +module $TOP_MODULE_NAME$_axilite # +( + // Users to add parameters here + + // User parameters ends + // Do not modify the parameters beyond this line + + // Width of S_AXI data bus + parameter integer C_S_AXI_DATA_WIDTH = 32, + // Width of S_AXI address bus + parameter integer C_S_AXI_ADDR_WIDTH = 6 +) +( + // Users to add ports here + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg0, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg1, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg2, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg3, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg4, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg5, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg6, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg7, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg8, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg9, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg10, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg11, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg12, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg13, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg14, + output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg15, + + // User ports ends + // Do not modify the ports beyond this line + + // Global Clock Signal + input wire S_AXI_ACLK, + // Global Reset Signal. This Signal is Active LOW + input wire S_AXI_ARESETN, + // Write address (issued by master, acceped by Slave) + input wire [C_S_AXI_ADDR_WIDTH-1 : 0] S_AXI_AWADDR, + // Write channel Protection type. This signal indicates the + // privilege and security level of the transaction, and whether + // the transaction is a data access or an instruction access. + input wire [2 : 0] S_AXI_AWPROT, + // Write address valid. This signal indicates that the master signaling + // valid write address and control information. + input wire S_AXI_AWVALID, + // Write address ready. This signal indicates that the slave is ready + // to accept an address and associated control signals. 
+ output wire S_AXI_AWREADY, + // Write data (issued by master, acceped by Slave) + input wire [C_S_AXI_DATA_WIDTH-1 : 0] S_AXI_WDATA, + // Write strobes. This signal indicates which byte lanes hold + // valid data. There is one write strobe bit for each eight + // bits of the write data bus. + input wire [(C_S_AXI_DATA_WIDTH/8)-1 : 0] S_AXI_WSTRB, + // Write valid. This signal indicates that valid write + // data and strobes are available. + input wire S_AXI_WVALID, + // Write ready. This signal indicates that the slave + // can accept the write data. + output wire S_AXI_WREADY, + // Write response. This signal indicates the status + // of the write transaction. + output wire [1 : 0] S_AXI_BRESP, + // Write response valid. This signal indicates that the channel + // is signaling a valid write response. + output wire S_AXI_BVALID, + // Response ready. This signal indicates that the master + // can accept a write response. + input wire S_AXI_BREADY, + // Read address (issued by master, acceped by Slave) + input wire [C_S_AXI_ADDR_WIDTH-1 : 0] S_AXI_ARADDR, + // Protection type. This signal indicates the privilege + // and security level of the transaction, and whether the + // transaction is a data access or an instruction access. + input wire [2 : 0] S_AXI_ARPROT, + // Read address valid. This signal indicates that the channel + // is signaling valid read address and control information. + input wire S_AXI_ARVALID, + // Read address ready. This signal indicates that the slave is + // ready to accept an address and associated control signals. + output wire S_AXI_ARREADY, + // Read data (issued by slave) + output wire [C_S_AXI_DATA_WIDTH-1 : 0] S_AXI_RDATA, + // Read response. This signal indicates the status of the + // read transfer. + output wire [1 : 0] S_AXI_RRESP, + // Read valid. This signal indicates that the channel is + // signaling the required read data. + output wire S_AXI_RVALID, + // Read ready. This signal indicates that the master can + // accept the read data and response information. 
+ input wire S_AXI_RREADY +); + +// AXI4LITE signals +reg [C_S_AXI_ADDR_WIDTH-1 : 0] axi_awaddr; +reg axi_awready; +reg axi_wready; +reg [1 : 0] axi_bresp; +reg axi_bvalid; +reg [C_S_AXI_ADDR_WIDTH-1 : 0] axi_araddr; +reg axi_arready; +reg [C_S_AXI_DATA_WIDTH-1 : 0] axi_rdata; +reg [1 : 0] axi_rresp; +reg axi_rvalid; + +// Example-specific design signals +// local parameter for addressing 32 bit / 64 bit C_S_AXI_DATA_WIDTH +// ADDR_LSB is used for addressing 32/64 bit registers/memories +// ADDR_LSB = 2 for 32 bits (n downto 2) +// ADDR_LSB = 3 for 64 bits (n downto 3) +localparam integer ADDR_LSB = (C_S_AXI_DATA_WIDTH/32) + 1; +localparam integer OPT_MEM_ADDR_BITS = 3; +//---------------------------------------------- +//-- Signals for user logic register space example +//------------------------------------------------ +//-- Number of Slave Registers 16 +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg0; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg1; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg2; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg3; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg4; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg5; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg6; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg7; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg8; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg9; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg10; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg11; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg12; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg13; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg14; +reg [C_S_AXI_DATA_WIDTH-1:0] slv_reg15; +wire slv_reg_rden; +wire slv_reg_wren; +reg [C_S_AXI_DATA_WIDTH-1:0] reg_data_out; +integer byte_index; +reg aw_en; + +// I/O Connections assignments + +assign S_AXI_AWREADY = axi_awready; +assign S_AXI_WREADY = axi_wready; +assign S_AXI_BRESP = axi_bresp; +assign S_AXI_BVALID = axi_bvalid; +assign S_AXI_ARREADY = axi_arready; +assign S_AXI_RDATA = axi_rdata; +assign S_AXI_RRESP = axi_rresp; +assign S_AXI_RVALID = axi_rvalid; +// Implement axi_awready generation +// axi_awready is asserted for one S_AXI_ACLK clock cycle when both +// S_AXI_AWVALID and S_AXI_WVALID are asserted. axi_awready is +// de-asserted when reset is low. + +always @( posedge S_AXI_ACLK ) +begin + if ( S_AXI_ARESETN == 1'b0 ) + begin + axi_awready <= 1'b0; + aw_en <= 1'b1; + end + else + begin + if (~axi_awready && S_AXI_AWVALID && S_AXI_WVALID && aw_en) + begin + // slave is ready to accept write address when + // there is a valid write address and write data + // on the write address and data bus. This design + // expects no outstanding transactions. + axi_awready <= 1'b1; + aw_en <= 1'b0; + end + else if (S_AXI_BREADY && axi_bvalid) + begin + aw_en <= 1'b1; + axi_awready <= 1'b0; + end + else + begin + axi_awready <= 1'b0; + end + end +end + +// Implement axi_awaddr latching +// This process is used to latch the address when both +// S_AXI_AWVALID and S_AXI_WVALID are valid. + +always @( posedge S_AXI_ACLK ) +begin + if ( S_AXI_ARESETN == 1'b0 ) + begin + axi_awaddr <= 0; + end + else + begin + if (~axi_awready && S_AXI_AWVALID && S_AXI_WVALID && aw_en) + begin + // Write Address latching + axi_awaddr <= S_AXI_AWADDR; + end + end +end + +// Implement axi_wready generation +// axi_wready is asserted for one S_AXI_ACLK clock cycle when both +// S_AXI_AWVALID and S_AXI_WVALID are asserted. axi_wready is +// de-asserted when reset is low. 
+ +always @( posedge S_AXI_ACLK ) +begin + if ( S_AXI_ARESETN == 1'b0 ) + begin + axi_wready <= 1'b0; + end + else + begin + if (~axi_wready && S_AXI_WVALID && S_AXI_AWVALID && aw_en ) + begin + // slave is ready to accept write data when + // there is a valid write address and write data + // on the write address and data bus. This design + // expects no outstanding transactions. + axi_wready <= 1'b1; + end + else + begin + axi_wready <= 1'b0; + end + end +end + +// Implement memory mapped register select and write logic generation +// The write data is accepted and written to memory mapped registers when +// axi_awready, S_AXI_WVALID, axi_wready and S_AXI_WVALID are asserted. Write strobes are used to +// select byte enables of slave registers while writing. +// These registers are cleared when reset (active low) is applied. +// Slave register write enable is asserted when valid address and data are available +// and the slave is ready to accept the write address and write data. +assign slv_reg_wren = axi_wready && S_AXI_WVALID && axi_awready && S_AXI_AWVALID; + +always @( posedge S_AXI_ACLK ) +begin + if ( S_AXI_ARESETN == 1'b0 ) + begin + slv_reg0 <= 0; + slv_reg1 <= 0; + slv_reg2 <= 0; + slv_reg3 <= 0; + slv_reg4 <= 0; + slv_reg5 <= 0; + slv_reg6 <= 0; + slv_reg7 <= 0; + slv_reg8 <= 0; + slv_reg9 <= 0; + slv_reg10 <= 0; + slv_reg11 <= 0; + slv_reg12 <= 0; + slv_reg13 <= 0; + slv_reg14 <= 0; + slv_reg15 <= 0; + end + else begin + if (slv_reg_wren) + begin + case ( axi_awaddr[ADDR_LSB+OPT_MEM_ADDR_BITS:ADDR_LSB] ) + 4'h0: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 0 + slv_reg0[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h1: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 1 + slv_reg1[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h2: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 2 + slv_reg2[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h3: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 3 + slv_reg3[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h4: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 4 + slv_reg4[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h5: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 5 + slv_reg5[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h6: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted 
as per write strobes + // Slave register 6 + slv_reg6[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h7: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 7 + slv_reg7[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h8: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 8 + slv_reg8[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'h9: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 9 + slv_reg9[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hA: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 10 + slv_reg10[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hB: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 11 + slv_reg11[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hC: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 12 + slv_reg12[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hD: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 13 + slv_reg13[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hE: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 14 + slv_reg14[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + 4'hF: + for ( byte_index = 0; byte_index <= (C_S_AXI_DATA_WIDTH/8)-1; byte_index = byte_index+1 ) + if ( S_AXI_WSTRB[byte_index] == 1 ) begin + // Respective byte enables are asserted as per write strobes + // Slave register 15 + slv_reg15[(byte_index*8) +: 8] <= S_AXI_WDATA[(byte_index*8) +: 8]; + end + default : begin + slv_reg0 <= slv_reg0; + slv_reg1 <= slv_reg1; + slv_reg2 <= slv_reg2; + slv_reg3 <= slv_reg3; + slv_reg4 <= slv_reg4; + slv_reg5 <= slv_reg5; + slv_reg6 <= slv_reg6; + slv_reg7 <= slv_reg7; + slv_reg8 <= slv_reg8; + slv_reg9 <= slv_reg9; + slv_reg10 <= slv_reg10; + slv_reg11 <= slv_reg11; + slv_reg12 <= slv_reg12; + slv_reg13 <= slv_reg13; + slv_reg14 <= slv_reg14; + slv_reg15 <= slv_reg15; + end + endcase + end + end +end + +// Implement write response logic generation +// The write response and response valid signals are asserted by the slave +// when axi_wready, S_AXI_WVALID, axi_wready and S_AXI_WVALID are asserted. 
+// This marks the acceptance of address and indicates the status of
+// the write transaction.
+
+always @( posedge S_AXI_ACLK )
+begin
+  if ( S_AXI_ARESETN == 1'b0 )
+    begin
+      axi_bvalid <= 0;
+      axi_bresp <= 2'b0;
+    end
+  else
+    begin
+      if (axi_awready && S_AXI_AWVALID && ~axi_bvalid && axi_wready && S_AXI_WVALID)
+        begin
+          // indicates a valid write response is available
+          axi_bvalid <= 1'b1;
+          axi_bresp <= 2'b0; // 'OKAY' response
+        end // TODO: support error responses in the future
+      else
+        begin
+          if (S_AXI_BREADY && axi_bvalid)
+            //check if bready is asserted while bvalid is high
+            //(there is a possibility that bready is always asserted high)
+            begin
+              axi_bvalid <= 1'b0;
+            end
+        end
+    end
+end
+
+// Implement axi_arready generation
+// axi_arready is asserted for one S_AXI_ACLK clock cycle when
+// S_AXI_ARVALID is asserted. axi_arready is
+// de-asserted when reset (active low) is asserted.
+// The read address is also latched when S_AXI_ARVALID is
+// asserted. axi_araddr is reset to zero on reset assertion.
+
+always @( posedge S_AXI_ACLK )
+begin
+  if ( S_AXI_ARESETN == 1'b0 )
+    begin
+      axi_arready <= 1'b0;
+      axi_araddr <= 32'b0;
+    end
+  else
+    begin
+      if (~axi_arready && S_AXI_ARVALID)
+        begin
+          // indicates that the slave has accepted the valid read address
+          axi_arready <= 1'b1;
+          // Read address latching
+          axi_araddr <= S_AXI_ARADDR;
+        end
+      else
+        begin
+          axi_arready <= 1'b0;
+        end
+    end
+end
+
+// Implement axi_rvalid generation
+// axi_rvalid is asserted for one S_AXI_ACLK clock cycle when both
+// S_AXI_ARVALID and axi_arready are asserted. The slave registers
+// data are available on the axi_rdata bus at this instance. The
+// assertion of axi_rvalid marks the validity of read data on the
+// bus and axi_rresp indicates the status of the read transaction. axi_rvalid
+// is deasserted on reset (active low). axi_rresp and axi_rdata are
+// cleared to zero on reset (active low).
+always @( posedge S_AXI_ACLK )
+begin
+  if ( S_AXI_ARESETN == 1'b0 )
+    begin
+      axi_rvalid <= 0;
+      axi_rresp <= 0;
+    end
+  else
+    begin
+      if (axi_arready && S_AXI_ARVALID && ~axi_rvalid)
+        begin
+          // Valid read data is available at the read data bus
+          axi_rvalid <= 1'b1;
+          axi_rresp <= 2'b0; // 'OKAY' response
+        end
+      else if (axi_rvalid && S_AXI_RREADY)
+        begin
+          // Read data is accepted by the master
+          axi_rvalid <= 1'b0;
+        end
+    end
+end
+
+// Implement memory mapped register select and read logic generation
+// Slave register read enable is asserted when valid address is available
+// and the slave is ready to accept the read address.
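+// Note on the address map: C_S_AXI_DATA_WIDTH = 32 gives ADDR_LSB = 2, and with
+// OPT_MEM_ADDR_BITS = 3 the decode below uses axi_araddr[5:2] to select one of
+// the 16 word-aligned slave registers, i.e. slv_regN sits at byte offset 4*N
+// (slv_reg0 at 0x00 up to slv_reg15 at 0x3C). The same offsets are used by
+// get_dynamic_config() on the Python side when the configuration is written
+// over AXI-lite.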
+assign slv_reg_rden = axi_arready & S_AXI_ARVALID & ~axi_rvalid; +always @(*) +begin + // Address decoding for reading registers + case ( axi_araddr[ADDR_LSB+OPT_MEM_ADDR_BITS:ADDR_LSB] ) + 4'h0 : reg_data_out <= slv_reg0; + 4'h1 : reg_data_out <= slv_reg1; + 4'h2 : reg_data_out <= slv_reg2; + 4'h3 : reg_data_out <= slv_reg3; + 4'h4 : reg_data_out <= slv_reg4; + 4'h5 : reg_data_out <= slv_reg5; + 4'h6 : reg_data_out <= slv_reg6; + 4'h7 : reg_data_out <= slv_reg7; + 4'h8 : reg_data_out <= slv_reg8; + 4'h9 : reg_data_out <= slv_reg9; + 4'hA : reg_data_out <= slv_reg10; + 4'hB : reg_data_out <= slv_reg11; + 4'hC : reg_data_out <= slv_reg12; + 4'hD : reg_data_out <= slv_reg13; + 4'hE : reg_data_out <= slv_reg14; + 4'hF : reg_data_out <= slv_reg15; + default : reg_data_out <= 0; + endcase +end + +// Output register or memory read data +always @( posedge S_AXI_ACLK ) +begin + if ( S_AXI_ARESETN == 1'b0 ) + begin + axi_rdata <= 0; + end + else + begin + // When there is a valid read address (S_AXI_ARVALID) with + // acceptance of read address by the slave (axi_arready), + // output the read dada + if (slv_reg_rden) + begin + axi_rdata <= reg_data_out; // register read data + end + end +end + +// Add user logic here +assign cfg_reg0 = slv_reg0; +assign cfg_reg1 = slv_reg1; +assign cfg_reg2 = slv_reg2; +assign cfg_reg3 = slv_reg3; +assign cfg_reg4 = slv_reg4; +assign cfg_reg5 = slv_reg5; +assign cfg_reg6 = slv_reg6; +assign cfg_reg7 = slv_reg7; +assign cfg_reg8 = slv_reg8; +assign cfg_reg9 = slv_reg9; +assign cfg_reg10 = slv_reg10; +assign cfg_reg11 = slv_reg11; +assign cfg_reg12 = slv_reg12; +assign cfg_reg13 = slv_reg13; +assign cfg_reg14 = slv_reg14; +assign cfg_reg15 = slv_reg15; +// User logic ends + +endmodule diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 0aa309f890..ecbcffc115 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -30,11 +30,11 @@ module $TOP_MODULE_NAME$_controller #( state_e State = $INNERMOST_STATE$; state_e state_next; - logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS-1; - logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS-1; - logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS-1; - logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS-1; - logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS-1; + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; assign addr_incr = ADDR_INCREMENT_MAP[State]; @@ -71,29 +71,29 @@ module $TOP_MODULE_NAME$_controller #( always_ff @ (posedge clk) begin if(!rst_n) begin State <= $INNERMOST_STATE$; - Counter_loop_h <= LOOP_H_ITERATIONS-1; - Counter_loop_w <= LOOP_W_ITERATIONS-1; - Counter_loop_kh <= LOOP_KH_ITERATIONS-1; - Counter_loop_kw <= LOOP_KW_ITERATIONS-1; - Counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + Counter_loop_h <= LOOP_H_ITERATIONS; + Counter_loop_w <= LOOP_W_ITERATIONS; + Counter_loop_kh <= LOOP_KH_ITERATIONS; + Counter_loop_kw <= LOOP_KW_ITERATIONS; + 
Counter_loop_simd <= LOOP_SIMD_ITERATIONS; end else if(advance) begin State <= state_next; if (State == $INNERMOST_STATE$) begin if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; else begin - Counter_loop_simd <= LOOP_SIMD_ITERATIONS-1; + Counter_loop_simd <= LOOP_SIMD_ITERATIONS; if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; else begin - Counter_loop_kw <= LOOP_KW_ITERATIONS-1; + Counter_loop_kw <= LOOP_KW_ITERATIONS; if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; else begin - Counter_loop_kh <= LOOP_KH_ITERATIONS-1; + Counter_loop_kh <= LOOP_KH_ITERATIONS; if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; else begin - Counter_loop_w <= LOOP_W_ITERATIONS-1; + Counter_loop_w <= LOOP_W_ITERATIONS; if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; - else Counter_loop_h <= LOOP_H_ITERATIONS-1; + else Counter_loop_h <= LOOP_H_ITERATIONS; end end end diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv new file mode 100644 index 0000000000..96bd8cc591 --- /dev/null +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -0,0 +1,431 @@ +module $TOP_MODULE_NAME$_controller #( + int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, + int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, + int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, + int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, + int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, + + int unsigned CNTR_BITWIDTH, + int unsigned INCR_BITWIDTH, + + bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = $ADDR_INCREMENT_MAP$, + + bit IS_DEPTHWISE = $IS_DEPTHWISE$ +)( + input logic clk, + input logic rst_n, + + input logic advance, + output logic [INCR_BITWIDTH-1:0] addr_incr, + output logic [INCR_BITWIDTH-1:0] tail_incr, + + input logic cfg_valid, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_simd, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_kw, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_kh, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_w, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_simd, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_kw, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_kh, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_w, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_w, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_last +); + + // (dynamic) configuration registers + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_simd = LOOP_SIMD_ITERATIONS; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kw = LOOP_KW_ITERATIONS; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kh = LOOP_KH_ITERATIONS; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_w = LOOP_W_ITERATIONS; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_h = LOOP_H_ITERATIONS; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_simd = ADDR_INCREMENT_MAP[1]; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kw = ADDR_INCREMENT_MAP[2]; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kh = ADDR_INCREMENT_MAP[3]; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_w = ADDR_INCREMENT_MAP[4]; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_h = ADDR_INCREMENT_MAP[5]; + logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_w = $TAIL_INCR_W$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_h = $TAIL_INCR_H$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_last = $TAIL_INCR_LAST$; + + // configuration reset/set logic + always_ff @ (posedge clk) begin + if(cfg_valid) begin + Cfg_cntr_simd <= cfg_cntr_simd; + Cfg_cntr_kw 
<= cfg_cntr_kw; + Cfg_cntr_kh <= cfg_cntr_kh; + Cfg_cntr_w <= cfg_cntr_w; + Cfg_cntr_h <= cfg_cntr_h; + Cfg_incr_head_simd <= cfg_incr_head_simd; + Cfg_incr_head_kw <= cfg_incr_head_kw; + Cfg_incr_head_kh <= cfg_incr_head_kh; + Cfg_incr_head_w <= cfg_incr_head_w; + Cfg_incr_head_h <= cfg_incr_head_h; + Cfg_incr_tail_w <= cfg_incr_tail_w; + Cfg_incr_tail_h <= cfg_incr_tail_h; + Cfg_incr_tail_last <= cfg_incr_tail_last; + end + end + + // state and counters + typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H + } state_e; + state_e State = $INNERMOST_STATE$; + state_e state_next; + + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; + + //assign addr_incr = ADDR_INCREMENT_MAP[State]; + always_comb begin : blkHead + case (State) + 0 : addr_incr = 0; + 1 : addr_incr = Cfg_incr_head_simd; + 2 : addr_incr = Cfg_incr_head_kw; + 3 : addr_incr = Cfg_incr_head_kh; + 4 : addr_incr = Cfg_incr_head_w; + 5 : addr_incr = Cfg_incr_head_h; + endcase + end + + // combinational logic for tail_incr generation + uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; + always_comb begin : blkTail + if (tail_incr_inner_condition) + tail_incr = 1; + else if (Counter_loop_w >= 0) + tail_incr = Cfg_incr_tail_w; + else if (Counter_loop_h >= 0) + tail_incr = Cfg_incr_tail_h; + else + tail_incr = Cfg_incr_tail_last; + end + + // combinational next state logic + always_comb begin : blkState + state_next = State; + if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; + else begin + if(Counter_loop_simd < 0) begin + state_next = + (Counter_loop_kw >= 0)? STATE_LOOP_KW : + (Counter_loop_kh >= 0)? STATE_LOOP_KH : + (Counter_loop_w >= 0)? STATE_LOOP_W : + (Counter_loop_h >= 0)? 
STATE_LOOP_H : + /* else */ STATE_START; + end + end + end : blkState + + // sequential logic + always_ff @ (posedge clk) begin + if(!rst_n) begin + State <= $INNERMOST_STATE$; + Counter_loop_h <= Cfg_cntr_h; + Counter_loop_w <= Cfg_cntr_w; + Counter_loop_kh <= Cfg_cntr_kh; + Counter_loop_kw <= Cfg_cntr_kw; + Counter_loop_simd <= Cfg_cntr_simd; + end + else if(advance) begin + State <= state_next; + if (State == $INNERMOST_STATE$) begin + if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; + else begin + Counter_loop_simd <= Cfg_cntr_simd; + if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; + else begin + Counter_loop_kw <= Cfg_cntr_kw; + if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; + else begin + Counter_loop_kh <= Cfg_cntr_kh; + if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; + else begin + Counter_loop_w <= Cfg_cntr_w; + if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; + else Counter_loop_h <= Cfg_cntr_h; + end + end + end + end + end + end + end + +endmodule : $TOP_MODULE_NAME$_controller + +module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + int unsigned WIDTH, + int unsigned DEPTH +)( + input logic clk, + input logic rst_n, + + input logic write_enable, + input logic [$clog2(DEPTH)-1:0] write_addr, + input logic [WIDTH-1:0] data_in, + + input logic read_enable, + input logic [$clog2(DEPTH)-1:0] read_addr, // absolute (!) read address of cyclic buffer + output logic [WIDTH-1:0] data_out +); + + $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; + logic [WIDTH-1:0] Out = 'x; + always_ff @(posedge clk) begin + if (read_enable) Out <= Ram[read_addr]; + if (write_enable) Ram[write_addr] <= data_in; + end + assign data_out = Out; + +endmodule : $TOP_MODULE_NAME$_cyclic_buffer_addressable + +module $TOP_MODULE_NAME$_impl #( + int BIT_WIDTH, + int SIMD, + int MMV_IN, + int MMV_OUT, + int LAST_READ_ELEM = $LAST_READ_ELEM$, + int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, + int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, + int ELEM_PER_WINDOW = $ELEM_PER_WINDOW$, + + int unsigned CNTR_BITWIDTH, + int unsigned INCR_BITWIDTH +)( + input logic ap_clk, + input logic ap_rst_n, + + input logic in0_V_V_TVALID, + output logic in0_V_V_TREADY, + input logic [BIT_WIDTH * SIMD * MMV_IN-1:0] in0_V_V_TDATA, + + output logic out_V_V_TVALID, + input logic out_V_V_TREADY, + output logic [BIT_WIDTH * SIMD * MMV_OUT-1:0] out_V_V_TDATA, + + input logic cfg_valid, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_simd, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_kw, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_kh, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_w, + input logic [CNTR_BITWIDTH-1:0] cfg_cntr_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_simd, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_kw, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_kh, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_w, + input logic [INCR_BITWIDTH-1:0] cfg_incr_head_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_w, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_h, + input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_last, + input logic [31:0] cfg_last_read, //todo: reduce bitwidth to $clog2(LAST_READ_ELEM+1) + input logic [31:0] cfg_last_write +); + // derived Constants + localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; + localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; + localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; + + // (dynamic) configuration registers + logic [31:0] Cfg_last_read = LAST_READ_ELEM; + logic [31:0] 
Cfg_last_write = LAST_WRITE_ELEM; + + // configuration reset/set logic + always_ff @ (posedge ap_clk) begin + if(cfg_valid) begin + Cfg_last_read <= cfg_last_read; + Cfg_last_write <= cfg_last_write; + end + end + + // main buffer instantiation + uwire [BUF_IN_WIDTH -1:0] window_buffer_in; + uwire [BUF_OUT_WIDTH-1:0] window_buffer_out; + uwire window_buffer_write_enable; + uwire window_buffer_read_enable; + uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr; + uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; + $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + .WIDTH(BUF_IN_WIDTH), + .DEPTH(BUF_ELEM_TOTAL) + ) window_buffer_inst ( + .clk(ap_clk), + .rst_n(ap_rst_n), + + .write_enable(window_buffer_write_enable), + .write_addr(window_buffer_write_addr), + .data_in(window_buffer_in), + + .read_enable(window_buffer_read_enable), + .read_addr(window_buffer_read_addr), + .data_out(window_buffer_out) + ); + + //controller instantiation + uwire advance_controller; + uwire signed [INCR_BITWIDTH-1:0] addr_incr; + uwire [INCR_BITWIDTH-1:0] tail_incr; + $TOP_MODULE_NAME$_controller #( + .CNTR_BITWIDTH(CNTR_BITWIDTH), + .INCR_BITWIDTH(INCR_BITWIDTH) + ) controller_inst ( + .clk(ap_clk), + .rst_n(ap_rst_n), + .advance(advance_controller), + .addr_incr(addr_incr), + .tail_incr(tail_incr), + + .cfg_valid(cfg_valid), + .cfg_cntr_simd(cfg_cntr_simd), + .cfg_cntr_kw(cfg_cntr_kw), + .cfg_cntr_kh(cfg_cntr_kh), + .cfg_cntr_w(cfg_cntr_w), + .cfg_cntr_h(cfg_cntr_h), + .cfg_incr_head_simd(cfg_incr_head_simd), + .cfg_incr_head_kw(cfg_incr_head_kw), + .cfg_incr_head_kh(cfg_incr_head_kh), + .cfg_incr_head_w(cfg_incr_head_w), + .cfg_incr_head_h(cfg_incr_head_h), + .cfg_incr_tail_w(cfg_incr_tail_w), + .cfg_incr_tail_h(cfg_incr_tail_h), + .cfg_incr_tail_last(cfg_incr_tail_last) + ); + + // Counters/address registers + // Add a sign bit even to (most) unsigned counters and Window_buffer_read_addr_reg, + // so we can use automatic sign extension and simplify calculations w/ signed increment. + // Alternatively, we could manually sign-extend and shave off a bit here or there. + logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] Newest_buffered_elem = -1; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] Current_elem = 0; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] First_elem_next_window = 0; + logic [$clog2(ELEM_PER_WINDOW) -1:0] Position_in_window = 0; + logic [$clog2(BUF_ELEM_TOTAL)+1 -1:0] Window_buffer_read_addr_reg = 0; + logic [$clog2(BUF_ELEM_TOTAL)-1:0] Window_buffer_write_addr_reg = 0; + + // Control signals/registers + uwire read_cmd = + !reading_done && ( // if there is still an input element left to read + Fetching_done || ( // if fetching is done (e.g. 
for skipped rows at FM end due to stride)
+            $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(First_elem_next_window) &&
+            $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(Current_elem)
+        ) // (over-)write to buffer if oldest buffered element will no longer be needed
+    );
+    uwire read_ok = read_cmd && in0_V_V_TVALID;
+    uwire reading_done = Newest_buffered_elem == Cfg_last_read;
+
+    uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done;
+    logic Fetching_done = 0;
+
+    logic Write_cmd = 0;
+    logic Writing_done = 0;
+    uwire write_ok = Write_cmd && out_V_V_TREADY;
+    uwire write_blocked = Write_cmd && !out_V_V_TREADY;
+
+    //assign buffer control
+    assign window_buffer_write_addr = Window_buffer_write_addr_reg;
+    assign window_buffer_read_addr = Window_buffer_read_addr_reg;
+    assign window_buffer_write_enable = read_ok;
+    assign window_buffer_read_enable = fetch_cmd;
+    assign advance_controller = fetch_cmd;
+
+    //assign I/O ports
+    assign window_buffer_in = in0_V_V_TDATA;
+    assign out_V_V_TDATA = window_buffer_out;
+    assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed)
+    assign out_V_V_TVALID = ap_rst_n && Write_cmd; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink)
+
+    //main process for advancing counters
+    always_ff @(posedge ap_clk) begin
+        if(!ap_rst_n) begin
+            Newest_buffered_elem <= -1;
+            Current_elem <= 0;
+            First_elem_next_window <= 0;
+            Position_in_window <= 0;
+            Window_buffer_read_addr_reg <= 0;
+            Window_buffer_write_addr_reg <= 0;
+            Fetching_done <= 0;
+            Write_cmd <= 0;
+            Writing_done <= 0;
+        end
+        else begin
+            if (read_ok) begin
+                Window_buffer_write_addr_reg <= (Window_buffer_write_addr_reg == BUF_ELEM_TOTAL-1)? 0 : Window_buffer_write_addr_reg + 1;
+                Newest_buffered_elem <= Newest_buffered_elem+1;
+
+                if (Newest_buffered_elem == Cfg_last_read-1) begin
+                    Window_buffer_write_addr_reg <= 0;
+                end
+                //check if this is the last read cycle (reading_done will be true afterwards)
+                if ((Newest_buffered_elem == Cfg_last_read-1) && Writing_done) begin
+                    //start processing of next FM if writing is done already (possible due to unused input elements at the tail end)
+                    //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM)
+                    Newest_buffered_elem <= -1;
+                    Current_elem <= 0;
+                    Window_buffer_read_addr_reg <= 0;
+                    First_elem_next_window <= 0;
+                    Writing_done <= 0;
+                    Fetching_done <= 0;
+                end
+            end
+
+            if (fetch_cmd) begin
+                //count up to track which element index is about to be read from the buffer, and where it is located within the buffer
+                //use increment value calculated by controller
+
+                // absolute buffer address wrap-around
+                automatic logic signed [$clog2(BUF_ELEM_TOTAL)+1:0] ra = $signed(Window_buffer_read_addr_reg) + $signed(addr_incr);
+                automatic logic signed [$clog2(BUF_ELEM_TOTAL+1):0] ra_correct =
+                    (ra >= BUF_ELEM_TOTAL)? -BUF_ELEM_TOTAL :
+                    (ra < 0)? BUF_ELEM_TOTAL : 0;
+                Window_buffer_read_addr_reg <= ra + ra_correct;
+
+                //keep track where we are within a window
+                Position_in_window <= (Position_in_window != ELEM_PER_WINDOW - 1)?
Position_in_window+1 : 0; + + //update first element of next window to allow buffer overwrite up until that point + if (Position_in_window == 0) + First_elem_next_window <= First_elem_next_window + tail_incr; + + //check if this is the last write cycle (Writing_done will be true afterwards) + if (Current_elem == Cfg_last_write) + Fetching_done <= 1; + else + Current_elem <= $signed(Current_elem) + addr_incr; + + // determine if prefetched data will be outstanding in the next cycle + // if we fetch in this cycle -> yes + // if we do not fetch nor write -> do not change + // if we do not fetch but write successfully-> clear outstanding data + Write_cmd <= fetch_cmd; + end + + if (write_ok) + Write_cmd <= fetch_cmd; + + if (write_ok && Fetching_done) begin + //check if this is the last write cycle (Writing_done will be true afterwards) + if (reading_done || (read_ok && (Newest_buffered_elem == Cfg_last_read - 1))) begin + //start processing of next FM if reading is done already, or completes in the same cycle + Newest_buffered_elem <= -1; + Current_elem <= 0; + Window_buffer_read_addr_reg <= 0; + First_elem_next_window <= 0; + Fetching_done <= 0; + end else + Writing_done <= 1; + end + end + end + +endmodule : $TOP_MODULE_NAME$_impl diff --git a/finn-rtllib/swg/swg_template_wrapper_dynamic.v b/finn-rtllib/swg/swg_template_wrapper_dynamic.v new file mode 100644 index 0000000000..d6f839de43 --- /dev/null +++ b/finn-rtllib/swg/swg_template_wrapper_dynamic.v @@ -0,0 +1,156 @@ +`timescale 1 ns / 1 ps + +module $TOP_MODULE_NAME$ #( + // top-level parameters (set via code-generation) + parameter BIT_WIDTH = $BIT_WIDTH$, + parameter SIMD = $SIMD$, + parameter MMV_IN = $MMV_IN$, + parameter MMV_OUT = $MMV_OUT$, + + parameter CNTR_BITWIDTH = $CNTR_BITWIDTH$, + parameter INCR_BITWIDTH = $INCR_BITWIDTH$, + + // derived constants + parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN, + parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT, + + parameter integer C_s_axi_cfg_DATA_WIDTH = 32, + parameter integer C_s_axi_cfg_ADDR_WIDTH = 6 +) +( + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + input ap_clk, + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + input ap_rst_n, + input [BUF_IN_WIDTH-1:0] in0_V_TDATA, + input in0_V_TVALID, + output in0_V_TREADY, + output [BUF_OUT_WIDTH-1:0] out_V_TDATA, + output out_V_TVALID, + input out_V_TREADY, + + // Ports of Axi Slave Bus Interface s_axi_cfg + //input wire s_axi_cfg_aclk, + //input wire s_axi_cfg_aresetn, + input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_awaddr, + input wire [2 : 0] s_axi_cfg_awprot, + input wire s_axi_cfg_awvalid, + output wire s_axi_cfg_awready, + input wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_wdata, + input wire [(C_s_axi_cfg_DATA_WIDTH/8)-1 : 0] s_axi_cfg_wstrb, + input wire s_axi_cfg_wvalid, + output wire s_axi_cfg_wready, + output wire [1 : 0] s_axi_cfg_bresp, + output wire s_axi_cfg_bvalid, + input wire s_axi_cfg_bready, + input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_araddr, + input wire [2 : 0] s_axi_cfg_arprot, + input wire s_axi_cfg_arvalid, + output wire s_axi_cfg_arready, + output wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_rdata, + output wire [1 : 0] s_axi_cfg_rresp, + output wire s_axi_cfg_rvalid, + input wire s_axi_cfg_rready +); + +wire cfg_valid; +wire [CNTR_BITWIDTH-1:0] cfg_cntr_simd; +wire [CNTR_BITWIDTH-1:0] cfg_cntr_kw; +wire [CNTR_BITWIDTH-1:0] cfg_cntr_kh; +wire [CNTR_BITWIDTH-1:0] cfg_cntr_w; +wire [CNTR_BITWIDTH-1:0] cfg_cntr_h; +wire 
[INCR_BITWIDTH-1:0] cfg_incr_head_simd; +wire [INCR_BITWIDTH-1:0] cfg_incr_head_kw; +wire [INCR_BITWIDTH-1:0] cfg_incr_head_kh; +wire [INCR_BITWIDTH-1:0] cfg_incr_head_w; +wire [INCR_BITWIDTH-1:0] cfg_incr_head_h; +wire [INCR_BITWIDTH-1:0] cfg_incr_tail_w; +wire [INCR_BITWIDTH-1:0] cfg_incr_tail_h; +wire [INCR_BITWIDTH-1:0] cfg_incr_tail_last; +wire [31:0] cfg_last_read; +wire [31:0] cfg_last_write; + +// Instantiation of Axi Bus Interface s_axi_cfg +$TOP_MODULE_NAME$_axilite # ( + .C_S_AXI_DATA_WIDTH(C_s_axi_cfg_DATA_WIDTH), + .C_S_AXI_ADDR_WIDTH(C_s_axi_cfg_ADDR_WIDTH) +) axilite_cfg_inst ( + .S_AXI_ACLK(ap_clk), + .S_AXI_ARESETN(ap_rst_n), + .S_AXI_AWADDR(s_axi_cfg_awaddr), + .S_AXI_AWPROT(s_axi_cfg_awprot), + .S_AXI_AWVALID(s_axi_cfg_awvalid), + .S_AXI_AWREADY(s_axi_cfg_awready), + .S_AXI_WDATA(s_axi_cfg_wdata), + .S_AXI_WSTRB(s_axi_cfg_wstrb), + .S_AXI_WVALID(s_axi_cfg_wvalid), + .S_AXI_WREADY(s_axi_cfg_wready), + .S_AXI_BRESP(s_axi_cfg_bresp), + .S_AXI_BVALID(s_axi_cfg_bvalid), + .S_AXI_BREADY(s_axi_cfg_bready), + .S_AXI_ARADDR(s_axi_cfg_araddr), + .S_AXI_ARPROT(s_axi_cfg_arprot), + .S_AXI_ARVALID(s_axi_cfg_arvalid), + .S_AXI_ARREADY(s_axi_cfg_arready), + .S_AXI_RDATA(s_axi_cfg_rdata), + .S_AXI_RRESP(s_axi_cfg_rresp), + .S_AXI_RVALID(s_axi_cfg_rvalid), + .S_AXI_RREADY(s_axi_cfg_rready), + + .cfg_reg0(cfg_valid), + .cfg_reg1(cfg_cntr_simd), + .cfg_reg2(cfg_cntr_kw), + .cfg_reg3(cfg_cntr_kh), + .cfg_reg4(cfg_cntr_w), + .cfg_reg5(cfg_cntr_h), + .cfg_reg6(cfg_incr_head_simd), + .cfg_reg7(cfg_incr_head_kw), + .cfg_reg8(cfg_incr_head_kh), + .cfg_reg9(cfg_incr_head_w), + .cfg_reg10(cfg_incr_head_h), + .cfg_reg11(cfg_incr_tail_w), + .cfg_reg12(cfg_incr_tail_h), + .cfg_reg13(cfg_incr_tail_last), + .cfg_reg14(cfg_last_read), + .cfg_reg15(cfg_last_write) +); + +$TOP_MODULE_NAME$_impl +#( + .BIT_WIDTH(BIT_WIDTH), + .SIMD(SIMD), + .MMV_IN(MMV_IN), + .MMV_OUT(MMV_OUT), + .CNTR_BITWIDTH(CNTR_BITWIDTH), + .INCR_BITWIDTH(INCR_BITWIDTH) +) +impl +( + .ap_clk(ap_clk), + .ap_rst_n(ap_rst_n), + .in0_V_V_TDATA(in0_V_TDATA), + .in0_V_V_TVALID(in0_V_TVALID), + .in0_V_V_TREADY(in0_V_TREADY), + .out_V_V_TDATA(out_V_TDATA), + .out_V_V_TVALID(out_V_TVALID), + .out_V_V_TREADY(out_V_TREADY), + + .cfg_valid(cfg_valid), + .cfg_cntr_simd(cfg_cntr_simd), + .cfg_cntr_kw(cfg_cntr_kw), + .cfg_cntr_kh(cfg_cntr_kh), + .cfg_cntr_w(cfg_cntr_w), + .cfg_cntr_h(cfg_cntr_h), + .cfg_incr_head_simd(cfg_incr_head_simd), + .cfg_incr_head_kw(cfg_incr_head_kw), + .cfg_incr_head_kh(cfg_incr_head_kh), + .cfg_incr_head_w(cfg_incr_head_w), + .cfg_incr_head_h(cfg_incr_head_h), + .cfg_incr_tail_w(cfg_incr_tail_w), + .cfg_incr_tail_h(cfg_incr_tail_h), + .cfg_incr_tail_last(cfg_incr_tail_last), + .cfg_last_read(cfg_last_read), + .cfg_last_write(cfg_last_write) +); + +endmodule //TOP_MODULE_NAME diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 366dd396d1..fab0ce3871 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -453,7 +453,9 @@ def prepare_codegen_default(self): # Default implementation style for MMV_out = 1: addressable cyclic buffer # Computing incremental addressing scheme directly.. 
template_path = ( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_default.sv" + os.environ["FINN_ROOT"] + + "/finn-rtllib/swg/swg_template_default_dynamic.sv" + # TODO: add switch ) code_gen_dict = {} @@ -585,11 +587,24 @@ def prepare_codegen_default(self): code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_SIMD"] loop_simd_iterations -= 1 # -1 because state is initial state - code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 1)] - code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 1)] - code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 1)] - code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 1)] - code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 1)] + code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 2)] + code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 2)] + code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 2)] + code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 2)] + code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 2)] + + cntr_bitwidth = math.ceil( + math.log2( + max( + loop_h_iterations - 2 + 1, + loop_w_iterations - 2 + 1, + loop_kh_iterations - 2 + 1, + loop_kw_iterations - 2 + 1, + loop_simd_iterations - 2 + 1, + ) + ) + ) + code_gen_dict["$CNTR_BITWIDTH$"] = [str(cntr_bitwidth)] incr_bitwidth = 1 + math.ceil( math.log2( @@ -621,6 +636,11 @@ def prepare_codegen_default(self): abs(addr_incr_end_row), ) ] + code_gen_dict["$INCR_HEAD_SIMD$"] = [str(addr_incr_end_simd)] + code_gen_dict["$INCR_HEAD_KW$"] = [str(addr_incr_end_window_elem)] + code_gen_dict["$INCR_HEAD_KH$"] = [str(addr_incr_end_window_row)] + code_gen_dict["$INCR_HEAD_W$"] = [str(addr_incr_end_window)] + code_gen_dict["$INCR_HEAD_H$"] = [str(addr_incr_end_row)] code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] code_gen_dict["$SIMD$"] = [str(simd)] @@ -700,14 +720,21 @@ def generate_hdl(self): with open(template_path, "r") as f: template = f.read() with open( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_wrapper.v", "r" + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_wrapper_dynamic.v", + "r" + # TODO: add switch ) as f: template_wrapper = f.read() + with open( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_axilite.v", "r" + ) as f: + template_axilite = f.read() for key in code_gen_dict: # transform list into long string separated by '\n' code_gen_line = "\n".join(code_gen_dict[key]) template = template.replace(key, code_gen_line) template_wrapper = template_wrapper.replace(key, code_gen_line) + template_axilite = template_axilite.replace(key, code_gen_line) with open( os.path.join( code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" @@ -722,6 +749,13 @@ def generate_hdl(self): "w", ) as f: f.write(template_wrapper) + with open( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" + ), + "w", + ) as f: + f.write(template_axilite) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain @@ -742,6 +776,7 @@ def prepare_rtlsim(self): verilog_files = [ self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", + self.get_nodeattr("gen_top_module") + "_axilite.v", ] # build the Verilator emu library @@ -773,12 +808,87 @@ def code_generation_ipi(self): code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" ) ), + "add_files -norecurse %s" + % ( + os.path.join( + 
code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" + ) + ), "create_bd_cell -type module -reference %s %s" % (self.get_nodeattr("gen_top_module"), self.onnx_node.name), ] return cmd + def get_verilog_top_module_intf_names(self): + # Overload default HLSCustomOp implementation to add axilite control IF + """Return a dict of names of input and output interfaces. + The keys reflect the protocols each interface implements: + 'clk', 'rst', 'm_axis', 's_axis', 'aximm', 'axilite'. + Values are lists of tuples (axis, aximm) or names (axilite): + 'axis' tuples correspond to the list of node inputs in order, + each tuple is (interface_name, interface_width_bits). + axilite always assumed to be 32 bits and is not tuple (name only). + Each block must have at most one aximm and one axilite.""" + intf_names = {} + intf_names["clk"] = ["ap_clk"] + intf_names["rst"] = ["ap_rst_n"] + sname = self.hls_sname() + intf_names["s_axis"] = [("in0_" + sname, self.get_instream_width_padded())] + intf_names["m_axis"] = [("out_" + sname, self.get_outstream_width_padded())] + intf_names["aximm"] = [] + intf_names["axilite"] = ["s_axi_cfg"] + return intf_names + + def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): + """Returns a configuration dict to re-configure FM dimension during + runtime. Stride and dilation can also be changed. Certain restrictions + apply (e.g. component must be synthesized for largest buffer size).""" + # TODO: Make a standalone version to call from Python driver? + # TODO: Add more safeguards + + k = self.get_nodeattr("ConvKernelDim") + if stride is None: + stride = self.get_nodeattr("Stride") + if dilation is None: + dilation = self.get_nodeattr("Dilation") + + k_h, k_w = k + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ifm_dim_h, ifm_dim_w = ifm_dim + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) + ofm_dim = [ofm_dim_h, ofm_dim_w] + + # update attributes + self.set_nodeattr("IFMDim", ifm_dim) + self.set_nodeattr("OFMDim", ofm_dim) + self.set_nodeattr("Stride", stride) + self.set_nodeattr("Dilation", dilation) + + # (re-)call codegen and extract new values + # each setting is mapped to an axi-lite register address + template_path, code_gen_dict = self.prepare_codegen_default() + config = { + "cfg_cntr_simd": (1 * 4, int(code_gen_dict["$LOOP_SIMD_ITERATIONS$"][0])), + "cfg_cntr_kw": (2 * 4, int(code_gen_dict["$LOOP_KW_ITERATIONS$"][0])), + "cfg_cntr_kh": (3 * 4, int(code_gen_dict["$LOOP_KH_ITERATIONS$"][0])), + "cfg_cntr_w": (4 * 4, int(code_gen_dict["$LOOP_W_ITERATIONS$"][0])), + "cfg_cntr_h": (5 * 4, int(code_gen_dict["$LOOP_H_ITERATIONS$"][0])), + "cfg_incr_head_simd": (6 * 4, int(code_gen_dict["$INCR_HEAD_SIMD$"][0])), + "cfg_incr_head_kw": (7 * 4, int(code_gen_dict["$INCR_HEAD_KW$"][0])), + "cfg_incr_head_kh": (8 * 4, int(code_gen_dict["$INCR_HEAD_KH$"][0])), + "cfg_incr_head_w": (9 * 4, int(code_gen_dict["$INCR_HEAD_W$"][0])), + "cfg_incr_head_h": (10 * 4, int(code_gen_dict["$INCR_HEAD_H$"][0])), + "cfg_incr_tail_w": (11 * 4, int(code_gen_dict["$TAIL_INCR_W$"][0])), + "cfg_incr_tail_h": (12 * 4, int(code_gen_dict["$TAIL_INCR_H$"][0])), + "cfg_incr_tail_last": (13 * 4, int(code_gen_dict["$TAIL_INCR_LAST$"][0])), + "cfg_last_read": (14 * 4, int(code_gen_dict["$LAST_READ_ELEM$"][0])), + "cfg_last_write": (15 * 4, int(code_gen_dict["$LAST_WRITE_ELEM$"][0])), + } + return config + def code_generation_ipgen(self, model, fpgapart, clk): 
"""Normally: Generates C++ code and tcl script for IP generation. Here: Generates (System-)Verilog code for IP generation.""" diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py new file mode 100644 index 0000000000..9120aa91d0 --- /dev/null +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -0,0 +1,322 @@ +# Copyright (c) 2022, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import pytest + +from onnx import TensorProto, helper +from pyverilator.util.axi_utils import axilite_write, reset_rtlsim +from qonnx.core.datatype import DataType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.general.im2col import compute_conv_output_dim +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.util.basic import gen_finn_dt_tensor + +import finn.core.onnx_exec as oxe +from finn.core.rtlsim_exec import rtlsim_exec +from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP +from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO +from finn.transformation.fpgadataflow.prepare_ip import PrepareIP + + +def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + im2col_node = helper.make_node( + "Im2Col", + ["inp"], + ["outp"], + domain="finn.custom_op.general", + stride=[stride_h, stride_w], + kernel_size=[k_h, k_w], + input_shape=str((1, ifm_dim_h, ifm_dim_w, ifm_ch)), + dilations=[dilation_h, dilation_w], + pad_amount=[0, 0, 0, 0], + pad_value=0, + ) + graph = helper.make_graph( + nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] + ) + + model = helper.make_model(graph, producer_name="im2col-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + return model + + +def make_single_slidingwindow_modelwrapper( + k, ifm_ch, ifm_dim, ofm_dim, simd, m, parallel_window, stride, dilation, idt, dw=0 +): + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h, ofm_dim_w = ofm_dim + + odt = idt + inp = helper.make_tensor_value_info( + "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] + ) + outp = helper.make_tensor_value_info( + "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] + ) + + SlidingWindow_node = helper.make_node( + "ConvolutionInputGenerator_rtl", + ["inp"], + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + ConvKernelDim=[k_h, k_w], + IFMChannels=ifm_ch, + IFMDim=[ifm_dim_h, ifm_dim_w], + OFMDim=[ofm_dim_h, ofm_dim_w], + SIMD=simd, + M=m, + parallel_window=parallel_window, + Stride=[stride_h, stride_w], + Dilation=[dilation_h, dilation_w], + inputDataType=idt.name, + outputDataType=odt.name, + depthwise=dw, + ) + graph = helper.make_graph( + nodes=[SlidingWindow_node], + name="slidingwindow_graph", + inputs=[inp], + outputs=[outp], + ) + + model = helper.make_model(graph, producer_name="slidingwindow-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", odt) + + return model + + +def prepare_inputs(input_tensor): + return {"inp": input_tensor} + + +# input datatype +@pytest.mark.parametrize("idt", [DataType["UINT4"]]) +# kernel size +@pytest.mark.parametrize("k", [[3, 3]]) +# input dimension +@pytest.mark.parametrize("ifm_dim_series", [[[32, 32], [16, 16], [8, 8]]]) +# input channels 
+@pytest.mark.parametrize("ifm_ch", [6]) +# Stride +@pytest.mark.parametrize("stride", [[1, 1]]) +# Dilation +@pytest.mark.parametrize("dilation", [[1, 1]]) +# depthwise +@pytest.mark.parametrize("dw", [0, 1]) +# input channel parallelism ("SIMD") +@pytest.mark.parametrize("simd", [2, 6]) +# parallel_window enable (MMV_out = M*K) +@pytest.mark.parametrize("parallel_window", [0]) +# in/out MMV ("M") +@pytest.mark.parametrize("m", [1]) +@pytest.mark.slow +@pytest.mark.vivado +def test_fpgadataflow_slidingwindow_rtl_dynamic( + idt, k, ifm_dim_series, ifm_ch, stride, dilation, dw, simd, m, parallel_window +): + # Begin test by generating RTL SWG normally for the first FM of the series. + # The following FM dimensions must be equal or smaller than the initial + # dimensions (in terms of required buffer depth). + ifm_dim = ifm_dim_series[0] + + k_h, k_w = k + ifm_dim_h, ifm_dim_w = ifm_dim + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) + ofm_dim = [ofm_dim_h, ofm_dim_w] + kernel_width = (k_w - 1) * dilation_w + 1 # incl. dilation + kernel_height = (k_h - 1) * dilation_h + 1 # incl. dilation + + if simd > ifm_ch: + pytest.skip("SIMD cannot be larger than number of input channels") + if ifm_ch % simd != 0: + pytest.skip("SIMD must divide number of input channels") + if kernel_height > ifm_dim_h or stride_h > ifm_dim_h: + pytest.skip( + "Illegal convolution configuration: kernel or stride > FM dimension" + ) + if kernel_width > ifm_dim_w or stride_w > ifm_dim_w: + pytest.skip( + "Illegal convolution configuration: kernel or stride > FM dimension" + ) + if (k_h == 1 and (stride_h != 1 or dilation_h != 1)) or ( + k_w == 1 and (stride_w != 1 or dilation_w != 1) + ): + pytest.skip( + """Illegal convolution configuration: + stride or dilation defined for unitary kernel dim""" + ) + if k_h == 1 and k_w == 1 and simd != ifm_ch: + pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") + if parallel_window and simd != ifm_ch: + pytest.skip("Parallel window requires SIMD=C") + + model = make_single_slidingwindow_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + simd=simd, + m=m, + parallel_window=parallel_window, + stride=stride, + dilation=dilation, + idt=idt, + dw=dw, + ) + + # Simulate using stitched-ip-rtlsim so we can use existing infrastructure + # that supports hook functions to re-program configuration before rtlsim + model = model.transform(InsertFIFO(True)) # required for proper simulation + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = model.transform(CreateStitchedIP("xc7z020clg400-1", 5)) + model.set_metadata_prop("exec_mode", "rtlsim") + + # Helper function that delivers the hook to program the SWG via AXI-Lite + def config_hook(config): + if config is None: + return None + + def write_swg_config(sim): + axi_name = "s_axi_cfg_0_" + # Write config registers to the SWG, dict defines (addr, value) tuples + for config_entry in config.values(): + axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) + axilite_write( + sim, 0, 1, basename=axi_name + ) # 1. set cfg_valid flag (>= 1 cycle) + reset_rtlsim(sim) # 2. reset SWG (>= 1 cycle) + axilite_write( + sim, 0, 0, basename=axi_name + ) # 3. 
unset cfg_valid flag (not required) + + return write_swg_config + + # Helper function to update tensor dimensions manually because shape inference + # does not work on FINN nodes (they assume well-defined tensor shapes). + def update_tensor_dim(model, tensor_name, new_hw): + shape = model.get_tensor_shape(tensor_name) + shape[1] = new_hw[0] + shape[2] = new_hw[1] + model.set_tensor_shape(tensor_name, shape) + + # Simulate 1 FM for each dimension in the series + for i, ifm_dim in enumerate(ifm_dim_series): + ifm_dim_h, ifm_dim_w = ifm_dim + ofm_dim_h = compute_conv_output_dim(ifm_dim_h, k_h, stride_h, 0, dilation_h) + ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) + ofm_dim = [ofm_dim_h, ofm_dim_w] + + config = None + if i > 0: # skip re-programming for initial FM dimension + # Necessary update of node and tensor attributes to make rtlsim work: + swg_node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] + swg_inst = getCustomOp(swg_node) + update_tensor_dim(model, swg_node.input[0], ifm_dim) + update_tensor_dim(model, swg_node.output[0], ofm_dim) + + # Generate config, also overwrites IFMDim/OFMDim attributes: + config = swg_inst.get_dynamic_config(ifm_dim) + + # Also update FIFO nodes and corresponding tensors + fifo_node = model.get_nodes_by_op_type("StreamingFIFO")[0] + fifo_inst = getCustomOp(fifo_node) + shape = fifo_inst.get_nodeattr("folded_shape") + shape[1] = ifm_dim_h + shape[2] = ifm_dim_w + fifo_inst.set_nodeattr("folded_shape", shape) + update_tensor_dim(model, fifo_node.input[0], ifm_dim) + + fifo_node = model.get_nodes_by_op_type("StreamingFIFO")[1] + fifo_inst = getCustomOp(fifo_node) + shape = fifo_inst.get_nodeattr("folded_shape") + shape[1] = ofm_dim_h + shape[2] = ofm_dim_w + fifo_inst.set_nodeattr("folded_shape", shape) + update_tensor_dim(model, fifo_node.output[0], ofm_dim) + + # Run rtlsim on stitched-ip + x = gen_finn_dt_tensor(idt, (1, ifm_dim_h, ifm_dim_w, ifm_ch)) + context = prepare_inputs(x) + rtlsim_exec(model, context, pre_hook=config_hook(config)) + y_produced = context["outp"] + + # Generate golden result + golden = make_single_im2col_modelwrapper( + k=k, + ifm_ch=ifm_ch, + ifm_dim=ifm_dim, + ofm_dim=ofm_dim, + stride=stride, + dilation=dilation, + idt=idt, + ) + input_dict = prepare_inputs(x) + y_expected = oxe.execute_onnx(golden, input_dict)["outp"] + + # Check result + if dw == 0: + assert (y_produced == y_expected).all() + else: + y_expected = y_expected.reshape( + 1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd + ) + y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) + y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) + assert (y_produced == y_expected).all() From 394d1fa2c02a71a8a5ce5e6a5c4cff3c16c3a3a6 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 09:50:30 +0200 Subject: [PATCH 171/628] [Refactor] deprecate singuler in/outFIFODepth: in/outFIFODepths only --- src/finn/builder/build_dataflow_config.py | 2 +- .../fpgadataflow/channelwise_op_batch.py | 4 +-- .../custom_op/fpgadataflow/hlscustomop.py | 7 ++--- .../fpgadataflow/matrixvectoractivation.py | 7 +++-- .../fpgadataflow/thresholding_batch.py | 4 +-- .../fpgadataflow/vectorvectoractivation.py | 7 +++-- .../fpgadataflow/convert_to_hls_layers.py | 1 + .../fpgadataflow/insert_fifo.py | 30 ++++++------------- .../fpgadataflow/set_fifo_depths.py | 26 +++++++++++----- tests/end2end/test_end2end_bnn_pynq.py | 4 +-- 10 files changed, 47 insertions(+), 45 deletions(-) diff --git 
a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 13946c9d1e..51e7516101 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -257,7 +257,7 @@ class DataflowBuildConfig: #: setting the FIFO sizes. auto_fifo_strategy: Optional[ AutoFIFOSizingMethod - ] = AutoFIFOSizingMethod.CHARACTERIZE + ] = AutoFIFOSizingMethod.LARGEFIFO_RTLSIM #: Memory resource type for large FIFOs #: Only relevant when `auto_fifo_depths = True` diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 3ed76db298..9d08a24328 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -103,8 +103,8 @@ def get_nodeattr_types(self): "paramDataType": ("s", True, ""), "outputDataType": ("s", True, ""), # input and output FIFO depths - "inFIFODepth": ("i", False, 0), - "outFIFODepth": ("i", False, 0), + "inFIFODepths": ("ints", False, [0]), + "outFIFODepths": ("ints", False, [0]), # number of input vectors, examples: # [1] is a single vector (like a FC layer with batch=1) # [4] is four vectors (like a FC layer with batch=4) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 79ae695756..7d322dc372 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -108,12 +108,9 @@ def get_nodeattr_types(self): # ID of FPGA device to which this Op is allocated, in # a multi-FPGA setting "device_id": ("i", False, 0), - # input and output FIFO depths - "inFIFODepth": ("i", False, 2), - "outFIFODepth": ("i", False, 2), # input and output FIFO depths for multi-I/O nodes - "inFIFODepths": ("ints", False, []), - "outFIFODepths": ("ints", False, []), + "inFIFODepths": ("ints", False, [2]), + "outFIFODepths": ("ints", False, [2]), "output_hook": ("s", False, ""), # accumulated characteristic function over two periods "io_chrc_in": ("t", False, np.asarray([], dtype=np.int32)), diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index d5dbc86c4e..e78a918e81 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1227,8 +1227,11 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() ) - in_fifo_depth = self.get_nodeattr("inFIFODepth") - out_fifo_depth = self.get_nodeattr("outFIFODepth") + # TODO can we deprecate this entirely? 
this looks like legacy code + # that does not really serve a purpose - FIFO sizes are not typically + # allocated at this point; at best they are set to 2 as the default + in_fifo_depth = 2 + out_fifo_depth = 2 # insert depth pragmas only if specified if in_fifo_depth != 0: self.code_gen_dict["$PRAGMAS$"].append( diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 828ddd9737..110e456cbd 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -76,8 +76,8 @@ def get_nodeattr_types(self): "weightDataType": ("s", True, ""), "outputDataType": ("s", True, ""), # input and output FIFO depths - "inFIFODepth": ("i", False, 0), - "outFIFODepth": ("i", False, 0), + "inFIFODepths": ("ints", False, [0]), + "outFIFODepths": ("ints", False, [0]), # number of input vectors, examples: # [1] is a single vector (like a FC layer with batch=1) # [4] is four vectors (like a FC layer with batch=4) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index abcb1c756d..6391f27bbb 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -578,8 +578,11 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() ) - in_fifo_depth = self.get_nodeattr("inFIFODepth") - out_fifo_depth = self.get_nodeattr("outFIFODepth") + # TODO can we deprecate this entirely? this looks like legacy code + # that does not really serve a purpose - FIFO sizes are not typically + # allocated at this point; at best they are set to 2 as the default + in_fifo_depth = 2 + out_fifo_depth = 2 # insert depth pragmas only if specified if in_fifo_depth != 0: self.code_gen_dict["$PRAGMAS$"].append( diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 429bc34ffc..753dbb0f87 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1234,6 +1234,7 @@ def apply(self, model): inputDataType=dt.name, numInputVectors=vecs, NumOutputStreams=n_outputs, + outFIFODepths=[2] * n_outputs, name="DuplicateStreams_Batch_" + node.name, ) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index b9222cf3ee..e75d1880cb 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -138,14 +138,8 @@ def apply(self, model): # check if outFIFOdepth attribute of first node # and inFIFOdepth attribute of consumer node is equal - if idx_out == 0: - n0_depth = n0.get_nodeattr("outFIFODepth") - else: - n0_depth = n0.get_nodeattr("outFIFODepths")[idx_out] - if idx_inp == 0: - n1_depth = n1.get_nodeattr("inFIFODepth") - else: - n1_depth = n1.get_nodeattr("inFIFODepths")[idx_inp] + n0_depth = n0.get_nodeattr("outFIFODepths")[idx_out] + n1_depth = n1.get_nodeattr("inFIFODepths")[idx_inp] if n0_depth == n1_depth: fifo_depth = n0_depth @@ -187,18 +181,12 @@ def apply(self, model): if inp == output_name: consumer.input[idx] = fifo_output_tensor.name # ensure created FIFO depth is reflected on both sides - if idx_out == 0: - n0.set_nodeattr("outFIFODepth", fifo_depth) - else: - odepths = 
n0.get_nodeattr("outFIFODepths") - odepths[idx_out] = fifo_depth - n0.set_nodeattr("outFIFODepths", odepths) - if idx_inp == 0: - n1.set_nodeattr("inFIFODepth", fifo_depth) - else: - idepths = n1.get_nodeattr("inFIFODepths") - idepths[idx_inp] = fifo_depth - n1.set_nodeattr("inFIFODepths", idepths) + odepths = n0.get_nodeattr("outFIFODepths") + odepths[idx_out] = fifo_depth + n0.set_nodeattr("outFIFODepths", odepths) + idepths = n1.get_nodeattr("inFIFODepths") + idepths[idx_inp] = fifo_depth + n1.set_nodeattr("inFIFODepths", idepths) graph_modified = True @@ -221,7 +209,7 @@ def apply(self, model): else: fld_shape = n0.get_folded_input_shape(inp_ind) dtype = n0.get_input_datatype(inp_ind) - fifo_depth = n0.get_nodeattr("inFIFODepth") + fifo_depth = n0.get_nodeattr("inFIFODepths")[inp_ind] if fifo_depth <= 2: warnings.warn("Overriding input FIFO depth to 32") diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 90ea853b60..f715aaeffb 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -250,14 +250,21 @@ def apply(self, model): ) assert node.op_type != "StreamingFIFO", "Found existing StreamingFIFO node" node = getCustomOp(node) + ifd = node.get_nodeattr("inFIFODepths") + ofd = node.get_nodeattr("outFIFODepths") if self.max_depth is not None: - node.set_nodeattr("inFIFODepth", self.max_depth) - node.set_nodeattr("outFIFODepth", self.max_depth) + ifd = [self.max_depth] * len(ifd) + ofd = [self.max_depth] * len(ofd) else: - i_depth = np.prod(node.get_folded_input_shape()[:-1]) - o_depth = np.prod(node.get_folded_output_shape()[:-1]) - node.set_nodeattr("inFIFODepth", i_depth) - node.set_nodeattr("outFIFODepth", o_depth) + # set each FIFO to its tensor size + # (except stream width hence the :-1) + for i in range(len(ifd)): + ifd[i] = np.prod(node.get_folded_input_shape(i)[:-1]) + for o in range(len(ofd)): + ofd[o] = np.prod(node.get_folded_output_shape(o)[:-1]) + node.set_nodeattr("inFIFODepths", ifd) + node.set_nodeattr("outFIFODepths", ofd) + if node.onnx_node.op_type in extw_optypes: mmode = node.get_nodeattr("mem_mode") if mmode == "external": @@ -380,8 +387,11 @@ def apply(self, model): reset_implementation(node_inst) del fifos[node.name] else: - getCustomOp(node).set_nodeattr("inFIFODepth", 0) - getCustomOp(node).set_nodeattr("outFIFODepth", 0) + inst = getCustomOp(node) + ifd = inst.get_nodeattr("inFIFODepths") + ofd = inst.get_nodeattr("outFIFODepths") + inst.set_nodeattr("inFIFODepths", [0] * len(ifd)) + inst.set_nodeattr("outFIFODepths", [0] * len(ofd)) # for every extw node we changed from external to decoupled, # change back and reset implementation if node.op_type in extw_optypes: diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 103f18b514..5f787d1f88 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -569,8 +569,8 @@ def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, kind): for node in hls_layers: if node.op_type != "StreamingFIFO": op_inst = getCustomOp(node) - assert op_inst.get_nodeattr("inFIFODepth") == 0 - assert op_inst.get_nodeattr("outFIFODepth") == 0 + assert op_inst.get_nodeattr("inFIFODepths") == [0] + assert op_inst.get_nodeattr("outFIFODepths") == [0] model.save( get_checkpoint_name( topology, wbits, abits, QONNX_export, "fifodepth_" + kind From ab8fe1e200a96e16d50fc8cfe9f214966d92cb98 Mon Sep 
17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 09:58:44 +0200 Subject: [PATCH 172/628] [Refactor] remove remaining outFIFODepth refs --- .../transformation/fpgadataflow/derive_characteristic.py | 5 +---- src/finn/transformation/fpgadataflow/insert_fifo.py | 7 ++++--- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index c171fce3cd..849ecea1e9 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -149,7 +149,7 @@ def applyNodeLocal(self, node): assert ( len(prod_chrc) == 2 * period ), "Found unexpected characterization attribute" - if prod.get_nodeattr("outFIFODepth") > 2: + if any({[x > 2 for x in prod.get_nodeattr("outFIFODepths")]}): # FIFO depth already set, can skip this node return (node, False) @@ -180,9 +180,6 @@ def applyNodeLocal(self, node): # set output FIFO depth for this (producing) node # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) # for each tensor - if len(out_fifo_depths) > 0: - prod.set_nodeattr("outFIFODepth", out_fifo_depths[0]) - # used only for multi-producer nodes prod.set_nodeattr("outFIFODepths", out_fifo_depths) except KeyError: diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index e75d1880cb..b3c83d96e8 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -256,10 +256,11 @@ def apply(self, model): ), """Insert tlast marker should be done after inserting the FIFOs""" n0 = getCustomOp(final_node) + out_ind = list(final_node.output).index(graph_out_name) # determine fifo node attributes - fld_shape = n0.get_folded_output_shape() - dtype = n0.get_output_datatype() - fifo_depth = n0.get_nodeattr("outFIFODepth") + fld_shape = n0.get_folded_output_shape(out_ind) + dtype = n0.get_output_datatype(out_ind) + fifo_depth = n0.get_nodeattr("outFIFODepths")[out_ind] if fifo_depth <= 2: warnings.warn("Overriding output FIFO depth to 32") From 06f9f81aab6825972f330dc11076e6807456a176 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 11:13:15 +0200 Subject: [PATCH 173/628] [Refactor] typo fix in DeriveFIFOSizes --- src/finn/transformation/fpgadataflow/derive_characteristic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 849ecea1e9..8226797210 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -149,7 +149,7 @@ def applyNodeLocal(self, node): assert ( len(prod_chrc) == 2 * period ), "Found unexpected characterization attribute" - if any({[x > 2 for x in prod.get_nodeattr("outFIFODepths")]}): + if any([x > 2 for x in prod.get_nodeattr("outFIFODepths")]): # FIFO depth already set, can skip this node return (node, False) From 3f2d6d97aa15a9c33057cb5ab20f39aa1c3a0dd5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 12:02:24 +0200 Subject: [PATCH 174/628] [Refactor] add optional ind=0 argument to HLSCustomOp stream properties --- .../fpgadataflow/addstreams_batch.py | 12 ++++---- .../fpgadataflow/channelwise_op_batch.py | 16 +++++------ src/finn/custom_op/fpgadataflow/checksum.py | 14 +++++----- 
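For readers following the FIFO refactor above, the short sketch below illustrates the convention these patches converge on; it is a hedged toy stand-in, not the actual FINN HLSCustomOp class (the DemoNode class, its attrs dict, the stream counts and the depth values are hypothetical). It shows the list-valued inFIFODepths/outFIFODepths attributes with one entry per stream and a default of 2, the per-stream update pattern used by InsertFIFO, and the corrected any() check from DeriveFIFOSizes.

# Minimal sketch, assuming a toy node object; not the real FINN API.
class DemoNode:
    """Toy node mimicking the refactored node attribute convention."""

    def __init__(self, n_inputs=1, n_outputs=1):
        self.attrs = {
            "inFIFODepths": [2] * n_inputs,    # was: scalar "inFIFODepth"
            "outFIFODepths": [2] * n_outputs,  # was: scalar "outFIFODepth"
        }

    def get_nodeattr(self, name):
        return self.attrs[name]

    def set_nodeattr(self, name, value):
        self.attrs[name] = value


node = DemoNode(n_inputs=2, n_outputs=1)

# update the depth of one particular input stream (index 1),
# mirroring how InsertFIFO reflects a created FIFO on both sides
idepths = node.get_nodeattr("inFIFODepths")
idepths[1] = 32
node.set_nodeattr("inFIFODepths", idepths)

# the DeriveFIFOSizes-style "already sized?" check, using a plain generator
already_sized = any(d > 2 for d in node.get_nodeattr("outFIFODepths"))
print(node.get_nodeattr("inFIFODepths"), already_sized)  # [2, 32] False

Keeping depths as per-stream lists lets multi-I/O nodes such as Concat and DuplicateStreams_Batch size each FIFO independently, which the earlier scalar attributes could not express.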
src/finn/custom_op/fpgadataflow/concat.py | 8 +++--- .../fpgadataflow/convolutioninputgenerator.py | 14 +++++----- .../convolutioninputgenerator1d.py | 16 +++++------ .../custom_op/fpgadataflow/downsampler.py | 16 +++++------ .../fpgadataflow/duplicatestreams_batch.py | 12 ++++---- .../custom_op/fpgadataflow/fmpadding_batch.py | 14 +++++----- .../fpgadataflow/globalaccpool_batch.py | 16 +++++------ .../custom_op/fpgadataflow/hlscustomop.py | 28 ++++++++++++------- src/finn/custom_op/fpgadataflow/iodma.py | 14 +++++----- .../fpgadataflow/labelselect_batch.py | 16 +++++------ src/finn/custom_op/fpgadataflow/lookup.py | 16 +++++------ .../fpgadataflow/matrixvectoractivation.py | 12 ++++---- src/finn/custom_op/fpgadataflow/pool_batch.py | 16 +++++------ .../streamingdatawidthconverter_batch.py | 16 +++++------ .../custom_op/fpgadataflow/streamingfifo.py | 12 ++++---- .../fpgadataflow/streamingmaxpool_batch.py | 16 +++++------ .../fpgadataflow/thresholding_batch.py | 16 +++++------ .../custom_op/fpgadataflow/tlastmarker.py | 8 +++--- src/finn/custom_op/fpgadataflow/upsampler.py | 16 +++++------ .../fpgadataflow/vectorvectoractivation.py | 16 +++++------ 23 files changed, 174 insertions(+), 166 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index 1190ad0646..cd0af6b3ab 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -73,10 +73,10 @@ def get_folded_input_shape(self, ind=0): ishape = tuple(vecs + [ich // pe, pe]) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): return self.get_normal_input_shape() - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return self.get_folded_input_shape() def make_shape_compatible_op(self, model): @@ -127,11 +127,11 @@ def verify_node(self): return info_messages - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" # we need to set output datatype to the next larger int or uint # enhancement: consider specifying w/ explicit outputDataType attribute @@ -142,14 +142,14 @@ def get_output_datatype(self): else: return DataType.get_smallest_possible(2 * idt.max()) - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns input stream width.""" ibits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") in_width = pe * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width.""" obits = self.get_output_datatype().bitwidth() pe = self.get_nodeattr("PE") diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 9d08a24328..f2d9f1aeb2 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -221,23 +221,23 @@ def lut_estimation(self): # total cost return comparator_cost + lutram_cost - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return 
DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() return i_bits * self.get_nodeattr("PE") - def get_outstream_width(self): + def get_outstream_width(self, ind=0): o_bits = self.get_output_datatype().bitwidth() return o_bits * self.get_nodeattr("PE") - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ich = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") fold = ich // pe @@ -245,17 +245,17 @@ def get_folded_input_shape(self): folded_input_shape = tuple(vecs + [fold, pe]) return folded_input_shape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): # same shape as input return self.get_folded_input_shape() - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ich = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) normal_input_shape = tuple(vecs + [ich]) return normal_input_shape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): # same shape as input return self.get_normal_input_shape() diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index bde285eb0d..21a09a1c5e 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -77,31 +77,31 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" # here same as input data type return DataType[self.get_nodeattr("inputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): dtype = DataType[self.get_nodeattr("inputDataType")] folded_shape = self.get_nodeattr("folded_shape") in_width = folded_shape[-1] * dtype.bitwidth() return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): return self.get_instream_width() - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): return self.get_nodeattr("folded_shape") def get_folded_output_shape(self): return self.get_nodeattr("folded_shape") - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): # derive normal shape from folded shape # checksum nodes are inserted in between fpgadataflow nodes # the folded shape could be for example (1, nf, pe) @@ -127,7 +127,7 @@ def get_normal_input_shape(self): def get_ap_int_max_w(self): return max(super().get_ap_int_max_w(), 32) - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): # same shape as input return self.get_normal_input_shape() diff --git a/src/finn/custom_op/fpgadataflow/concat.py b/src/finn/custom_op/fpgadataflow/concat.py index 5fcf9cf96c..4437bcd198 100644 --- a/src/finn/custom_op/fpgadataflow/concat.py +++ b/src/finn/custom_op/fpgadataflow/concat.py @@ -74,12 +74,12 @@ def get_normal_input_shape(self, ind=0): def get_folded_input_shape(self, ind=0): return self.get_normal_input_shape(ind) - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): total_elems = self.get_total_elems() vecs = list(self.get_nodeattr("numInputVectors")) return tuple(vecs + [total_elems]) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return 
self.get_normal_output_shape() def make_shape_compatible_op(self, model): @@ -106,7 +106,7 @@ def get_input_datatype(self, ind=0): # input dt identical for all inputs return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): return self.get_input_datatype() def get_instream_width(self, ind=0): @@ -115,7 +115,7 @@ def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() return elems * ibits - def get_outstream_width(self): + def get_outstream_width(self, ind=0): obits = self.get_output_datatype().bitwidth() total_elems = self.get_total_elems() out_width = total_elems * obits diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 251a9882c5..6f039f7d67 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -99,13 +99,13 @@ def get_nodeattr(self, name): assert ret[0] == ret[1] == 1, "Only dilation=1 supported" return ret - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") @@ -114,7 +114,7 @@ def get_folded_input_shape(self): folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -126,7 +126,7 @@ def get_normal_output_shape(self): oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -158,11 +158,11 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] @@ -176,7 +176,7 @@ def get_instream_width(self): in_width = simd * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns stream width, input and output stream width are equal for the sliding window function, so the function to determine the input stream width can be reused.""" diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index aba74baecc..f1c84662cc 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -91,13 +91,13 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = 
self.get_nodeattr("IFMChannels") ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") @@ -106,7 +106,7 @@ def get_folded_input_shape(self): folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -118,7 +118,7 @@ def get_normal_output_shape(self): oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -153,15 +153,15 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") @@ -169,7 +169,7 @@ def get_instream_width(self): in_width = simd * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): if self.use_parallel_window_output(): # feed all window pixels in parallel k_h, k_w = self.get_nodeattr("ConvKernelDim") diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index da29a524b6..e5819cccdd 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -73,21 +73,21 @@ def get_exp_cycles(self): exp_cycles = channels / simd * batch_size * idim * idim return int(exp_cycles) - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): idim = self.get_nodeattr("ImgDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") ishape = (batch, idim, idim, num_ch) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): odim = self.get_downsampled_odim() num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") oshape = (batch, odim, odim, num_ch) return oshape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): normal_ishape = list(self.get_normal_input_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -96,7 +96,7 @@ def get_folded_input_shape(self): folded_ishape = normal_ishape[:-1] + [fold, simd] return tuple(folded_ishape) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): normal_oshape = list(self.get_normal_output_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -129,21 +129,21 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" ret = DataType[self.get_nodeattr("inputDataType")] 
return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output. (Same as input datatype)""" return self.get_input_datatype() - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return ibits * simd - def get_outstream_width(self): + def get_outstream_width(self, ind=0): obits = self.get_output_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return obits * simd diff --git a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py index 7aee3a401e..93cde15ca7 100644 --- a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py @@ -61,13 +61,13 @@ def get_nodeattr_types(self): def get_num_output_streams(self): return self.get_nodeattr("NumOutputStreams") - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) ishape = tuple(vecs + [ch]) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") vecs = list(self.get_nodeattr("numInputVectors")) @@ -138,22 +138,22 @@ def verify_node(self): return info_messages - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("inputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns input stream width.""" ibits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") in_width = pe * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width.""" obits = self.get_output_datatype().bitwidth() pe = self.get_nodeattr("PE") diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index d69ea471ea..2034fb9381 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -90,20 +90,20 @@ def get_exp_cycles(self): exp_cycles = (channels / simd) * batch_size * odim_h * odim_w return int(exp_cycles) - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): idim_h, idim_w = self.get_nodeattr("ImgDim") num_ch = self.get_nodeattr("NumChannels") ishape = (1, idim_h, idim_w, num_ch) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): odim_h, odim_w = self.get_padded_odim() num_ch = self.get_nodeattr("NumChannels") oshape = (1, odim_h, odim_w, num_ch) return oshape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): normal_ishape = list(self.get_normal_input_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -112,7 +112,7 @@ def get_folded_input_shape(self): folded_ishape = normal_ishape[:-1] + [fold, simd] return tuple(folded_ishape) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): normal_oshape = list(self.get_normal_output_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -144,7 +144,7 @@ def 
infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" ret = DataType[self.get_nodeattr("inputDataType")] # the hlslib op always pads with zeros, so ensure that the DataType @@ -156,12 +156,12 @@ def get_output_datatype(self): """Returns FINN DataType of output. (Same as input datatype)""" return self.get_input_datatype() - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return ibits * simd - def get_outstream_width(self): + def get_outstream_width(self, ind=0): obits = self.get_output_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return obits * simd diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index adafa7dcf3..e7fa5bc004 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -56,13 +56,13 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) ishape = tuple(vecs + [ch]) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") vecs = list(self.get_nodeattr("numInputVectors")) @@ -71,7 +71,7 @@ def get_folded_input_shape(self): folded_ishape = tuple(vecs + [folds, pe]) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) if len(vecs) == 1: @@ -80,7 +80,7 @@ def get_normal_output_shape(self): oshape = tuple([vecs[0]] + [1, 1, ch]) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): ch = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") unfolded_shape = list(self.get_normal_output_shape()) @@ -139,11 +139,11 @@ def verify_node(self): return info_messages - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" # determine data type from image size and input type idt = DataType[self.get_nodeattr("inputDataType")] @@ -155,14 +155,14 @@ def get_output_datatype(self): extreme_value = npixels * idt.max() return DataType.get_smallest_possible(extreme_value) - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns input stream width.""" ibits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") in_width = pe * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width.""" obits = self.get_output_datatype().bitwidth() pe = self.get_nodeattr("PE") diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index 7d322dc372..d6993206be 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -697,40 +697,48 @@ def pragmas(self): HLSCustomOp class but has to be filled by every node.""" pass - def 
get_normal_input_shape(self): + def get_input_datatype(self, ind=0): + """Returns FINN DataType of input stream ind.""" + raise Exception("get_input_datatype not implemented for this op") + + def get_output_datatype(self, ind=0): + """Returns FINN DataType of output stream ind.""" + raise Exception("get_output_datatype not implemented for this op") + + def get_normal_input_shape(self, ind=0): """Returns normal input shape if implemented.""" raise Exception("get_normal_input_shape not implemented for this op") - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): """Returns folded output shape if implemented.""" raise Exception("get_normal_output_shape not implemented for this op") - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): """Returns folded input shape (according to synapse folding), if implemented.""" raise Exception("get_folded_input_shape not implemented for this op") - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): """Returns folded output shape (according to neuron folding), if implemented.""" raise Exception("get_folded_output_shape not implemented for this op") - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns input stream width, if implemented.""" raise Exception("get_instream_width not implemented for this op") - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width, if implemented.""" raise Exception("get_outstream_width not implemented for this op") - def get_instream_width_padded(self): + def get_instream_width_padded(self, ind=0): """Returns input stream width padded to a multiple of 8. This is required by the AXI Stream spec.""" - in_width = self.get_instream_width() + in_width = self.get_instream_width(ind=ind) return roundup_to_integer_multiple(in_width, 8) - def get_outstream_width_padded(self): + def get_outstream_width_padded(self, ind=0): """Returns output stream width padded to a multiple of 8. This is required by the AXI Stream spec.""" - out_width = self.get_outstream_width() + out_width = self.get_outstream_width(ind=ind) return roundup_to_integer_multiple(out_width, 8) def get_ap_int_max_w(self): diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 33ee1d359c..a80eb29a6d 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -106,10 +106,10 @@ def get_normal_input_shape(self): ishape = tuple(vecs + [num_ch]) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): return self.get_normal_input_shape() - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): if self.get_nodeattr("direction") == "in": raise ValueError("Folded input shape not defined for input IODMA") else: @@ -126,7 +126,7 @@ def get_folded_input_shape(self): shape.append(elems_per_word) return tuple(shape) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): if self.get_nodeattr("direction") == "out": raise ValueError("Folded output shape not defined for output IODMA") else: @@ -166,15 +166,15 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("dataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output. 
(Same as input datatype)""" return self.get_input_datatype() - def get_instream_width(self): + def get_instream_width(self, ind=0): if self.get_nodeattr("direction") == "in": return self.get_nodeattr("intfWidth") elif self.get_nodeattr("direction") == "out": @@ -182,7 +182,7 @@ def get_instream_width(self): else: raise ValueError("Invalid IODMA direction, please set to in or out") - def get_outstream_width(self): + def get_outstream_width(self, ind=0): if self.get_nodeattr("direction") == "out": return self.get_nodeattr("intfWidth") elif self.get_nodeattr("direction") == "in": diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index 3e27ee0111..03f89bd7ec 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -70,13 +70,13 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): nlabels = self.get_nodeattr("Labels") vecs = list(self.get_nodeattr("numInputVectors")) ishape = tuple(vecs + [nlabels]) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): nlabels = self.get_nodeattr("Labels") pe = self.get_nodeattr("PE") vecs = list(self.get_nodeattr("numInputVectors")) @@ -85,13 +85,13 @@ def get_folded_input_shape(self): folded_ishape = tuple(vecs + [folds, pe]) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): k = self.get_nodeattr("K") vecs = list(self.get_nodeattr("numInputVectors")) oshape = tuple(vecs + [k]) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): k = self.get_nodeattr("K") vecs = list(self.get_nodeattr("numInputVectors")) oshape = tuple(vecs + [k, 1]) @@ -152,24 +152,24 @@ def verify_node(self): return info_messages - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" ret = DataType[self.get_nodeattr("inputDataType")] return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" ret = DataType[self.get_nodeattr("outputDataType")] return ret - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns input stream width.""" ibits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") in_width = pe * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width.""" return self.get_output_datatype().bitwidth() diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index 613a91b628..fd3e2b5b1c 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -75,21 +75,21 @@ def get_exp_cycles(self): exp_cycles = int(n_inputs) return exp_cycles - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): return self.get_nodeattr("InputShape") - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): ishape = self.get_normal_input_shape() emb_dim = self.get_nodeattr("EmbeddingDim") oshape = list(ishape) + [emb_dim] return tuple(oshape) - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ishape = self.get_normal_input_shape() folded_ishape = list(ishape) + [1] return tuple(folded_ishape) - def get_folded_output_shape(self): + def 
get_folded_output_shape(self, ind=0): ishape = self.get_normal_input_shape() mem_mode = self.get_nodeattr("mem_mode") emb_dim = self.get_nodeattr("EmbeddingDim") @@ -135,19 +135,19 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): ret = DataType[self.get_nodeattr("InputType")] return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): ret = DataType[self.get_nodeattr("EmbeddingType")] return ret - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() return ibits - def get_outstream_width(self): + def get_outstream_width(self, ind=0): folded_oshape = self.get_folded_output_shape() obits = self.get_output_datatype().bitwidth() return obits * folded_oshape[-1] diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index e78a918e81..69763fbea8 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -409,16 +409,16 @@ def get_weight_datatype(self): """Returns FINN DataType of weights.""" return DataType[self.get_nodeattr("weightDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() in_width = i_bits * self.get_nodeattr("SIMD") return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): o_bits = self.get_output_datatype().bitwidth() out_width = o_bits * self.get_nodeattr("PE") return out_width @@ -474,7 +474,7 @@ def get_folded_input_shape(self, ind=0): return folded_input_shape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): mh = self.get_nodeattr("MH") pe = self.get_nodeattr("PE") nf = mh // pe @@ -482,13 +482,13 @@ def get_folded_output_shape(self): folded_output_shape = tuple(vecs + [nf, pe]) return folded_output_shape - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): mw = self.get_nodeattr("MW") vecs = list(self.get_nodeattr("numInputVectors")) normal_input_shape = tuple(vecs + [mw]) return normal_input_shape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): mh = self.get_nodeattr("MH") vecs = list(self.get_nodeattr("numInputVectors")) normal_output_shape = tuple(vecs + [mh]) diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index 3bf187fa9a..91cd537bae 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -74,11 +74,11 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("InputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" fxn = self.get_nodeattr("Function") odt = DataType[self.get_nodeattr("OutputDataType")] @@ -98,7 +98,7 @@ def get_output_datatype(self): return odt - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ifm_ch = self.get_nodeattr("Channels") odims = self.get_nodeattr("OutImgDims") batch_size = 
self.get_nodeattr("BatchSize") @@ -107,7 +107,7 @@ def get_normal_input_shape(self): ishape = (batch_size, *odims, k_prod * ifm_ch) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): normal_ishape = list(self.get_normal_input_shape()) ifm_ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") @@ -116,14 +116,14 @@ def get_folded_input_shape(self): folded_ishape = normal_ishape[:-1] + [fold, pe] return tuple(folded_ishape) - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): ofm_ch = self.get_nodeattr("Channels") odims = self.get_nodeattr("OutImgDims") batch_size = self.get_nodeattr("BatchSize") oshape = (batch_size, *odims, ofm_ch) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): normal_oshape = list(self.get_normal_output_shape()) ifm_ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") @@ -147,13 +147,13 @@ def get_exp_cycles(self): exp_cycles = ((ifm_ch * k_prod) / pe) * np.prod(odims) * batch_size return int(exp_cycles) - def get_instream_width(self): + def get_instream_width(self, ind=0): dt_bits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") in_width = int(dt_bits * pe) return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): dt_bits = self.get_output_datatype().bitwidth() pe = self.get_nodeattr("PE") out_width = int(dt_bits * pe) diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index 1e6b72e4d5..a3aa9d570d 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -60,19 +60,19 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("dataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("dataType")] - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ishape = self.get_nodeattr("shape") return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): oshape = self.get_nodeattr("shape") return oshape @@ -97,7 +97,7 @@ def check_divisible_iowidths(self): Please adjust PE and SIMD values so that OutWidth % InWidth = 0 or alternatively use impl_style = vivado""" - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): self.check_divisible_iowidths() iwidth = self.get_nodeattr("inWidth") ishape = self.get_normal_input_shape() @@ -117,7 +117,7 @@ def get_folded_input_shape(self): dummy_t = dummy_t.reshape(new_shape) return dummy_t.shape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): self.check_divisible_iowidths() owidth = self.get_nodeattr("outWidth") oshape = self.get_normal_output_shape() @@ -142,11 +142,11 @@ def get_number_output_values(self): folded_oshape = self.get_folded_output_shape() return np.prod(folded_oshape[:-1]) - def get_instream_width(self): + def get_instream_width(self, ind=0): in_width = self.get_nodeattr("inWidth") return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): out_width = self.get_nodeattr("outWidth") return out_width diff --git 
a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index f24cdcb932..d0accc2d36 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -206,7 +206,7 @@ def ipgen_singlenode_code(self): self.set_nodeattr("ip_vlnv", vlnv) self.code_gen_dict.clear() - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): depth = self.get_adjusted_depth() # depth has to be between 2 and 256 with the current # StreamingFIFO implementation @@ -237,22 +237,22 @@ def get_normal_input_shape(self): return normal_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): return self.get_normal_input_shape() - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): return self.get_nodeattr("folded_shape") - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return self.get_nodeattr("folded_shape") - def get_instream_width(self): + def get_instream_width(self, ind=0): dtype = DataType[self.get_nodeattr("dataType")] folded_shape = self.get_nodeattr("folded_shape") in_width = folded_shape[-1] * dtype.bitwidth() return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): dtype = DataType[self.get_nodeattr("dataType")] folded_shape = self.get_nodeattr("folded_shape") in_width = folded_shape[-1] * dtype.bitwidth() diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index 882b40a0aa..a0e60931ed 100755 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -57,11 +57,11 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("dataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("dataType")] @@ -82,13 +82,13 @@ def is_1d(self): ifm_dim, k, ifm_ch = self.get_1d_attrs_normalized() return (ifm_dim[0] == 1) and (k[0] == 1) - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("ImgDim") ifm_ch = self.get_nodeattr("NumChannels") ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("ImgDim") ifm_ch = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") @@ -99,7 +99,7 @@ def get_folded_input_shape(self): folded_ishape = (1, ifm_dim_h, ifm_dim_w, 1, ifm_ch) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("ImgDim") k_h, k_w = tuple(self.get_nodeattr("PoolDim")) ifm_ch = self.get_nodeattr("NumChannels") @@ -116,7 +116,7 @@ def get_normal_output_shape(self): oshape = (1, ofm_dim_h, ofm_dim_w, ifm_ch) return oshape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): # even though there is no folding in the current hlslib op, # insert a time multiplexing axis to remain compatible with the # shapes produced by the rest of the dataflow pipeline @@ -155,7 +155,7 @@ def get_exp_cycles(self): # TODO: adjust inaccurate formula return int(ifm_dim[1] * 
ifm_dim[1] * (1 + 1 / (k[1] * k[1]))) - def get_instream_width(self): + def get_instream_width(self, ind=0): dt_bits = self.get_input_datatype().bitwidth() pe = self.get_nodeattr("PE") ifm_ch = self.get_nodeattr("NumChannels") @@ -165,7 +165,7 @@ def get_instream_width(self): in_width = int(dt_bits * ifm_ch) return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """For streaming maxpool out stream width is the same as in stream width""" return self.get_instream_width() diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 110e456cbd..62e51cc7bf 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -185,11 +185,11 @@ def lut_estimation(self): # total cost return comparator_cost + lutram_cost - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] @@ -221,11 +221,11 @@ def minimize_accumulator_width(self, model): self.set_nodeattr("weightDataType", tdt.name) return DataType[self.get_nodeattr("weightDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() return i_bits * self.get_nodeattr("PE") - def get_outstream_width(self): + def get_outstream_width(self, ind=0): o_bits = self.get_output_datatype().bitwidth() return o_bits * self.get_nodeattr("PE") @@ -251,7 +251,7 @@ def get_ap_int_max_w(self): weightstream = self.get_weightstream_width() return max([weightstream, temp_value]) - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ich = self.get_nodeattr("NumChannels") pe = self.get_nodeattr("PE") fold = ich // pe @@ -259,17 +259,17 @@ def get_folded_input_shape(self): folded_input_shape = tuple(vecs + [fold, pe]) return folded_input_shape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): # same shape as input return self.get_folded_input_shape() - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ich = self.get_nodeattr("NumChannels") vecs = list(self.get_nodeattr("numInputVectors")) normal_input_shape = tuple(vecs + [ich]) return normal_input_shape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): # same shape as input return self.get_normal_input_shape() diff --git a/src/finn/custom_op/fpgadataflow/tlastmarker.py b/src/finn/custom_op/fpgadataflow/tlastmarker.py index 7386aa7e63..1bd32442a1 100644 --- a/src/finn/custom_op/fpgadataflow/tlastmarker.py +++ b/src/finn/custom_op/fpgadataflow/tlastmarker.py @@ -218,21 +218,21 @@ def pragmas(self): def get_number_output_values(self): return self.get_nodeattr("NumIters") - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): stream_width = self.get_nodeattr("StreamWidth") elem_width = self.get_nodeattr("ElemWidth") n_packed_elems = stream_width // elem_width n_iters = self.get_nodeattr("NumIters") return (1, n_iters, n_packed_elems) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return self.get_folded_input_shape() - def get_instream_width(self): + def get_instream_width(self, ind=0): stream_width = self.get_nodeattr("StreamWidth") return stream_width - def 
get_outstream_width(self): + def get_outstream_width(self, ind=0): stream_width = self.get_nodeattr("StreamWidth") return stream_width diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index eb51fe39fc..a018fd35aa 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -73,7 +73,7 @@ def get_exp_cycles(self): exp_cycles = OFMDim * reps return int(exp_cycles) - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): IFMDim = self.get_nodeattr("IFMDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") @@ -84,7 +84,7 @@ def get_normal_input_shape(self): ishape = (batch, IFMDim, 1, num_ch) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): OFMDim = self.get_nodeattr("OFMDim") num_ch = self.get_nodeattr("NumChannels") batch = self.get_nodeattr("numInputVectors") @@ -95,11 +95,11 @@ def get_normal_output_shape(self): oshape = (batch, OFMDim, 1, num_ch) return oshape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): normal_ishape = list(self.get_normal_input_shape()) return tuple(normal_ishape) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): normal_oshape = list(self.get_normal_output_shape()) return tuple(normal_oshape) @@ -129,21 +129,21 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" ret = DataType[self.get_nodeattr("inputDataType")] return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output. (Same as input datatype)""" return self.get_input_datatype() - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() ifm_ch = self.get_nodeattr("NumChannels") return ibits * ifm_ch - def get_outstream_width(self): + def get_outstream_width(self, ind=0): obits = self.get_output_datatype().bitwidth() ifm_ch = self.get_nodeattr("NumChannels") return obits * ifm_ch diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 6391f27bbb..f9d09907e0 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -176,7 +176,7 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] @@ -184,21 +184,21 @@ def get_weight_datatype(self): """Returns FINN DataType of weights.""" return DataType[self.get_nodeattr("weightDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() in_width = i_bits * self.get_nodeattr("PE") return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): o_bits = self.get_output_datatype().bitwidth() out_width = o_bits * self.get_nodeattr("PE") return out_width - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): k_h, k_w = self.get_nodeattr("Kernel") sf = k_h * k_w dim_h, 
dim_w = self.get_nodeattr("Dim") @@ -208,7 +208,7 @@ def get_folded_input_shape(self): folded_input_shape = tuple([1, dim_h, dim_w, sf * nf, pe]) return folded_input_shape - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): ch = self.get_nodeattr("Channels") pe = self.get_nodeattr("PE") nf = ch // pe @@ -216,14 +216,14 @@ def get_folded_output_shape(self): folded_output_shape = tuple([1, dim_h, dim_w, nf, pe]) return folded_output_shape - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") k_h, k_w = self.get_nodeattr("Kernel") normal_input_shape = tuple([1, dim_h, dim_w, k_h * k_w * ch]) return normal_input_shape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): ch = self.get_nodeattr("Channels") dim_h, dim_w = self.get_nodeattr("Dim") normal_output_shape = tuple([1, dim_h, dim_w, ch]) From 9375e30d61aaeb19410f29812d8c27bf594a23a5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 12:02:54 +0200 Subject: [PATCH 175/628] [FIFO] simplifying idt retrival after refactor --- src/finn/transformation/fpgadataflow/insert_fifo.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index b3c83d96e8..260525c5d5 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -203,12 +203,8 @@ def apply(self, model): n_input = first_node.input[inp_ind] n0 = getCustomOp(first_node) # determine fifo node attributes - if inp_ind == 0: - fld_shape = n0.get_folded_input_shape() - dtype = n0.get_input_datatype() - else: - fld_shape = n0.get_folded_input_shape(inp_ind) - dtype = n0.get_input_datatype(inp_ind) + fld_shape = n0.get_folded_input_shape(inp_ind) + dtype = n0.get_input_datatype(inp_ind) fifo_depth = n0.get_nodeattr("inFIFODepths")[inp_ind] if fifo_depth <= 2: From 6933d080379a9499f4200e6658ba38291b9c54f2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 14:44:37 +0200 Subject: [PATCH 176/628] [Refactor] add missing ind param to Checksum HLSCustomOp --- src/finn/custom_op/fpgadataflow/checksum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index 21a09a1c5e..c927c07df2 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -98,7 +98,7 @@ def get_outstream_width(self, ind=0): def get_folded_input_shape(self, ind=0): return self.get_nodeattr("folded_shape") - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return self.get_nodeattr("folded_shape") def get_normal_input_shape(self, ind=0): From 5b1b2ebe104b5a9dfcee112ca791d07535ab9a2f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 14:56:05 +0200 Subject: [PATCH 177/628] [Refactor] fix fifo depths property for concat layers at conversion time --- src/finn/transformation/fpgadataflow/convert_to_hls_layers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 753dbb0f87..9153124973 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1662,6 
+1662,7 @@ def apply(self, model): ElemsPerStream=elems_per_stream, inputDataType=dt0.name, numInputVectors=inp_vec, + inFIFODepths=[2] * len(node.input), ) graph.node.insert(node_ind, new_node) # remove old node From 5d005447e01a8b55f1df8e1043385fc8da767320 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 22 Sep 2022 15:14:42 +0200 Subject: [PATCH 178/628] [FIFO] fix MVAU characterization tests --- tests/fpgadataflow/test_fpgadataflow_mvau.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index a2f3448592..a7e7eba7ee 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -438,7 +438,7 @@ def test_fpgadataflow_fclayer_large_depth_decoupled_mode_rtlsim( @pytest.mark.parametrize("mh", [32]) @pytest.mark.fpgadataflow @pytest.mark.vivado -def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): +def test_fclayer_fifocharacterize_rtlsim(mem_mode, idt, wdt, act, nf, sf, mw, mh): if nf == -1: nf = mh if sf == -1: @@ -472,12 +472,13 @@ def test_fclayer_fifocharacterize(mem_mode, idt, wdt, act, nf, sf, mw, mh): model = model.transform(PrepareRTLSim()) model = model.transform(DeriveCharacteristic(exp_total_cycles)) node_inst = getCustomOp(model.graph.node[0]) - period_attr = node_inst.get_nodeattr("io_characteristic_period") + period_attr = node_inst.get_nodeattr("io_chrc_period") assert period_attr == exp_total_cycles - chrc = node_inst.get_nodeattr("io_characteristic") - assert len(chrc) == 4 * exp_total_cycles - chrc = np.asarray(chrc, dtype=np.uint8).reshape(2, -1) + chrc_in = node_inst.get_nodeattr("io_chrc_in") + chrc_out = node_inst.get_nodeattr("io_chrc_out") + assert chrc_in.shape == (1, 2 * exp_total_cycles) + assert chrc_out.shape == (1, 2 * exp_total_cycles) # first sf cycles should read input continuously - assert (chrc[0, :sf] == range(1, sf + 1)).all() + assert (chrc_in[0, :sf] == range(1, sf + 1)).all() # all outputs should be produced within the exp n of cycles - assert chrc[1, exp_total_cycles] == nf + assert chrc_out[0, exp_total_cycles] == nf From fdb00bb5a914012dfc10e8e3aa2eb14d97e7ddc2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 22 Sep 2022 17:35:58 +0100 Subject: [PATCH 179/628] [Style] Minor style fixes to added components --- finn-rtllib/swg/swg_template_default.sv | 30 +++++++++++++++++++ finn-rtllib/swg/swg_template_wrapper.v | 30 +++++++++++++++++++ .../convolutioninputgenerator_rtl.py | 8 +++-- ...est_fpgadataflow_convinputgenerator_rtl.py | 3 +- 4 files changed, 67 insertions(+), 4 deletions(-) diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 0aa309f890..97517438a0 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -1,3 +1,33 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ module $TOP_MODULE_NAME$_controller #( int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index 4411348beb..0cc3579a25 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -1,3 +1,33 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ `timescale 1 ns / 1 ps module $TOP_MODULE_NAME$ ( diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 366dd396d1..8312b975ed 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, Xilinx +# Copyright (C) 2022, Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -59,7 +59,7 @@ class ConvolutionInputGenerator_rtl(HLSCustomOp): """Class that does not correspond to one of the finn-hlslib ConvolutionInputGenerator (sliding window) function variants. Generates an RTL ConvolutionInputGenerator - implementation based on (System-)Verilog templates.""" + implementation based on (System-)Verilog templates, defined in finn-rtllib/swg.""" def __init__(self, onnx_node): super().__init__(onnx_node) @@ -389,7 +389,9 @@ def execute_node(self, context, graph): folded_ishape = self.get_folded_input_shape() if mode == "cppsim": - raise Exception("cppsim not possible for RTL SWG") + raise Exception( + "cppsim not possible for RTL SWG, please set exec_mode to rtlsim" + ) elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 5da1fa6eb1..007360a5fd 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, Xilinx +# Copyright (C) 2022, Advanced Micro Devices, Inc. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -161,6 +161,7 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("flip", [False]) @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow def test_fpgadataflow_slidingwindow_rtl( idt, k, ifm_dim, ifm_ch, stride, dilation, dw, simd, m, parallel_window, flip ): From 77f07339e7beae1b1ac3b713c7d450989097f0d8 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 23 Sep 2022 11:48:10 +0100 Subject: [PATCH 180/628] [CustomOp] Add comments to convinputgen rtl --- .../convolutioninputgenerator_rtl.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 8312b975ed..399b36e150 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -71,7 +71,9 @@ def get_nodeattr_types(self): "IFMDim": ("ints", True, []), # [H, W] = [Y, X] "OFMDim": ("ints", True, []), # [H, W] = [Y, X] "SIMD": ("i", True, 0), + # additional parallelization parameter - not yet implemented "M": ("i", False, 1), + # alternative implementation style - not yet implemented "parallel_window": ("i", False, 0, {0}), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] @@ -90,6 +92,7 @@ def get_nodeattr_types(self): "auto", {"auto", "block", "distributed", "ultra"}, ), + # attribute to save top module name - not user configurable "gen_top_module": ("s", False, ""), } my_attrs.update(super().get_nodeattr_types()) @@ -570,7 +573,7 @@ def prepare_codegen_default(self): code_gen_dict["$TAIL_INCR_H$"] = [str(tail_incr_h)] code_gen_dict["$TAIL_INCR_LAST$"] = [str(tail_incr_last_window)] - # support SIMD = C and k_w = 1 cases + # support SIMD = IFMChannels and k_w = 1 cases # for k = [k_h, k_w] = [1, k_w], no adjustment is needed # for k = [k_h, k_w] = [1, 1], do not use this impl. 
style (mmv_out=K=1) # innermost loop is executed at least once -> adjust if needed @@ -658,16 +661,22 @@ def select_impl_style(self): if self.get_nodeattr("parallel_window"): # mmv_in = M * 1 mmv_out = M * k_h * k_w - assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" + assert ( + ifm_ch == simd + ), "Constraint violated: SIMD must be equal to IFMChannels" else: # mmv_in = 1 mmv_out = 1 - assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide C" + assert ( + ifm_ch % simd == 0 + ), "Constraint violated: SIMD must divide IFMChannels" # choose implementation style if mmv_out > 1 or (k_h == 1 and k_w == 1): impl_style = "parallel" - assert ifm_ch == simd, "Constraint violated: SIMD must be equal to C" + assert ( + ifm_ch == simd + ), "Constraint violated: SIMD must be equal to IFMChannels" else: impl_style = "default" From ee676c7a671e4ff4e2c06cfe6f2e1d0a4cb3623c Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 23 Sep 2022 16:41:40 +0200 Subject: [PATCH 181/628] Add dynamic mode switch --- .../convolutioninputgenerator_rtl.py | 95 ++++++++++--------- ...dataflow_convinputgenerator_rtl_dynamic.py | 14 ++- 2 files changed, 58 insertions(+), 51 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index fab0ce3871..61581be725 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -79,6 +79,9 @@ def get_nodeattr_types(self): "inputDataType": ("s", True, ""), "outputDataType": ("s", True, ""), "depthwise": ("i", False, 0, {0, 1}), + # Enable reprogrammable implementation to change FM dimensions, + # stride, or dilation during runtime + "dynamic_mode": ("i", False, 0, {0, 1}), # FPGA resource type for ConvolutionInputGenerator input buffer # auto -- let Vivado decide # block -- use BRAM @@ -452,11 +455,11 @@ def execute_node(self, context, graph): def prepare_codegen_default(self): # Default implementation style for MMV_out = 1: addressable cyclic buffer # Computing incremental addressing scheme directly.. - template_path = ( - os.environ["FINN_ROOT"] - + "/finn-rtllib/swg/swg_template_default_dynamic.sv" - # TODO: add switch - ) + if self.get_nodeattr("dynamic_mode"): + template_select = "/finn-rtllib/swg/swg_template_default_dynamic.sv" + else: + template_select = "/finn-rtllib/swg/swg_template_default.sv" + template_path = os.environ["FINN_ROOT"] + template_select code_gen_dict = {} ifm_ch = self.get_nodeattr("IFMChannels") @@ -719,11 +722,11 @@ def generate_hdl(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") with open(template_path, "r") as f: template = f.read() - with open( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_wrapper_dynamic.v", - "r" - # TODO: add switch - ) as f: + if self.get_nodeattr("dynamic_mode"): + template_select = "/finn-rtllib/swg/swg_template_wrapper_dynamic.v" + else: + template_select = "/finn-rtllib/swg/swg_template_wrapper.v" + with open(os.environ["FINN_ROOT"] + template_select, "r") as f: template_wrapper = f.read() with open( os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_axilite.v", "r" @@ -749,13 +752,16 @@ def generate_hdl(self): "w", ) as f: f.write(template_wrapper) - with open( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" - ), - "w", - ) as f: - f.write(template_axilite) + + # AXI-Lite reg. 
file component is only needed for dynamic mode + if self.get_nodeattr("dynamic_mode"): + with open( + os.path.join( + code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" + ), + "w", + ) as f: + f.write(template_axilite) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain @@ -776,8 +782,9 @@ def prepare_rtlsim(self): verilog_files = [ self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", - self.get_nodeattr("gen_top_module") + "_axilite.v", ] + if self.get_nodeattr("dynamic_mode"): + verilog_files.append(self.get_nodeattr("gen_top_module") + "_axilite.v") # build the Verilator emu library sim = PyVerilator.build( @@ -795,29 +802,23 @@ def code_generation_ipi(self): """Constructs and returns the TCL for node instantiation in Vivado IPI.""" code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - cmd = [ - "add_files -norecurse %s" - % ( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v" - ) - ), - "add_files -norecurse %s" - % ( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" - ) - ), - "add_files -norecurse %s" - % ( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" - ) - ), - "create_bd_cell -type module -reference %s %s" - % (self.get_nodeattr("gen_top_module"), self.onnx_node.name), + sourcefiles = [ + self.get_nodeattr("gen_top_module") + "_wrapper.v", + self.get_nodeattr("gen_top_module") + "_impl.sv", ] + if self.get_nodeattr("dynamic_mode"): + sourcefiles += [self.get_nodeattr("gen_top_module") + "_axilite.v"] + + sourcefiles = [os.path.join(code_gen_dir, f) for f in sourcefiles] + + cmd = [] + for f in sourcefiles: + cmd += ["add_files -norecurse %s" % (f)] + cmd += [ + "create_bd_cell -type module -reference %s %s" + % (self.get_nodeattr("gen_top_module"), self.onnx_node.name) + ] return cmd def get_verilog_top_module_intf_names(self): @@ -837,15 +838,18 @@ def get_verilog_top_module_intf_names(self): intf_names["s_axis"] = [("in0_" + sname, self.get_instream_width_padded())] intf_names["m_axis"] = [("out_" + sname, self.get_outstream_width_padded())] intf_names["aximm"] = [] - intf_names["axilite"] = ["s_axi_cfg"] + if self.get_nodeattr("dynamic_mode"): + intf_names["axilite"] = ["s_axi_cfg"] + else: + intf_names["axilite"] = [] return intf_names def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): """Returns a configuration dict to re-configure FM dimension during runtime. Stride and dilation can also be changed. Certain restrictions apply (e.g. component must be synthesized for largest buffer size).""" - # TODO: Make a standalone version to call from Python driver? 
- # TODO: Add more safeguards + # NOTE: For better driver integration, this functionality could be packaged + # as a standalone function in the future k = self.get_nodeattr("ConvKernelDim") if stride is None: @@ -861,11 +865,16 @@ def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) ofm_dim = [ofm_dim_h, ofm_dim_w] - # update attributes + # update attributes and perform sanity check + original_buffer_depth = self.get_buffer_depth() self.set_nodeattr("IFMDim", ifm_dim) self.set_nodeattr("OFMDim", ofm_dim) self.set_nodeattr("Stride", stride) self.set_nodeattr("Dilation", dilation) + assert ( + self.get_buffer_depth() <= original_buffer_depth + ), """Error: requested + dynamic configuration does not fit in generated buffer implementation.""" # (re-)call codegen and extract new values # each setting is mapped to an axi-lite register address diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 9120aa91d0..f2d51d9ea6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -120,6 +120,7 @@ def make_single_slidingwindow_modelwrapper( inputDataType=idt.name, outputDataType=odt.name, depthwise=dw, + dynamic_mode=1, ) graph = helper.make_graph( nodes=[SlidingWindow_node], @@ -235,16 +236,13 @@ def config_hook(config): def write_swg_config(sim): axi_name = "s_axi_cfg_0_" - # Write config registers to the SWG, dict defines (addr, value) tuples + # 1. Write config registers to the SWG, dict defines (addr, value) tuples for config_entry in config.values(): axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) - axilite_write( - sim, 0, 1, basename=axi_name - ) # 1. set cfg_valid flag (>= 1 cycle) - reset_rtlsim(sim) # 2. reset SWG (>= 1 cycle) - axilite_write( - sim, 0, 0, basename=axi_name - ) # 3. unset cfg_valid flag (not required) + # 2. Set cfg_valid flag (>= 1 cycle) + axilite_write(sim, 0, 1, basename=axi_name) + # 3. Reset component (>= 1 cycle) + reset_rtlsim(sim) return write_swg_config From edf0a91e6302f91b220c4d4733a65678a4c3c206 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 23 Sep 2022 15:54:54 +0100 Subject: [PATCH 182/628] [customOp] Fix typo in eltwise customOp --- src/finn/custom_op/fpgadataflow/eltwise.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index d8c55b2283..a29e871fab 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -151,7 +151,7 @@ def verify_node(self): info_messages.append("All necessary attributes exist") except Exception: info_messages.append( - """The required LabelSelect_Batch attributes do not exist.""" + """The required StreamingEltwise attributes do not exist.""" ) return info_messages From 53a2ba12435bfa15e4da7fe86fec44f8396bcfb5 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Fri, 23 Sep 2022 16:17:32 +0100 Subject: [PATCH 183/628] [MakeScaleResizeNHWC] Update on test script to correct errors. 
Now also verifying that the transform actually happened rather than just fonctionnality --- .../streamline/test_scale_resize_nhwc.py | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/transformation/streamline/test_scale_resize_nhwc.py b/tests/transformation/streamline/test_scale_resize_nhwc.py index 06faa83719..f10930f4e7 100644 --- a/tests/transformation/streamline/test_scale_resize_nhwc.py +++ b/tests/transformation/streamline/test_scale_resize_nhwc.py @@ -3,9 +3,11 @@ import numpy as np import onnx import onnx.helper as oh +import qonnx.core.data_layout as DataLayout from onnx import TensorProto from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper +from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.basic import gen_finn_dt_tensor @@ -61,7 +63,9 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) + model.set_tensor_layout("inp", DataLayout.NCHW) model = model.transform(InferShapes()) + model = model.transform(InferDataLayouts()) return model @@ -113,8 +117,10 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) + model.set_tensor_layout("inp", DataLayout.NHWC) model = model.transform(InferShapes()) + model = model.transform(InferDataLayouts()) return model @@ -160,7 +166,7 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): transpose_node2 = onnx.helper.make_node( "Transpose", - inputs=["out_up"], + inputs=["outp_up"], outputs=["outp"], name="Transpose2", perm=[0, 2, 3, 1], @@ -178,12 +184,25 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) + model.set_tensor_layout("inp", DataLayout.NHWC) model = model.transform(InferShapes()) + model = model.transform(InferDataLayouts()) return model +def check_transform(model): + graph = model.graph + node_ind = 0 + for n in graph.node: + node_ind += 1 + if n.op_type == "Upsample" or n.op_type == "Resize": + if model.get_tensor_layout(n.output[0]) == DataLayout.NHWC: + return True + return False + + @pytest.mark.streamline # input dimension @pytest.mark.parametrize("ifm_dim", [[2**i, 2**i] for i in range(3, 6)]) @@ -222,6 +241,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # transform Resize into ResizeNHWC resize_model1 = resize_model1.transform(MakeScaleResizeNHWC()) + resize_model1 = resize_model1.transform(InferDataLayouts()) # execute transformed model output_node_name1 = resize_model1.graph.output[0].name @@ -232,6 +252,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # compare outputs assert (expected1 == output1).all() + assert check_transform(resize_model1) # execute second model output_dict2 = oxe.execute_onnx(resize_model2, input_dict_nhwc) @@ -239,6 +260,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # transform Resize into ResizeNHWC resize_model2 = resize_model2.transform(MakeScaleResizeNHWC()) + resize_model2 = resize_model2.transform(InferDataLayouts()) # execute transformed model output_node_name2 = resize_model2.graph.output[0].name @@ -249,6 +271,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # compare outputs 
assert (expected2 == output2).all() + assert check_transform(resize_model2) # execute third model output_dict3 = oxe.execute_onnx(resize_model3, input_dict_nhwc) @@ -256,6 +279,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # transform Resize into ResizeNHWC resize_model3 = resize_model3.transform(MakeScaleResizeNHWC()) + resize_model3 = resize_model3.transform(InferDataLayouts()) # execute transformed model output_node_name3 = resize_model3.graph.output[0].name @@ -266,3 +290,4 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # compare outputs assert (expected3 == output3).all() + assert check_transform(resize_model3) From 49c896884ab947f3064fb43e55b034b08a7dabfb Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 26 Sep 2022 11:42:53 +0100 Subject: [PATCH 184/628] [Deps] Update finn-hlslib commit & fix typo --- fetch-repos.sh | 2 +- tests/fpgadataflow/test_fpgadataflow_vvau.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index ec73772927..88c67e8bed 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="36e6c8cb1019ba0307e1886011692a58e02f3bfa" +HLSLIB_COMMIT="bb43a97f799b63f536885919f03ecdfcfb04f405" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index 5adc9ef3db..03ddb12863 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -153,7 +153,7 @@ def prepare_inputs(input_tensor): return {"inp": input_tensor} -# mem_mode: const or decoupled +# input datatype @pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]]) # weight datatype @pytest.mark.parametrize("wdt", [DataType["INT4"]]) From ea142d6580d532c902ee994ae267877efdb2f220 Mon Sep 17 00:00:00 2001 From: patrickg <44997541+patrickgeel@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:58:08 +0200 Subject: [PATCH 185/628] remove unnecessary comments --- src/finn/util/platforms.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/finn/util/platforms.py b/src/finn/util/platforms.py index ad8604f46e..134f7478bd 100644 --- a/src/finn/util/platforms.py +++ b/src/finn/util/platforms.py @@ -467,7 +467,6 @@ def compute_resources(self): ] -# TODO: ADD KV260 to platform list platforms = dict() platforms["U50"] = Alveo_NxU50_Platform platforms["U200"] = Alveo_NxU200_Platform @@ -478,5 +477,4 @@ def compute_resources(self): platforms["Ultra96"] = ZU3EG_Platform platforms["ZCU104"] = ZU7EV_Platform platforms["ZCU102"] = ZU9EG_Platform -platforms["ZCU111"] = ZU28DR_Platform -# platforms["kv260_som"] = # TODO kv260 platform... 
xck26_ \ No newline at end of file +platforms["ZCU111"] = ZU28DR_Platform \ No newline at end of file From dc830f118d82e7122e8f47dc21c9e6b26611cd66 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 26 Sep 2022 16:16:46 +0200 Subject: [PATCH 186/628] [FIFO] SystemVerilog -> Verilog fix in Q_srl.v --- finn-rtllib/memstream/hdl/Q_srl.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v index 3c884770e0..2f3d813504 100644 --- a/finn-rtllib/memstream/hdl/Q_srl.v +++ b/finn-rtllib/memstream/hdl/Q_srl.v @@ -143,7 +143,7 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); addr_full <= 0; o_v_reg <= 0; i_b_reg <= 1; - maxcount_reg <= '0; + maxcount_reg <= 0; end else begin state <= state_; From 584f0fb5d07e2719d03bbfbbc8f34bb6d35a12ce Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 26 Sep 2022 16:43:06 +0100 Subject: [PATCH 187/628] [customOp] Temp. reverse changes to checksum axilite interface name --- src/finn/custom_op/fpgadataflow/checksum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index 7510e10133..bde285eb0d 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -329,5 +329,5 @@ def pragmas(self): def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() # expose axilite interface - intf_names["axilite"] = ["s_axilite_checksum"] + intf_names["axilite"] = ["s_axi_checksum"] return intf_names From 22fd71106b0ce21fb4805ba2ccc1c0803cb65fde Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 26 Sep 2022 16:56:20 +0100 Subject: [PATCH 188/628] [Pre-commit] Run pre-commit on changed files --- src/finn/transformation/fpgadataflow/templates.py | 2 +- src/finn/util/platforms.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index 0870fa40c8..f52bad0ffb 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -128,7 +128,7 @@ set_property board_part www.digilentinc.com:pynq-z1:part0:1.0 [current_project] } elseif {$BOARD == "KV260_SOM"} { set ZYNQ_TYPE "zynq_us+" - set_property board_part xilinx.com:kv260_som:part0:1.3 [current_project] + set_property board_part xilinx.com:kv260_som:part0:1.3 [current_project] } else { puts "Unrecognized board" } diff --git a/src/finn/util/platforms.py b/src/finn/util/platforms.py index 134f7478bd..8212cb5712 100644 --- a/src/finn/util/platforms.py +++ b/src/finn/util/platforms.py @@ -477,4 +477,4 @@ def compute_resources(self): platforms["Ultra96"] = ZU3EG_Platform platforms["ZCU104"] = ZU7EV_Platform platforms["ZCU102"] = ZU9EG_Platform -platforms["ZCU111"] = ZU28DR_Platform \ No newline at end of file +platforms["ZCU111"] = ZU28DR_Platform From a8d3fb6cd503721cdb444cefe07a7aca9f97477e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 26 Sep 2022 16:16:46 +0200 Subject: [PATCH 189/628] [FIFO] SystemVerilog -> Verilog fix in Q_srl.v --- finn-rtllib/memstream/hdl/Q_srl.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v index 3c884770e0..2f3d813504 100644 --- a/finn-rtllib/memstream/hdl/Q_srl.v +++ b/finn-rtllib/memstream/hdl/Q_srl.v @@ -143,7 +143,7 @@ module 
Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); addr_full <= 0; o_v_reg <= 0; i_b_reg <= 1; - maxcount_reg <= '0; + maxcount_reg <= 0; end else begin state <= state_; From 8bfedbe8f1a686c261a3c2ad570a2c3ed89472eb Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 27 Sep 2022 18:33:12 +0200 Subject: [PATCH 190/628] [FIFO] C++ template for faster rtlsim with verilator --- src/finn/qnn-data/cpp/verilator_fifosim.cpp | 165 ++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 src/finn/qnn-data/cpp/verilator_fifosim.cpp diff --git a/src/finn/qnn-data/cpp/verilator_fifosim.cpp b/src/finn/qnn-data/cpp/verilator_fifosim.cpp new file mode 100644 index 0000000000..535a81005c --- /dev/null +++ b/src/finn/qnn-data/cpp/verilator_fifosim.cpp @@ -0,0 +1,165 @@ +/* Copyright (C) 2022, Advanced Micro Devices, Inc. +All rights reserved. +# +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +# +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +# +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +# +* Neither the name of FINN nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. +# +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + +/* +verilator -Wno-fatal -Mdir . 
-y /data/finn/sickag/vivado_stitch_proj_k9817tc7 --CFLAGS "--std=c++11" -O3 --x-assign fast --x-initial fast --noassert --cc finn_design_wrapper.v --top-module finn_design_wrapper --exe sim-semseg.cpp --threads 4 + +make OPT_FAST="-O3 -march=native" -j4 -C $(pwd) -f Vfinn_design_wrapper.mk Vfinn_design_wrapper +*/ + +#include <iostream> +#include <fstream> +#include <chrono> +#include "verilated.h" +#include "verilated_vcd_c.h" +#include "Vfinn_design_wrapper.h" + +using namespace std; + +Vfinn_design_wrapper * top; + +// code taken from pyverilator_wrapper.cpp generated by PyVerilator + +// this is required by verilator for verilog designs using $time +// main_time is incremented in eval +double main_time = 0; + +double sc_time_stamp() { +return main_time; +} +// function definitions +// helper functions for basic verilator tasks +extern "C" { //Open an extern C closed below +Vfinn_design_wrapper* construct() { + Verilated::commandArgs(0, (const char**) nullptr); + Vfinn_design_wrapper* top = new Vfinn_design_wrapper(); + return top; +} +int eval(Vfinn_design_wrapper* top) { + top->eval(); + main_time++; + return 0; +} +int destruct(Vfinn_design_wrapper* top) { + if (top != nullptr) { + delete top; + top = nullptr; + } + return 0; +} +} + +// end of code taken from pyverilator_wrapper.cpp generated by PyVerilator + +inline void toggle_clk() { + eval(top); + top->ap_clk = 1; + eval(top); + top->ap_clk = 0; +} + +void reset() { + top->ap_rst_n = 0; + for(unsigned i = 0; i < 10; i++) { + toggle_clk(); + } + top->ap_rst_n = 1; +} + +int main(int argc, char *argv[]) { + top = construct(); + + unsigned n_iters_per_input = @ITERS_PER_INPUT@; + unsigned n_iters_per_output = @ITERS_PER_OUTPUT@; + unsigned n_inputs = @N_INPUTS@; + unsigned max_iters = @MAX_ITERS@; + + reset(); + + top->m_axis_0_tready = 1; + top->s_axis_0_tvalid = 1; + + unsigned n_in_txns = 0, n_out_txns = 0, iters = 0; + unsigned latency = 0; + + bool exit_criterion = false; + + cout << "Simulation starting" << endl; + cout << "Number of inputs to write " << n_iters_per_input * n_inputs << endl; + cout << "Number of outputs to expect " << n_iters_per_output * n_inputs << endl; + cout << "Timeout clock cycles " << max_iters << endl; + + chrono::steady_clock::time_point begin = chrono::steady_clock::now(); + + while(!exit_criterion) { + if(top->s_axis_0_tready == 1) { + n_in_txns++; + if(n_in_txns == n_iters_per_input * n_inputs) { + top->s_axis_0_tvalid = 0; + cout << "All inputs written at cycle " << iters << endl; + } + } + if(top->m_axis_0_tvalid == 1) { + n_out_txns++; + if(n_out_txns == n_iters_per_output) { + latency = iters; + } + } + toggle_clk(); + iters++; + if(iters % 1000 == 0) { + cout << "Elapsed iters " << iters << " inps " << n_in_txns << " outs " << n_out_txns << endl; + chrono::steady_clock::time_point end = chrono::steady_clock::now(); + cout << "Elapsed since last report = " << chrono::duration_cast<chrono::seconds>(end - begin).count() << "[s]" << endl; + begin = end; + } + + exit_criterion = ((n_in_txns >= n_iters_per_input * n_inputs) && (n_out_txns >= n_iters_per_output * n_inputs)) || (iters > max_iters); + } + + cout << "Simulation finished" << endl; + cout << "Number of inputs consumed " << n_in_txns << endl; + cout << "Number of outputs produced " << n_out_txns << endl; + cout << "Number of clock cycles " << iters << endl; + + ofstream results_file; + results_file.open("results.txt", ios::out | ios::trunc); + results_file << "N_IN_TXNS" << "\t" << n_in_txns << endl; + results_file << "N_OUT_TXNS" << "\t" << n_out_txns << endl; 
results_file << "N_CYCLES" << "\t" << iters << endl; + results_file << "LATENCY" << "\t" << latency << endl; +@FIFO_DEPTH_LOGGING@ + results_file.close(); + + + + destruct(top); + + return 0; +} From f9d86f40aed7807db88062719e5efdddbfad7016 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 27 Sep 2022 18:34:11 +0200 Subject: [PATCH 191/628] [FIFO] add new verilator_fifosim util function to call C++ rtlsim --- src/finn/util/pyverilator.py | 168 ++++++++++++++++++++++++++++++++++- 1 file changed, 167 insertions(+), 1 deletion(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index f6a51da8e4..f66458b9ee 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,10 +26,176 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import pkg_resources as pk + +import numpy as np import os +import shutil from pyverilator import PyVerilator +from qonnx.custom_op.registry import getCustomOp + +from finn.util.basic import ( + get_rtlsim_trace_depth, + launch_process_helper, + make_build_dir, +) + + +def make_single_verilog_file(model): + """Dump all Verilog code used by stitched IP into a single file. + This is because large models with many files require a verilator + command line too long for bash on most systems""" + + vivado_stitch_proj_dir = model.get_metadata_prop("vivado_stitch_proj") + with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f: + all_verilog_srcs = f.read().split() + + def file_to_dir(x): + return os.path.dirname(os.path.realpath(x)) + + def file_to_basename(x): + return os.path.basename(os.path.realpath(x)) + + top_module_file_name = file_to_basename(model.get_metadata_prop("wrapper_filename")) + + # dump all Verilog code to a single file + # this is because large models with many files require + # a verilator command line too long for bash on most systems + # NOTE: there are duplicates in this list, and some files + # are identical but in multiple directories (regslice_core.v) + + # remove duplicates from list by doing list -> set -> list + all_verilog_files = list( + set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + ) + + # remove all but one instances of regslice_core.v + filtered_verilog_files = [] + remove_entry = False + for vfile in all_verilog_files: + if "regslice_core" in vfile: + if not remove_entry: + filtered_verilog_files.append(vfile) + remove_entry = True + else: + filtered_verilog_files.append(vfile) + + # concatenate all verilog code into a single file + with open(vivado_stitch_proj_dir + "/" + top_module_file_name, "w") as wf: + for vfile in filtered_verilog_files: + with open(vfile) as rf: + wf.write("//Added from " + vfile + "\n\n") + wf.write(rf.read()) + return vivado_stitch_proj_dir + + +def verilator_fifosim(model, n_inputs, max_iters=100000000): + """Create a Verilator model of stitched IP and use a simple C++ + driver to drive the input stream. 
Useful for FIFO sizing, latency + and throughput measurement.""" + + vivado_stitch_proj_dir = make_single_verilog_file(model) + build_dir = make_build_dir("verilator_fifosim_") + fifosim_cpp_fname = pk.resource_filename( + "finn.qnn-data", "cpp/verilator_fifosim.cpp" + ) + with open(fifosim_cpp_fname, "r") as f: + fifosim_cpp_template = f.read() + assert len(model.graph.input) == 1, "Only a single input stream is supported" + assert len(model.graph.output) == 1, "Only a single output stream is supported" + iname = model.graph.input[0].name + first_node = model.find_consumer(iname) + oname = model.graph.output[0].name + last_node = model.find_producer(oname) + assert (first_node is not None) and ( + last_node is not None + ), "Failed to find first/last nodes" + fnode_inst = getCustomOp(first_node) + lnode_inst = getCustomOp(last_node) + ishape_folded = fnode_inst.get_folded_input_shape() + oshape_folded = lnode_inst.get_folded_output_shape() + + fifo_log = [] + fifo_log_templ = ' results_file << "maxcount%s" << "\\t" ' + fifo_log_templ += "<< to_string(top->maxcount%s) << endl;" + fifo_nodes = model.get_nodes_by_op_type("StreamingFIFO") + fifo_ind = 0 + for fifo_node in fifo_nodes: + fifo_node = getCustomOp(fifo_node) + if fifo_node.get_nodeattr("depth_monitor") == 1: + suffix = "" if fifo_ind == 0 else "_%d" % fifo_ind + fifo_log.append(fifo_log_templ % (suffix, suffix)) + fifo_ind += 1 + fifo_log = "\n".join(fifo_log) + + template_dict = { + "ITERS_PER_INPUT": np.prod(ishape_folded[:-1]), + "ITERS_PER_OUTPUT": np.prod(oshape_folded[:-1]), + "N_INPUTS": n_inputs, + "MAX_ITERS": max_iters, + "FIFO_DEPTH_LOGGING": fifo_log, + } + + for (key, val) in template_dict.items(): + fifosim_cpp_template = fifosim_cpp_template.replace(f"@{key}@", str(val)) + + with open(build_dir + "/verilator_fifosim.cpp", "w") as f: + f.write(fifosim_cpp_template) + + which_verilator = shutil.which("verilator") + if which_verilator is None: + raise Exception("'verilator' executable not found") + + verilator_args = [ + "perl", + which_verilator, + "-Wno-fatal", + "-Mdir", + build_dir, + "-y", + vivado_stitch_proj_dir, + "--CFLAGS", + "--std=c++11", + "-O3", + "--x-assign", + "fast", + "--x-initial", + "fast", + "--noassert", + "--cc", + "finn_design_wrapper.v", + "--top-module", + "finn_design_wrapper", + "--exe", + "verilator_fifosim.cpp", + "--threads", + "4", + ] + launch_process_helper(verilator_args, cwd=build_dir) + + proc_env = os.environ.copy() + proc_env["OPT_FAST"] = "-O3 -march=native" + make_args = [ + "make", + "-j4", + "-C", + build_dir, + "-f", + "Vfinn_design_wrapper.mk", + "Vfinn_design_wrapper", + ] + launch_process_helper(make_args, proc_env=proc_env, cwd=build_dir) + + sim_launch_args = ["./Vfinn_design_wrapper"] + launch_process_helper(sim_launch_args, cwd=build_dir) -from finn.util.basic import get_rtlsim_trace_depth, make_build_dir + with open(build_dir + "/results.txt", "r") as f: + results = f.read().strip().split("\n") + ret_dict = {} + for result_line in results: + key, val = result_line.split("\t") + ret_dict[key] = int(val) + return ret_dict def pyverilate_stitched_ip( From 5605792645c15ee7d39be187f6eacf74c319d812 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 27 Sep 2022 18:35:33 +0200 Subject: [PATCH 192/628] [FIFO] add new build option to enable C++-based FIFO sizing --- src/finn/builder/build_dataflow_config.py | 4 + src/finn/builder/build_dataflow_steps.py | 1 + .../fpgadataflow/set_fifo_depths.py | 108 +++++++++++------- 3 files changed, 69 insertions(+), 44 deletions(-) 
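Usage note (illustrative, not part of the patch series): the verilator_fifosim helper introduced in PATCH 191 and wired into FIFO sizing by PATCH 192 below can also be called directly. A minimal sketch, assuming `model` is a ModelWrapper that has already been through CreateStitchedIP and that a single input (n_inputs=1) is enough to fill the pipeline; the result keys mirror the results.txt fields written by the C++ driver above:

from finn.util.pyverilator import verilator_fifosim

# model: stitched-IP ModelWrapper prepared beforehand (assumed to exist here)
results = verilator_fifosim(model, n_inputs=1)

print("cycles:", results["N_CYCLES"], "latency:", results["LATENCY"])
# StreamingFIFO nodes with depth_monitor=1 report their peak fill level
for key, val in results.items():
    if key.startswith("maxcount"):
        print(key, "->", val)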
diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 51e7516101..3821442709 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -259,6 +259,10 @@ class DataflowBuildConfig: AutoFIFOSizingMethod ] = AutoFIFOSizingMethod.LARGEFIFO_RTLSIM + #: Avoid using C++ rtlsim for auto FIFO sizing and rtlsim throughput test + #: if set to True, always using Python instead + force_python_rtlsim: Optional[bool] = False + #: Memory resource type for large FIFOs #: Only relevant when `auto_fifo_depths = True` large_fifo_mem_style: Optional[LargeFIFOMemStyle] = LargeFIFOMemStyle.AUTO diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index ad7e1da054..790145054d 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -487,6 +487,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period(), vivado_ram_style=cfg.large_fifo_mem_style, + force_python_sim=cfg.force_python_rtlsim, ) ) else: diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index f715aaeffb..948e87511d 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -42,7 +42,7 @@ from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.util.fpgadataflow import is_fpgadataflow_node -from finn.util.pyverilator import pyverilate_stitched_ip +from finn.util.pyverilator import pyverilate_stitched_ip, verilator_fifosim def reset_implementation(node): @@ -227,6 +227,7 @@ def __init__( max_depth=None, swg_exception=True, vivado_ram_style="auto", + force_python_sim=False, ): super().__init__() self.fpgapart = fpgapart @@ -235,6 +236,7 @@ def __init__( self.max_depth = max_depth self.swg_exception = swg_exception self.vivado_ram_style = vivado_ram_style + self.force_python_sim = force_python_sim def apply(self, model): # these optypes may potentially use external weights @@ -306,57 +308,75 @@ def apply(self, model): model = model.transform(CreateStitchedIP(self.fpgapart, self.clk_ns)) model.set_metadata_prop("exec_mode", "rtlsim") - # calculate input frequency (number of cycles for each input word) - first_node = getCustomOp(model.graph.node[0]) - ncycles_per_input = max( - 1, - int( - math.ceil( - perf["max_cycles"] - / ( - np.prod(first_node.get_folded_input_shape()) - / first_node.get_folded_input_shape()[-1] + if self.force_python_sim: + # do rtlsim in Python for FIFO sizing + # calculate input frequency (number of cycles for each input word) + first_node = getCustomOp(model.graph.node[0]) + ncycles_per_input = max( + 1, + int( + math.ceil( + perf["max_cycles"] + / ( + np.prod(first_node.get_folded_input_shape()) + / first_node.get_folded_input_shape()[-1] + ) ) - ) - ), - ) + ), + ) - # set sufficiently large threshold for 1 image to fully execute and exit - ncycles = int(latency + max_cycles) + # set sufficiently large threshold for 1 image to fully execute and exit + ncycles = int(latency + max_cycles) - # prepare pyverilator model - sim = pyverilate_stitched_ip(model) + # prepare pyverilator model + sim = pyverilate_stitched_ip(model) - reset_rtlsim(sim) - toggle_clk(sim) + reset_rtlsim(sim) + toggle_clk(sim) - # set all input valids to 0 and output 
readies to 1 - # set input data to some constant - set_signal(sim, "tvalid", 0) - set_signal(sim, "tready", 1) - set_signal(sim, "tdata", 0) + # set all input valids to 0 and output readies to 1 + # set input data to some constant + set_signal(sim, "tvalid", 0) + set_signal(sim, "tready", 1) + set_signal(sim, "tdata", 0) + + output_detected = False + while ncycles > 0: + toggle_clk(sim) + # set/unset valids + if ncycles % ncycles_per_input == 0: + set_signal(sim, "tvalid", 1) + else: + set_signal(sim, "tvalid", 0) - output_detected = False - while ncycles > 0: - toggle_clk(sim) - # set/unset valids - if ncycles % ncycles_per_input == 0: - set_signal(sim, "tvalid", 1) - else: - set_signal(sim, "tvalid", 0) + # since latency estimation is very pessimistic, detect first output + # and fast-forward the sim + if get_signal(sim, "tvalid") != 0 and not output_detected: + ncycles = max_cycles + output_detected = True + else: + ncycles = ncycles - 1 - # since latency estimation is very pessimistic, detect first output - # and fast-forward the sim - if get_signal(sim, "tvalid") != 0 and not output_detected: - ncycles = max_cycles - output_detected = True + if not output_detected: + warnings.warn( + "No output detected, calculated FIFO depths may not be correct" + ) + else: + # do rtlsim in C++ for FIFO sizing + # determine # inputs for FIFO sizing according to topology type + swg_nodes = [ + x for x in model.graph.node if "ConvolutionInputGenerator" in x.op_type + ] + if len(swg_nodes) == 0: + # MLP, no layer overlap + # assuming half the nodes are now FIFOs, use half the # of + # nodes as # inputs to drive the imulation + n_inputs = int(len(model.graph.node) / 2) else: - ncycles = ncycles - 1 - - if not output_detected: - warnings.warn( - "No output detected, calculated FIFO depths may not be correct" - ) + # convnet, single input is typically enough to fill entire + # layer pipeline due to overlaps + n_inputs = 1 + sim = verilator_fifosim(model, n_inputs) for ind, node in enumerate(fifo_nodes): maxcount_name = "maxcount_%d" % ind From 168c2a0588b65ad17eb7a65f3781cfdf08a5bbdf Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 27 Sep 2022 18:36:48 +0200 Subject: [PATCH 193/628] [Test] parametrize FIFO sizing test for method --- tests/fpgadataflow/test_fifosizing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 37efc5124b..28b2c4ac0f 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -49,12 +49,13 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.slow @pytest.mark.vivado -def test_fifosizing_linear(): +@pytest.mark.parametrize("method", ["largefifo_rtlsim", "characterize"]) +def test_fifosizing_linear(method): tmp_output_dir = fetch_test_model("tfc") cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=True, - auto_fifo_strategy="characterize", + auto_fifo_strategy=method, target_fps=10000, synth_clk_period_ns=10.0, board="Pynq-Z1", From 9c6651f7c8e9c987996e42da366bfdbbebb71deb Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 28 Sep 2022 21:57:28 +0200 Subject: [PATCH 194/628] Revert "Revert "Merge branch 'feature/rtlsim-vivado-ip' into feature/new-fifo-sizing-residual"" This reverts commit fc9e880a646ed19e35e2cf32f4a8d085453a4561. 
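Usage note (illustrative, not part of the patch): the FIFO-sizing options introduced in PATCH 192/193 and the rtlsim_use_vivado_comps switch added by this patch are all driven through DataflowBuildConfig. A minimal sketch, following the builder API as used by test_fifosizing.py above; output_dir and generate_outputs are placeholder choices:

import finn.builder.build_dataflow_config as build_cfg

cfg = build_cfg.DataflowBuildConfig(
    output_dir="output_example",            # placeholder
    board="Pynq-Z1",
    synth_clk_period_ns=10.0,
    target_fps=10000,
    auto_fifo_depths=True,
    auto_fifo_strategy="largefifo_rtlsim",  # rtlsim-based sizing
    force_python_rtlsim=False,              # False: use the faster C++ rtlsim path
    rtlsim_use_vivado_comps=True,           # keep impl_style=vivado FIFOs/DWCs for rtlsim
    generate_outputs=[build_cfg.DataflowOutputType.STITCHED_IP],
)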
--- docker/Dockerfile.finn | 2 +- fetch-repos.sh | 2 +- src/finn/builder/build_dataflow_config.py | 4 + src/finn/builder/build_dataflow_steps.py | 70 ++-- .../verilog/custom_axis_infrastructure.vh | 346 ++++++++++++++++++ src/finn/util/pyverilator.py | 47 ++- 6 files changed, 430 insertions(+), 41 deletions(-) create mode 100644 src/finn/qnn-data/verilog/custom_axis_infrastructure.vh diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 9c18c03d7b..b3c669ec10 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -65,7 +65,7 @@ RUN locale-gen "en_US.UTF-8" RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev RUN git clone https://github.com/verilator/verilator RUN cd verilator && \ - git checkout v4.012 && \ + git checkout v4.224 && \ autoconf && \ ./configure && \ make -j4 && \ diff --git a/fetch-repos.sh b/fetch-repos.sh index 60ea4eb307..0026e750b5 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -30,7 +30,7 @@ QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" -PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" +PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 3821442709..8d46b9e7e6 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -333,6 +333,10 @@ class DataflowBuildConfig: #: Override the number of inputs for rtlsim performance measurement. rtlsim_batch_size: Optional[int] = 1 + #: If set to True, FIFOs and DWCs with impl_style=vivado will be kept during + #: rtlsim, otherwise they will be replaced by HLS implementations. 
+ rtlsim_use_vivado_comps: Optional[bool] = True + def _resolve_hls_clk_period(self): if self.hls_clk_period_ns is None: # use same clk for synth and hls if not explicitly specified diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 790145054d..f196ed6331 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -167,40 +167,44 @@ def verify_step( def prepare_for_stitched_ip_rtlsim(verify_model, cfg): - need_restitch = False - # rtlsim only supports certain impl_style for some nodes - # StreamingFIFO must have impl_style=rtl - for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): - inst = getCustomOp(fifo_layer) - if inst.get_nodeattr("impl_style") != "rtl": - inst.set_nodeattr("impl_style", "rtl") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # StreamingDataWidthConverter must have impl_style=hls - for dwc_layer in verify_model.get_nodes_by_op_type( - "StreamingDataWidthConverter_Batch" - ): - inst = getCustomOp(dwc_layer) - if inst.get_nodeattr("impl_style") != "hls": - inst.set_nodeattr("impl_style", "hls") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # if we've made alterations to the model, need to do some re-prep - if need_restitch: - print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") - verify_model = verify_model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) - verify_model = verify_model.transform(HLSSynthIP()) - verify_model = verify_model.transform( - CreateStitchedIP( - cfg._resolve_fpga_part(), - cfg.synth_clk_period_ns, - vitis=False, + if not cfg.rtlsim_use_vivado_comps: + need_restitch = False + # switch impl_style=vivado components to rtl/hls + # StreamingFIFO must have impl_style=rtl + for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): + inst = getCustomOp(fifo_layer) + if inst.get_nodeattr("impl_style") != "rtl": + inst.set_nodeattr("impl_style", "rtl") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # StreamingDataWidthConverter must have impl_style=hls + for dwc_layer in verify_model.get_nodes_by_op_type( + "StreamingDataWidthConverter_Batch" + ): + inst = getCustomOp(dwc_layer) + if inst.get_nodeattr("impl_style") != "hls": + inst.set_nodeattr("impl_style", "hls") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # if we've made alterations to the model, need to do some re-prep + if need_restitch: + print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") + verify_model = verify_model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) ) - ) + verify_model = verify_model.transform(HLSSynthIP()) + verify_model = verify_model.transform( + CreateStitchedIP( + cfg._resolve_fpga_part(), + cfg.synth_clk_period_ns, + vitis=False, + ) + ) + else: + print("rtlsim_use_vivado_comps is enabled, may yield incorrect results") + # set top-level prop for stitched-ip rtlsim and launch verify_model.set_metadata_prop("exec_mode", "rtlsim") # TODO make configurable diff --git a/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh new file mode 100644 index 0000000000..1c8b6403e8 --- /dev/null +++ b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh @@ -0,0 +1,346 @@ 
+// (c) Copyright 2011-2013 Xilinx, Inc. All rights reserved. +// +// This file contains confidential and proprietary information +// of Xilinx, Inc. and is protected under U.S. and +// international copyright and other intellectual property +// laws. +// +// DISCLAIMER +// This disclaimer is not a license and does not grant any +// rights to the materials distributed herewith. Except as +// otherwise provided in a valid license issued to you by +// Xilinx, and to the maximum extent permitted by applicable +// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND +// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES +// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING +// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- +// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and +// (2) Xilinx shall not be liable (whether in contract or tort, +// including negligence, or under any other theory of +// liability) for any loss or damage of any kind or nature +// related to, arising under or in connection with these +// materials, including for any direct, or any indirect, +// special, incidental, or consequential loss or damage +// (including loss of data, profits, goodwill, or any type of +// loss or damage suffered as a result of any action brought +// by a third party) even if such damage or loss was +// reasonably foreseeable or Xilinx had been advised of the +// possibility of the same. +// +// CRITICAL APPLICATIONS +// Xilinx products are not designed or intended to be fail- +// safe, or for use in any application requiring fail-safe +// performance, such as life-support or safety devices or +// systems, Class III medical devices, nuclear facilities, +// applications related to the deployment of airbags, or any +// other applications that could lead to death, personal +// injury, or severe property or environmental damage +// (individually and collectively, "Critical +// Applications"). Customer assumes the sole risk and +// liability of any use of Xilinx products in Critical +// Applications, subject only to applicable laws and +// regulations governing limitations on product liability. +// +// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS +// PART OF THIS FILE AT ALL TIMES. 
+//----------------------------------------------------------------------------- +// +// Generic Functions used by AXIS-Interconnect and Infrastrucutre Modules +// +// Verilog-standard: Verilog 2001 +//-------------------------------------------------------------------------- +// Global Parameters: +// +// Functions: +// f_clogb2 +// f_gcd +// f_lcm +// f_get_tdata_indx +// f_get_tstrb_indx +// f_get_tkeep_indx +// f_get_tlast_indx +// f_get_tid_indx +// f_get_tdest_indx +// f_get_tuser_indx +// f_payload_width +// Tasks: +// t_display_tdata_error +//-------------------------------------------------------------------------- +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Global Parameters +/////////////////////////////////////////////////////////////////////////////// +// Define Signal Set indices +localparam G_INDX_SS_TREADY = 0; +localparam G_INDX_SS_TDATA = 1; +localparam G_INDX_SS_TSTRB = 2; +localparam G_INDX_SS_TKEEP = 3; +localparam G_INDX_SS_TLAST = 4; +localparam G_INDX_SS_TID = 5; +localparam G_INDX_SS_TDEST = 6; +localparam G_INDX_SS_TUSER = 7; +localparam G_MASK_SS_TREADY = 32'h1 << G_INDX_SS_TREADY; +localparam G_MASK_SS_TDATA = 32'h1 << G_INDX_SS_TDATA; +localparam G_MASK_SS_TSTRB = 32'h1 << G_INDX_SS_TSTRB; +localparam G_MASK_SS_TKEEP = 32'h1 << G_INDX_SS_TKEEP; +localparam G_MASK_SS_TLAST = 32'h1 << G_INDX_SS_TLAST; +localparam G_MASK_SS_TID = 32'h1 << G_INDX_SS_TID ; +localparam G_MASK_SS_TDEST = 32'h1 << G_INDX_SS_TDEST; +localparam G_MASK_SS_TUSER = 32'h1 << G_INDX_SS_TUSER; + +// Task DRC error levels +localparam G_TASK_SEVERITY_ERR = 2; +localparam G_TASK_SEVERITY_WARNING = 1; +localparam G_TASK_SEVERITY_INFO = 0; + +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Functions +/////////////////////////////////////////////////////////////////////////////// +// ceiling logb2 + function integer f_clogb2 (input integer size); + integer s; + begin + s = size; + s = s - 1; + for (f_clogb2=1; s>1; f_clogb2=f_clogb2+1) + s = s >> 1; + end + endfunction // clogb2 + + // Calculates the Greatest Common Divisor between two integers using the + // euclidean algorithm. + function automatic integer f_gcd ( + input integer a, + input integer b + ); + begin : main + integer A, B, done, swap; + A = a; + B = b; + done = 0; + while(!done) + begin + if (A < B ) begin + swap = A; + A = B; + B = swap; + end else if ( B != 0 ) begin + A = A - B; + end else begin + done = 1; + end + end + + f_gcd = A; + end + endfunction + + + // Calculates the Lowest Common Denominator between two integers + function integer f_lcm ( + input integer a, + input integer b + ); + begin : main + f_lcm = ( a / f_gcd(a, b)) * b; + end + endfunction + + // Returns back the index to the TDATA portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdata_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + f_get_tdata_indx = 0; + end + endfunction + + // Returns back the index to the tstrb portion of TPAYLOAD, returns 0 if the + // signal is not enabled. 
+ function integer f_get_tstrb_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdata_indx(DAW, IDW, DEW, USW, SST); + // If TDATA exists, then add its width to its base to get the tstrb index + f_get_tstrb_indx = SST[G_INDX_SS_TDATA] ? cur_indx + DAW : cur_indx; + end + endfunction + + // Returns back the index to the tkeep portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tkeep_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tstrb_indx(DAW, IDW, DEW, USW, SST); + f_get_tkeep_indx = SST[G_INDX_SS_TSTRB] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tlast portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tlast_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tkeep_indx(DAW, IDW, DEW, USW, SST); + f_get_tlast_indx = SST[G_INDX_SS_TKEEP] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tid portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tid_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tlast_indx(DAW, IDW, DEW, USW, SST); + f_get_tid_indx = SST[G_INDX_SS_TLAST] ? cur_indx + 1 : cur_indx; + end + endfunction + + // Returns back the index to the tdest portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdest_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tid_indx(DAW, IDW, DEW, USW, SST); + f_get_tdest_indx = SST[G_INDX_SS_TID] ? cur_indx + IDW : cur_indx; + end + endfunction + + // Returns back the index to the tuser portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tuser_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdest_indx(DAW, IDW, DEW, USW, SST); + f_get_tuser_indx = SST[G_INDX_SS_TDEST] ? cur_indx + DEW : cur_indx; + end + endfunction + + // Payload is the sum of all the AXIS signals present except for + // TREADY/TVALID + function integer f_payload_width ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tuser_indx(DAW, IDW, DEW, USW, SST); + f_payload_width = SST[G_INDX_SS_TUSER] ? 
cur_indx + USW : cur_indx; + // Ensure that the return value is never less than 1 + f_payload_width = (f_payload_width < 1) ? 1 : f_payload_width; + end + endfunction + + task t_check_tdata_width( + input integer data_width, + input [8*80-1:0] var_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tdata_width + if (data_width%8 != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s (%2d) must be a multiple of 8.", var_name, data_width); + $display(" AXI4-Stream data width is only defined for byte multiples. See the "); + $display(" AMBA4 AXI4-Stream Protocol Specification v1.0 Section 2.1 for more"); + $display(" information."); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask + + task t_check_tuser_width( + input integer tuser_width, + input [8*80-1:0] tuser_name, + input integer tdata_width, + input [8*80-1:0] tdata_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tuser_width + integer tdata_bytes; + tdata_bytes = tdata_width/8; + if ((tuser_width%tdata_bytes) != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s == %2d is not the recommended value of 'an integer ", tuser_name, tuser_width); + $display(" multiple of the width of the interface (%s == %2d) in bytes.' AXI4-Stream", tdata_name, tdata_width); + $display(" TUSER width in this module is only defined when the TUSER is the"); + $display(" recommended value. See the AMBA4 AXI4-Stream Protocol Specification v1.0"); + $display(" Section 2.1, 2.3.3 and 2.8 for more information. 
"); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index f66458b9ee..8b9f88ff01 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -240,14 +240,34 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - all_verilog_files = list( - set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + src_exts = [".v", ".sv"] + + all_verilog_src_files = list( + set( + filter( + lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs + ) + ) + ) + + verilog_header_dir = make_build_dir("pyverilator_vh_") + # use custom version of axis infrastructure vh + custom_vh = pk.resource_filename( + "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" ) + shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") + for fn in all_verilog_srcs: + if fn.endswith(".vh"): + if "axis_infrastructure_v1_1_0.vh" in fn: + # skip, we use a custom version for this file without recursive gcd + continue + else: + shutil.copy(fn, verilog_header_dir) # remove all but one instances of regslice_core.v filtered_verilog_files = [] remove_entry = False - for vfile in all_verilog_files: + for vfile in all_verilog_src_files: if "regslice_core" in vfile: if not remove_entry: filtered_verilog_files.append(vfile) @@ -260,7 +280,12 @@ def file_to_basename(x): for vfile in filtered_verilog_files: with open(vfile) as rf: wf.write("//Added from " + vfile + "\n\n") - wf.write(rf.read()) + lines = rf.read() + for line in lines.split("\n"): + # break down too-long lines, Verilator complains otherwise + if len(line) > 20000: + line = line.replace("&", "\n&") + wf.write("\n" + line) verilator_args = [] # disable common verilator warnings that should be harmless but commonly occur @@ -274,10 +299,20 @@ def file_to_basename(x): # force inlining of all submodules to ensure we can read internal signals properly if read_internal_signals: verilator_args += ["--inline-mult", "0"] + # add defines to make certain XPM src files work with Verilator + verilator_args.append("-DDISABLE_XPM_ASSERTIONS") + verilator_args.append("-DOBSOLETE") + verilator_args.append("-DONESPIN") + verilator_args.append("--bbox-unsup") + vivado_path = os.environ["VIVADO_PATH"] + # additional SystemVerilog modules to make XPMs work with Verilator + xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" + xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" + xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" sim = PyVerilator.build( - top_module_file_name, - verilog_path=[vivado_stitch_proj_dir], + [top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], + verilog_path=[vivado_stitch_proj_dir, verilog_header_dir], build_dir=build_dir, trace_depth=get_rtlsim_trace_depth(), top_module_name=top_module_name, From 04217956f7bb1ed753fa0cd86e372e214b45b359 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 28 Sep 2022 21:59:12 +0200 Subject: [PATCH 195/628] Revert "Revert "Merge branch 'feature/rtlsim-vivado-ip' into feature/new-fifo-sizing-residual"" This reverts commit fc9e880a646ed19e35e2cf32f4a8d085453a4561. 
--- docker/Dockerfile.finn | 2 +- fetch-repos.sh | 2 +- src/finn/builder/build_dataflow_config.py | 4 + src/finn/builder/build_dataflow_steps.py | 70 ++-- .../verilog/custom_axis_infrastructure.vh | 346 ++++++++++++++++++ src/finn/util/pyverilator.py | 50 ++- 6 files changed, 433 insertions(+), 41 deletions(-) create mode 100644 src/finn/qnn-data/verilog/custom_axis_infrastructure.vh diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 9c18c03d7b..b3c669ec10 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -65,7 +65,7 @@ RUN locale-gen "en_US.UTF-8" RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev RUN git clone https://github.com/verilator/verilator RUN cd verilator && \ - git checkout v4.012 && \ + git checkout v4.224 && \ autoconf && \ ./configure && \ make -j4 && \ diff --git a/fetch-repos.sh b/fetch-repos.sh index 60ea4eb307..0026e750b5 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -30,7 +30,7 @@ QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" -PYVERILATOR_COMMIT="64b8294ff1afebb47be76fcad6ae87027e0402c2" +PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="79d7c61fbe318bfcd56e3c35bbfb774995a7870c" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 51e7516101..d98455576b 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -329,6 +329,10 @@ class DataflowBuildConfig: #: Override the number of inputs for rtlsim performance measurement. rtlsim_batch_size: Optional[int] = 1 + #: If set to True, FIFOs and DWCs with impl_style=vivado will be kept during + #: rtlsim, otherwise they will be replaced by HLS implementations. 
+ rtlsim_use_vivado_comps: Optional[bool] = True + def _resolve_hls_clk_period(self): if self.hls_clk_period_ns is None: # use same clk for synth and hls if not explicitly specified diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index ad7e1da054..175863e84d 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -167,40 +167,44 @@ def verify_step( def prepare_for_stitched_ip_rtlsim(verify_model, cfg): - need_restitch = False - # rtlsim only supports certain impl_style for some nodes - # StreamingFIFO must have impl_style=rtl - for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): - inst = getCustomOp(fifo_layer) - if inst.get_nodeattr("impl_style") != "rtl": - inst.set_nodeattr("impl_style", "rtl") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # StreamingDataWidthConverter must have impl_style=hls - for dwc_layer in verify_model.get_nodes_by_op_type( - "StreamingDataWidthConverter_Batch" - ): - inst = getCustomOp(dwc_layer) - if inst.get_nodeattr("impl_style") != "hls": - inst.set_nodeattr("impl_style", "hls") - inst.set_nodeattr("code_gen_dir_ipgen", "") - inst.set_nodeattr("ipgen_path", "") - need_restitch = True - # if we've made alterations to the model, need to do some re-prep - if need_restitch: - print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") - verify_model = verify_model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) - verify_model = verify_model.transform(HLSSynthIP()) - verify_model = verify_model.transform( - CreateStitchedIP( - cfg._resolve_fpga_part(), - cfg.synth_clk_period_ns, - vitis=False, + if not cfg.rtlsim_use_vivado_comps: + need_restitch = False + # switch impl_style=vivado components to rtl/hls + # StreamingFIFO must have impl_style=rtl + for fifo_layer in verify_model.get_nodes_by_op_type("StreamingFIFO"): + inst = getCustomOp(fifo_layer) + if inst.get_nodeattr("impl_style") != "rtl": + inst.set_nodeattr("impl_style", "rtl") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # StreamingDataWidthConverter must have impl_style=hls + for dwc_layer in verify_model.get_nodes_by_op_type( + "StreamingDataWidthConverter_Batch" + ): + inst = getCustomOp(dwc_layer) + if inst.get_nodeattr("impl_style") != "hls": + inst.set_nodeattr("impl_style", "hls") + inst.set_nodeattr("code_gen_dir_ipgen", "") + inst.set_nodeattr("ipgen_path", "") + need_restitch = True + # if we've made alterations to the model, need to do some re-prep + if need_restitch: + print("Need to regen/re-stitch some IP for STITCHED_IP_RTLSIM") + verify_model = verify_model.transform( + PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) ) - ) + verify_model = verify_model.transform(HLSSynthIP()) + verify_model = verify_model.transform( + CreateStitchedIP( + cfg._resolve_fpga_part(), + cfg.synth_clk_period_ns, + vitis=False, + ) + ) + else: + print("rtlsim_use_vivado_comps is enabled, may yield incorrect results") + # set top-level prop for stitched-ip rtlsim and launch verify_model.set_metadata_prop("exec_mode", "rtlsim") # TODO make configurable diff --git a/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh new file mode 100644 index 0000000000..1c8b6403e8 --- /dev/null +++ b/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh @@ -0,0 +1,346 @@ 
+// (c) Copyright 2011-2013 Xilinx, Inc. All rights reserved. +// +// This file contains confidential and proprietary information +// of Xilinx, Inc. and is protected under U.S. and +// international copyright and other intellectual property +// laws. +// +// DISCLAIMER +// This disclaimer is not a license and does not grant any +// rights to the materials distributed herewith. Except as +// otherwise provided in a valid license issued to you by +// Xilinx, and to the maximum extent permitted by applicable +// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND +// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES +// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING +// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- +// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and +// (2) Xilinx shall not be liable (whether in contract or tort, +// including negligence, or under any other theory of +// liability) for any loss or damage of any kind or nature +// related to, arising under or in connection with these +// materials, including for any direct, or any indirect, +// special, incidental, or consequential loss or damage +// (including loss of data, profits, goodwill, or any type of +// loss or damage suffered as a result of any action brought +// by a third party) even if such damage or loss was +// reasonably foreseeable or Xilinx had been advised of the +// possibility of the same. +// +// CRITICAL APPLICATIONS +// Xilinx products are not designed or intended to be fail- +// safe, or for use in any application requiring fail-safe +// performance, such as life-support or safety devices or +// systems, Class III medical devices, nuclear facilities, +// applications related to the deployment of airbags, or any +// other applications that could lead to death, personal +// injury, or severe property or environmental damage +// (individually and collectively, "Critical +// Applications"). Customer assumes the sole risk and +// liability of any use of Xilinx products in Critical +// Applications, subject only to applicable laws and +// regulations governing limitations on product liability. +// +// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS +// PART OF THIS FILE AT ALL TIMES. 
+//----------------------------------------------------------------------------- +// +// Generic Functions used by AXIS-Interconnect and Infrastrucutre Modules +// +// Verilog-standard: Verilog 2001 +//-------------------------------------------------------------------------- +// Global Parameters: +// +// Functions: +// f_clogb2 +// f_gcd +// f_lcm +// f_get_tdata_indx +// f_get_tstrb_indx +// f_get_tkeep_indx +// f_get_tlast_indx +// f_get_tid_indx +// f_get_tdest_indx +// f_get_tuser_indx +// f_payload_width +// Tasks: +// t_display_tdata_error +//-------------------------------------------------------------------------- +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Global Parameters +/////////////////////////////////////////////////////////////////////////////// +// Define Signal Set indices +localparam G_INDX_SS_TREADY = 0; +localparam G_INDX_SS_TDATA = 1; +localparam G_INDX_SS_TSTRB = 2; +localparam G_INDX_SS_TKEEP = 3; +localparam G_INDX_SS_TLAST = 4; +localparam G_INDX_SS_TID = 5; +localparam G_INDX_SS_TDEST = 6; +localparam G_INDX_SS_TUSER = 7; +localparam G_MASK_SS_TREADY = 32'h1 << G_INDX_SS_TREADY; +localparam G_MASK_SS_TDATA = 32'h1 << G_INDX_SS_TDATA; +localparam G_MASK_SS_TSTRB = 32'h1 << G_INDX_SS_TSTRB; +localparam G_MASK_SS_TKEEP = 32'h1 << G_INDX_SS_TKEEP; +localparam G_MASK_SS_TLAST = 32'h1 << G_INDX_SS_TLAST; +localparam G_MASK_SS_TID = 32'h1 << G_INDX_SS_TID ; +localparam G_MASK_SS_TDEST = 32'h1 << G_INDX_SS_TDEST; +localparam G_MASK_SS_TUSER = 32'h1 << G_INDX_SS_TUSER; + +// Task DRC error levels +localparam G_TASK_SEVERITY_ERR = 2; +localparam G_TASK_SEVERITY_WARNING = 1; +localparam G_TASK_SEVERITY_INFO = 0; + +/////////////////////////////////////////////////////////////////////////////// +// BEGIN Functions +/////////////////////////////////////////////////////////////////////////////// +// ceiling logb2 + function integer f_clogb2 (input integer size); + integer s; + begin + s = size; + s = s - 1; + for (f_clogb2=1; s>1; f_clogb2=f_clogb2+1) + s = s >> 1; + end + endfunction // clogb2 + + // Calculates the Greatest Common Divisor between two integers using the + // euclidean algorithm. + function automatic integer f_gcd ( + input integer a, + input integer b + ); + begin : main + integer A, B, done, swap; + A = a; + B = b; + done = 0; + while(!done) + begin + if (A < B ) begin + swap = A; + A = B; + B = swap; + end else if ( B != 0 ) begin + A = A - B; + end else begin + done = 1; + end + end + + f_gcd = A; + end + endfunction + + + // Calculates the Lowest Common Denominator between two integers + function integer f_lcm ( + input integer a, + input integer b + ); + begin : main + f_lcm = ( a / f_gcd(a, b)) * b; + end + endfunction + + // Returns back the index to the TDATA portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdata_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + f_get_tdata_indx = 0; + end + endfunction + + // Returns back the index to the tstrb portion of TPAYLOAD, returns 0 if the + // signal is not enabled. 
+ function integer f_get_tstrb_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdata_indx(DAW, IDW, DEW, USW, SST); + // If TDATA exists, then add its width to its base to get the tstrb index + f_get_tstrb_indx = SST[G_INDX_SS_TDATA] ? cur_indx + DAW : cur_indx; + end + endfunction + + // Returns back the index to the tkeep portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tkeep_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tstrb_indx(DAW, IDW, DEW, USW, SST); + f_get_tkeep_indx = SST[G_INDX_SS_TSTRB] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tlast portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tlast_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tkeep_indx(DAW, IDW, DEW, USW, SST); + f_get_tlast_indx = SST[G_INDX_SS_TKEEP] ? cur_indx + DAW/8 : cur_indx; + end + endfunction + + // Returns back the index to the tid portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tid_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tlast_indx(DAW, IDW, DEW, USW, SST); + f_get_tid_indx = SST[G_INDX_SS_TLAST] ? cur_indx + 1 : cur_indx; + end + endfunction + + // Returns back the index to the tdest portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tdest_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tid_indx(DAW, IDW, DEW, USW, SST); + f_get_tdest_indx = SST[G_INDX_SS_TID] ? cur_indx + IDW : cur_indx; + end + endfunction + + // Returns back the index to the tuser portion of TPAYLOAD, returns 0 if the + // signal is not enabled. + function integer f_get_tuser_indx ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tdest_indx(DAW, IDW, DEW, USW, SST); + f_get_tuser_indx = SST[G_INDX_SS_TDEST] ? cur_indx + DEW : cur_indx; + end + endfunction + + // Payload is the sum of all the AXIS signals present except for + // TREADY/TVALID + function integer f_payload_width ( + input integer DAW, // TDATA Width + input integer IDW, // TID Width + input integer DEW, // TDEST Width + input integer USW, // TUSER Width + input [31:0] SST // Signal Set + ); + begin : main + integer cur_indx; + cur_indx = f_get_tuser_indx(DAW, IDW, DEW, USW, SST); + f_payload_width = SST[G_INDX_SS_TUSER] ? 
cur_indx + USW : cur_indx; + // Ensure that the return value is never less than 1 + f_payload_width = (f_payload_width < 1) ? 1 : f_payload_width; + end + endfunction + + task t_check_tdata_width( + input integer data_width, + input [8*80-1:0] var_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tdata_width + if (data_width%8 != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s (%2d) must be a multiple of 8.", var_name, data_width); + $display(" AXI4-Stream data width is only defined for byte multiples. See the "); + $display(" AMBA4 AXI4-Stream Protocol Specification v1.0 Section 2.1 for more"); + $display(" information."); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask + + task t_check_tuser_width( + input integer tuser_width, + input [8*80-1:0] tuser_name, + input integer tdata_width, + input [8*80-1:0] tdata_name, + input [8*80-1:0] inst_name, + input integer severity_lvl, + output integer ret_val + ); + // Severity levels: + // 0 = INFO + // 1 = WARNING + // 2 = ERROR + begin : t_check_tuser_width + integer tdata_bytes; + tdata_bytes = tdata_width/8; + if ((tuser_width%tdata_bytes) != 0) begin + // 000 1 2 3 4 5 6 7 8 + // 012 0 0 0 0 0 0 0 0 + if (severity_lvl >= 2) begin + $display("ERROR: %m::%s", inst_name); + end else if (severity_lvl == 1) begin + $display("WARNING: %m::%s", inst_name); + end else begin + $display("INFO: %m::%s", inst_name); + end + $display(" Parameter %s == %2d is not the recommended value of 'an integer ", tuser_name, tuser_width); + $display(" multiple of the width of the interface (%s == %2d) in bytes.' AXI4-Stream", tdata_name, tdata_width); + $display(" TUSER width in this module is only defined when the TUSER is the"); + $display(" recommended value. See the AMBA4 AXI4-Stream Protocol Specification v1.0"); + $display(" Section 2.1, 2.3.3 and 2.8 for more information. "); + ret_val = 1; + end else begin + ret_val = 0; + end + end + endtask diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index f6a51da8e4..5396281397 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,7 +26,10 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+import pkg_resources as pk + import os +import shutil from pyverilator import PyVerilator from finn.util.basic import get_rtlsim_trace_depth, make_build_dir @@ -74,14 +77,34 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list - all_verilog_files = list( - set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + src_exts = [".v", ".sv"] + + all_verilog_src_files = list( + set( + filter( + lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs + ) + ) + ) + + verilog_header_dir = make_build_dir("pyverilator_vh_") + # use custom version of axis infrastructure vh + custom_vh = pk.resource_filename( + "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" ) + shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") + for fn in all_verilog_srcs: + if fn.endswith(".vh"): + if "axis_infrastructure_v1_1_0.vh" in fn: + # skip, we use a custom version for this file without recursive gcd + continue + else: + shutil.copy(fn, verilog_header_dir) # remove all but one instances of regslice_core.v filtered_verilog_files = [] remove_entry = False - for vfile in all_verilog_files: + for vfile in all_verilog_src_files: if "regslice_core" in vfile: if not remove_entry: filtered_verilog_files.append(vfile) @@ -94,7 +117,12 @@ def file_to_basename(x): for vfile in filtered_verilog_files: with open(vfile) as rf: wf.write("//Added from " + vfile + "\n\n") - wf.write(rf.read()) + lines = rf.read() + for line in lines.split("\n"): + # break down too-long lines, Verilator complains otherwise + if len(line) > 20000: + line = line.replace("&", "\n&") + wf.write("\n" + line) verilator_args = [] # disable common verilator warnings that should be harmless but commonly occur @@ -108,10 +136,20 @@ def file_to_basename(x): # force inlining of all submodules to ensure we can read internal signals properly if read_internal_signals: verilator_args += ["--inline-mult", "0"] + # add defines to make certain XPM src files work with Verilator + verilator_args.append("-DDISABLE_XPM_ASSERTIONS") + verilator_args.append("-DOBSOLETE") + verilator_args.append("-DONESPIN") + verilator_args.append("--bbox-unsup") + vivado_path = os.environ["VIVADO_PATH"] + # additional SystemVerilog modules to make XPMs work with Verilator + xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" + xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" + xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" sim = PyVerilator.build( - top_module_file_name, - verilog_path=[vivado_stitch_proj_dir], + [top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], + verilog_path=[vivado_stitch_proj_dir, verilog_header_dir], build_dir=build_dir, trace_depth=get_rtlsim_trace_depth(), top_module_name=top_module_name, From 8c5f1aaa6a81d5afde101911196570ee8eba3b62 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 28 Sep 2022 22:39:44 +0200 Subject: [PATCH 196/628] [FIFO] keep perf metric dict keys consistent in C++ fifo sim --- src/finn/qnn-data/cpp/verilator_fifosim.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/finn/qnn-data/cpp/verilator_fifosim.cpp b/src/finn/qnn-data/cpp/verilator_fifosim.cpp index 535a81005c..7d52ed7093 100644 --- a/src/finn/qnn-data/cpp/verilator_fifosim.cpp +++ b/src/finn/qnn-data/cpp/verilator_fifosim.cpp @@ -152,8 +152,9 @@ int main(int argc, char *argv[]) { results_file.open("results.txt", ios::out | ios::trunc); 
results_file << "N_IN_TXNS" << "\t" << n_in_txns << endl; results_file << "N_OUT_TXNS" << "\t" << n_out_txns << endl; - results_file << "N_CYCLES" << "\t" << iters << endl; - results_file << "LATENCY" << "\t" << latency << endl; + results_file << "cycles" << "\t" << iters << endl; + results_file << "N" << "\t" << n_inputs << endl; + results_file << "latency_cycles" << "\t" << latency << endl; @FIFO_DEPTH_LOGGING@ results_file.close(); From 59b19dd3699426a549b25f3926716278742ad72b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 28 Sep 2022 22:40:08 +0200 Subject: [PATCH 197/628] [Build] enable using C++ fifosim for rtlsim perf measurement --- src/finn/builder/build_dataflow_steps.py | 62 +++++++++++++++++++----- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index f196ed6331..07f971bdbd 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -29,6 +29,7 @@ import json import numpy as np import os +import warnings from copy import deepcopy from distutils.dir_util import copy_tree from qonnx.core.modelwrapper import ModelWrapper @@ -112,6 +113,7 @@ get_rtlsim_trace_depth, pyverilate_get_liveness_threshold_cycles, ) +from finn.util.pyverilator import verilator_fifosim from finn.util.test import execute_parent @@ -486,12 +488,20 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) elif cfg.auto_fifo_strategy == "largefifo_rtlsim": + # multi-in/out streams currently not supported in our C++ verilator driver + model_multi_io = len(model.graph.input) > 1 or len(model.graph.output) > 1 + force_python_sim = model_multi_io or cfg.force_python_rtlsim + if model_multi_io: + warnings.warn( + "Multi-in/out streams currently not supported " + + "in FINN C++ verilator driver, falling back to Python" + ) model = model.transform( InsertAndSetFIFODepths( cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period(), vivado_ram_style=cfg.large_fifo_mem_style, - force_python_sim=cfg.force_python_rtlsim, + force_python_sim=force_python_sim, ) ) else: @@ -588,20 +598,48 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi # prepare ip-stitched rtlsim rtlsim_model = deepcopy(model) rtlsim_model = prepare_for_stitched_ip_rtlsim(rtlsim_model, cfg) - # run with single input to get latency - orig_rtlsim_trace_depth = get_rtlsim_trace_depth() + # multi-in/out streams currently not supported in our C++ verilator driver + model_multi_io = ( + len(rtlsim_model.graph.input) > 1 or len(rtlsim_model.graph.output) > 1 + ) + force_python_rtlsim = cfg.force_python_rtlsim or model_multi_io + if model_multi_io: + warnings.warn( + "Multi-in/out streams currently not supported " + + "in FINN C++ verilator driver, falling back to Python" + ) rtlsim_bs = int(cfg.rtlsim_batch_size) - assert rtlsim_bs > 0, "rtlsim batch size must be >0" - if cfg.verify_save_rtlsim_waveforms: - # set depth to 3 for layer-by-layer visibility - os.environ["RTLSIM_TRACE_DEPTH"] = "3" + if force_python_rtlsim: + # run with single input to get latency + orig_rtlsim_trace_depth = get_rtlsim_trace_depth() + assert rtlsim_bs > 0, "rtlsim batch size must be >0" + if cfg.verify_save_rtlsim_waveforms: + # set depth to 3 for layer-by-layer visibility + os.environ["RTLSIM_TRACE_DEPTH"] = "3" + rtlsim_model.set_metadata_prop( + "rtlsim_trace", + 
"%s/rtlsim_perf_batch_%d.vcd" % (report_dir, rtlsim_bs), + ) rtlsim_model.set_metadata_prop( - "rtlsim_trace", "%s/rtlsim_perf_batch_%d.vcd" % (report_dir, rtlsim_bs) + "extra_verilator_args", str(["-CFLAGS", "-O3"]) ) - rtlsim_model.set_metadata_prop("extra_verilator_args", str(["-CFLAGS", "-O3"])) - rtlsim_perf_dict = throughput_test_rtlsim(rtlsim_model, rtlsim_bs) - rtlsim_latency = rtlsim_perf_dict["cycles"] - rtlsim_perf_dict["latency_cycles"] = rtlsim_latency + rtlsim_perf_dict = throughput_test_rtlsim(rtlsim_model, rtlsim_bs) + rtlsim_latency = rtlsim_perf_dict["cycles"] + rtlsim_perf_dict["latency_cycles"] = rtlsim_latency + else: + rtlsim_perf_dict = verilator_fifosim(model, rtlsim_bs) + # keep keys consistent between the Python and C++-styles + cycles = rtlsim_perf_dict["cycles"] + clk_ns = float(model.get_metadata_prop("clk_ns")) + fclk_mhz = 1 / (clk_ns * 0.001) + runtime_s = (cycles * clk_ns) * (10**-9) + rtlsim_perf_dict["runtime[ms]"] = runtime_s * 1000 + rtlsim_perf_dict["throughput[images/s]"] = rtlsim_bs / runtime_s + rtlsim_perf_dict["fclk[mhz]"] = fclk_mhz + for (key, val) in rtlsim_perf_dict.items(): + if "max_count" in key: + del rtlsim_perf_dict[key] + with open(report_dir + "/rtlsim_performance.json", "w") as f: json.dump(rtlsim_perf_dict, f, indent=2) if cfg.verify_save_rtlsim_waveforms: From bacb7f0a82268d59998da0af8ff03a42c62ab475 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 29 Sep 2022 09:26:16 +0100 Subject: [PATCH 198/628] [Tests] Add jenkins marker for downsampler test --- tests/fpgadataflow/test_fpgadataflow_downsampler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py index e815a3d800..64da0a2368 100644 --- a/tests/fpgadataflow/test_fpgadataflow_downsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -113,6 +113,7 @@ def build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=False @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow def test_fpgadataflow_downsampler(is_1d, flip_1d, exec_mode): if flip_1d and not is_1d: pytest.skip("flip_1d only applicable for is_1d") From 8f6a22ac8ff1e7943dc3fabec7f03b9f32e7d24e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 29 Sep 2022 11:03:36 +0200 Subject: [PATCH 199/628] [FIFO] small bugfix in input txn counting --- src/finn/qnn-data/cpp/verilator_fifosim.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/qnn-data/cpp/verilator_fifosim.cpp b/src/finn/qnn-data/cpp/verilator_fifosim.cpp index 7d52ed7093..565aab23e9 100644 --- a/src/finn/qnn-data/cpp/verilator_fifosim.cpp +++ b/src/finn/qnn-data/cpp/verilator_fifosim.cpp @@ -118,7 +118,7 @@ int main(int argc, char *argv[]) { chrono::steady_clock::time_point begin = chrono::steady_clock::now(); while(!exit_criterion) { - if(top->s_axis_0_tready == 1) { + if(top->s_axis_0_tready == 1 && top->s_axis_0_tvalid == 1) { n_in_txns++; if(n_in_txns == n_iters_per_input * n_inputs) { top->s_axis_0_tvalid = 0; From efcc83a43b31c6cada430acbc7b6c97a65e42871 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 29 Sep 2022 16:12:12 +0200 Subject: [PATCH 200/628] [FIFO] don't round-up sizes to 2^x, StreamingFIFO does it dynamically --- src/finn/transformation/fpgadataflow/set_fifo_depths.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py 
b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 948e87511d..4a451982bd 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -72,8 +72,9 @@ def optimize_depth(depth): # Q_srl FIFOs do not benefit from size < 32 # add some slack return 32 - # round to nearest power of two for Vivado IP FIFO implementation - return int(2 ** math.ceil(math.log2(depth))) + # otherwise leave as is + # will be rounded to nearest power of two for Vivado-style FIFO + return int(depth) class RemoveShallowFIFOs(Transformation): From 389a9156a963eb809ce901e1f8ec8409c4297fec Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 29 Sep 2022 16:35:18 +0200 Subject: [PATCH 201/628] [SWGG] inherit interface dict from HLSCustomOp to remain compatible --- .../fpgadataflow/convolutioninputgenerator_rtl.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 6b61807075..665325bdee 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -842,17 +842,9 @@ def get_verilog_top_module_intf_names(self): each tuple is (interface_name, interface_width_bits). axilite always assumed to be 32 bits and is not tuple (name only). Each block must have at most one aximm and one axilite.""" - intf_names = {} - intf_names["clk"] = ["ap_clk"] - intf_names["rst"] = ["ap_rst_n"] - sname = self.hls_sname() - intf_names["s_axis"] = [("in0_" + sname, self.get_instream_width_padded())] - intf_names["m_axis"] = [("out_" + sname, self.get_outstream_width_padded())] - intf_names["aximm"] = [] + intf_names = super().get_verilog_top_module_intf_names() if self.get_nodeattr("dynamic_mode"): intf_names["axilite"] = ["s_axi_cfg"] - else: - intf_names["axilite"] = [] return intf_names def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): From 1a4166c9496578c831387cb6388caa011ac6de7e Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 4 Oct 2022 09:27:59 +0200 Subject: [PATCH 202/628] FIFO] bugfix post-reset ibp condition on Q_srl --- finn-rtllib/memstream/hdl/Q_srl.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v index b4e89628a4..d05af67072 100644 --- a/finn-rtllib/memstream/hdl/Q_srl.v +++ b/finn-rtllib/memstream/hdl/Q_srl.v @@ -139,7 +139,7 @@ module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count); addr <= 0; addr_full <= 0; o_v_reg <= 0; - i_b_reg <= 1; + i_b_reg <= 0; end else begin state <= state_; From acefb52590a61a9d216748c1511d9d121c4ae754 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 22 Aug 2022 17:02:33 +0200 Subject: [PATCH 203/628] [Stitch] explicitly mark finn_design as top level --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 00e2cc3bb4..3746e41866 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -404,6 +404,7 @@ def apply(self, model): wrapper_filename = "%s/hdl/%s_wrapper.v" % (bd_base, block_name) tcl.append("add_files -norecurse %s" % wrapper_filename) 
model.set_metadata_prop("wrapper_filename", wrapper_filename) + tcl.append("set_property top finn_design_wrapper [current_fileset]") # synthesize to DCP and export stub, DCP and constraints if self.vitis: tcl.append( From 1ce5af7789403c4ee9f5092f752342e84780a52a Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 4 Oct 2022 15:56:54 +0100 Subject: [PATCH 204/628] [Util] Add comment about usage of axis infrastructure vh --- src/finn/util/pyverilator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 5396281397..d7ed3e261f 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -89,6 +89,7 @@ def file_to_basename(x): verilog_header_dir = make_build_dir("pyverilator_vh_") # use custom version of axis infrastructure vh + # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) custom_vh = pk.resource_filename( "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" ) From 392052b1198abc8aee2cc7b59f000e8f9422075d Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Oct 2022 09:33:03 +0100 Subject: [PATCH 205/628] [CustomOps] Add missing indices in shape/datatype fcts --- .../fpgadataflow/convolutioninputgenerator.py | 2 +- .../convolutioninputgenerator_rtl.py | 16 ++++++++-------- src/finn/custom_op/fpgadataflow/eltwise.py | 12 ++++++------ .../custom_op/fpgadataflow/fmpadding_batch.py | 2 +- src/finn/custom_op/fpgadataflow/iodma.py | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 6f039f7d67..1566445999 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -166,7 +166,7 @@ def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): """Returns stream width, input and output stream width are equal for the sliding window function""" ibits = self.get_input_datatype().bitwidth() diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 399b36e150..5424050a8e 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -98,13 +98,13 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") ishape = (1, ifm_dim_h, ifm_dim_w, ifm_ch) return ishape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") simd = self.get_nodeattr("SIMD") @@ -113,7 +113,7 @@ def get_folded_input_shape(self): folded_ishape = (1, ifm_dim_h, ifm_dim_w, wf, simd) return folded_ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -125,7 +125,7 @@ def get_normal_output_shape(self): oshape = (1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch) return oshape - def 
get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): k_h, k_w = self.get_nodeattr("ConvKernelDim") ifm_dim_h, ifm_dim_w = self.get_nodeattr("IFMDim") ifm_ch = self.get_nodeattr("IFMChannels") @@ -160,15 +160,15 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" return DataType[self.get_nodeattr("inputDataType")] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") ifm_ch = self.get_nodeattr("IFMChannels") @@ -176,7 +176,7 @@ def get_instream_width(self): in_width = simd * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): if self.get_nodeattr("parallel_window"): # feed all window pixels in parallel k_h, k_w = self.get_nodeattr("ConvKernelDim") diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index a29e871fab..a7b9c814e2 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -91,10 +91,10 @@ def get_folded_input_shape(self, ind=0): ishape = tuple(vecs + [ich // pe, pe]) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): return self.get_normal_input_shape() - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): return self.get_folded_input_shape() def make_shape_compatible_op(self, model): @@ -156,11 +156,11 @@ def verify_node(self): return info_messages - def get_input_datatype(self, id=0): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" - return DataType[self.get_nodeattr("inputDataType" + str(id))] + return DataType[self.get_nodeattr("inputDataType" + str(ind))] - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" op = self.get_nodeattr("eltwiseOp") idt0 = self.get_input_datatype(0) @@ -196,7 +196,7 @@ def get_instream_width(self, ind=0): in_width = pe * ibits return in_width - def get_outstream_width(self): + def get_outstream_width(self, ind=0): """Returns output stream width.""" obits = self.get_output_datatype().bitwidth() pe = self.get_nodeattr("PE") diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index 2034fb9381..50eaaff94b 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -152,7 +152,7 @@ def get_input_datatype(self, ind=0): assert ret.allowed(0), "FMPadding_Batch DataType must support zero" return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output. 
(Same as input datatype)""" return self.get_input_datatype() diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index a80eb29a6d..65683079fc 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -100,7 +100,7 @@ def get_nodeattr_types(self): my_attrs.update(super().get_nodeattr_types()) return my_attrs - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): vecs = list(self.get_nodeattr("numInputVectors")) num_ch = self.get_nodeattr("NumChannels") ishape = tuple(vecs + [num_ch]) From 72ded22a5cfe843936ef8d24b808a37a595c78be Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Oct 2022 09:35:36 +0100 Subject: [PATCH 206/628] [CustomOp] Remove overwriting of default FIFO sizes --- src/finn/custom_op/fpgadataflow/channelwise_op_batch.py | 3 --- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index f2d9f1aeb2..46adca680d 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -102,9 +102,6 @@ def get_nodeattr_types(self): "inputDataType": ("s", True, ""), "paramDataType": ("s", True, ""), "outputDataType": ("s", True, ""), - # input and output FIFO depths - "inFIFODepths": ("ints", False, [0]), - "outFIFODepths": ("ints", False, [0]), # number of input vectors, examples: # [1] is a single vector (like a FC layer with batch=1) # [4] is four vectors (like a FC layer with batch=4) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 62e51cc7bf..f2cc64668d 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -75,9 +75,6 @@ def get_nodeattr_types(self): "inputDataType": ("s", True, ""), "weightDataType": ("s", True, ""), "outputDataType": ("s", True, ""), - # input and output FIFO depths - "inFIFODepths": ("ints", False, [0]), - "outFIFODepths": ("ints", False, [0]), # number of input vectors, examples: # [1] is a single vector (like a FC layer with batch=1) # [4] is four vectors (like a FC layer with batch=4) From 99d81dfcc6a87f6bef978655a6d403f1ee58aab1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 5 Oct 2022 12:26:24 +0200 Subject: [PATCH 207/628] [Streamline] bugfix in AbsorbConsecutiveTransposes without this, a single trailing Transpose at the end of the NN gets eaten up --- src/finn/transformation/streamline/absorb.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py index a983e67750..50dcbaa0ed 100644 --- a/src/finn/transformation/streamline/absorb.py +++ b/src/finn/transformation/streamline/absorb.py @@ -492,6 +492,8 @@ def apply(self, model): if node.op_type == "Transpose": next_nodes = model.find_consumers(node.output[0]) perms1 = list(get_by_name(node.attribute, "perm").ints) + if len(next_nodes) == 0: + continue # check if all nodes after fork are opposite transposes all_opposite_transposes = True for next_node in next_nodes: From e8ca508b9d172d54f2dac3e191c3db44d59237fc Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 5 Oct 2022 15:41:36 +0200 Subject: [PATCH 208/628] [Test] introduce test_fpgadataflow_conv_dynamic --- 
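The test introduced below exercises runtime reconfiguration of the RTL sliding-window generators: before each rtlsim run, every SWG's register file is written over AXI-Lite, the cfg_valid flag is pulsed, and the core is reset. Distilled into a small sketch (helper name is illustrative; the address/value pairs come from the node's get_dynamic_config()):

from pyverilator.util.axi_utils import axilite_write, reset_rtlsim

def program_swg(sim, axi_basename, config):
    """Write one SWG's dynamic configuration over its AXI-Lite interface."""
    # config maps register names to (address, value) tuples
    for addr, value in config.values():
        axilite_write(sim, addr, value, basename=axi_basename)
    # pulse the cfg_valid flag (register at address 0) for at least one cycle
    axilite_write(sim, 0, 1, basename=axi_basename)
    # reset the component so it latches the new image dimensions
    reset_rtlsim(sim)

In the test this logic is wrapped into a pre_hook for rtlsim_exec, so the registers are programmed before any stream data is pushed.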
...dataflow_convinputgenerator_rtl_dynamic.py | 231 +++++++++++++++--- 1 file changed, 202 insertions(+), 29 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index f2d51d9ea6..2a3413cb13 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -28,23 +28,220 @@ import pytest +import copy +import numpy as np +import onnx.parser as oprs from onnx import TensorProto, helper from pyverilator.util.axi_utils import axilite_write, reset_rtlsim from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.custom_op.registry import getCustomOp -from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames +from qonnx.transformation.infer_datatypes import InferDataTypes +from qonnx.transformation.infer_shapes import InferShapes +from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul +from qonnx.util.basic import gen_finn_dt_tensor, get_by_name import finn.core.onnx_exec as oxe +import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls +import finn.transformation.streamline.absorb as absorb +from finn.core.onnx_exec import execute_onnx from finn.core.rtlsim_exec import rtlsim_exec +from finn.transformation.fpgadataflow.create_dataflow_partition import ( + CreateDataflowPartition, +) from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): + np.random.seed(0) + ishp = (1, ifm, idim, idim) + int_dim = compute_conv_output_dim(idim, k, stride) + odim = compute_conv_output_dim(int_dim, k, stride) + oshp = (1, ofm, odim, odim) + wshp = (ofm, ifm, k, k) + wshp_1 = (ofm, ofm, k, k) + ishp_str = str(list(ishp)) + oshp_str = str(list(oshp)) + wshp_str = str(list(wshp)) + wshp_1_str = str(list(wshp_1)) + kshp_str = str([k, k]) + pad_str = str([0, 0, 0, 0]) + stride_str = str([stride, stride]) + dil_str = str([1, 1]) + + input = f""" + < + ir_version: 7, + opset_import: ["" : 9] + > + agraph (float{ishp_str} in0) => (float{oshp_str} out0) + < + float{wshp_str} param_c0_weight, + float{wshp_1_str} param_c1_weight + > + {{ + conv0 = Conv< + dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_str}, + strides={stride_str} + >(in0, param_c0_weight) + out0 = Conv< + dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_str}, + strides={stride_str} + >(conv0, param_c1_weight) + }} + """ + model = oprs.parse_model(input) + model = ModelWrapper(model) + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + model.set_tensor_datatype("in0", idt) + model.set_tensor_datatype("param_c0_weight", wdt) + model.set_tensor_datatype("param_c1_weight", wdt) + model.set_initializer("param_c0_weight", gen_finn_dt_tensor(wdt, wshp)) + model.set_initializer("param_c1_weight", gen_finn_dt_tensor(wdt, wshp_1)) + return model + + +def update_conv_model_dims(model, idim_new): + cnode = 
model.get_nodes_by_op_type("Conv")[0] + k, _ = get_by_name(cnode.attribute, "kernel_shape").ints + stride, _ = get_by_name(cnode.attribute, "strides").ints + ishp = model.get_tensor_shape("in0") + n, ci, _, _ = ishp + n, co, _, _ = model.get_tensor_shape("out0") + int_dim = compute_conv_output_dim(idim_new, k, stride) + odim = compute_conv_output_dim(int_dim, k, stride) + model.set_tensor_shape("in0", (n, ci, idim_new, idim_new)) + model.set_tensor_shape("out0", (n, co, odim, odim)) + # remove all existing shapes + del model.graph.value_info[:] + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + return model + + +# Helper function to update tensor dimensions manually because shape inference +# does not work on FINN nodes (they assume well-defined tensor shapes). +def update_tensor_dim(model, tensor_name, new_hw): + shape = model.get_tensor_shape(tensor_name) + shape[1] = new_hw[0] + shape[2] = new_hw[1] + model.set_tensor_shape(tensor_name, shape) + + +# Helper function that delivers the hook to program the SWG via AXI-Lite +def config_hook(configs): + if configs is None: + return None + + def write_swg_config(sim): + for axi_name, config in configs: + # 1. Write config registers to the SWG, dict defines (addr, value) tuples + for config_entry in config.values(): + axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) + # 2. Set cfg_valid flag (>= 1 cycle) + axilite_write(sim, 0, 1, basename=axi_name) + # 3. Reset component (>= 1 cycle) + reset_rtlsim(sim) + + return write_swg_config + + +@pytest.mark.slow +@pytest.mark.vivado +def test_fpgadataflow_conv_dynamic(): + idims = [32, 16] + ifm = 4 + k = 4 + stride = 1 + ofm = 8 + idt = DataType["UINT8"] + wdt = DataType["INT2"] + exp_cfgs = [] + largest_model = None + for idim in idims: + ishp = (1, ifm, idim, idim) + np.random.seed(0) + inp = gen_finn_dt_tensor(idt, ishp) + model = create_conv_model(idim, ifm, k, stride, ofm, idt, wdt) + _, _, int_dim, _ = model.get_tensor_shape("conv0") + _, _, odim, _ = model.get_tensor_shape("out0") + if idim == max(idims): + # use largest model for hardware conversion + largest_model = copy.deepcopy(model) + golden = execute_onnx(model, {"in0": inp})["out0"] + exp_cfg = (idim, int_dim, odim, inp, golden) + exp_cfgs.append(exp_cfg) + + # convert to hardware and prepare simulation + model = largest_model.transform(LowerConvsToMatMul()) + model = model.transform(to_hls.InferConvInpGen(use_rtl_variant=True)) + model = model.transform( + to_hls.InferQuantizedMatrixVectorActivation(mem_mode="decoupled") + ) + model = model.transform(absorb.AbsorbConsecutiveTransposes()) + parent_model = model.transform(CreateDataflowPartition()) + sdp_inst = getCustomOp( + parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] + ) + model = ModelWrapper(sdp_inst.get_nodeattr("model")) + for swg_node in model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl"): + getCustomOp(swg_node).set_nodeattr("SIMD", 1) + getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) + getCustomOp(swg_node).set_nodeattr("inFIFODepth", 16) + getCustomOp(swg_node).set_nodeattr("outFIFODepth", 16) + print("SWG initial config:") + idim = getCustomOp(swg_node).get_nodeattr("IFMDim") + print(getCustomOp(swg_node).get_dynamic_config(idim)) + model = model.transform(InsertFIFO()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + model = model.transform(PrepareIP("xc7z020clg400-1", 5)) + model = model.transform(HLSSynthIP()) + model = 
model.transform(CreateStitchedIP("xc7z020clg400-1", 5)) + model.set_metadata_prop("exec_mode", "rtlsim") + + # loop through experiment configurations + for exp_cfg in exp_cfgs: + idim, int_dim, odim, inp, golden = exp_cfg + # model.set_metadata_prop("rtlsim_trace", "trace_size0.vcd") + # get config for the new dimensions + swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") + swg0 = getCustomOp(swg_nodes[0]) + update_tensor_dim(model, swg0.onnx_node.input[0], (idim, idim)) + update_tensor_dim(model, swg0.onnx_node.output[0], (int_dim, int_dim)) + config0 = swg0.get_dynamic_config((idim, idim)) + swg1 = getCustomOp(swg_nodes[1]) + update_tensor_dim(model, swg1.onnx_node.input[0], (int_dim, int_dim)) + update_tensor_dim(model, swg1.onnx_node.output[0], (odim, odim)) + config1 = swg1.get_dynamic_config((int_dim, int_dim)) + configs = [("s_axi_cfg_0_", config0), ("s_axi_cfg_1_", config1)] + # adjust folded shapes for I/O FIFOs + # (since rtlsim_exec uses folded shape info to fold global i/o tensors) + first_node = getCustomOp(model.graph.node[0]) + first_node_shp = list(first_node.get_folded_input_shape()) + first_node_shp[1] = idim + first_node_shp[2] = idim + first_node.set_nodeattr("folded_shape", first_node_shp) + update_tensor_dim(model, first_node.onnx_node.input[0], (idim, idim)) + last_node = getCustomOp(model.graph.node[-1]) + last_node_shp = list(last_node.get_folded_output_shape()) + last_node_shp[1] = odim + last_node_shp[2] = odim + update_tensor_dim(model, last_node.onnx_node.output[0], (odim, odim)) + last_node.set_nodeattr("folded_shape", last_node_shp) + model.set_metadata_prop("rtlsim_trace", "trace_size1.vcd") + ctx = {"global_in": inp.transpose(0, 2, 3, 1)} + rtlsim_exec(model, ctx, pre_hook=config_hook(configs)) + ret = ctx["global_out"].transpose(0, 3, 1, 2) + assert np.isclose(golden, ret).all() + + def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilation, idt): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim @@ -229,31 +426,6 @@ def test_fpgadataflow_slidingwindow_rtl_dynamic( model = model.transform(CreateStitchedIP("xc7z020clg400-1", 5)) model.set_metadata_prop("exec_mode", "rtlsim") - # Helper function that delivers the hook to program the SWG via AXI-Lite - def config_hook(config): - if config is None: - return None - - def write_swg_config(sim): - axi_name = "s_axi_cfg_0_" - # 1. Write config registers to the SWG, dict defines (addr, value) tuples - for config_entry in config.values(): - axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) - # 2. Set cfg_valid flag (>= 1 cycle) - axilite_write(sim, 0, 1, basename=axi_name) - # 3. Reset component (>= 1 cycle) - reset_rtlsim(sim) - - return write_swg_config - - # Helper function to update tensor dimensions manually because shape inference - # does not work on FINN nodes (they assume well-defined tensor shapes). 
- def update_tensor_dim(model, tensor_name, new_hw): - shape = model.get_tensor_shape(tensor_name) - shape[1] = new_hw[0] - shape[2] = new_hw[1] - model.set_tensor_shape(tensor_name, shape) - # Simulate 1 FM for each dimension in the series for i, ifm_dim in enumerate(ifm_dim_series): ifm_dim_h, ifm_dim_w = ifm_dim @@ -261,7 +433,7 @@ def update_tensor_dim(model, tensor_name, new_hw): ofm_dim_w = compute_conv_output_dim(ifm_dim_w, k_w, stride_w, 0, dilation_w) ofm_dim = [ofm_dim_h, ofm_dim_w] - config = None + configs = None if i > 0: # skip re-programming for initial FM dimension # Necessary update of node and tensor attributes to make rtlsim work: swg_node = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0] @@ -271,6 +443,7 @@ def update_tensor_dim(model, tensor_name, new_hw): # Generate config, also overwrites IFMDim/OFMDim attributes: config = swg_inst.get_dynamic_config(ifm_dim) + configs = [("s_axi_cfg_0_", config)] # Also update FIFO nodes and corresponding tensors fifo_node = model.get_nodes_by_op_type("StreamingFIFO")[0] @@ -292,7 +465,7 @@ def update_tensor_dim(model, tensor_name, new_hw): # Run rtlsim on stitched-ip x = gen_finn_dt_tensor(idt, (1, ifm_dim_h, ifm_dim_w, ifm_ch)) context = prepare_inputs(x) - rtlsim_exec(model, context, pre_hook=config_hook(config)) + rtlsim_exec(model, context, pre_hook=config_hook(configs)) y_produced = context["outp"] # Generate golden result From 95fb4deb5bad3acc8058ea106a9515aee2d19199 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Oct 2022 15:43:31 +0100 Subject: [PATCH 209/628] [Jenkins] Add marker to fifosizing test --- tests/fpgadataflow/test_fifosizing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 37efc5124b..5fd1439bd0 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -49,6 +49,7 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow def test_fifosizing_linear(): tmp_output_dir = fetch_test_model("tfc") cfg = build_cfg.DataflowBuildConfig( From 4c0978e8fb9e845fac6b1b1c32d7b6fa1be4106c Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 6 Oct 2022 10:04:13 +0100 Subject: [PATCH 210/628] [CustomOp] Update fmpadding to new hls code --- fetch-repos.sh | 2 +- .../custom_op/fpgadataflow/fmpadding_batch.py | 55 +++++++++---------- .../test_fpgadataflow_fmpadding.py | 44 +++------------ 3 files changed, 36 insertions(+), 65 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 36c9ae5578..b0f6400ed1 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="e7f2de91d1a2ddadaaea06b8f4c20e97a575470e" +HLSLIB_COMMIT="d27f6b6c5d8f1bb208db395659389603f63ad4be" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index d69ea471ea..d4dfc258ae 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -47,10 +47,6 @@ def get_nodeattr_types(self): # spatial size of input images 
"ImgDim": ("ints", True, []), # [H, W] = [Y, X] # total padding (per dimension) to apply - # NOTE: Current padding scheme that is applied tries to pad the same - # amount of zeros in front and behind the image for each dimension. - # As an example, a padding scheme such as [1, x, 3, x] is equal - # to [2, x, 2, x] "Padding": ( "ints", True, @@ -62,10 +58,6 @@ def get_nodeattr_types(self): "SIMD": ("i", False, 1), # FINN input datatype "inputDataType": ("s", True, ""), - # controls distribution of padded pixels - # in case of uneven padding -- see FMPadding fxn - # in hlslib - "PaddingStyle": ("i", False, 2, {2, 1}), # shape describing input vecs per execution "numInputVectors": ("i", False, 1), } @@ -179,23 +171,21 @@ def defines(self, var): pad = self.get_nodeattr("Padding") pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] - is_square = idim_h == idim_w + is_square_img = idim_h == idim_w + is_square_pad = pad_h == pad_w - if is_square: - assert ( - pad_h == pad_w - ), "Only equal padding along the dimensions for square images is supported" + if is_square_img and is_square_pad: self.code_gen_dict["$DEFINES$"] = [ """#define ImgDim1 {}\n#define OutputDim1 {}\n - #define Padding1 {}\n#define NumChannels1 {}\n - #define SIMD1 {}\n#define PaddingStyle1 {}\n + #define PaddingBefore1 {}\n#define PaddingBehind1 {}\n + #define NumChannels1 {}\n#define SIMD1 {}\n #define numReps {}\n""".format( idim_h, odim_h, - pad_h, + pad[0], + pad[2], self.get_nodeattr("NumChannels"), self.get_nodeattr("SIMD"), - self.get_nodeattr("PaddingStyle"), self.get_nodeattr("numInputVectors"), ) ] @@ -204,20 +194,22 @@ def defines(self, var): """ #define OutputDim1_x {}\n #define OutputDim1_y {}\n - #define Padding1_x {}\n - #define Padding1_y {}\n + #define PaddingLeft1 {}\n + #define PaddingRight1 {}\n + #define PaddingTop1 {}\n + #define PaddingBottom1 {}\n #define NumChannels1 {}\n #define SIMD1 {}\n - #define PaddingStyle1 {}\n #define numReps {}\n """.format( odim_w, odim_h, - pad_w, - pad_h, + pad[1], + pad[3], + pad[0], + pad[2], self.get_nodeattr("NumChannels"), self.get_nodeattr("SIMD"), - self.get_nodeattr("PaddingStyle"), self.get_nodeattr("numInputVectors"), ) ] @@ -254,21 +246,26 @@ def docompute(self): node = self.onnx_node idim_h, idim_w = self.get_nodeattr("ImgDim") - is_square = idim_h == idim_w + pad = self.get_nodeattr("Padding") + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + is_square_img = idim_h == idim_w + is_square_pad = pad_h == pad_w - if is_square: + if is_square_img and is_square_pad: hls_call = node.op_type self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} (in0, out, numReps);""".format( + """{} (in0, out, numReps);""".format( hls_call, in_t ) ] else: hls_call = "FMPadding_nonsquare_Batch" self.code_gen_dict["$DOCOMPUTE$"] = [ - """{} (in0, out, numReps);""".format( + """{} (in0, out, numReps);""".format( hls_call, in_t ) ] diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 2e2da0da7a..1218bac503 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -53,12 +53,11 @@ target_clk_ns = 10 -def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_style): +def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt): pad_h = padding[0] + padding[2] pad_w = padding[1] + padding[3] idim_h, idim_w = idim - assert pad_style == 2, "only pad_style == 2 supported in hlslib" assert pad_h > 0 or pad_w > 0, "Output dim should be greater 
than input dim" odim_h = idim_h + pad_h odim_w = idim_w + pad_w @@ -80,7 +79,6 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty Padding=padding, NumChannels=num_ch, inputDataType=str(idt.name), - PaddingStyle=pad_style, numInputVectors=1, SIMD=simd, ) @@ -99,15 +97,15 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty # input image dimension -@pytest.mark.parametrize("idim", [[8, 8], [10, 8]]) +@pytest.mark.parametrize("idim", [[2, 2], [8, 8], [10, 8]]) # number of rows and number of cols to add -@pytest.mark.parametrize("pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3]]) +@pytest.mark.parametrize( + "pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3], [7, 0, 8, 0]] +) # number of channels -@pytest.mark.parametrize("num_ch", [2, 4]) +@pytest.mark.parametrize("num_ch", [1, 2, 4]) # Input parallelism @pytest.mark.parametrize("simd", [1, 2]) -# PaddingStyle: selects behavior when (odim-idim)%2 != 0 -@pytest.mark.parametrize("pad_style", [2]) # FINN input datatype @pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]]) # execution mode @@ -115,7 +113,7 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): +def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode): if num_ch % simd != 0: pytest.skip(" num_ch % simd != 0, skipping") @@ -123,19 +121,13 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] - if idim_h == idim_w and pad_h != pad_w: - pytest.skip( - """Only equal padding along the dimensions for square images - is supported, skipping""" - ) - # generate input data x = gen_finn_dt_tensor(idt, [1, idim_h, idim_w, num_ch]) input_dict = {"inp": x} odim_h = idim_h + pad_h odim_w = idim_w + pad_w - model = make_single_fmpadding_modelwrapper(idim, pad, num_ch, simd, idt, pad_style) + model = make_single_fmpadding_modelwrapper(idim, pad, num_ch, simd, idt) model = model.transform(InferShapes()) model = model.transform(SetExecMode(mode)) model = model.transform(GiveUniqueNodeNames()) @@ -150,26 +142,8 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): expected_oshape = (1, odim_h, odim_w, num_ch) assert y_produced.shape == expected_oshape - # calculate reference - # calculate correct pad according to parameters - if pad_style == 2: - if pad_h % 2 == 0: - pad_up = pad_h // 2 - else: - pad_up = pad_h // 2 + 1 - if pad_w % 2 == 0: - pad_left = pad_w // 2 - else: - pad_left = pad_w // 2 + 1 - else: - pad_up = pad_h // 2 - pad_left = pad_w // 2 - - pad_down = pad_h - pad_up - pad_right = pad_w - pad_left - y_expected = np.pad( - x, ((0, 0), (pad_up, pad_down), (pad_left, pad_right), (0, 0)), "constant" + x, ((0, 0), (pad[0], pad[2]), (pad[1], pad[3]), (0, 0)), "constant" ) assert (y_produced == y_expected).all() From c0dc4af4bce141afbd4984f778529ff089c58ff3 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 6 Oct 2022 12:09:43 +0100 Subject: [PATCH 211/628] [Refactor] Minor fixes on code --- .../custom_op/fpgadataflow/hlscustomop.py | 9 ++-- .../custom_op/fpgadataflow/streamingfifo.py | 2 - .../fpgadataflow/insert_fifo.py | 44 ++++++++++--------- 3 files changed, 30 insertions(+), 25 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index d6993206be..f307be95c3 
100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -786,7 +786,6 @@ def derive_characteristic_fxns(self, period, override_rtlsim_dict=None): io_dict = { "inputs": { "in0": [0 for i in range(n_inps)], - # "weights": wei * num_w_reps }, "outputs": {"out": []}, } @@ -824,7 +823,12 @@ def monitor_txns(sim_obj): liveness_threshold=period, hook_preclk=monitor_txns, ) - assert total_cycle_count <= period + assert ( + total_cycle_count <= period + ), """Total cycle count from rtl simulation is higher than + specified period, please set the period higher than {}""".format( + total_cycle_count + ) self.set_nodeattr("io_chrc_period", period) def accumulate_char_fxn(chrc): @@ -859,7 +863,6 @@ def accumulate_char_fxn(chrc): all_txns_out[out_idx, :] = txn_out all_pad_out.append(pad_out) - # TODO specialize here for DuplicateStreams and AddStreams self.set_nodeattr("io_chrc_in", all_txns_in) self.set_nodeattr("io_chrc_out", all_txns_out) self.set_nodeattr("io_chrc_pads_in", all_pad_in) diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index d0accc2d36..40d016de43 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -208,8 +208,6 @@ def ipgen_singlenode_code(self): def get_normal_input_shape(self, ind=0): depth = self.get_adjusted_depth() - # depth has to be between 2 and 256 with the current - # StreamingFIFO implementation assert depth >= 2, """Depth is too low""" if depth > 256 and self.get_nodeattr("impl_style") == "rtl": warnings.warn( diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 260525c5d5..79bd717a5d 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -88,10 +88,7 @@ def __init__( ): super().__init__() self.create_shallow_fifos = create_shallow_fifos - if max_qsrl_depth is None: - self.max_qsrl_depth = 1000000 - else: - self.max_qsrl_depth = max_qsrl_depth + self.max_qsrl_depth = max_qsrl_depth self.vivado_ram_style = vivado_ram_style def apply(self, model): @@ -120,15 +117,10 @@ def apply(self, model): # check if folded_shape of output of first node and # input of the second node is equal n1 = getCustomOp(consumer) - idx_inp = 0 for idx, inp in enumerate(consumer.input): if inp == output_name: - if idx == 0: - fld_shape_2 = n1.get_folded_input_shape() - idx_inp = 0 - else: - fld_shape_2 = n1.get_folded_input_shape(ind=idx) - idx_inp = idx + fld_shape_2 = n1.get_folded_input_shape(ind=idx) + idx_inp = idx assert _suitable_folded_shapes( fld_shape, fld_shape_2 ), """The @@ -141,10 +133,7 @@ def apply(self, model): n0_depth = n0.get_nodeattr("outFIFODepths")[idx_out] n1_depth = n1.get_nodeattr("inFIFODepths")[idx_inp] - if n0_depth == n1_depth: - fifo_depth = n0_depth - elif n0_depth != n1_depth: - fifo_depth = max(n0_depth, n1_depth) + fifo_depth = max(n0_depth, n1_depth) if fifo_depth > 2 or self.create_shallow_fifos: # assumption: HLS streaming components already have @@ -159,9 +148,15 @@ def apply(self, model): ) graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) - impl_style = ( - "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" - ) + + if ( + self.max_qsrl_depth is None + or fifo_depth <= self.max_qsrl_depth + ): + impl_style = "rtl" + else: + impl_style = "vivado" + fifo_node = oh.make_node( 
"StreamingFIFO", [output_name], @@ -219,7 +214,11 @@ def apply(self, model): ) graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) - impl_style = "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" + + if self.max_qsrl_depth is None or fifo_depth <= self.max_qsrl_depth: + impl_style = "rtl" + else: + impl_style = "vivado" fifo_node = oh.make_node( "StreamingFIFO", @@ -270,7 +269,12 @@ def apply(self, model): ) graph.value_info.append(fifo_input_tensor) model.set_tensor_datatype(fifo_input_tensor.name, dtype) - impl_style = "vivado" if fifo_depth > self.max_qsrl_depth else "rtl" + + if self.max_qsrl_depth is None or fifo_depth <= self.max_qsrl_depth: + impl_style = "rtl" + else: + impl_style = "vivado" + fifo_node = oh.make_node( "StreamingFIFO", [fifo_input_tensor.name], From 01a62e7f4999d450f64fa4b0012081419e793373 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Oct 2022 12:35:27 +0100 Subject: [PATCH 212/628] [EltWise] Add node attribute for inFIFODepths --- src/finn/custom_op/fpgadataflow/eltwise.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index a7b9c814e2..1a3dd54ccd 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -55,6 +55,7 @@ def get_nodeattr_types(self): # [4] is four vectors (like a FC layer with batch=4) # [1, 4, 4] is four * four vectors (like a conv layer with batch=1) "numInputVectors": ("ints", False, [1]), + "inFIFODepths": ("ints", False, [2, 2]), } my_attrs.update(super().get_nodeattr_types()) return my_attrs From a799fe8506d0484585ca8e79e3cd09bd22bbf10d Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Oct 2022 13:01:35 +0100 Subject: [PATCH 213/628] [EltWise] Change order for node attribute update --- src/finn/custom_op/fpgadataflow/eltwise.py | 35 ++++++++++++---------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index 1a3dd54ccd..d6284750c7 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -42,22 +42,25 @@ def __init__(self, onnx_node): super().__init__(onnx_node) def get_nodeattr_types(self): - my_attrs = { - "NumChannels": ("i", True, ""), - "PE": ("i", True, ""), - # FINN DataTypes for inputs; output datatype inferred from input - "inputDataType0": ("s", True, ""), - "inputDataType1": ("s", True, ""), - # type of EltwiseFunction for the operation - "eltwiseOp": ("s", True, "", ["Add", "Sub", "AbsDiff"]), - # number of input vectors, examples: - # [1] is a single vector (like a FC layer with batch=1) - # [4] is four vectors (like a FC layer with batch=4) - # [1, 4, 4] is four * four vectors (like a conv layer with batch=1) - "numInputVectors": ("ints", False, [1]), - "inFIFODepths": ("ints", False, [2, 2]), - } - my_attrs.update(super().get_nodeattr_types()) + + my_attrs = super().get_nodeattr_types() + my_attrs.update( + { + "NumChannels": ("i", True, ""), + "PE": ("i", True, ""), + # FINN DataTypes for inputs; output datatype inferred from input + "inputDataType0": ("s", True, ""), + "inputDataType1": ("s", True, ""), + # type of EltwiseFunction for the operation + "eltwiseOp": ("s", True, "", ["Add", "Sub", "AbsDiff"]), + # number of input vectors, examples: + # [1] is a single vector (like a FC layer with batch=1) + # [4] is four vectors (like a FC layer with batch=4) + # [1, 4, 4] is 
four * four vectors (like a conv layer with batch=1) + "numInputVectors": ("ints", False, [1]), + "inFIFODepths": ("ints", False, [2, 2]), + } + ) return my_attrs def get_eltwise_op_lambda(self): From c52ab50dc16f567383375cc5cd026d464862cc0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Mon, 10 Oct 2022 12:41:37 +0100 Subject: [PATCH 214/628] First prototype for dynamically sized FM padding. --- finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 203 ++++++++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv | 125 +++++++++++ 2 files changed, 328 insertions(+) create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding_axi.sv create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv new file mode 100644 index 0000000000..4af2598e37 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -0,0 +1,203 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief Feature map padding. + * @author Thomas B. 
Preußer + *****************************************************************************/ + +module fmpadding_axi #( + int unsigned XCOUNTER_BITS, + int unsigned YCOUNTER_BITS, + int unsigned NUM_CHANNELS, + int unsigned SIMD, + int unsigned ELEM_BITS, + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8) +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + +/* + //- AXI Lite ------------------------ + // Writing + input s_axilite_AWVALID, + output s_axilite_AWREADY, + input [2:0] s_axilite_AWADDR, + + input s_axilite_WVALID, + output s_axilite_WREADY, + input [31:0] s_axilite_WDATA, + input [ 3:0] s_axilite_WSTRB, + + output s_axilite_BVALID, + input s_axilite_BREADY, + output [1:0] s_axilite_BRESP, + + // Reading + input s_axilite_ARVALID, + output s_axilite_ARREADY, + input [3:0] s_axilite_ARADDR, + + output s_axilite_RVALID, + input s_axilite_RREADY, + output [31:0] s_axilite_RDATA, + output [ 1:0] s_axilite_RRESP, +*/ + + input logic we, + input logic [ 2:0] wa, + input logic [31:0] wd, + + //- AXI Stream - Input -------------- + output logic s_axis_tready, + input logic s_axis_tvalid, + input logic [STREAM_BITS-1:0] s_axis_tdata, + + //- AXI Stream - Output ------------- + input logic m_axis_tready, + output logic m_axis_tvalid, + output logic [STREAM_BITS-1:0] m_axis_tdata +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + //----------------------------------------------------------------------- + // Dynamically configurable state + typedef logic [XCOUNTER_BITS-1:0] xcount_t; + xcount_t XEnd = 0; + xcount_t XOn = 0; + xcount_t XOff = 0; + + typedef logic [YCOUNTER_BITS-1:0] ycount_t; + ycount_t YEnd = 0; + ycount_t YOn = 0; + ycount_t YOff = 0; + always_ff @(posedge clk) begin + if(we) begin + unique case(wa) + 0: XOn <= wd; + 1: XOff <= wd; + 2: XEnd <= wd; + + 4: YOn <= wd; + 5: YOff <= wd; + 6: YEnd <= wd; + + default: assert(0) else begin + $error("Illegal write address."); + $stop; + end + endcase + end + end + + //----------------------------------------------------------------------- + // Cascaded enables for the nested counters: SCount, XCount, YCount + uwire sen; + uwire xen; + uwire yen; + + //- S-Counter: SIMD fold ------------ + initial begin + if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin + $error("Channel count must be SIMD multiple."); + $finish; + end + end + // Count SF-2, SF-3, ..., 1, 0, -1 + localparam int unsigned SF = NUM_CHANNELS/SIMD; + typedef logic [$clog2(SF-1):0] scount_t; + scount_t SCount = SF-2; + + assign xen = sen && SCount[$left(SCount)]; + uwire sclr = rst || xen; + always_ff @(posedge clk) begin + if(sclr) SCount <= SF-2; + else if(sen) SCount <= SCount - 1; + end + + //- X-Counter: image width ---------- + xcount_t XCount = 0; + + assign yen = xen && (XCount == XEnd); + uwire xclr = rst || yen; + always_ff @(posedge clk) begin + if(xclr) XCount <= 0; + else if(xen) XCount <= XCount + 1; + end + uwire xfwd = (XOn <= XCount) && (XCount < XOff); + + //- Y-Counter: image height --------- + ycount_t YCount = 0; + + uwire yclr = rst || (yen && (YCount == YEnd)); + always_ff @(posedge clk) begin + if(yclr) YCount <= 0; + else if(yen) YCount <= YCount + 1; + end + uwire yfwd = (YOn <= YCount) && (YCount < YOff); + + //----------------------------------------------------------------------- + // Input forwarding and edge padding + typedef struct { + logic vld; + logic [STREAM_BITS-1:0] dat; + } buf_t; + buf_t A = '{ vld: 0, dat: 'x }; + buf_t B = '{ vld: 0, dat: 'x }; + + uwire fwd = 
xfwd && yfwd; + assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); + assign s_axis_tready = !A.vld; + assign m_axis_tvalid = B.vld; + assign m_axis_tdata = B.dat; + + always_ff @(posedge clk) begin + if(rst) begin + B <= '{ vld: 0, dat: 'x }; + end + else if(m_axis_tready || !B.vld) begin + B.vld <= s_axis_tvalid || A.vld || !fwd; + B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; + end + end + + always_ff @(posedge clk) begin + if(rst) begin + A <= '{ vld: 0, dat: 'x }; + end + else begin + A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); + if(!A.vld) A.dat <= s_axis_tdata; + end + end + +endmodule : fmpadding_axi diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv new file mode 100644 index 0000000000..ae2377f3dd --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv @@ -0,0 +1,125 @@ + +module fmpadding_axi_tb #( + int unsigned XCOUNTER_BITS = 8, + int unsigned YCOUNTER_BITS = 8, + int unsigned NUM_CHANNELS = 4, + int unsigned SIMD = 2, + int unsigned ELEM_BITS = 4 +)(); + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8); + + //- Global Control ------------------ + logic clk = 0; + always #5ns clk = !clk; + logic rst; + + // Parameter Configuration ---------- + logic we; + logic [ 2:0] wa; + logic [31:0] wd; + + //- AXI Stream - Input -------------- + uwire s_axis_tready; + logic s_axis_tvalid; + logic [STREAM_BITS-1:0] s_axis_tdata; + + //- AXI Stream - Output ------------- + logic m_axis_tready; + uwire m_axis_tvalid; + uwire [STREAM_BITS-1:0] m_axis_tdata; + + + // DUT + fmpadding_axi #( + .XCOUNTER_BITS(XCOUNTER_BITS), + .YCOUNTER_BITS(YCOUNTER_BITS), + .NUM_CHANNELS(NUM_CHANNELS), + .SIMD(SIMD), + .ELEM_BITS(ELEM_BITS) + ) dut ( + .ap_clk(clk), .ap_rst_n(!rst), + .we, .wa, .wd, + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, + .m_axis_tready, .m_axis_tvalid, .m_axis_tdata + ); + + // Stimuli + localparam int unsigned IMAGES = 2; + localparam int unsigned XSIZE = 10; + localparam int unsigned YSIZE = 7; + localparam int unsigned PAD_LEFT = 2; + localparam int unsigned PAD_RIGHT = 3; + localparam int unsigned PAD_TOP = 1; + localparam int unsigned PAD_BOTTOM = 2; + initial begin + we = 0; + wa = 'x; + wd = 'x; + + s_axis_tvalid = 0; + s_axis_tdata = 'x; + + // Configure Parameters + rst = 1; + @(posedge clk); + we <= 1; + /* XOn */ wa <= 0; wd <= PAD_LEFT; @(posedge clk); + /* XOff */ wa <= 1; wd <= XSIZE - PAD_RIGHT; @(posedge clk); + /* XEnd */ wa <= 2; wd <= XSIZE - 1; @(posedge clk); + /* YOn */ wa <= 4; wd <= PAD_TOP; @(posedge clk); + /* YOff */ wa <= 5; wd <= YSIZE - PAD_BOTTOM; @(posedge clk); + /* YEnd */ wa <= 6; wd <= YSIZE - 1; @(posedge clk); + we <= 0; + wa <= 'x; + wd <= 'x; + @(posedge clk); + rst <= 0; + + // Feed data input + s_axis_tvalid <= 1; + for(int unsigned i = 0; i < IMAGES * (XSIZE-PAD_LEFT-PAD_RIGHT) * (YSIZE-PAD_TOP-PAD_BOTTOM) * (NUM_CHANNELS/SIMD); i++) begin + s_axis_tdata <= i; + @(posedge clk iff s_axis_tready); + if($urandom()%5 == 0) begin + s_axis_tvalid <= 0; + s_axis_tdata <= 'x; + @(posedge clk); + s_axis_tvalid <= 1; + end + end + s_axis_tvalid <= 0; + s_axis_tdata <= 'x; + end + + // Ouput Throttler + initial begin + m_axis_tready = 0; + @(posedge clk iff !rst); + m_axis_tready <= 1; + forever @(posedge clk iff m_axis_tvalid) begin + m_axis_tready <= 0; + repeat(4-$clog2(1+$urandom()%15)) @(posedge clk); + m_axis_tready <= 1; + end + end + + // Output logger + initial begin + repeat(IMAGES) begin + for(int 
unsigned y = 0; y < YSIZE; y++) begin + for(int unsigned x = 0; x < XSIZE; x++) begin + automatic string delim = " "; + for(int unsigned s = 0; s < NUM_CHANNELS/SIMD; s++) begin + @(posedge clk iff m_axis_tvalid && m_axis_tready); + $write("%s%02X", delim, m_axis_tdata); + delim = ":"; + end + end + $display(); + end + $display("----"); + end + $finish; + end + +endmodule : fmpadding_axi_tb From 17c1df91e8861d7e812faa4b0cdad87c97505b7c Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 10 Oct 2022 17:31:43 +0100 Subject: [PATCH 215/628] [Test] Removing two configurations from fmpadding test --- tests/fpgadataflow/test_fpgadataflow_fmpadding.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 1218bac503..34928ce45b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -97,13 +97,13 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt): # input image dimension -@pytest.mark.parametrize("idim", [[2, 2], [8, 8], [10, 8]]) +@pytest.mark.parametrize("idim", [[8, 8], [10, 8]]) # number of rows and number of cols to add @pytest.mark.parametrize( "pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3], [7, 0, 8, 0]] ) # number of channels -@pytest.mark.parametrize("num_ch", [1, 2, 4]) +@pytest.mark.parametrize("num_ch", [2, 4]) # Input parallelism @pytest.mark.parametrize("simd", [1, 2]) # FINN input datatype From 03ccfb5ec314813701702b444927f308ee929783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 11 Oct 2022 15:17:34 +0100 Subject: [PATCH 216/628] Added AXI-Light adapter to dynamically-sized feature map padding. --- finn-rtllib/fmpadding/hdl/axi2we.sv | 122 ++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding.sv | 177 ++++++++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 149 +++------------ finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv | 67 +++++-- 4 files changed, 376 insertions(+), 139 deletions(-) create mode 100644 finn-rtllib/fmpadding/hdl/axi2we.sv create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding.sv diff --git a/finn-rtllib/fmpadding/hdl/axi2we.sv b/finn-rtllib/fmpadding/hdl/axi2we.sv new file mode 100644 index 0000000000..0740eac5f8 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/axi2we.sv @@ -0,0 +1,122 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief AXI-Light adapter for trivial write enable interface. + * @author Thomas B. Preußer + *****************************************************************************/ + +module axi2we #( + int unsigned ADDR_BITS +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + + //- AXI Lite ------------------------ + // Writing + input s_axilite_AWVALID, + output s_axilite_AWREADY, + input [ADDR_BITS-1:0] s_axilite_AWADDR, + + input s_axilite_WVALID, + output s_axilite_WREADY, + input [31:0] s_axilite_WDATA, + input [ 3:0] s_axilite_WSTRB, + + output s_axilite_BVALID, + input s_axilite_BREADY, + output [1:0] s_axilite_BRESP, + + // Reading tied to all-ones + input s_axilite_ARVALID, + output s_axilite_ARREADY, + input [3:0] s_axilite_ARADDR, + + output s_axilite_RVALID, + input s_axilite_RREADY, + output [31:0] s_axilite_RDATA, + output [ 1:0] s_axilite_RRESP, + + // Write Enable Interface + output logic we, + output logic [ADDR_BITS-1:0] wa, + output logic [ 31:0] wd +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + + logic WABusy = 0; + logic WDBusy = 0; + logic [ADDR_BITS-1:0] Addr = 'x; + logic [ 31:0] Data = 'x; + + assign we = WABusy && WDBusy && s_axilite_BREADY; + assign wa = Addr; + assign wd = Data; + + uwire clr_wr = rst || we; + always_ff @(posedge clk) begin + if(clr_wr) begin + WABusy <= 0; + Addr <= 'x; + WDBusy <= 0; + Data <= 'x; + end + else begin + if(!WABusy) begin + WABusy <= s_axilite_AWVALID; + Addr <= s_axilite_AWADDR; + end + if(!WDBusy) begin + WDBusy <= s_axilite_WVALID; + Data <= s_axilite_WDATA; + end + end + end + assign s_axilite_AWREADY = !WABusy; + assign s_axilite_WREADY = !WDBusy; + assign s_axilite_BVALID = WABusy && WDBusy; + assign s_axilite_BRESP = '0; // OK + + // Answer all reads with '1 + logic RValid = 0; + uwire clr_rd = rst || (RValid && s_axilite_RREADY); + always_ff @(posedge clk) begin + if(clr_rd) RValid <= 0; + else if(!RValid) RValid <= s_axilite_ARVALID; + end + assign s_axilite_ARREADY = !RValid; + assign s_axilite_RVALID = RValid; + assign s_axilite_RDATA = '1; + assign s_axilite_RRESP = '0; // OK + +endmodule : axi2we diff --git a/finn-rtllib/fmpadding/hdl/fmpadding.sv b/finn-rtllib/fmpadding/hdl/fmpadding.sv new file mode 100644 index 0000000000..7e408f6241 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding.sv @@ -0,0 +1,177 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief Feature map padding. + * @author Thomas B. Preußer + *****************************************************************************/ + +module fmpadding #( + int unsigned XCOUNTER_BITS, + int unsigned YCOUNTER_BITS, + int unsigned NUM_CHANNELS, + int unsigned SIMD, + int unsigned ELEM_BITS, + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8) +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + + // Parameter Configuration ---------- + input logic we, + input logic [ 2:0] wa, + input logic [31:0] wd, + + //- AXI Stream - Input -------------- + output logic s_axis_tready, + input logic s_axis_tvalid, + input logic [STREAM_BITS-1:0] s_axis_tdata, + + //- AXI Stream - Output ------------- + input logic m_axis_tready, + output logic m_axis_tvalid, + output logic [STREAM_BITS-1:0] m_axis_tdata +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + //----------------------------------------------------------------------- + // Dynamically configurable state + typedef logic [XCOUNTER_BITS-1:0] xcount_t; + xcount_t XEnd = 0; + xcount_t XOn = 0; + xcount_t XOff = 0; + + typedef logic [YCOUNTER_BITS-1:0] ycount_t; + ycount_t YEnd = 0; + ycount_t YOn = 0; + ycount_t YOff = 0; + always_ff @(posedge clk) begin + if(we) begin + unique case(wa) + 0: XOn <= wd; + 1: XOff <= wd; + 2: XEnd <= wd; + + 4: YOn <= wd; + 5: YOff <= wd; + 6: YEnd <= wd; + + default: assert(0) else begin + $error("Illegal write address."); + $stop; + end + endcase + end + end + + //----------------------------------------------------------------------- + // Cascaded enables for the nested counters: SCount, XCount, YCount + uwire sen; + uwire xen; + uwire yen; + + //- S-Counter: SIMD fold ------------ + initial begin + if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin + $error("Channel count must be SIMD multiple."); + $finish; + end + end + // Count SF-2, SF-3, ..., 1, 0, -1 + localparam int unsigned SF = NUM_CHANNELS/SIMD; + typedef logic [$clog2(SF-1):0] scount_t; + scount_t SCount = SF-2; + + assign xen = sen && SCount[$left(SCount)]; + uwire sclr = rst || xen; + always_ff @(posedge clk) begin + if(sclr) SCount <= SF-2; + else if(sen) SCount <= SCount - 1; + end + + //- X-Counter: image width ---------- + xcount_t XCount = 0; + + assign yen = xen && 
(XCount == XEnd); + uwire xclr = rst || yen; + always_ff @(posedge clk) begin + if(xclr) XCount <= 0; + else if(xen) XCount <= XCount + 1; + end + uwire xfwd = (XOn <= XCount) && (XCount < XOff); + + //- Y-Counter: image height --------- + ycount_t YCount = 0; + + uwire yclr = rst || (yen && (YCount == YEnd)); + always_ff @(posedge clk) begin + if(yclr) YCount <= 0; + else if(yen) YCount <= YCount + 1; + end + uwire yfwd = (YOn <= YCount) && (YCount < YOff); + + //----------------------------------------------------------------------- + // Input forwarding and edge padding + typedef struct { + logic vld; + logic [STREAM_BITS-1:0] dat; + } buf_t; + buf_t A = '{ vld: 0, dat: 'x }; + buf_t B = '{ vld: 0, dat: 'x }; + + uwire fwd = xfwd && yfwd; + assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); + assign s_axis_tready = !A.vld; + assign m_axis_tvalid = B.vld; + assign m_axis_tdata = B.dat; + + always_ff @(posedge clk) begin + if(rst) begin + B <= '{ vld: 0, dat: 'x }; + end + else if(m_axis_tready || !B.vld) begin + B.vld <= s_axis_tvalid || A.vld || !fwd; + B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; + end + end + + always_ff @(posedge clk) begin + if(rst) begin + A <= '{ vld: 0, dat: 'x }; + end + else begin + A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); + if(!A.vld) A.dat <= s_axis_tdata; + end + end + +endmodule : fmpadding diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv index 4af2598e37..f5313801e5 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -44,7 +44,6 @@ module fmpadding_axi #( input logic ap_clk, input logic ap_rst_n, -/* //- AXI Lite ------------------------ // Writing input s_axilite_AWVALID, @@ -69,11 +68,6 @@ module fmpadding_axi #( input s_axilite_RREADY, output [31:0] s_axilite_RDATA, output [ 1:0] s_axilite_RRESP, -*/ - - input logic we, - input logic [ 2:0] wa, - input logic [31:0] wd, //- AXI Stream - Input -------------- output logic s_axis_tready, @@ -86,118 +80,35 @@ module fmpadding_axi #( output logic [STREAM_BITS-1:0] m_axis_tdata ); - uwire clk = ap_clk; - uwire rst = !ap_rst_n; - - //----------------------------------------------------------------------- - // Dynamically configurable state - typedef logic [XCOUNTER_BITS-1:0] xcount_t; - xcount_t XEnd = 0; - xcount_t XOn = 0; - xcount_t XOff = 0; - - typedef logic [YCOUNTER_BITS-1:0] ycount_t; - ycount_t YEnd = 0; - ycount_t YOn = 0; - ycount_t YOff = 0; - always_ff @(posedge clk) begin - if(we) begin - unique case(wa) - 0: XOn <= wd; - 1: XOff <= wd; - 2: XEnd <= wd; - - 4: YOn <= wd; - 5: YOff <= wd; - 6: YEnd <= wd; - - default: assert(0) else begin - $error("Illegal write address."); - $stop; - end - endcase - end - end - - //----------------------------------------------------------------------- - // Cascaded enables for the nested counters: SCount, XCount, YCount - uwire sen; - uwire xen; - uwire yen; - - //- S-Counter: SIMD fold ------------ - initial begin - if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin - $error("Channel count must be SIMD multiple."); - $finish; - end - end - // Count SF-2, SF-3, ..., 1, 0, -1 - localparam int unsigned SF = NUM_CHANNELS/SIMD; - typedef logic [$clog2(SF-1):0] scount_t; - scount_t SCount = SF-2; - - assign xen = sen && SCount[$left(SCount)]; - uwire sclr = rst || xen; - always_ff @(posedge clk) begin - if(sclr) SCount <= SF-2; - else if(sen) SCount <= SCount - 1; - end - - //- X-Counter: 
image width ---------- - xcount_t XCount = 0; - - assign yen = xen && (XCount == XEnd); - uwire xclr = rst || yen; - always_ff @(posedge clk) begin - if(xclr) XCount <= 0; - else if(xen) XCount <= XCount + 1; - end - uwire xfwd = (XOn <= XCount) && (XCount < XOff); - - //- Y-Counter: image height --------- - ycount_t YCount = 0; - - uwire yclr = rst || (yen && (YCount == YEnd)); - always_ff @(posedge clk) begin - if(yclr) YCount <= 0; - else if(yen) YCount <= YCount + 1; - end - uwire yfwd = (YOn <= YCount) && (YCount < YOff); - - //----------------------------------------------------------------------- - // Input forwarding and edge padding - typedef struct { - logic vld; - logic [STREAM_BITS-1:0] dat; - } buf_t; - buf_t A = '{ vld: 0, dat: 'x }; - buf_t B = '{ vld: 0, dat: 'x }; - - uwire fwd = xfwd && yfwd; - assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); - assign s_axis_tready = !A.vld; - assign m_axis_tvalid = B.vld; - assign m_axis_tdata = B.dat; - - always_ff @(posedge clk) begin - if(rst) begin - B <= '{ vld: 0, dat: 'x }; - end - else if(m_axis_tready || !B.vld) begin - B.vld <= s_axis_tvalid || A.vld || !fwd; - B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; - end - end - - always_ff @(posedge clk) begin - if(rst) begin - A <= '{ vld: 0, dat: 'x }; - end - else begin - A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); - if(!A.vld) A.dat <= s_axis_tdata; - end - end + // AXI-Lite Adapter + uwire we; + uwire [ 2:0] wa; + uwire [31:0] wd; + axi2we #(.ADDR_BITS(3)) axilight_adapter ( + .ap_clk, .ap_rst_n, + + .s_axilite_AWVALID, .s_axilite_AWREADY, .s_axilite_AWADDR, + .s_axilite_WVALID, .s_axilite_WREADY, .s_axilite_WDATA, .s_axilite_WSTRB, + .s_axilite_BVALID, .s_axilite_BREADY, .s_axilite_BRESP, + + .s_axilite_ARVALID, .s_axilite_ARREADY, .s_axilite_ARADDR, + .s_axilite_RVALID, .s_axilite_RREADY, .s_axilite_RDATA, .s_axilite_RRESP, + + .we, .wa, .wd + ); + + // Actual Padding + fmpadding #( + .XCOUNTER_BITS(XCOUNTER_BITS), .YCOUNTER_BITS(YCOUNTER_BITS), + .NUM_CHANNELS(NUM_CHANNELS), .SIMD(SIMD), + .ELEM_BITS(ELEM_BITS) + ) padding ( + .ap_clk, .ap_rst_n, + + .we, .wa, .wd, + + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, + .m_axis_tready, .m_axis_tvalid, .m_axis_tdata + ); endmodule : fmpadding_axi diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv index ae2377f3dd..fedf5dcb69 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv @@ -13,10 +13,14 @@ module fmpadding_axi_tb #( always #5ns clk = !clk; logic rst; - // Parameter Configuration ---------- - logic we; - logic [ 2:0] wa; - logic [31:0] wd; + // AXI-Light for Parameter Configuration + logic s_axilite_AWVALID; + uwire s_axilite_AWREADY; + logic [2:0] s_axilite_AWADDR; + + logic s_axilite_WVALID; + uwire s_axilite_WREADY; + logic [31:0] s_axilite_WDATA; //- AXI Stream - Input -------------- uwire s_axis_tready; @@ -38,7 +42,13 @@ module fmpadding_axi_tb #( .ELEM_BITS(ELEM_BITS) ) dut ( .ap_clk(clk), .ap_rst_n(!rst), - .we, .wa, .wd, + + .s_axilite_AWVALID, .s_axilite_AWREADY, .s_axilite_AWADDR, + .s_axilite_WVALID, .s_axilite_WREADY, .s_axilite_WDATA, .s_axilite_WSTRB('1), + .s_axilite_BVALID(), .s_axilite_BREADY('1), .s_axilite_BRESP(), + .s_axilite_ARVALID('0), .s_axilite_ARREADY(), .s_axilite_ARADDR('x), + .s_axilite_RVALID(), .s_axilite_RREADY('0), .s_axilite_RDATA(), .s_axilite_RRESP(), + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, .m_axis_tready, 
.m_axis_tvalid, .m_axis_tdata ); @@ -51,29 +61,45 @@ module fmpadding_axi_tb #( localparam int unsigned PAD_RIGHT = 3; localparam int unsigned PAD_TOP = 1; localparam int unsigned PAD_BOTTOM = 2; + + task axi_write(input logic [2:0] wa, input logic [31:0] wd); + s_axilite_AWVALID <= 1; + s_axilite_AWADDR <= wa; + @(posedge clk iff s_axilite_AWREADY); + s_axilite_AWVALID <= 0; + s_axilite_AWADDR <= 'x; + + s_axilite_WVALID <= 1; + s_axilite_WDATA <= wd; + @(posedge clk iff s_axilite_WREADY); + s_axilite_WVALID <= 0; + s_axilite_WDATA <= 'x; + endtask : axi_write + + initial begin - we = 0; - wa = 'x; - wd = 'x; + s_axilite_AWVALID = 0; + s_axilite_AWADDR = 'x; + s_axilite_WVALID = 0; + s_axilite_WDATA = 'x; s_axis_tvalid = 0; s_axis_tdata = 'x; // Configure Parameters - rst = 1; + rst = 0; @(posedge clk); - we <= 1; - /* XOn */ wa <= 0; wd <= PAD_LEFT; @(posedge clk); - /* XOff */ wa <= 1; wd <= XSIZE - PAD_RIGHT; @(posedge clk); - /* XEnd */ wa <= 2; wd <= XSIZE - 1; @(posedge clk); - /* YOn */ wa <= 4; wd <= PAD_TOP; @(posedge clk); - /* YOff */ wa <= 5; wd <= YSIZE - PAD_BOTTOM; @(posedge clk); - /* YEnd */ wa <= 6; wd <= YSIZE - 1; @(posedge clk); - we <= 0; - wa <= 'x; - wd <= 'x; + /* XOn */ axi_write(0, PAD_LEFT); + /* XOff */ axi_write(1, XSIZE - PAD_RIGHT); + /* XEnd */ axi_write(2, XSIZE - 1); + /* YOn */ axi_write(4, PAD_TOP); + /* YOff */ axi_write(5, YSIZE - PAD_BOTTOM); + /* YEnd */ axi_write(6, YSIZE - 1); + @(posedge clk); + rst <= 1; @(posedge clk); rst <= 0; + @(posedge clk); // Feed data input s_axis_tvalid <= 1; @@ -91,7 +117,7 @@ module fmpadding_axi_tb #( s_axis_tdata <= 'x; end - // Ouput Throttler + // Output Throttler initial begin m_axis_tready = 0; @(posedge clk iff !rst); @@ -105,6 +131,7 @@ module fmpadding_axi_tb #( // Output logger initial begin + @(negedge rst); repeat(IMAGES) begin for(int unsigned y = 0; y < YSIZE; y++) begin for(int unsigned x = 0; x < XSIZE; x++) begin From cba8536a5f50862848901d26acb869405c2999cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Mon, 10 Oct 2022 12:41:37 +0100 Subject: [PATCH 217/628] First prototype for dynamically sized FM padding. --- finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 203 ++++++++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv | 125 +++++++++++ 2 files changed, 328 insertions(+) create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding_axi.sv create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv new file mode 100644 index 0000000000..4af2598e37 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -0,0 +1,203 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief Feature map padding. + * @author Thomas B. Preußer + *****************************************************************************/ + +module fmpadding_axi #( + int unsigned XCOUNTER_BITS, + int unsigned YCOUNTER_BITS, + int unsigned NUM_CHANNELS, + int unsigned SIMD, + int unsigned ELEM_BITS, + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8) +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + +/* + //- AXI Lite ------------------------ + // Writing + input s_axilite_AWVALID, + output s_axilite_AWREADY, + input [2:0] s_axilite_AWADDR, + + input s_axilite_WVALID, + output s_axilite_WREADY, + input [31:0] s_axilite_WDATA, + input [ 3:0] s_axilite_WSTRB, + + output s_axilite_BVALID, + input s_axilite_BREADY, + output [1:0] s_axilite_BRESP, + + // Reading + input s_axilite_ARVALID, + output s_axilite_ARREADY, + input [3:0] s_axilite_ARADDR, + + output s_axilite_RVALID, + input s_axilite_RREADY, + output [31:0] s_axilite_RDATA, + output [ 1:0] s_axilite_RRESP, +*/ + + input logic we, + input logic [ 2:0] wa, + input logic [31:0] wd, + + //- AXI Stream - Input -------------- + output logic s_axis_tready, + input logic s_axis_tvalid, + input logic [STREAM_BITS-1:0] s_axis_tdata, + + //- AXI Stream - Output ------------- + input logic m_axis_tready, + output logic m_axis_tvalid, + output logic [STREAM_BITS-1:0] m_axis_tdata +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + //----------------------------------------------------------------------- + // Dynamically configurable state + typedef logic [XCOUNTER_BITS-1:0] xcount_t; + xcount_t XEnd = 0; + xcount_t XOn = 0; + xcount_t XOff = 0; + + typedef logic [YCOUNTER_BITS-1:0] ycount_t; + ycount_t YEnd = 0; + ycount_t YOn = 0; + ycount_t YOff = 0; + always_ff @(posedge clk) begin + if(we) begin + unique case(wa) + 0: XOn <= wd; + 1: XOff <= wd; + 2: XEnd <= wd; + + 4: YOn <= wd; + 5: YOff <= wd; + 6: YEnd <= wd; + + default: assert(0) else begin + $error("Illegal write address."); + $stop; + end + endcase + end + end + + //----------------------------------------------------------------------- + // Cascaded enables for the nested counters: SCount, XCount, YCount + uwire sen; + uwire xen; + uwire yen; + + //- S-Counter: SIMD fold ------------ + initial begin + if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin + $error("Channel count must be SIMD multiple."); + $finish; + end + end + // Count SF-2, SF-3, ..., 1, 0, -1 + localparam int unsigned SF = NUM_CHANNELS/SIMD; + typedef logic 
[$clog2(SF-1):0] scount_t; + scount_t SCount = SF-2; + + assign xen = sen && SCount[$left(SCount)]; + uwire sclr = rst || xen; + always_ff @(posedge clk) begin + if(sclr) SCount <= SF-2; + else if(sen) SCount <= SCount - 1; + end + + //- X-Counter: image width ---------- + xcount_t XCount = 0; + + assign yen = xen && (XCount == XEnd); + uwire xclr = rst || yen; + always_ff @(posedge clk) begin + if(xclr) XCount <= 0; + else if(xen) XCount <= XCount + 1; + end + uwire xfwd = (XOn <= XCount) && (XCount < XOff); + + //- Y-Counter: image height --------- + ycount_t YCount = 0; + + uwire yclr = rst || (yen && (YCount == YEnd)); + always_ff @(posedge clk) begin + if(yclr) YCount <= 0; + else if(yen) YCount <= YCount + 1; + end + uwire yfwd = (YOn <= YCount) && (YCount < YOff); + + //----------------------------------------------------------------------- + // Input forwarding and edge padding + typedef struct { + logic vld; + logic [STREAM_BITS-1:0] dat; + } buf_t; + buf_t A = '{ vld: 0, dat: 'x }; + buf_t B = '{ vld: 0, dat: 'x }; + + uwire fwd = xfwd && yfwd; + assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); + assign s_axis_tready = !A.vld; + assign m_axis_tvalid = B.vld; + assign m_axis_tdata = B.dat; + + always_ff @(posedge clk) begin + if(rst) begin + B <= '{ vld: 0, dat: 'x }; + end + else if(m_axis_tready || !B.vld) begin + B.vld <= s_axis_tvalid || A.vld || !fwd; + B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; + end + end + + always_ff @(posedge clk) begin + if(rst) begin + A <= '{ vld: 0, dat: 'x }; + end + else begin + A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); + if(!A.vld) A.dat <= s_axis_tdata; + end + end + +endmodule : fmpadding_axi diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv new file mode 100644 index 0000000000..ae2377f3dd --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv @@ -0,0 +1,125 @@ + +module fmpadding_axi_tb #( + int unsigned XCOUNTER_BITS = 8, + int unsigned YCOUNTER_BITS = 8, + int unsigned NUM_CHANNELS = 4, + int unsigned SIMD = 2, + int unsigned ELEM_BITS = 4 +)(); + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8); + + //- Global Control ------------------ + logic clk = 0; + always #5ns clk = !clk; + logic rst; + + // Parameter Configuration ---------- + logic we; + logic [ 2:0] wa; + logic [31:0] wd; + + //- AXI Stream - Input -------------- + uwire s_axis_tready; + logic s_axis_tvalid; + logic [STREAM_BITS-1:0] s_axis_tdata; + + //- AXI Stream - Output ------------- + logic m_axis_tready; + uwire m_axis_tvalid; + uwire [STREAM_BITS-1:0] m_axis_tdata; + + + // DUT + fmpadding_axi #( + .XCOUNTER_BITS(XCOUNTER_BITS), + .YCOUNTER_BITS(YCOUNTER_BITS), + .NUM_CHANNELS(NUM_CHANNELS), + .SIMD(SIMD), + .ELEM_BITS(ELEM_BITS) + ) dut ( + .ap_clk(clk), .ap_rst_n(!rst), + .we, .wa, .wd, + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, + .m_axis_tready, .m_axis_tvalid, .m_axis_tdata + ); + + // Stimuli + localparam int unsigned IMAGES = 2; + localparam int unsigned XSIZE = 10; + localparam int unsigned YSIZE = 7; + localparam int unsigned PAD_LEFT = 2; + localparam int unsigned PAD_RIGHT = 3; + localparam int unsigned PAD_TOP = 1; + localparam int unsigned PAD_BOTTOM = 2; + initial begin + we = 0; + wa = 'x; + wd = 'x; + + s_axis_tvalid = 0; + s_axis_tdata = 'x; + + // Configure Parameters + rst = 1; + @(posedge clk); + we <= 1; + /* XOn */ wa <= 0; wd <= PAD_LEFT; @(posedge clk); + /* XOff */ wa <= 1; wd <= XSIZE - 
PAD_RIGHT; @(posedge clk); + /* XEnd */ wa <= 2; wd <= XSIZE - 1; @(posedge clk); + /* YOn */ wa <= 4; wd <= PAD_TOP; @(posedge clk); + /* YOff */ wa <= 5; wd <= YSIZE - PAD_BOTTOM; @(posedge clk); + /* YEnd */ wa <= 6; wd <= YSIZE - 1; @(posedge clk); + we <= 0; + wa <= 'x; + wd <= 'x; + @(posedge clk); + rst <= 0; + + // Feed data input + s_axis_tvalid <= 1; + for(int unsigned i = 0; i < IMAGES * (XSIZE-PAD_LEFT-PAD_RIGHT) * (YSIZE-PAD_TOP-PAD_BOTTOM) * (NUM_CHANNELS/SIMD); i++) begin + s_axis_tdata <= i; + @(posedge clk iff s_axis_tready); + if($urandom()%5 == 0) begin + s_axis_tvalid <= 0; + s_axis_tdata <= 'x; + @(posedge clk); + s_axis_tvalid <= 1; + end + end + s_axis_tvalid <= 0; + s_axis_tdata <= 'x; + end + + // Ouput Throttler + initial begin + m_axis_tready = 0; + @(posedge clk iff !rst); + m_axis_tready <= 1; + forever @(posedge clk iff m_axis_tvalid) begin + m_axis_tready <= 0; + repeat(4-$clog2(1+$urandom()%15)) @(posedge clk); + m_axis_tready <= 1; + end + end + + // Output logger + initial begin + repeat(IMAGES) begin + for(int unsigned y = 0; y < YSIZE; y++) begin + for(int unsigned x = 0; x < XSIZE; x++) begin + automatic string delim = " "; + for(int unsigned s = 0; s < NUM_CHANNELS/SIMD; s++) begin + @(posedge clk iff m_axis_tvalid && m_axis_tready); + $write("%s%02X", delim, m_axis_tdata); + delim = ":"; + end + end + $display(); + end + $display("----"); + end + $finish; + end + +endmodule : fmpadding_axi_tb From ee2748a2a03bbb2090637f9a5714faef1ce03ff8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 11 Oct 2022 15:17:34 +0100 Subject: [PATCH 218/628] Added AXI-Light adapter to dynamically-sized feature map padding. --- finn-rtllib/fmpadding/hdl/axi2we.sv | 122 ++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding.sv | 177 ++++++++++++++++++ finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 149 +++------------ finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv | 67 +++++-- 4 files changed, 376 insertions(+), 139 deletions(-) create mode 100644 finn-rtllib/fmpadding/hdl/axi2we.sv create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding.sv diff --git a/finn-rtllib/fmpadding/hdl/axi2we.sv b/finn-rtllib/fmpadding/hdl/axi2we.sv new file mode 100644 index 0000000000..0740eac5f8 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/axi2we.sv @@ -0,0 +1,122 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief AXI-Light adapter for trivial write enable interface. + * @author Thomas B. Preußer + *****************************************************************************/ + +module axi2we #( + int unsigned ADDR_BITS +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + + //- AXI Lite ------------------------ + // Writing + input s_axilite_AWVALID, + output s_axilite_AWREADY, + input [ADDR_BITS-1:0] s_axilite_AWADDR, + + input s_axilite_WVALID, + output s_axilite_WREADY, + input [31:0] s_axilite_WDATA, + input [ 3:0] s_axilite_WSTRB, + + output s_axilite_BVALID, + input s_axilite_BREADY, + output [1:0] s_axilite_BRESP, + + // Reading tied to all-ones + input s_axilite_ARVALID, + output s_axilite_ARREADY, + input [3:0] s_axilite_ARADDR, + + output s_axilite_RVALID, + input s_axilite_RREADY, + output [31:0] s_axilite_RDATA, + output [ 1:0] s_axilite_RRESP, + + // Write Enable Interface + output logic we, + output logic [ADDR_BITS-1:0] wa, + output logic [ 31:0] wd +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + + logic WABusy = 0; + logic WDBusy = 0; + logic [ADDR_BITS-1:0] Addr = 'x; + logic [ 31:0] Data = 'x; + + assign we = WABusy && WDBusy && s_axilite_BREADY; + assign wa = Addr; + assign wd = Data; + + uwire clr_wr = rst || we; + always_ff @(posedge clk) begin + if(clr_wr) begin + WABusy <= 0; + Addr <= 'x; + WDBusy <= 0; + Data <= 'x; + end + else begin + if(!WABusy) begin + WABusy <= s_axilite_AWVALID; + Addr <= s_axilite_AWADDR; + end + if(!WDBusy) begin + WDBusy <= s_axilite_WVALID; + Data <= s_axilite_WDATA; + end + end + end + assign s_axilite_AWREADY = !WABusy; + assign s_axilite_WREADY = !WDBusy; + assign s_axilite_BVALID = WABusy && WDBusy; + assign s_axilite_BRESP = '0; // OK + + // Answer all reads with '1 + logic RValid = 0; + uwire clr_rd = rst || (RValid && s_axilite_RREADY); + always_ff @(posedge clk) begin + if(clr_rd) RValid <= 0; + else if(!RValid) RValid <= s_axilite_ARVALID; + end + assign s_axilite_ARREADY = !RValid; + assign s_axilite_RVALID = RValid; + assign s_axilite_RDATA = '1; + assign s_axilite_RRESP = '0; // OK + +endmodule : axi2we diff --git a/finn-rtllib/fmpadding/hdl/fmpadding.sv b/finn-rtllib/fmpadding/hdl/fmpadding.sv new file mode 100644 index 0000000000..7e408f6241 --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding.sv @@ -0,0 +1,177 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @brief Feature map padding. + * @author Thomas B. Preußer + *****************************************************************************/ + +module fmpadding #( + int unsigned XCOUNTER_BITS, + int unsigned YCOUNTER_BITS, + int unsigned NUM_CHANNELS, + int unsigned SIMD, + int unsigned ELEM_BITS, + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8) +)( + //- Global Control ------------------ + input logic ap_clk, + input logic ap_rst_n, + + // Parameter Configuration ---------- + input logic we, + input logic [ 2:0] wa, + input logic [31:0] wd, + + //- AXI Stream - Input -------------- + output logic s_axis_tready, + input logic s_axis_tvalid, + input logic [STREAM_BITS-1:0] s_axis_tdata, + + //- AXI Stream - Output ------------- + input logic m_axis_tready, + output logic m_axis_tvalid, + output logic [STREAM_BITS-1:0] m_axis_tdata +); + + uwire clk = ap_clk; + uwire rst = !ap_rst_n; + + //----------------------------------------------------------------------- + // Dynamically configurable state + typedef logic [XCOUNTER_BITS-1:0] xcount_t; + xcount_t XEnd = 0; + xcount_t XOn = 0; + xcount_t XOff = 0; + + typedef logic [YCOUNTER_BITS-1:0] ycount_t; + ycount_t YEnd = 0; + ycount_t YOn = 0; + ycount_t YOff = 0; + always_ff @(posedge clk) begin + if(we) begin + unique case(wa) + 0: XOn <= wd; + 1: XOff <= wd; + 2: XEnd <= wd; + + 4: YOn <= wd; + 5: YOff <= wd; + 6: YEnd <= wd; + + default: assert(0) else begin + $error("Illegal write address."); + $stop; + end + endcase + end + end + + //----------------------------------------------------------------------- + // Cascaded enables for the nested counters: SCount, XCount, YCount + uwire sen; + uwire xen; + uwire yen; + + //- S-Counter: SIMD fold ------------ + initial begin + if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin + $error("Channel count must be SIMD multiple."); + $finish; + end + end + // Count SF-2, SF-3, ..., 1, 0, -1 + localparam int unsigned SF = NUM_CHANNELS/SIMD; + typedef logic [$clog2(SF-1):0] scount_t; + scount_t SCount = SF-2; + + assign xen = sen && SCount[$left(SCount)]; + uwire sclr = rst || xen; + always_ff @(posedge clk) begin + if(sclr) SCount <= SF-2; + else if(sen) SCount <= SCount - 1; + end + + //- X-Counter: image width ---------- + xcount_t XCount = 0; + + assign yen = xen && 
(XCount == XEnd); + uwire xclr = rst || yen; + always_ff @(posedge clk) begin + if(xclr) XCount <= 0; + else if(xen) XCount <= XCount + 1; + end + uwire xfwd = (XOn <= XCount) && (XCount < XOff); + + //- Y-Counter: image height --------- + ycount_t YCount = 0; + + uwire yclr = rst || (yen && (YCount == YEnd)); + always_ff @(posedge clk) begin + if(yclr) YCount <= 0; + else if(yen) YCount <= YCount + 1; + end + uwire yfwd = (YOn <= YCount) && (YCount < YOff); + + //----------------------------------------------------------------------- + // Input forwarding and edge padding + typedef struct { + logic vld; + logic [STREAM_BITS-1:0] dat; + } buf_t; + buf_t A = '{ vld: 0, dat: 'x }; + buf_t B = '{ vld: 0, dat: 'x }; + + uwire fwd = xfwd && yfwd; + assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); + assign s_axis_tready = !A.vld; + assign m_axis_tvalid = B.vld; + assign m_axis_tdata = B.dat; + + always_ff @(posedge clk) begin + if(rst) begin + B <= '{ vld: 0, dat: 'x }; + end + else if(m_axis_tready || !B.vld) begin + B.vld <= s_axis_tvalid || A.vld || !fwd; + B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; + end + end + + always_ff @(posedge clk) begin + if(rst) begin + A <= '{ vld: 0, dat: 'x }; + end + else begin + A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); + if(!A.vld) A.dat <= s_axis_tdata; + end + end + +endmodule : fmpadding diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv index 4af2598e37..f5313801e5 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -44,7 +44,6 @@ module fmpadding_axi #( input logic ap_clk, input logic ap_rst_n, -/* //- AXI Lite ------------------------ // Writing input s_axilite_AWVALID, @@ -69,11 +68,6 @@ module fmpadding_axi #( input s_axilite_RREADY, output [31:0] s_axilite_RDATA, output [ 1:0] s_axilite_RRESP, -*/ - - input logic we, - input logic [ 2:0] wa, - input logic [31:0] wd, //- AXI Stream - Input -------------- output logic s_axis_tready, @@ -86,118 +80,35 @@ module fmpadding_axi #( output logic [STREAM_BITS-1:0] m_axis_tdata ); - uwire clk = ap_clk; - uwire rst = !ap_rst_n; - - //----------------------------------------------------------------------- - // Dynamically configurable state - typedef logic [XCOUNTER_BITS-1:0] xcount_t; - xcount_t XEnd = 0; - xcount_t XOn = 0; - xcount_t XOff = 0; - - typedef logic [YCOUNTER_BITS-1:0] ycount_t; - ycount_t YEnd = 0; - ycount_t YOn = 0; - ycount_t YOff = 0; - always_ff @(posedge clk) begin - if(we) begin - unique case(wa) - 0: XOn <= wd; - 1: XOff <= wd; - 2: XEnd <= wd; - - 4: YOn <= wd; - 5: YOff <= wd; - 6: YEnd <= wd; - - default: assert(0) else begin - $error("Illegal write address."); - $stop; - end - endcase - end - end - - //----------------------------------------------------------------------- - // Cascaded enables for the nested counters: SCount, XCount, YCount - uwire sen; - uwire xen; - uwire yen; - - //- S-Counter: SIMD fold ------------ - initial begin - if((NUM_CHANNELS < 1) || (NUM_CHANNELS % SIMD != 0)) begin - $error("Channel count must be SIMD multiple."); - $finish; - end - end - // Count SF-2, SF-3, ..., 1, 0, -1 - localparam int unsigned SF = NUM_CHANNELS/SIMD; - typedef logic [$clog2(SF-1):0] scount_t; - scount_t SCount = SF-2; - - assign xen = sen && SCount[$left(SCount)]; - uwire sclr = rst || xen; - always_ff @(posedge clk) begin - if(sclr) SCount <= SF-2; - else if(sen) SCount <= SCount - 1; - end - - //- X-Counter: 
image width ---------- - xcount_t XCount = 0; - - assign yen = xen && (XCount == XEnd); - uwire xclr = rst || yen; - always_ff @(posedge clk) begin - if(xclr) XCount <= 0; - else if(xen) XCount <= XCount + 1; - end - uwire xfwd = (XOn <= XCount) && (XCount < XOff); - - //- Y-Counter: image height --------- - ycount_t YCount = 0; - - uwire yclr = rst || (yen && (YCount == YEnd)); - always_ff @(posedge clk) begin - if(yclr) YCount <= 0; - else if(yen) YCount <= YCount + 1; - end - uwire yfwd = (YOn <= YCount) && (YCount < YOff); - - //----------------------------------------------------------------------- - // Input forwarding and edge padding - typedef struct { - logic vld; - logic [STREAM_BITS-1:0] dat; - } buf_t; - buf_t A = '{ vld: 0, dat: 'x }; - buf_t B = '{ vld: 0, dat: 'x }; - - uwire fwd = xfwd && yfwd; - assign sen = (m_axis_tready || !B.vld) && (s_axis_tvalid || A.vld || !fwd); - assign s_axis_tready = !A.vld; - assign m_axis_tvalid = B.vld; - assign m_axis_tdata = B.dat; - - always_ff @(posedge clk) begin - if(rst) begin - B <= '{ vld: 0, dat: 'x }; - end - else if(m_axis_tready || !B.vld) begin - B.vld <= s_axis_tvalid || A.vld || !fwd; - B.dat <= !fwd? '0 : A.vld? A.dat : s_axis_tdata; - end - end - - always_ff @(posedge clk) begin - if(rst) begin - A <= '{ vld: 0, dat: 'x }; - end - else begin - A.vld <= (A.vld || s_axis_tvalid) && ((B.vld && !m_axis_tready) || !fwd); - if(!A.vld) A.dat <= s_axis_tdata; - end - end + // AXI-Lite Adapter + uwire we; + uwire [ 2:0] wa; + uwire [31:0] wd; + axi2we #(.ADDR_BITS(3)) axilight_adapter ( + .ap_clk, .ap_rst_n, + + .s_axilite_AWVALID, .s_axilite_AWREADY, .s_axilite_AWADDR, + .s_axilite_WVALID, .s_axilite_WREADY, .s_axilite_WDATA, .s_axilite_WSTRB, + .s_axilite_BVALID, .s_axilite_BREADY, .s_axilite_BRESP, + + .s_axilite_ARVALID, .s_axilite_ARREADY, .s_axilite_ARADDR, + .s_axilite_RVALID, .s_axilite_RREADY, .s_axilite_RDATA, .s_axilite_RRESP, + + .we, .wa, .wd + ); + + // Actual Padding + fmpadding #( + .XCOUNTER_BITS(XCOUNTER_BITS), .YCOUNTER_BITS(YCOUNTER_BITS), + .NUM_CHANNELS(NUM_CHANNELS), .SIMD(SIMD), + .ELEM_BITS(ELEM_BITS) + ) padding ( + .ap_clk, .ap_rst_n, + + .we, .wa, .wd, + + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, + .m_axis_tready, .m_axis_tvalid, .m_axis_tdata + ); endmodule : fmpadding_axi diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv index ae2377f3dd..fedf5dcb69 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv @@ -13,10 +13,14 @@ module fmpadding_axi_tb #( always #5ns clk = !clk; logic rst; - // Parameter Configuration ---------- - logic we; - logic [ 2:0] wa; - logic [31:0] wd; + // AXI-Light for Parameter Configuration + logic s_axilite_AWVALID; + uwire s_axilite_AWREADY; + logic [2:0] s_axilite_AWADDR; + + logic s_axilite_WVALID; + uwire s_axilite_WREADY; + logic [31:0] s_axilite_WDATA; //- AXI Stream - Input -------------- uwire s_axis_tready; @@ -38,7 +42,13 @@ module fmpadding_axi_tb #( .ELEM_BITS(ELEM_BITS) ) dut ( .ap_clk(clk), .ap_rst_n(!rst), - .we, .wa, .wd, + + .s_axilite_AWVALID, .s_axilite_AWREADY, .s_axilite_AWADDR, + .s_axilite_WVALID, .s_axilite_WREADY, .s_axilite_WDATA, .s_axilite_WSTRB('1), + .s_axilite_BVALID(), .s_axilite_BREADY('1), .s_axilite_BRESP(), + .s_axilite_ARVALID('0), .s_axilite_ARREADY(), .s_axilite_ARADDR('x), + .s_axilite_RVALID(), .s_axilite_RREADY('0), .s_axilite_RDATA(), .s_axilite_RRESP(), + .s_axis_tready, .s_axis_tvalid, .s_axis_tdata, .m_axis_tready, 
.m_axis_tvalid, .m_axis_tdata ); @@ -51,29 +61,45 @@ module fmpadding_axi_tb #( localparam int unsigned PAD_RIGHT = 3; localparam int unsigned PAD_TOP = 1; localparam int unsigned PAD_BOTTOM = 2; + + task axi_write(input logic [2:0] wa, input logic [31:0] wd); + s_axilite_AWVALID <= 1; + s_axilite_AWADDR <= wa; + @(posedge clk iff s_axilite_AWREADY); + s_axilite_AWVALID <= 0; + s_axilite_AWADDR <= 'x; + + s_axilite_WVALID <= 1; + s_axilite_WDATA <= wd; + @(posedge clk iff s_axilite_WREADY); + s_axilite_WVALID <= 0; + s_axilite_WDATA <= 'x; + endtask : axi_write + + initial begin - we = 0; - wa = 'x; - wd = 'x; + s_axilite_AWVALID = 0; + s_axilite_AWADDR = 'x; + s_axilite_WVALID = 0; + s_axilite_WDATA = 'x; s_axis_tvalid = 0; s_axis_tdata = 'x; // Configure Parameters - rst = 1; + rst = 0; @(posedge clk); - we <= 1; - /* XOn */ wa <= 0; wd <= PAD_LEFT; @(posedge clk); - /* XOff */ wa <= 1; wd <= XSIZE - PAD_RIGHT; @(posedge clk); - /* XEnd */ wa <= 2; wd <= XSIZE - 1; @(posedge clk); - /* YOn */ wa <= 4; wd <= PAD_TOP; @(posedge clk); - /* YOff */ wa <= 5; wd <= YSIZE - PAD_BOTTOM; @(posedge clk); - /* YEnd */ wa <= 6; wd <= YSIZE - 1; @(posedge clk); - we <= 0; - wa <= 'x; - wd <= 'x; + /* XOn */ axi_write(0, PAD_LEFT); + /* XOff */ axi_write(1, XSIZE - PAD_RIGHT); + /* XEnd */ axi_write(2, XSIZE - 1); + /* YOn */ axi_write(4, PAD_TOP); + /* YOff */ axi_write(5, YSIZE - PAD_BOTTOM); + /* YEnd */ axi_write(6, YSIZE - 1); + @(posedge clk); + rst <= 1; @(posedge clk); rst <= 0; + @(posedge clk); // Feed data input s_axis_tvalid <= 1; @@ -91,7 +117,7 @@ module fmpadding_axi_tb #( s_axis_tdata <= 'x; end - // Ouput Throttler + // Output Throttler initial begin m_axis_tready = 0; @(posedge clk iff !rst); @@ -105,6 +131,7 @@ module fmpadding_axi_tb #( // Output logger initial begin + @(negedge rst); repeat(IMAGES) begin for(int unsigned y = 0; y < YSIZE; y++) begin for(int unsigned x = 0; x < XSIZE; x++) begin From 4f4bec7ab5979f27bce822250c87103f52de0aa6 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 12 Oct 2022 19:11:29 +0300 Subject: [PATCH 219/628] [FMPadding] conversion, inst template, CustomOp for FMPadding_rtl --- .../fmpadding/hdl/fmpadding_template.sv | 112 +++++ .../custom_op/fpgadataflow/fmpadding_rtl.py | 386 ++++++++++++++++++ .../fpgadataflow/convert_to_hls_layers.py | 6 +- 3 files changed, 503 insertions(+), 1 deletion(-) create mode 100644 finn-rtllib/fmpadding/hdl/fmpadding_template.sv create mode 100644 src/finn/custom_op/fpgadataflow/fmpadding_rtl.py diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.sv b/finn-rtllib/fmpadding/hdl/fmpadding_template.sv new file mode 100644 index 0000000000..ee5b7041ae --- /dev/null +++ b/finn-rtllib/fmpadding/hdl/fmpadding_template.sv @@ -0,0 +1,112 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +module $TOP_MODULE_NAME$( +//- Global Control ------------------ +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) +input logic ap_clk, +(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) +input logic ap_rst_n, + +//- AXI Lite ------------------------ +// Writing +input s_axilite_AWVALID, +output s_axilite_AWREADY, +input [2:0] s_axilite_AWADDR, + +input s_axilite_WVALID, +output s_axilite_WREADY, +input [31:0] s_axilite_WDATA, +input [ 3:0] s_axilite_WSTRB, + +output s_axilite_BVALID, +input s_axilite_BREADY, +output [1:0] s_axilite_BRESP, + +// Reading +input s_axilite_ARVALID, +output s_axilite_ARREADY, +input [3:0] s_axilite_ARADDR, + +output s_axilite_RVALID, +input s_axilite_RREADY, +output [31:0] s_axilite_RDATA, +output [ 1:0] s_axilite_RRESP, + +//- AXI Stream - Input -------------- +output logic in0_V_tready, +input logic in0_V_tvalid, +input logic [STREAM_BITS-1:0] in0_V_tdata, + +//- AXI Stream - Output ------------- +input logic out_V_tready, +output logic out_V_tvalid, +output logic [STREAM_BITS-1:0] out_V_tdata +); + + +fmpadding_axi #( +.XCOUNTER_BITS($XCOUNTER_BITS$), +.YCOUNTER_BITS($YCOUNTER_BITS$), +.NUM_CHANNELS($NUM_CHANNELS$), +.SIMD($SIMD$), +.ELEM_BITS($ELEM_BITS$) +) +$TOP_MODULE_NAME$_impl +( + .ap_clk(ap_clk), + .ap_rst_n(ap_rst_n), + .s_axilite_AWVALID, + .s_axilite_AWREADY, + .s_axilite_AWADDR, + .s_axilite_WVALID, + .s_axilite_WREADY, + .s_axilite_WDATA, + .s_axilite_WSTRB, + .s_axilite_BVALID, + .s_axilite_BREADY, + .s_axilite_BRESP, + .s_axilite_ARVALID, + .s_axilite_ARREADY, + .s_axilite_ARADDR, + .s_axilite_RVALID, + .s_axilite_RREADY, + .s_axilite_RDATA, + .s_axilite_RRESP, + .s_axis_tready(in0_V_tready), + .s_axis_tvalid(in0_V_tvalid), + .s_axis_tdata(in0_V_tdata), + .m_axis_tready(out_V_tready), + .m_axis_tvalid(out_V_tvalid), + .m_axis_tdata(out_V_tdata) +); + +endmodule diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py new file mode 100644 index 0000000000..5de3e64bfd --- /dev/null +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -0,0 +1,386 @@ +# Copyright (C) 2022, Advanced Micro Devices, Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import math +import numpy as np +import os +import shutil +import warnings +from qonnx.core.datatype import DataType + +from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp +from finn.util.basic import get_rtlsim_trace_depth, make_build_dir +from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy + +try: + from pyverilator import PyVerilator +except ModuleNotFoundError: + PyVerilator = None + + +class FMPadding_rtl(HLSCustomOp): + """CustomOp wrapper for the finn-rtllib fmpadding_axi component + Supports adjusting the padding amount and spatial feature sizes at + runtime.""" + + def __init__(self, onnx_node): + super().__init__(onnx_node) + + def get_nodeattr_types(self): + my_attrs = { + # spatial size of input images + "ImgDim": ("ints", True, []), # [H, W] = [Y, X] + # total padding (per dimension) to apply + "Padding": ( + "ints", + True, + [1, 1, 1, 1], + ), # [H_begin, W_begin, H_end, W_end] = [Y_begin, X_begin, Y_end, X_end] + # number of channels in input image + "NumChannels": ("i", True, 0), + # SIMD Input parallelism + "SIMD": ("i", False, 1), + # FINN input datatype + "inputDataType": ("s", True, ""), + # controls distribution of padded pixels + # in case of uneven padding -- see FMPadding fxn + # in hlslib + "PaddingStyle": ("i", False, 2, {2, 1}), + # shape describing input vecs per execution + "numInputVectors": ("i", False, 1), + # Enable reprogrammable implementation to change FM dimensions, + # stride, or dilation during runtime + "dynamic_mode": ("i", False, 0, {0, 1}), + # attribute to save top module name - not user configurable + "gen_top_module": ("s", False, ""), + } + my_attrs.update(super().get_nodeattr_types()) + return my_attrs + + def get_padded_odim(self): + "Return the padded spatial size of the output." 
+ idim_h, idim_w = self.get_nodeattr("ImgDim") + pad = self.get_nodeattr("Padding") + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + odim_h = idim_h + pad_h + odim_w = idim_w + pad_w + return [odim_h, odim_w] + + def get_exp_cycles(self): + odim_h, odim_w = self.get_padded_odim() + channels = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + batch_size = self.get_nodeattr("numInputVectors") + exp_cycles = (channels / simd) * batch_size * odim_h * odim_w + return int(exp_cycles) + + def get_normal_input_shape(self): + idim_h, idim_w = self.get_nodeattr("ImgDim") + num_ch = self.get_nodeattr("NumChannels") + ishape = (1, idim_h, idim_w, num_ch) + return ishape + + def get_normal_output_shape(self): + odim_h, odim_w = self.get_padded_odim() + num_ch = self.get_nodeattr("NumChannels") + + oshape = (1, odim_h, odim_w, num_ch) + return oshape + + def get_folded_input_shape(self): + normal_ishape = list(self.get_normal_input_shape()) + ifm_ch = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + assert ifm_ch % simd == 0, "SIMD must divide input channels" + fold = int(normal_ishape[-1] / simd) + folded_ishape = normal_ishape[:-1] + [fold, simd] + return tuple(folded_ishape) + + def get_folded_output_shape(self): + normal_oshape = list(self.get_normal_output_shape()) + ifm_ch = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + assert ifm_ch % simd == 0, "SIMD must divide input channels" + fold = int(normal_oshape[-1] / simd) + folded_oshape = normal_oshape[:-1] + [fold, simd] + return tuple(folded_oshape) + + def make_shape_compatible_op(self, model): + exp_ishape = self.get_normal_input_shape() + oshape = self.get_normal_output_shape() + ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) + assert ishape == exp_ishape, "Unexpect input shape for SameResize." + return super().make_const_shape_op(oshape) + + def infer_node_datatype(self, model): + node = self.onnx_node + idt = model.get_tensor_datatype(node.input[0]) + if idt != self.get_input_datatype(): + warn_str = "inputDataType changing for %s: %s -> %s " % ( + node.name, + str(self.get_input_datatype()), + str(idt), + ) + warnings.warn(warn_str) + self.set_nodeattr("inputDataType", idt.name) + model.set_tensor_datatype(node.output[0], idt) + + def verify_node(self): + pass + + def get_input_datatype(self): + """Returns FINN DataType of input.""" + ret = DataType[self.get_nodeattr("inputDataType")] + # the hlslib op always pads with zeros, so ensure that the DataType + # is able to represent zeros + assert ret.allowed(0), "FMPadding_Batch DataType must support zero" + return ret + + def get_output_datatype(self): + """Returns FINN DataType of output. 
(Same as input datatype)""" + return self.get_input_datatype() + + def get_instream_width(self): + ibits = self.get_input_datatype().bitwidth() + simd = self.get_nodeattr("SIMD") + return ibits * simd + + def get_outstream_width(self): + obits = self.get_output_datatype().bitwidth() + simd = self.get_nodeattr("SIMD") + return obits * simd + + def get_number_output_values(self): + folded_oshape = self.get_folded_output_shape() + return np.prod(folded_oshape[:-1]) + + def get_verilog_top_module_intf_names(self): + # Overload default HLSCustomOp implementation to add axilite control IF + intf_names = super().get_verilog_top_module_intf_names() + if self.get_nodeattr("dynamic_mode"): + intf_names["axilite"] = ["s_axilite"] + return intf_names + + def execute_node(self, context, graph): + mode = self.get_nodeattr("exec_mode") + node = self.onnx_node + exp_ishape = self.get_normal_input_shape() + exp_oshape = self.get_normal_output_shape() + folded_ishape = self.get_folded_input_shape() + + if mode == "cppsim": + raise Exception( + "cppsim not possible for FMPadding_rtl, please set exec_mode to rtlsim" + ) + elif mode == "rtlsim": + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + else: + raise Exception( + """Invalid value for attribute exec_mode! Is currently set to: {} + has to be set to one of the following value ("cppsim", "rtlsim")""".format( + mode + ) + ) + + inp = context[node.input[0]] + assert str(inp.dtype) == "float32", "Input datatype is not float32" + assert ( + inp.shape == exp_ishape + ), """Input shape doesn't + match expected shape (1, ImgDim_h, ImgDim_w, NumChannels).""" + export_idt = self.get_input_datatype() + + reshaped_input = inp.reshape(folded_ishape) + np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) + + sim = self.get_rtlsim() + nbits = self.get_instream_width() + rtlsim_inp = npy_to_rtlsim_input( + "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + ) + super().reset_rtlsim(sim) + super().toggle_clk(sim) + assert False, "Need register config here until default values are implemented" + rtlsim_output = self.rtlsim(sim, rtlsim_inp) + odt = export_idt + target_bits = odt.bitwidth() + packed_bits = self.get_outstream_width() + out_npy_path = "{}/output.npy".format(code_gen_dir) + out_shape = self.get_folded_output_shape() + rtlsim_output_to_npy( + rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits + ) + # load and reshape output + output = np.load(out_npy_path) + output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) + context[node.output[0]] = output + + assert ( + context[node.output[0]].shape == exp_oshape + ), """Output shape doesn't match expected shape + (1, OutputDim_H, OutputDim_W, NumChannels).""" + + def generate_hdl(self): + dimY, dimX = self.get_nodeattr("ImgDim") + padT, padL, padB, padR = self.get_nodeattr("Padding") + chans = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + idt = self.get_nodeattr("inputDataType") + y_counter_bits = int(math.log2(padT + dimY + padB)) + x_counter_bits = int(math.log2(padL + dimX + padR)) + topname = self.get_verilog_top_module_name() + rtlsrc = os.environ["FINN_ROOT"] + "/finn-rtllib/fmpadding/hdl" + template_path = rtlsrc + "/fmpadding_template.sv" + code_gen_dict = { + "XCOUNTER_BITS": x_counter_bits, + "YCOUNTER_BITS": y_counter_bits, + "NUM_CHANNELS": chans, + "SIMD": simd, + "ELEM_BITS": idt.bitwidth(), + "TOP_MODULE_NAME": topname, + } + # save top module name so we can refer to it after this node has been renamed + # (e.g. 
by GiveUniqueNodeNames(prefix) during MakeZynqProject) + self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) + + # apply code generation to templates + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + with open(template_path, "r") as f: + template = f.read() + for key_name in code_gen_dict: + key = "$%s$" % key_name + # transform list into long string separated by '\n' + code_gen_line = "\n".join(code_gen_dict[key]) + template = template.replace(key, code_gen_line) + + with open( + os.path.join(code_gen_dir, topname + ".sv"), + "w", + ) as f: + f.write(template) + + shutil.copyfile(rtlsrc + "/fmpadding_axi.sv", code_gen_dir) + shutil.copyfile(rtlsrc + "/fmpadding.sv", code_gen_dir) + # set ipgen_path and ip_path so that HLS-Synth transformation + # and stich_ip transformation do not complain + self.set_nodeattr("ipgen_path", code_gen_dir) + self.set_nodeattr("ip_path", code_gen_dir) + + def prepare_rtlsim(self): + """Creates a Verilator emulation library for the RTL code generated + for this node, sets the rtlsim_so attribute to its path and returns + a PyVerilator wrapper around it.""" + # Modified to use generated (System-)Verilog instead of HLS output products + + if PyVerilator is None: + raise ImportError("Installation of PyVerilator is required.") + + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + verilog_paths = [code_gen_dir] + verilog_files = [ + "fmpadding_axi.sv", + "fmpadding.sv", + self.get_nodeattr("gen_top_module") + ".sv", + ] + + # build the Verilator emu library + sim = PyVerilator.build( + verilog_files, + build_dir=make_build_dir("pyverilator_" + self.onnx_node.name + "_"), + verilog_path=verilog_paths, + trace_depth=get_rtlsim_trace_depth(), + top_module_name=self.get_verilog_top_module_name(), + ) + # save generated lib filename in attribute + self.set_nodeattr("rtlsim_so", sim.lib._name) + return sim + + def code_generation_ipi(self): + """Constructs and returns the TCL for node instantiation in Vivado IPI.""" + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + + sourcefiles = [ + "fmpadding_axi.sv", + "fmpadding.sv", + self.get_nodeattr("gen_top_module") + ".sv", + ] + + sourcefiles = [os.path.join(code_gen_dir, f) for f in sourcefiles] + + cmd = [] + for f in sourcefiles: + cmd += ["add_files -norecurse %s" % (f)] + cmd += [ + "create_bd_cell -type module -reference %s %s" + % (self.get_nodeattr("gen_top_module"), self.onnx_node.name) + ] + return cmd + + def code_generation_ipgen(self, model, fpgapart, clk): + """Normally: Generates C++ code and tcl script for IP generation. 
+ Here: Generates (System-)Verilog code for IP generation.""" + self.generate_hdl() + + def ipgen_singlenode_code(self): + """Normally: Builds the bash script for IP generation.""" + pass + + def code_generation_cppsim(self, model): + """Normally: Generates C++ code for simulation (cppsim).""" + pass + + def compile_singlenode_code(self): + pass + + def global_includes(self): + pass + + def defines(self, var): + pass + + def read_npy_data(self): + pass + + def strm_decl(self): + pass + + def docompute(self): + pass + + def dataoutstrm(self): + pass + + def save_as_npy(self): + pass + + def blackboxfunction(self): + pass + + def pragmas(self): + pass diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index b7db49eb22..1d040780f7 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -117,8 +117,12 @@ def apply(self, model): ConvInpGen_idim_h = odim_padding_h ConvInpGen_idim_w = odim_padding_w + padding_optype = ( + "FMPadding_rtl" if self.use_rtl_variant else "FMPadding_Batch" + ) + padding_node = helper.make_node( - "FMPadding_Batch", + padding_optype, [i2c_input], [padding_out], domain="finn.custom_op.fpgadataflow", From ba9a45f04339c34a529a3eeeba845d2dd6e41745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Thu, 13 Oct 2022 09:18:57 +0100 Subject: [PATCH 220/628] Added capability to custom-initialize dynamic parameter registers upon design configuration. --- finn-rtllib/fmpadding/hdl/fmpadding.sv | 21 +++++++++++++------ finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 10 +++++++++ finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv | 2 ++ 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding.sv b/finn-rtllib/fmpadding/hdl/fmpadding.sv index 7e408f6241..b3e7f2b65a 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding.sv @@ -38,6 +38,14 @@ module fmpadding #( int unsigned NUM_CHANNELS, int unsigned SIMD, int unsigned ELEM_BITS, + + int unsigned INIT_XON, + int unsigned INIT_XOFF, + int unsigned INIT_XEND, + int unsigned INIT_YON, + int unsigned INIT_YOFF, + int unsigned INIT_YEND, + localparam int unsigned STREAM_BITS = 8*(1 + (SIMD*ELEM_BITS-1)/8) )( //- Global Control ------------------ @@ -66,14 +74,15 @@ module fmpadding #( //----------------------------------------------------------------------- // Dynamically configurable state typedef logic [XCOUNTER_BITS-1:0] xcount_t; - xcount_t XEnd = 0; - xcount_t XOn = 0; - xcount_t XOff = 0; + xcount_t XEnd = INIT_XEND; + xcount_t XOn = INIT_XON; + xcount_t XOff = INIT_XOFF; typedef logic [YCOUNTER_BITS-1:0] ycount_t; - ycount_t YEnd = 0; - ycount_t YOn = 0; - ycount_t YOff = 0; + ycount_t YEnd = INIT_YEND; + ycount_t YOn = INIT_YON; + ycount_t YOff = INIT_YOFF; + always_ff @(posedge clk) begin if(we) begin unique case(wa) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv index f5313801e5..71cdf0a452 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -38,6 +38,14 @@ module fmpadding_axi #( int unsigned NUM_CHANNELS, int unsigned SIMD, int unsigned ELEM_BITS, + + int unsigned INIT_XON, + int unsigned INIT_XOFF, + int unsigned INIT_XEND, + int unsigned INIT_YON, + int unsigned INIT_YOFF, + int unsigned INIT_YEND, + localparam int unsigned STREAM_BITS = 8*(1 + 
(SIMD*ELEM_BITS-1)/8) )( //- Global Control ------------------ @@ -101,6 +109,8 @@ module fmpadding_axi #( fmpadding #( .XCOUNTER_BITS(XCOUNTER_BITS), .YCOUNTER_BITS(YCOUNTER_BITS), .NUM_CHANNELS(NUM_CHANNELS), .SIMD(SIMD), + .INIT_XON(INIT_XON), .INIT_XOFF(INIT_XOFF), .INIT_XEND(INIT_XEND), + .INIT_YON(INIT_YON), .INIT_YOFF(INIT_YOFF), .INIT_YEND(INIT_YEND), .ELEM_BITS(ELEM_BITS) ) padding ( .ap_clk, .ap_rst_n, diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv index fedf5dcb69..741689b3a7 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi_tb.sv @@ -39,6 +39,8 @@ module fmpadding_axi_tb #( .YCOUNTER_BITS(YCOUNTER_BITS), .NUM_CHANNELS(NUM_CHANNELS), .SIMD(SIMD), + .INIT_XON(0), .INIT_XOFF(0), .INIT_XEND(0), + .INIT_YON(0), .INIT_YOFF(0), .INIT_YEND(0), .ELEM_BITS(ELEM_BITS) ) dut ( .ap_clk(clk), .ap_rst_n(!rst), From e2cc28d3e1fb213a667a75071813ad2dc238e7c1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 13 Oct 2022 17:57:52 +0300 Subject: [PATCH 221/628] [Pad] update fmpadding_rtl template --- .../fmpadding/hdl/fmpadding_template.sv | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.sv b/finn-rtllib/fmpadding/hdl/fmpadding_template.sv index ee5b7041ae..66f235fc6a 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_template.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_template.sv @@ -62,14 +62,14 @@ output [31:0] s_axilite_RDATA, output [ 1:0] s_axilite_RRESP, //- AXI Stream - Input -------------- -output logic in0_V_tready, -input logic in0_V_tvalid, -input logic [STREAM_BITS-1:0] in0_V_tdata, +output logic in0_V_TREADY, +input logic in0_V_TVALID, +input logic [$STREAM_BITS$-1:0] in0_V_TDATA, //- AXI Stream - Output ------------- -input logic out_V_tready, -output logic out_V_tvalid, -output logic [STREAM_BITS-1:0] out_V_tdata +input logic out_V_TREADY, +output logic out_V_TVALID, +output logic [$STREAM_BITS$-1:0] out_V_TDATA ); @@ -78,7 +78,13 @@ fmpadding_axi #( .YCOUNTER_BITS($YCOUNTER_BITS$), .NUM_CHANNELS($NUM_CHANNELS$), .SIMD($SIMD$), -.ELEM_BITS($ELEM_BITS$) +.ELEM_BITS($ELEM_BITS$), +.INIT_XON($INIT_XON$), +.INIT_XOFF($INIT_XOFF$), +.INIT_XEND($INIT_XEND$), +.INIT_YON($INIT_YON$), +.INIT_YOFF($INIT_YOFF$), +.INIT_YEND($INIT_YEND$) ) $TOP_MODULE_NAME$_impl ( @@ -101,12 +107,12 @@ $TOP_MODULE_NAME$_impl .s_axilite_RREADY, .s_axilite_RDATA, .s_axilite_RRESP, - .s_axis_tready(in0_V_tready), - .s_axis_tvalid(in0_V_tvalid), - .s_axis_tdata(in0_V_tdata), - .m_axis_tready(out_V_tready), - .m_axis_tvalid(out_V_tvalid), - .m_axis_tdata(out_V_tdata) + .s_axis_tready(in0_V_TREADY), + .s_axis_tvalid(in0_V_TVALID), + .s_axis_tdata(in0_V_TDATA), + .m_axis_tready(out_V_TREADY), + .m_axis_tvalid(out_V_TVALID), + .m_axis_tdata(out_V_TDATA) ); endmodule From d6feac7802dada8c84168aab957cb848eb58983b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 13 Oct 2022 17:58:07 +0300 Subject: [PATCH 222/628] [Pad] add FMPadding_rtl to op registry, fixes to its CustomOp --- src/finn/custom_op/fpgadataflow/__init__.py | 2 + .../custom_op/fpgadataflow/fmpadding_rtl.py | 73 ++++++++++++++----- 2 files changed, 56 insertions(+), 19 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/__init__.py b/src/finn/custom_op/fpgadataflow/__init__.py index e5eb483a00..56d4230a3a 100644 --- a/src/finn/custom_op/fpgadataflow/__init__.py +++ b/src/finn/custom_op/fpgadataflow/__init__.py @@ -43,6 +43,7 @@ from 
finn.custom_op.fpgadataflow.duplicatestreams_batch import DuplicateStreams_Batch from finn.custom_op.fpgadataflow.eltwise import StreamingEltwise from finn.custom_op.fpgadataflow.fmpadding_batch import FMPadding_Batch +from finn.custom_op.fpgadataflow.fmpadding_rtl import FMPadding_rtl from finn.custom_op.fpgadataflow.globalaccpool_batch import GlobalAccPool_Batch from finn.custom_op.fpgadataflow.iodma import IODMA from finn.custom_op.fpgadataflow.labelselect_batch import LabelSelect_Batch @@ -91,3 +92,4 @@ custom_op["StreamingConcat"] = StreamingConcat custom_op["CheckSum"] = CheckSum custom_op["StreamingEltwise"] = StreamingEltwise +custom_op["FMPadding_rtl"] = FMPadding_rtl diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index 5de3e64bfd..fe8b70d135 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -32,6 +32,7 @@ import shutil import warnings from qonnx.core.datatype import DataType +from qonnx.util.basic import roundup_to_integer_multiple from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp from finn.util.basic import get_rtlsim_trace_depth, make_build_dir @@ -226,7 +227,6 @@ def execute_node(self, context, graph): ) super().reset_rtlsim(sim) super().toggle_clk(sim) - assert False, "Need register config here until default values are implemented" rtlsim_output = self.rtlsim(sim, rtlsim_inp) odt = export_idt target_bits = odt.bitwidth() @@ -246,25 +246,60 @@ def execute_node(self, context, graph): ), """Output shape doesn't match expected shape (1, OutputDim_H, OutputDim_W, NumChannels).""" - def generate_hdl(self): - dimY, dimX = self.get_nodeattr("ImgDim") - padT, padL, padB, padR = self.get_nodeattr("Padding") - chans = self.get_nodeattr("NumChannels") - simd = self.get_nodeattr("SIMD") - idt = self.get_nodeattr("inputDataType") + def get_template_values(self, ifm_dims, pads, chans, simd, idt): + dimY, dimX = ifm_dims + padT, padL, padB, padR = pads y_counter_bits = int(math.log2(padT + dimY + padB)) x_counter_bits = int(math.log2(padL + dimX + padR)) topname = self.get_verilog_top_module_name() - rtlsrc = os.environ["FINN_ROOT"] + "/finn-rtllib/fmpadding/hdl" - template_path = rtlsrc + "/fmpadding_template.sv" + stream_bits = idt.bitwidth() * simd + stream_bits = int(roundup_to_integer_multiple(stream_bits, 8)) code_gen_dict = { - "XCOUNTER_BITS": x_counter_bits, - "YCOUNTER_BITS": y_counter_bits, - "NUM_CHANNELS": chans, - "SIMD": simd, + "XCOUNTER_BITS": int(x_counter_bits), + "YCOUNTER_BITS": int(y_counter_bits), + "NUM_CHANNELS": int(chans), + "SIMD": int(simd), "ELEM_BITS": idt.bitwidth(), "TOP_MODULE_NAME": topname, + "INIT_XON": int(padL), + "INIT_XOFF": int(padL + dimX), + "INIT_XEND": int(padL + dimX + padR), + "INIT_YON": int(padT), + "INIT_YOFF": int(padT + dimY), + "INIT_YEND": int(padT + dimY + padB), + "STREAM_BITS": int(stream_bits), + } + return code_gen_dict + + def get_dynamic_config(self, ifm_dims, pads): + """Returns a configuration dict to re-configure FM dimension and + padding amounts during runtime.""" + + dims = self.get_nodeattr("ImgDim") + pads = self.get_nodeattr("Padding") + chans = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + idt = self.get_input_datatype() + code_gen_dict = self.get_template_values(dims, pads, chans, simd, idt) + config = { + "XON": (0, (code_gen_dict["INIT_XON"])), + "XOFF": (1, (code_gen_dict["INIT_XOFF"])), + "XEND": (2, (code_gen_dict["INIT_XEND"])), + "YON": (4, 
(code_gen_dict["INIT_YON"])), + "YOFF": (5, (code_gen_dict["INIT_YOFF"])), + "YEND": (6, (code_gen_dict["INIT_YEND"])), } + return config + + def generate_hdl(self): + rtlsrc = os.environ["FINN_ROOT"] + "/finn-rtllib/fmpadding/hdl" + template_path = rtlsrc + "/fmpadding_template.sv" + dims = self.get_nodeattr("ImgDim") + pads = self.get_nodeattr("Padding") + chans = self.get_nodeattr("NumChannels") + simd = self.get_nodeattr("SIMD") + idt = self.get_input_datatype() + code_gen_dict = self.get_template_values(dims, pads, chans, simd, idt) # save top module name so we can refer to it after this node has been renamed # (e.g. by GiveUniqueNodeNames(prefix) during MakeZynqProject) self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) @@ -275,18 +310,17 @@ def generate_hdl(self): template = f.read() for key_name in code_gen_dict: key = "$%s$" % key_name - # transform list into long string separated by '\n' - code_gen_line = "\n".join(code_gen_dict[key]) - template = template.replace(key, code_gen_line) + template = template.replace(key, str(code_gen_dict[key_name])) with open( - os.path.join(code_gen_dir, topname + ".sv"), + os.path.join(code_gen_dir, self.get_verilog_top_module_name() + ".sv"), "w", ) as f: f.write(template) - shutil.copyfile(rtlsrc + "/fmpadding_axi.sv", code_gen_dir) - shutil.copyfile(rtlsrc + "/fmpadding.sv", code_gen_dir) + sv_files = ["fmpadding_axi.sv", "fmpadding.sv", "axi2we.sv"] + for sv_file in sv_files: + shutil.copy(rtlsrc + "/" + sv_file, code_gen_dir) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain self.set_nodeattr("ipgen_path", code_gen_dir) @@ -306,6 +340,7 @@ def prepare_rtlsim(self): verilog_files = [ "fmpadding_axi.sv", "fmpadding.sv", + "axi2we.sv", self.get_nodeattr("gen_top_module") + ".sv", ] From b09ee8f77e190287604fdabc57bd4f6485769cc2 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 13 Oct 2022 17:58:39 +0300 Subject: [PATCH 223/628] [Test] expand testcase to cover FMPadding_rtl --- .../test_fpgadataflow_fmpadding.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 2e2da0da7a..1f6f5dea1e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -53,7 +53,9 @@ target_clk_ns = 10 -def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_style): +def make_single_fmpadding_modelwrapper( + optype, idim, padding, num_ch, simd, idt, pad_style +): pad_h = padding[0] + padding[2] pad_w = padding[1] + padding[3] idim_h, idim_w = idim @@ -71,7 +73,7 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty ) FMPadding = helper.make_node( - "FMPadding_Batch", + optype, ["inp"], ["outp"], domain="finn.custom_op.fpgadataflow", @@ -112,10 +114,16 @@ def make_single_fmpadding_modelwrapper(idim, padding, num_ch, simd, idt, pad_sty @pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]]) # execution mode @pytest.mark.parametrize("mode", ["cppsim", "rtlsim"]) +# implementation style +@pytest.mark.parametrize("impl_style", ["rtl", "hls"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): +def test_fpgadataflow_fmpadding( + idim, pad, num_ch, simd, pad_style, idt, mode, impl_style +): + if impl_style == "rtl" and mode == "cppsim": 
+ pytest.skip("rtl implstyle has no cppsim, skipping") if num_ch % simd != 0: pytest.skip(" num_ch % simd != 0, skipping") @@ -135,7 +143,11 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): odim_h = idim_h + pad_h odim_w = idim_w + pad_w - model = make_single_fmpadding_modelwrapper(idim, pad, num_ch, simd, idt, pad_style) + optype = {"hls": "FMPadding_Batch", "rtl": "FMPadding_rtl"}[impl_style] + + model = make_single_fmpadding_modelwrapper( + optype, idim, pad, num_ch, simd, idt, pad_style + ) model = model.transform(InferShapes()) model = model.transform(SetExecMode(mode)) model = model.transform(GiveUniqueNodeNames()) @@ -146,6 +158,10 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) + node = model.get_nodes_by_op_type(optype)[0] + inst = getCustomOp(node) + inst.set_nodeattr("rtlsim_trace", "fmpadding_rtlsim.vcd") + y_produced = oxe.execute_onnx(model, input_dict)["outp"] expected_oshape = (1, odim_h, odim_w, num_ch) assert y_produced.shape == expected_oshape @@ -175,7 +191,7 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, pad_style, idt, mode): assert (y_produced == y_expected).all() if mode == "rtlsim": - node = model.get_nodes_by_op_type("FMPadding_Batch")[0] + node = model.get_nodes_by_op_type(optype)[0] inst = getCustomOp(node) cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") exp_cycles_dict = model.analysis(exp_cycles_per_layer) From ff348d47402a8dbb2cfcb3a96f740a61a0d79d14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 14 Oct 2022 06:41:57 +0100 Subject: [PATCH 224/628] Add sanity checking for generics. 
--- finn-rtllib/fmpadding/hdl/fmpadding.sv | 40 ++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding.sv b/finn-rtllib/fmpadding/hdl/fmpadding.sv index b3e7f2b65a..82ccba132c 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding.sv @@ -71,6 +71,46 @@ module fmpadding #( uwire clk = ap_clk; uwire rst = !ap_rst_n; + //----------------------------------------------------------------------- + // Parameter Sanity Checking + initial begin + automatic bit fail = 0; + + if(XCOUNTER_BITS < $clog2(1+INIT_XEND)) begin + $error("XCounter size too small to accommodate end count."); + fail = 1; + end + if(XCOUNTER_BITS < $clog2(1+INIT_XON)) begin + $error("XCounter size too small to accommodate ON count."); + fail = 1; + end + if(XCOUNTER_BITS < $clog2(1+INIT_XOFF)) begin + $error("XCounter size too small to accommodate OFF count."); + fail = 1; + end + if(YCOUNTER_BITS < $clog2(1+INIT_YEND)) begin + $error("YCounter size too small to accommodate end count."); + fail = 1; + end + if(YCOUNTER_BITS < $clog2(1+INIT_YON)) begin + $error("YCounter size too small to accommodate ON count."); + fail = 1; + end + if(YCOUNTER_BITS < $clog2(1+INIT_YOFF)) begin + $error("YCounter size too small to accommodate OFF count."); + fail = 1; + end + + if((INIT_XEND < INIT_XON) || (INIT_XOFF <= INIT_XON)) begin + $warning("Initial empty X output range."); + end + if((INIT_YEND < INIT_YON) || (INIT_YOFF <= INIT_YON)) begin + $warning("Initial empty Y output range."); + end + + if(fail) $finish(); + end + //----------------------------------------------------------------------- // Dynamically configurable state typedef logic [XCOUNTER_BITS-1:0] xcount_t; From 9d4f4ae051f67f0d774751a884c26ed326fd9db8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 14 Oct 2022 17:56:25 +0300 Subject: [PATCH 225/628] [Pad] bugfixes in FMPadding_rtl config gen --- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index fe8b70d135..0b15562604 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -249,8 +249,8 @@ def execute_node(self, context, graph): def get_template_values(self, ifm_dims, pads, chans, simd, idt): dimY, dimX = ifm_dims padT, padL, padB, padR = pads - y_counter_bits = int(math.log2(padT + dimY + padB)) - x_counter_bits = int(math.log2(padL + dimX + padR)) + y_counter_bits = int(math.ceil(math.log2(padT + dimY + padB))) + x_counter_bits = int(math.ceil(math.log2(padL + dimX + padR))) topname = self.get_verilog_top_module_name() stream_bits = idt.bitwidth() * simd stream_bits = int(roundup_to_integer_multiple(stream_bits, 8)) @@ -263,10 +263,10 @@ def get_template_values(self, ifm_dims, pads, chans, simd, idt): "TOP_MODULE_NAME": topname, "INIT_XON": int(padL), "INIT_XOFF": int(padL + dimX), - "INIT_XEND": int(padL + dimX + padR), + "INIT_XEND": int(padL + dimX + padR - 1), "INIT_YON": int(padT), "INIT_YOFF": int(padT + dimY), - "INIT_YEND": int(padT + dimY + padB), + "INIT_YEND": int(padT + dimY + padB - 1), "STREAM_BITS": int(stream_bits), } return code_gen_dict From 5be498de84273223cbebbf2569e6ac8ea54477a4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 14 Oct 2022 17:58:07 +0300 Subject: [PATCH 226/628] [Test] cover FMPadding_rtl as part of tests, compute refernece differently --- 
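Note: for the rtl variant the test now builds the expected output directly from the four Padding entries, ordered [H_begin, W_begin, H_end, W_end], instead of re-deriving the up/down and left/right split from pad_style as the HLS path does. A minimal numpy sketch of that reference computation, using hypothetical input shape and padding values (not taken from the test itself):

    import numpy as np

    # hypothetical NHWC input and padding spec [H_begin, W_begin, H_end, W_end]
    x = np.random.rand(1, 8, 8, 4).astype(np.float32)
    pad = [1, 2, 1, 2]
    pad_up, pad_left, pad_down, pad_right = pad
    y_expected = np.pad(
        x, ((0, 0), (pad_up, pad_down), (pad_left, pad_right), (0, 0)), "constant"
    )
    assert y_expected.shape == (1, 8 + pad[0] + pad[2], 8 + pad[1] + pad[3], 4)
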
.../test_fpgadataflow_fmpadding.py | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 1f6f5dea1e..1c182a75c0 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -131,10 +131,10 @@ def test_fpgadataflow_fmpadding( pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] - if idim_h == idim_w and pad_h != pad_w: + if idim_h == idim_w and pad_h != pad_w and impl_style != "rtl": pytest.skip( """Only equal padding along the dimensions for square images - is supported, skipping""" + is supported for HLS, skipping""" ) # generate input data @@ -160,7 +160,6 @@ def test_fpgadataflow_fmpadding( model = model.transform(PrepareRTLSim()) node = model.get_nodes_by_op_type(optype)[0] inst = getCustomOp(node) - inst.set_nodeattr("rtlsim_trace", "fmpadding_rtlsim.vcd") y_produced = oxe.execute_onnx(model, input_dict)["outp"] expected_oshape = (1, odim_h, odim_w, num_ch) @@ -168,21 +167,27 @@ def test_fpgadataflow_fmpadding( # calculate reference # calculate correct pad according to parameters - if pad_style == 2: - if pad_h % 2 == 0: - pad_up = pad_h // 2 + if impl_style == "hls": + if pad_style == 2: + if pad_h % 2 == 0: + pad_up = pad_h // 2 + else: + pad_up = pad_h // 2 + 1 + if pad_w % 2 == 0: + pad_left = pad_w // 2 + else: + pad_left = pad_w // 2 + 1 else: - pad_up = pad_h // 2 + 1 - if pad_w % 2 == 0: + pad_up = pad_h // 2 pad_left = pad_w // 2 - else: - pad_left = pad_w // 2 + 1 - else: - pad_up = pad_h // 2 - pad_left = pad_w // 2 - pad_down = pad_h - pad_up - pad_right = pad_w - pad_left + pad_down = pad_h - pad_up + pad_right = pad_w - pad_left + else: + pad_up = pad[0] + pad_left = pad[1] + pad_down = pad[2] + pad_right = pad[3] y_expected = np.pad( x, ((0, 0), (pad_up, pad_down), (pad_left, pad_right), (0, 0)), "constant" From 076566477b1b649503937d071286d9b04910a990 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 15:43:00 +0300 Subject: [PATCH 227/628] [Test] add explicit timeout to dynamic conv sizing test --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 2a3413cb13..23fa79a2a2 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -31,6 +31,7 @@ import copy import numpy as np import onnx.parser as oprs +import os from onnx import TensorProto, helper from pyverilator.util.axi_utils import axilite_write, reset_rtlsim from qonnx.core.datatype import DataType @@ -55,6 +56,7 @@ from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.util.basic import pyverilate_get_liveness_threshold_cycles def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): @@ -209,7 +211,6 @@ def test_fpgadataflow_conv_dynamic(): # loop through experiment configurations for exp_cfg in exp_cfgs: idim, int_dim, odim, inp, golden = exp_cfg - # model.set_metadata_prop("rtlsim_trace", "trace_size0.vcd") # get config for the new dimensions swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") 
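Note: the "timeout" being raised here is the rtlsim liveness threshold, which the simulation picks up from the LIVENESS_THRESHOLD environment variable; the test saves the previous value via pyverilate_get_liveness_threshold_cycles() and writes it back after rtlsim_exec. A hypothetical helper (not part of this patch) sketching the same save/restore with try/finally, so the old setting also comes back if the simulation raises:

    import os

    def with_liveness_threshold(run_fn, cycles=10_000_000):
        # hypothetical wrapper: temporarily raise the rtlsim liveness threshold
        prev = os.environ.get("LIVENESS_THRESHOLD")
        os.environ["LIVENESS_THRESHOLD"] = str(cycles)
        try:
            return run_fn()
        finally:
            # put the environment back the way it was, even on failure
            if prev is None:
                os.environ.pop("LIVENESS_THRESHOLD", None)
            else:
                os.environ["LIVENESS_THRESHOLD"] = prev

Such a wrapper could be called as with_liveness_threshold(lambda: rtlsim_exec(model, ctx, pre_hook=config_hook(configs))); the patch below keeps the explicit save/restore inline instead.
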
swg0 = getCustomOp(swg_nodes[0]) @@ -235,9 +236,11 @@ def test_fpgadataflow_conv_dynamic(): last_node_shp[2] = odim update_tensor_dim(model, last_node.onnx_node.output[0], (odim, odim)) last_node.set_nodeattr("folded_shape", last_node_shp) - model.set_metadata_prop("rtlsim_trace", "trace_size1.vcd") ctx = {"global_in": inp.transpose(0, 2, 3, 1)} + liveness_prev = pyverilate_get_liveness_threshold_cycles() + os.environ["LIVENESS_THRESHOLD"] = "10000000" rtlsim_exec(model, ctx, pre_hook=config_hook(configs)) + os.environ["LIVENESS_THRESHOLD"] = str(liveness_prev) ret = ctx["global_out"].transpose(0, 3, 1, 2) assert np.isclose(golden, ret).all() From 8e8c48bfd23330c28f629a84b5f6df490dda7ee4 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 16:08:14 +0300 Subject: [PATCH 228/628] [Test] add padding support to conv_dynamic test --- ...dataflow_convinputgenerator_rtl_dynamic.py | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 23fa79a2a2..0bc1450d09 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -41,7 +41,10 @@ from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes -from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul +from qonnx.transformation.lower_convs_to_matmul import ( + LowerConvsToMatMul, + _auto_pad_to_explicit_padding, +) from qonnx.util.basic import gen_finn_dt_tensor, get_by_name import finn.core.onnx_exec as oxe @@ -59,10 +62,14 @@ from finn.util.basic import pyverilate_get_liveness_threshold_cycles -def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): +def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode): np.random.seed(0) ishp = (1, ifm, idim, idim) - int_dim = compute_conv_output_dim(idim, k, stride) + pad_0 = _auto_pad_to_explicit_padding(pad_mode, idim, idim, k, k, stride, stride, 2) + int_dim = compute_conv_output_dim(idim, k, stride, total_pad=pad_0[0] + pad_0[2]) + pad_1 = _auto_pad_to_explicit_padding( + pad_mode, int_dim, int_dim, k, k, stride, stride, 2 + ) odim = compute_conv_output_dim(int_dim, k, stride) oshp = (1, ofm, odim, odim) wshp = (ofm, ifm, k, k) @@ -72,7 +79,8 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): wshp_str = str(list(wshp)) wshp_1_str = str(list(wshp_1)) kshp_str = str([k, k]) - pad_str = str([0, 0, 0, 0]) + pad_0_str = str(list(pad_0)) + pad_1_str = str(list(pad_1)) stride_str = str([stride, stride]) dil_str = str([1, 1]) @@ -88,11 +96,11 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): > {{ conv0 = Conv< - dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_str}, + dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_0_str}, strides={stride_str} >(in0, param_c0_weight) out0 = Conv< - dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_str}, + dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_1_str}, strides={stride_str} >(conv0, param_c1_weight) }} @@ -154,9 +162,10 @@ def write_swg_config(sim): return write_swg_config +@pytest.mark.parametrize("pad_mode", ["VALID"]) @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_conv_dynamic(): +def 
test_fpgadataflow_conv_dynamic(pad_mode): idims = [32, 16] ifm = 4 k = 4 @@ -170,7 +179,7 @@ def test_fpgadataflow_conv_dynamic(): ishp = (1, ifm, idim, idim) np.random.seed(0) inp = gen_finn_dt_tensor(idt, ishp) - model = create_conv_model(idim, ifm, k, stride, ofm, idt, wdt) + model = create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode) _, _, int_dim, _ = model.get_tensor_shape("conv0") _, _, odim, _ = model.get_tensor_shape("out0") if idim == max(idims): @@ -192,6 +201,11 @@ def test_fpgadataflow_conv_dynamic(): parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] ) model = ModelWrapper(sdp_inst.get_nodeattr("model")) + assert len(model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")) == 2 + if pad_mode == "VALID": + assert len(model.get_nodes_by_op_type("FMPadding_rtl")) == 0 + else: + assert len(model.get_nodes_by_op_type("FMPadding_rtl")) == 2 for swg_node in model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl"): getCustomOp(swg_node).set_nodeattr("SIMD", 1) getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) From efede2beedc1828b89266b8b776e1441528cfc97 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 17:07:20 +0300 Subject: [PATCH 229/628] [Test] get dynamic conv test ready for dynamic FMPad --- ...dataflow_convinputgenerator_rtl_dynamic.py | 49 ++++++++++++++----- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 0bc1450d09..7d3e693405 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -154,8 +154,10 @@ def write_swg_config(sim): # 1. Write config registers to the SWG, dict defines (addr, value) tuples for config_entry in config.values(): axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) - # 2. Set cfg_valid flag (>= 1 cycle) - axilite_write(sim, 0, 1, basename=axi_name) + # 2. Set cfg_valid flag (>= 1 cycle) for SWGG + # TODO better interface names to separate SWGG and padding + if "s_axi_cfg" in axi_name: + axilite_write(sim, 0, 1, basename=axi_name) # 3. 
Reset component (>= 1 cycle) reset_rtlsim(sim) @@ -182,11 +184,14 @@ def test_fpgadataflow_conv_dynamic(pad_mode): model = create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode) _, _, int_dim, _ = model.get_tensor_shape("conv0") _, _, odim, _ = model.get_tensor_shape("out0") + pad0 = get_by_name(model.graph.node[0].attribute, "pads").ints + pad1 = get_by_name(model.graph.node[1].attribute, "pads").ints if idim == max(idims): # use largest model for hardware conversion largest_model = copy.deepcopy(model) golden = execute_onnx(model, {"in0": inp})["out0"] - exp_cfg = (idim, int_dim, odim, inp, golden) + print("pads: %s %s" % (str(pad0), str(pad1))) + exp_cfg = (idim, int_dim, odim, pad0, pad1, inp, golden) exp_cfgs.append(exp_cfg) # convert to hardware and prepare simulation @@ -206,14 +211,13 @@ def test_fpgadataflow_conv_dynamic(pad_mode): assert len(model.get_nodes_by_op_type("FMPadding_rtl")) == 0 else: assert len(model.get_nodes_by_op_type("FMPadding_rtl")) == 2 - for swg_node in model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl"): + dyn_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") + dyn_nodes += model.get_nodes_by_op_type("FMPadding_rtl") + for swg_node in dyn_nodes: getCustomOp(swg_node).set_nodeattr("SIMD", 1) getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) getCustomOp(swg_node).set_nodeattr("inFIFODepth", 16) getCustomOp(swg_node).set_nodeattr("outFIFODepth", 16) - print("SWG initial config:") - idim = getCustomOp(swg_node).get_nodeattr("IFMDim") - print(getCustomOp(swg_node).get_dynamic_config(idim)) model = model.transform(InsertFIFO()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) @@ -224,18 +228,39 @@ def test_fpgadataflow_conv_dynamic(pad_mode): # loop through experiment configurations for exp_cfg in exp_cfgs: - idim, int_dim, odim, inp, golden = exp_cfg + idim, int_dim, odim, pad0, pad1, inp, golden = exp_cfg + conv0_idim = idim + pad0[0] + pad0[2] + conv1_idim = int_dim + pad1[0] + pad1[2] # get config for the new dimensions swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") swg0 = getCustomOp(swg_nodes[0]) - update_tensor_dim(model, swg0.onnx_node.input[0], (idim, idim)) + update_tensor_dim(model, swg0.onnx_node.input[0], (conv0_idim, conv0_idim)) update_tensor_dim(model, swg0.onnx_node.output[0], (int_dim, int_dim)) - config0 = swg0.get_dynamic_config((idim, idim)) + config0 = swg0.get_dynamic_config((conv0_idim, conv0_idim)) swg1 = getCustomOp(swg_nodes[1]) - update_tensor_dim(model, swg1.onnx_node.input[0], (int_dim, int_dim)) + update_tensor_dim(model, swg1.onnx_node.input[0], (conv1_idim, conv1_idim)) update_tensor_dim(model, swg1.onnx_node.output[0], (odim, odim)) - config1 = swg1.get_dynamic_config((int_dim, int_dim)) + config1 = swg1.get_dynamic_config((conv1_idim, conv1_idim)) configs = [("s_axi_cfg_0_", config0), ("s_axi_cfg_1_", config1)] + if pad_mode != "VALID": + pad_nodes = model.get_nodes_by_op_type("FMPadding_rtl") + padder0 = getCustomOp(pad_nodes[0]) + update_tensor_dim(model, padder0.onnx_node.input[0], (idim, idim)) + update_tensor_dim( + model, padder0.onnx_node.output[0], (conv0_idim, conv0_idim) + ) + pad_config0 = padder0.get_dynamic_config((idim, idim), pad0) + padder1 = getCustomOp(pad_nodes[1]) + update_tensor_dim(model, padder1.onnx_node.input[0], (int_dim, int_dim)) + update_tensor_dim( + model, padder1.onnx_node.output[0], (conv1_idim, conv1_idim) + ) + pad_config1 = padder1.get_dynamic_config((int_dim, int_dim), pad1) + 
configs.append(("s_axilite_0_", pad_config0)) + configs.append(("s_axilite_1_", pad_config1)) + print("FMPadding_rtl configs") + print(pad_config0) + print(pad_config1) # adjust folded shapes for I/O FIFOs # (since rtlsim_exec uses folded shape info to fold global i/o tensors) first_node = getCustomOp(model.graph.node[0]) From cb0bf3cbec75701d9ec910279c9dd6369f15c27b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 17:38:58 +0300 Subject: [PATCH 230/628] [Pad] switch top-level template SystemVerilog->Verilog this keeps Vivado happy for interface inference for blocks --- .../hdl/{fmpadding_template.sv => fmpadding_template.v} | 0 src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 9 +++++---- 2 files changed, 5 insertions(+), 4 deletions(-) rename finn-rtllib/fmpadding/hdl/{fmpadding_template.sv => fmpadding_template.v} (100%) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.sv b/finn-rtllib/fmpadding/hdl/fmpadding_template.v similarity index 100% rename from finn-rtllib/fmpadding/hdl/fmpadding_template.sv rename to finn-rtllib/fmpadding/hdl/fmpadding_template.v diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index 0b15562604..0ca11c6be3 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -293,7 +293,7 @@ def get_dynamic_config(self, ifm_dims, pads): def generate_hdl(self): rtlsrc = os.environ["FINN_ROOT"] + "/finn-rtllib/fmpadding/hdl" - template_path = rtlsrc + "/fmpadding_template.sv" + template_path = rtlsrc + "/fmpadding_template.v" dims = self.get_nodeattr("ImgDim") pads = self.get_nodeattr("Padding") chans = self.get_nodeattr("NumChannels") @@ -313,7 +313,7 @@ def generate_hdl(self): template = template.replace(key, str(code_gen_dict[key_name])) with open( - os.path.join(code_gen_dir, self.get_verilog_top_module_name() + ".sv"), + os.path.join(code_gen_dir, self.get_verilog_top_module_name() + ".v"), "w", ) as f: f.write(template) @@ -341,7 +341,7 @@ def prepare_rtlsim(self): "fmpadding_axi.sv", "fmpadding.sv", "axi2we.sv", - self.get_nodeattr("gen_top_module") + ".sv", + self.get_nodeattr("gen_top_module") + ".v", ] # build the Verilator emu library @@ -363,7 +363,8 @@ def code_generation_ipi(self): sourcefiles = [ "fmpadding_axi.sv", "fmpadding.sv", - self.get_nodeattr("gen_top_module") + ".sv", + "axi2we.sv", + self.get_nodeattr("gen_top_module") + ".v", ] sourcefiles = [os.path.join(code_gen_dir, f) for f in sourcefiles] From e73d81af6aa5152b1408c3420620d0ae12f0181a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 17:40:15 +0300 Subject: [PATCH 231/628] [Test] cover cases with padding in dynamic conv tests --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 7d3e693405..d1b926e4c2 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -70,7 +70,7 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode): pad_1 = _auto_pad_to_explicit_padding( pad_mode, int_dim, int_dim, k, k, stride, stride, 2 ) - odim = compute_conv_output_dim(int_dim, k, stride) + odim = compute_conv_output_dim(int_dim, k, stride, total_pad=pad_1[0] + pad_1[2]) oshp = (1, ofm, odim, odim) 
wshp = (ofm, ifm, k, k) wshp_1 = (ofm, ofm, k, k) @@ -164,7 +164,7 @@ def write_swg_config(sim): return write_swg_config -@pytest.mark.parametrize("pad_mode", ["VALID"]) +@pytest.mark.parametrize("pad_mode", ["SAME_UPPER", "VALID"]) @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_conv_dynamic(pad_mode): From 1bfb12f1a5556c95c19c6e4db5598e045d7c4b20 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 20:34:54 +0300 Subject: [PATCH 232/628] [Pad] bugfix: use dynamic parameters for dynamic config gen --- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index 0ca11c6be3..df6f40ab6f 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -275,12 +275,10 @@ def get_dynamic_config(self, ifm_dims, pads): """Returns a configuration dict to re-configure FM dimension and padding amounts during runtime.""" - dims = self.get_nodeattr("ImgDim") - pads = self.get_nodeattr("Padding") chans = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") idt = self.get_input_datatype() - code_gen_dict = self.get_template_values(dims, pads, chans, simd, idt) + code_gen_dict = self.get_template_values(ifm_dims, pads, chans, simd, idt) config = { "XON": (0, (code_gen_dict["INIT_XON"])), "XOFF": (1, (code_gen_dict["INIT_XOFF"])), From 739026ca70621696fc78b27107a49ce1820b6928 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 16 Oct 2022 20:35:17 +0300 Subject: [PATCH 233/628] [Test] remove print/debug statements from dynamic conv test --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index d1b926e4c2..0db3c139fb 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -190,7 +190,6 @@ def test_fpgadataflow_conv_dynamic(pad_mode): # use largest model for hardware conversion largest_model = copy.deepcopy(model) golden = execute_onnx(model, {"in0": inp})["out0"] - print("pads: %s %s" % (str(pad0), str(pad1))) exp_cfg = (idim, int_dim, odim, pad0, pad1, inp, golden) exp_cfgs.append(exp_cfg) @@ -258,9 +257,6 @@ def test_fpgadataflow_conv_dynamic(pad_mode): pad_config1 = padder1.get_dynamic_config((int_dim, int_dim), pad1) configs.append(("s_axilite_0_", pad_config0)) configs.append(("s_axilite_1_", pad_config1)) - print("FMPadding_rtl configs") - print(pad_config0) - print(pad_config1) # adjust folded shapes for I/O FIFOs # (since rtlsim_exec uses folded shape info to fold global i/o tensors) first_node = getCustomOp(model.graph.node[0]) @@ -277,7 +273,7 @@ def test_fpgadataflow_conv_dynamic(pad_mode): last_node.set_nodeattr("folded_shape", last_node_shp) ctx = {"global_in": inp.transpose(0, 2, 3, 1)} liveness_prev = pyverilate_get_liveness_threshold_cycles() - os.environ["LIVENESS_THRESHOLD"] = "10000000" + os.environ["LIVENESS_THRESHOLD"] = "100000" rtlsim_exec(model, ctx, pre_hook=config_hook(configs)) os.environ["LIVENESS_THRESHOLD"] = str(liveness_prev) ret = ctx["global_out"].transpose(0, 3, 1, 2) From 0bfcc545b4f338eda084a7ee93c3de4d2c9b5821 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 17 Oct 2022 
17:08:51 +0200 Subject: [PATCH 234/628] [Pad] bring FMPadding_rtl shape/dtype/stream fxn interfaces up to speed --- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index df6f40ab6f..d0302540bd 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -101,20 +101,20 @@ def get_exp_cycles(self): exp_cycles = (channels / simd) * batch_size * odim_h * odim_w return int(exp_cycles) - def get_normal_input_shape(self): + def get_normal_input_shape(self, ind=0): idim_h, idim_w = self.get_nodeattr("ImgDim") num_ch = self.get_nodeattr("NumChannels") ishape = (1, idim_h, idim_w, num_ch) return ishape - def get_normal_output_shape(self): + def get_normal_output_shape(self, ind=0): odim_h, odim_w = self.get_padded_odim() num_ch = self.get_nodeattr("NumChannels") oshape = (1, odim_h, odim_w, num_ch) return oshape - def get_folded_input_shape(self): + def get_folded_input_shape(self, ind=0): normal_ishape = list(self.get_normal_input_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -123,7 +123,7 @@ def get_folded_input_shape(self): folded_ishape = normal_ishape[:-1] + [fold, simd] return tuple(folded_ishape) - def get_folded_output_shape(self): + def get_folded_output_shape(self, ind=0): normal_oshape = list(self.get_normal_output_shape()) ifm_ch = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") @@ -155,7 +155,7 @@ def infer_node_datatype(self, model): def verify_node(self): pass - def get_input_datatype(self): + def get_input_datatype(self, ind=0): """Returns FINN DataType of input.""" ret = DataType[self.get_nodeattr("inputDataType")] # the hlslib op always pads with zeros, so ensure that the DataType @@ -163,16 +163,16 @@ def get_input_datatype(self): assert ret.allowed(0), "FMPadding_Batch DataType must support zero" return ret - def get_output_datatype(self): + def get_output_datatype(self, ind=0): """Returns FINN DataType of output. 
(Same as input datatype)""" return self.get_input_datatype() - def get_instream_width(self): + def get_instream_width(self, ind=0): ibits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return ibits * simd - def get_outstream_width(self): + def get_outstream_width(self, ind=0): obits = self.get_output_datatype().bitwidth() simd = self.get_nodeattr("SIMD") return obits * simd From 43a37155ae01519b4fe07c19b3b521310c1e6627 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 17 Oct 2022 17:57:37 +0200 Subject: [PATCH 235/628] [Pad] more changes to RTL inst template for Verilog compatibility --- .../fmpadding/hdl/fmpadding_template.v | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.v b/finn-rtllib/fmpadding/hdl/fmpadding_template.v index 66f235fc6a..c70dcbeaa6 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_template.v +++ b/finn-rtllib/fmpadding/hdl/fmpadding_template.v @@ -32,9 +32,9 @@ module $TOP_MODULE_NAME$( //- Global Control ------------------ (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) -input logic ap_clk, +input ap_clk, (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) -input logic ap_rst_n, +input ap_rst_n, //- AXI Lite ------------------------ // Writing @@ -62,14 +62,14 @@ output [31:0] s_axilite_RDATA, output [ 1:0] s_axilite_RRESP, //- AXI Stream - Input -------------- -output logic in0_V_TREADY, -input logic in0_V_TVALID, -input logic [$STREAM_BITS$-1:0] in0_V_TDATA, +output in0_V_TREADY, +input in0_V_TVALID, +input [$STREAM_BITS$-1:0] in0_V_TDATA, //- AXI Stream - Output ------------- -input logic out_V_TREADY, -output logic out_V_TVALID, -output logic [$STREAM_BITS$-1:0] out_V_TDATA +input out_V_TREADY, +output out_V_TVALID, +output [$STREAM_BITS$-1:0] out_V_TDATA ); @@ -90,23 +90,23 @@ $TOP_MODULE_NAME$_impl ( .ap_clk(ap_clk), .ap_rst_n(ap_rst_n), - .s_axilite_AWVALID, - .s_axilite_AWREADY, - .s_axilite_AWADDR, - .s_axilite_WVALID, - .s_axilite_WREADY, - .s_axilite_WDATA, - .s_axilite_WSTRB, - .s_axilite_BVALID, - .s_axilite_BREADY, - .s_axilite_BRESP, - .s_axilite_ARVALID, - .s_axilite_ARREADY, - .s_axilite_ARADDR, - .s_axilite_RVALID, - .s_axilite_RREADY, - .s_axilite_RDATA, - .s_axilite_RRESP, + .s_axilite_AWVALID(s_axilite_AWVALID), + .s_axilite_AWREADY(s_axilite_AWREADY), + .s_axilite_AWADDR(s_axilite_AWADDR), + .s_axilite_WVALID(s_axilite_WVALID), + .s_axilite_WREADY(s_axilite_WREADY), + .s_axilite_WDATA(s_axilite_WDATA), + .s_axilite_WSTRB(s_axilite_WSTRB), + .s_axilite_BVALIDs_axilite_BVALID(), + .s_axilite_BREADY(s_axilite_BREADY), + .s_axilite_BRESP(s_axilite_BRESP), + .s_axilite_ARVALID(s_axilite_ARVALID), + .s_axilite_ARREADY(s_axilite_ARREADY), + .s_axilite_ARADDR(s_axilite_ARADDR), + .s_axilite_RVALID(s_axilite_RVALID), + .s_axilite_RREADY(s_axilite_RREADY), + .s_axilite_RDATA(s_axilite_RDATA), + .s_axilite_RRESP(s_axilite_RRESP), .s_axis_tready(in0_V_TREADY), .s_axis_tvalid(in0_V_TVALID), .s_axis_tdata(in0_V_TDATA), From 835ba0fa874bdd77109e33a4cf43b49d726c30e8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 17 Oct 2022 19:05:53 +0200 Subject: [PATCH 236/628] [Pad] typo fix in template --- finn-rtllib/fmpadding/hdl/fmpadding_template.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.v b/finn-rtllib/fmpadding/hdl/fmpadding_template.v index c70dcbeaa6..25062a81cb 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_template.v +++ 
b/finn-rtllib/fmpadding/hdl/fmpadding_template.v @@ -97,7 +97,7 @@ $TOP_MODULE_NAME$_impl .s_axilite_WREADY(s_axilite_WREADY), .s_axilite_WDATA(s_axilite_WDATA), .s_axilite_WSTRB(s_axilite_WSTRB), - .s_axilite_BVALIDs_axilite_BVALID(), + .s_axilite_BVALID(s_axilite_BVALID), .s_axilite_BREADY(s_axilite_BREADY), .s_axilite_BRESP(s_axilite_BRESP), .s_axilite_ARVALID(s_axilite_ARVALID), From 6e91fcd6664a12fbb9447e8ac5eb62437484de7b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 19 Oct 2022 10:45:01 +0200 Subject: [PATCH 237/628] [SWGG] rename dynamic cfg interface to s_axilite --- .../swg/swg_template_wrapper_dynamic.v | 96 +++++++++---------- .../convolutioninputgenerator_rtl.py | 2 +- ...dataflow_convinputgenerator_rtl_dynamic.py | 23 +++-- 3 files changed, 63 insertions(+), 58 deletions(-) diff --git a/finn-rtllib/swg/swg_template_wrapper_dynamic.v b/finn-rtllib/swg/swg_template_wrapper_dynamic.v index d6f839de43..8d16dc10bb 100644 --- a/finn-rtllib/swg/swg_template_wrapper_dynamic.v +++ b/finn-rtllib/swg/swg_template_wrapper_dynamic.v @@ -14,13 +14,13 @@ module $TOP_MODULE_NAME$ #( parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN, parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT, - parameter integer C_s_axi_cfg_DATA_WIDTH = 32, - parameter integer C_s_axi_cfg_ADDR_WIDTH = 6 + parameter integer C_s_axilite_DATA_WIDTH = 32, + parameter integer C_s_axilite_ADDR_WIDTH = 6 ) ( - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) input ap_clk, - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) input ap_rst_n, input [BUF_IN_WIDTH-1:0] in0_V_TDATA, input in0_V_TVALID, @@ -29,28 +29,28 @@ module $TOP_MODULE_NAME$ #( output out_V_TVALID, input out_V_TREADY, - // Ports of Axi Slave Bus Interface s_axi_cfg - //input wire s_axi_cfg_aclk, - //input wire s_axi_cfg_aresetn, - input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_awaddr, - input wire [2 : 0] s_axi_cfg_awprot, - input wire s_axi_cfg_awvalid, - output wire s_axi_cfg_awready, - input wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_wdata, - input wire [(C_s_axi_cfg_DATA_WIDTH/8)-1 : 0] s_axi_cfg_wstrb, - input wire s_axi_cfg_wvalid, - output wire s_axi_cfg_wready, - output wire [1 : 0] s_axi_cfg_bresp, - output wire s_axi_cfg_bvalid, - input wire s_axi_cfg_bready, - input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_araddr, - input wire [2 : 0] s_axi_cfg_arprot, - input wire s_axi_cfg_arvalid, - output wire s_axi_cfg_arready, - output wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_rdata, - output wire [1 : 0] s_axi_cfg_rresp, - output wire s_axi_cfg_rvalid, - input wire s_axi_cfg_rready + // Ports of Axi Slave Bus Interface s_axilite + //input wire s_axilite_aclk, + //input wire s_axilite_aresetn, + input wire [C_s_axilite_ADDR_WIDTH-1 : 0] s_axilite_awaddr, + input wire [2 : 0] s_axilite_awprot, + input wire s_axilite_awvalid, + output wire s_axilite_awready, + input wire [C_s_axilite_DATA_WIDTH-1 : 0] s_axilite_wdata, + input wire [(C_s_axilite_DATA_WIDTH/8)-1 : 0] s_axilite_wstrb, + input wire s_axilite_wvalid, + output wire s_axilite_wready, + output wire [1 : 0] s_axilite_bresp, + output wire s_axilite_bvalid, + input wire s_axilite_bready, + input wire [C_s_axilite_ADDR_WIDTH-1 : 0] s_axilite_araddr, + input wire [2 : 0] s_axilite_arprot, + input wire s_axilite_arvalid, + output wire s_axilite_arready, + output wire [C_s_axilite_DATA_WIDTH-1 
: 0] s_axilite_rdata, + output wire [1 : 0] s_axilite_rresp, + output wire s_axilite_rvalid, + input wire s_axilite_rready ); wire cfg_valid; @@ -70,32 +70,32 @@ wire [INCR_BITWIDTH-1:0] cfg_incr_tail_last; wire [31:0] cfg_last_read; wire [31:0] cfg_last_write; -// Instantiation of Axi Bus Interface s_axi_cfg +// Instantiation of Axi Bus Interface s_axilite $TOP_MODULE_NAME$_axilite # ( - .C_S_AXI_DATA_WIDTH(C_s_axi_cfg_DATA_WIDTH), - .C_S_AXI_ADDR_WIDTH(C_s_axi_cfg_ADDR_WIDTH) + .C_S_AXI_DATA_WIDTH(C_s_axilite_DATA_WIDTH), + .C_S_AXI_ADDR_WIDTH(C_s_axilite_ADDR_WIDTH) ) axilite_cfg_inst ( .S_AXI_ACLK(ap_clk), .S_AXI_ARESETN(ap_rst_n), - .S_AXI_AWADDR(s_axi_cfg_awaddr), - .S_AXI_AWPROT(s_axi_cfg_awprot), - .S_AXI_AWVALID(s_axi_cfg_awvalid), - .S_AXI_AWREADY(s_axi_cfg_awready), - .S_AXI_WDATA(s_axi_cfg_wdata), - .S_AXI_WSTRB(s_axi_cfg_wstrb), - .S_AXI_WVALID(s_axi_cfg_wvalid), - .S_AXI_WREADY(s_axi_cfg_wready), - .S_AXI_BRESP(s_axi_cfg_bresp), - .S_AXI_BVALID(s_axi_cfg_bvalid), - .S_AXI_BREADY(s_axi_cfg_bready), - .S_AXI_ARADDR(s_axi_cfg_araddr), - .S_AXI_ARPROT(s_axi_cfg_arprot), - .S_AXI_ARVALID(s_axi_cfg_arvalid), - .S_AXI_ARREADY(s_axi_cfg_arready), - .S_AXI_RDATA(s_axi_cfg_rdata), - .S_AXI_RRESP(s_axi_cfg_rresp), - .S_AXI_RVALID(s_axi_cfg_rvalid), - .S_AXI_RREADY(s_axi_cfg_rready), + .S_AXI_AWADDR(s_axilite_awaddr), + .S_AXI_AWPROT(s_axilite_awprot), + .S_AXI_AWVALID(s_axilite_awvalid), + .S_AXI_AWREADY(s_axilite_awready), + .S_AXI_WDATA(s_axilite_wdata), + .S_AXI_WSTRB(s_axilite_wstrb), + .S_AXI_WVALID(s_axilite_wvalid), + .S_AXI_WREADY(s_axilite_wready), + .S_AXI_BRESP(s_axilite_bresp), + .S_AXI_BVALID(s_axilite_bvalid), + .S_AXI_BREADY(s_axilite_bready), + .S_AXI_ARADDR(s_axilite_araddr), + .S_AXI_ARPROT(s_axilite_arprot), + .S_AXI_ARVALID(s_axilite_arvalid), + .S_AXI_ARREADY(s_axilite_arready), + .S_AXI_RDATA(s_axilite_rdata), + .S_AXI_RRESP(s_axilite_rresp), + .S_AXI_RVALID(s_axilite_rvalid), + .S_AXI_RREADY(s_axilite_rready), .cfg_reg0(cfg_valid), .cfg_reg1(cfg_cntr_simd), diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 665325bdee..77a3d18974 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -844,7 +844,7 @@ def get_verilog_top_module_intf_names(self): Each block must have at most one aximm and one axilite.""" intf_names = super().get_verilog_top_module_intf_names() if self.get_nodeattr("dynamic_mode"): - intf_names["axilite"] = ["s_axi_cfg"] + intf_names["axilite"] = ["s_axilite"] return intf_names def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 0db3c139fb..36204de359 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -155,8 +155,8 @@ def write_swg_config(sim): for config_entry in config.values(): axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) # 2. Set cfg_valid flag (>= 1 cycle) for SWGG - # TODO better interface names to separate SWGG and padding - if "s_axi_cfg" in axi_name: + # TODO direct add wren register to generated config? + if len(config) == 15: axilite_write(sim, 0, 1, basename=axi_name) # 3. 
Reset component (>= 1 cycle) reset_rtlsim(sim) @@ -215,8 +215,8 @@ def test_fpgadataflow_conv_dynamic(pad_mode): for swg_node in dyn_nodes: getCustomOp(swg_node).set_nodeattr("SIMD", 1) getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) - getCustomOp(swg_node).set_nodeattr("inFIFODepth", 16) - getCustomOp(swg_node).set_nodeattr("outFIFODepth", 16) + getCustomOp(swg_node).set_nodeattr("inFIFODepths", [16]) + getCustomOp(swg_node).set_nodeattr("outFIFODepths", [16]) model = model.transform(InsertFIFO()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) @@ -235,12 +235,11 @@ def test_fpgadataflow_conv_dynamic(pad_mode): swg0 = getCustomOp(swg_nodes[0]) update_tensor_dim(model, swg0.onnx_node.input[0], (conv0_idim, conv0_idim)) update_tensor_dim(model, swg0.onnx_node.output[0], (int_dim, int_dim)) - config0 = swg0.get_dynamic_config((conv0_idim, conv0_idim)) + swg_config0 = swg0.get_dynamic_config((conv0_idim, conv0_idim)) swg1 = getCustomOp(swg_nodes[1]) update_tensor_dim(model, swg1.onnx_node.input[0], (conv1_idim, conv1_idim)) update_tensor_dim(model, swg1.onnx_node.output[0], (odim, odim)) - config1 = swg1.get_dynamic_config((conv1_idim, conv1_idim)) - configs = [("s_axi_cfg_0_", config0), ("s_axi_cfg_1_", config1)] + swg_config1 = swg1.get_dynamic_config((conv1_idim, conv1_idim)) if pad_mode != "VALID": pad_nodes = model.get_nodes_by_op_type("FMPadding_rtl") padder0 = getCustomOp(pad_nodes[0]) @@ -255,8 +254,14 @@ def test_fpgadataflow_conv_dynamic(pad_mode): model, padder1.onnx_node.output[0], (conv1_idim, conv1_idim) ) pad_config1 = padder1.get_dynamic_config((int_dim, int_dim), pad1) - configs.append(("s_axilite_0_", pad_config0)) - configs.append(("s_axilite_1_", pad_config1)) + configs = [ + ("s_axilite_0_", pad_config0), + ("s_axilite_1_", swg_config0), + ("s_axilite_2_", pad_config1), + ("s_axilite_3_", swg_config1), + ] + else: + configs = [("s_axilite_0_", swg_config0), ("s_axilite_1_", swg_config1)] # adjust folded shapes for I/O FIFOs # (since rtlsim_exec uses folded shape info to fold global i/o tensors) first_node = getCustomOp(model.graph.node[0]) From 46257f31844fa99555a0038d49e3f97a42aac390 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 19 Oct 2022 10:47:07 +0200 Subject: [PATCH 238/628] [DynConv] make get_dynamic_config return defaults with None params --- .../custom_op/fpgadataflow/convolutioninputgenerator_rtl.py | 4 +++- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 77a3d18974..0a51c9e881 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -847,13 +847,15 @@ def get_verilog_top_module_intf_names(self): intf_names["axilite"] = ["s_axilite"] return intf_names - def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): + def get_dynamic_config(self, ifm_dim=None, stride=None, dilation=None): """Returns a configuration dict to re-configure FM dimension during runtime. Stride and dilation can also be changed. Certain restrictions apply (e.g. 
component must be synthesized for largest buffer size).""" # NOTE: For better driver integration, this functionality could be packaged # as a standalone function in the future + if ifm_dim is None: + ifm_dim = self.get_nodeattr("IFMDim") k = self.get_nodeattr("ConvKernelDim") if stride is None: stride = self.get_nodeattr("Stride") diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index d0302540bd..a85c765a00 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -271,10 +271,14 @@ def get_template_values(self, ifm_dims, pads, chans, simd, idt): } return code_gen_dict - def get_dynamic_config(self, ifm_dims, pads): + def get_dynamic_config(self, ifm_dims=None, pads=None): """Returns a configuration dict to re-configure FM dimension and padding amounts during runtime.""" + if ifm_dims is None: + ifm_dims = self.get_nodeattr("ImgDim") + if pads is None: + pads = self.get_nodeattr("Padding") chans = self.get_nodeattr("NumChannels") simd = self.get_nodeattr("SIMD") idt = self.get_input_datatype() From 79d6414823ea13a1e299da73a232d9f37a588da9 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 19 Oct 2022 11:32:37 +0200 Subject: [PATCH 239/628] [Pad] use contiguous, 4b-aligned config addrs for RTL variant --- finn-rtllib/fmpadding/hdl/axi2we.sv | 2 +- finn-rtllib/fmpadding/hdl/fmpadding.sv | 15 +++++++-------- finn-rtllib/fmpadding/hdl/fmpadding_axi.sv | 8 ++++---- finn-rtllib/fmpadding/hdl/fmpadding_template.v | 4 ++-- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 12 ++++++------ 5 files changed, 20 insertions(+), 21 deletions(-) diff --git a/finn-rtllib/fmpadding/hdl/axi2we.sv b/finn-rtllib/fmpadding/hdl/axi2we.sv index 0740eac5f8..842ba3632c 100644 --- a/finn-rtllib/fmpadding/hdl/axi2we.sv +++ b/finn-rtllib/fmpadding/hdl/axi2we.sv @@ -57,7 +57,7 @@ module axi2we #( // Reading tied to all-ones input s_axilite_ARVALID, output s_axilite_ARREADY, - input [3:0] s_axilite_ARADDR, + input [ADDR_BITS-1:0] s_axilite_ARADDR, output s_axilite_RVALID, input s_axilite_RREADY, diff --git a/finn-rtllib/fmpadding/hdl/fmpadding.sv b/finn-rtllib/fmpadding/hdl/fmpadding.sv index 08bcf9043b..904c7c381f 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding.sv @@ -53,7 +53,7 @@ module fmpadding #( // Parameter Configuration ---------- input logic we, - input logic [ 2:0] wa, + input logic [ 4:0] wa, input logic [31:0] wd, //- AXI Stream - Input -------------- @@ -125,13 +125,12 @@ module fmpadding #( always_ff @(posedge clk) begin if(we) begin unique case(wa) - 0: XOn <= wd; - 1: XOff <= wd; - 2: XEnd <= wd; - - 4: YOn <= wd; - 5: YOff <= wd; - 6: YEnd <= wd; + 0*4: XOn <= wd; + 1*4: XOff <= wd; + 2*4: XEnd <= wd; + 3*4: YOn <= wd; + 4*4: YOff <= wd; + 5*4: YEnd <= wd; default: assert(0) else begin $error("Illegal write address."); diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv index c2d4fd2e79..5948341d00 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv +++ b/finn-rtllib/fmpadding/hdl/fmpadding_axi.sv @@ -55,7 +55,7 @@ module fmpadding_axi #( // Writing input s_axilite_AWVALID, output s_axilite_AWREADY, - input [2:0] s_axilite_AWADDR, + input [4:0] s_axilite_AWADDR, input s_axilite_WVALID, output s_axilite_WREADY, @@ -69,7 +69,7 @@ module fmpadding_axi #( // Reading input s_axilite_ARVALID, output s_axilite_ARREADY, - input [3:0] s_axilite_ARADDR, + input [4:0] s_axilite_ARADDR, 
output s_axilite_RVALID, input s_axilite_RREADY, @@ -89,9 +89,9 @@ module fmpadding_axi #( // AXI-Lite Adapter uwire we; - uwire [ 2:0] wa; + uwire [ 4:0] wa; uwire [31:0] wd; - axi2we #(.ADDR_BITS(3)) axilight_adapter ( + axi2we #(.ADDR_BITS(5)) axilight_adapter ( .ap_clk, .ap_rst_n, .s_axilite_AWVALID, .s_axilite_AWREADY, .s_axilite_AWADDR, diff --git a/finn-rtllib/fmpadding/hdl/fmpadding_template.v b/finn-rtllib/fmpadding/hdl/fmpadding_template.v index 25062a81cb..0b0f40f86a 100644 --- a/finn-rtllib/fmpadding/hdl/fmpadding_template.v +++ b/finn-rtllib/fmpadding/hdl/fmpadding_template.v @@ -40,7 +40,7 @@ input ap_rst_n, // Writing input s_axilite_AWVALID, output s_axilite_AWREADY, -input [2:0] s_axilite_AWADDR, +input [4:0] s_axilite_AWADDR, input s_axilite_WVALID, output s_axilite_WREADY, @@ -54,7 +54,7 @@ output [1:0] s_axilite_BRESP, // Reading input s_axilite_ARVALID, output s_axilite_ARREADY, -input [3:0] s_axilite_ARADDR, +input [4:0] s_axilite_ARADDR, output s_axilite_RVALID, input s_axilite_RREADY, diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index a85c765a00..c47f9d52a2 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -284,12 +284,12 @@ def get_dynamic_config(self, ifm_dims=None, pads=None): idt = self.get_input_datatype() code_gen_dict = self.get_template_values(ifm_dims, pads, chans, simd, idt) config = { - "XON": (0, (code_gen_dict["INIT_XON"])), - "XOFF": (1, (code_gen_dict["INIT_XOFF"])), - "XEND": (2, (code_gen_dict["INIT_XEND"])), - "YON": (4, (code_gen_dict["INIT_YON"])), - "YOFF": (5, (code_gen_dict["INIT_YOFF"])), - "YEND": (6, (code_gen_dict["INIT_YEND"])), + "XON": (0 * 4, (code_gen_dict["INIT_XON"])), + "XOFF": (1 * 4, (code_gen_dict["INIT_XOFF"])), + "XEND": (2 * 4, (code_gen_dict["INIT_XEND"])), + "YON": (3 * 4, (code_gen_dict["INIT_YON"])), + "YOFF": (4 * 4, (code_gen_dict["INIT_YOFF"])), + "YEND": (5 * 4, (code_gen_dict["INIT_YEND"])), } return config From a2de701327b1efa07094da560de8f8467e228541 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 19 Oct 2022 13:50:57 +0200 Subject: [PATCH 240/628] [SWG] generate wren as part of config reg generation --- .../fpgadataflow/convolutioninputgenerator_rtl.py | 1 + .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 8 ++------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 0a51c9e881..ecc502b132 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -885,6 +885,7 @@ def get_dynamic_config(self, ifm_dim=None, stride=None, dilation=None): # each setting is mapped to an axi-lite register address template_path, code_gen_dict = self.prepare_codegen_default() config = { + "cfg_wren": (0 * 4, 1), "cfg_cntr_simd": (1 * 4, int(code_gen_dict["$LOOP_SIMD_ITERATIONS$"][0])), "cfg_cntr_kw": (2 * 4, int(code_gen_dict["$LOOP_KW_ITERATIONS$"][0])), "cfg_cntr_kh": (3 * 4, int(code_gen_dict["$LOOP_KH_ITERATIONS$"][0])), diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 36204de359..9ca19c6c59 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ 
b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -151,14 +151,10 @@ def config_hook(configs): def write_swg_config(sim): for axi_name, config in configs: - # 1. Write config registers to the SWG, dict defines (addr, value) tuples + # Write config registers to the SWG/FMPadding dict + # defines (addr, value) tuples for config_entry in config.values(): axilite_write(sim, config_entry[0], config_entry[1], basename=axi_name) - # 2. Set cfg_valid flag (>= 1 cycle) for SWGG - # TODO direct add wren register to generated config? - if len(config) == 15: - axilite_write(sim, 0, 1, basename=axi_name) - # 3. Reset component (>= 1 cycle) reset_rtlsim(sim) return write_swg_config From 3fd3503c8defa5f25829b32566469a649b161495 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 20 Oct 2022 12:20:29 +0200 Subject: [PATCH 241/628] [Test] extend test_fpgadataflow_conv_dynamic coverage --- ...dataflow_convinputgenerator_rtl_dynamic.py | 59 ++++++++++++++----- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 9ca19c6c59..3f8743062e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -62,8 +62,10 @@ from finn.util.basic import pyverilate_get_liveness_threshold_cycles -def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode): +def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise): np.random.seed(0) + group = ifm if depthwise else 1 + group_str = str(group) ishp = (1, ifm, idim, idim) pad_0 = _auto_pad_to_explicit_padding(pad_mode, idim, idim, k, k, stride, stride, 2) int_dim = compute_conv_output_dim(idim, k, stride, total_pad=pad_0[0] + pad_0[2]) @@ -71,9 +73,9 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode): pad_mode, int_dim, int_dim, k, k, stride, stride, 2 ) odim = compute_conv_output_dim(int_dim, k, stride, total_pad=pad_1[0] + pad_1[2]) - oshp = (1, ofm, odim, odim) - wshp = (ofm, ifm, k, k) - wshp_1 = (ofm, ofm, k, k) + oshp = (1, ifm, odim, odim) if depthwise else (1, ofm, odim, odim) + wshp = (ifm, 1, k, k) if depthwise else (ofm, ifm, k, k) + wshp_1 = (ifm, 1, k, k) if depthwise else (ofm, ofm, k, k) ishp_str = str(list(ishp)) oshp_str = str(list(oshp)) wshp_str = str(list(wshp)) @@ -96,11 +98,11 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode): > {{ conv0 = Conv< - dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_0_str}, + dilations={dil_str},group={group_str},kernel_shape={kshp_str},pads={pad_0_str}, strides={stride_str} >(in0, param_c0_weight) out0 = Conv< - dilations={dil_str},group=1,kernel_shape={kshp_str},pads={pad_1_str}, + dilations={dil_str},group={group_str},kernel_shape={kshp_str},pads={pad_1_str}, strides={stride_str} >(conv0, param_c1_weight) }} @@ -160,15 +162,37 @@ def write_swg_config(sim): return write_swg_config -@pytest.mark.parametrize("pad_mode", ["SAME_UPPER", "VALID"]) +cfg0 = { + "idims": [32, 16], + "ifm": 4, + "k": 4, + "stride": 1, + "ofm": 8, + "depthwise": False, + "pad_mode": "SAME_UPPER", +} +cfg1 = { + "idims": [128, 4], + "ifm": 64, + "k": 3, + "stride": 1, + "ofm": 64, + "depthwise": True, + "pad_mode": "SAME_UPPER", +} + + +@pytest.mark.parametrize("cfg", [cfg0, cfg1]) @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_conv_dynamic(pad_mode): - idims = [32, 
16] - ifm = 4 - k = 4 - stride = 1 - ofm = 8 +def test_fpgadataflow_conv_dynamic(cfg): + pad_mode = cfg["pad_mode"] + depthwise = cfg["depthwise"] + idims = cfg["idims"] + ifm = cfg["ifm"] + k = cfg["k"] + stride = cfg["stride"] + ofm = cfg["ofm"] idt = DataType["UINT8"] wdt = DataType["INT2"] exp_cfgs = [] @@ -177,7 +201,9 @@ def test_fpgadataflow_conv_dynamic(pad_mode): ishp = (1, ifm, idim, idim) np.random.seed(0) inp = gen_finn_dt_tensor(idt, ishp) - model = create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode) + model = create_conv_model( + idim, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise + ) _, _, int_dim, _ = model.get_tensor_shape("conv0") _, _, odim, _ = model.get_tensor_shape("out0") pad0 = get_by_name(model.graph.node[0].attribute, "pads").ints @@ -195,6 +221,7 @@ def test_fpgadataflow_conv_dynamic(pad_mode): model = model.transform( to_hls.InferQuantizedMatrixVectorActivation(mem_mode="decoupled") ) + model = model.transform(to_hls.InferVectorVectorActivation()) model = model.transform(absorb.AbsorbConsecutiveTransposes()) parent_model = model.transform(CreateDataflowPartition()) sdp_inst = getCustomOp( @@ -213,6 +240,10 @@ def test_fpgadataflow_conv_dynamic(pad_mode): getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) getCustomOp(swg_node).set_nodeattr("inFIFODepths", [16]) getCustomOp(swg_node).set_nodeattr("outFIFODepths", [16]) + comp_nodes = model.get_nodes_by_op_type("MatrixVectorActivation") + comp_nodes += model.get_nodes_by_op_type("VectorVectorActivation") + for comp_node in comp_nodes: + getCustomOp(comp_node).set_nodeattr("PE", 1) model = model.transform(InsertFIFO()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) From 17051adb86eb3eb5c2e51b13375dc2f6282c47ab Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 20 Oct 2022 23:36:43 +0200 Subject: [PATCH 242/628] [Test] use higher PE config for dyn conv tests --- ...fpgadataflow_convinputgenerator_rtl_dynamic.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 3f8743062e..d7085e8491 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -57,6 +57,7 @@ ) from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.insert_dwc import InsertDWC from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.util.basic import pyverilate_get_liveness_threshold_cycles @@ -152,6 +153,7 @@ def config_hook(configs): return None def write_swg_config(sim): + reset_rtlsim(sim) for axi_name, config in configs: # Write config registers to the SWG/FMPadding dict # defines (addr, value) tuples @@ -193,7 +195,7 @@ def test_fpgadataflow_conv_dynamic(cfg): k = cfg["k"] stride = cfg["stride"] ofm = cfg["ofm"] - idt = DataType["UINT8"] + idt = DataType["UINT4"] wdt = DataType["INT2"] exp_cfgs = [] largest_model = None @@ -236,14 +238,19 @@ def test_fpgadataflow_conv_dynamic(cfg): dyn_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") dyn_nodes += model.get_nodes_by_op_type("FMPadding_rtl") for swg_node in dyn_nodes: - 
getCustomOp(swg_node).set_nodeattr("SIMD", 1) + getCustomOp(swg_node).set_nodeattr("SIMD", 4) getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) getCustomOp(swg_node).set_nodeattr("inFIFODepths", [16]) getCustomOp(swg_node).set_nodeattr("outFIFODepths", [16]) comp_nodes = model.get_nodes_by_op_type("MatrixVectorActivation") comp_nodes += model.get_nodes_by_op_type("VectorVectorActivation") for comp_node in comp_nodes: - getCustomOp(comp_node).set_nodeattr("PE", 1) + if depthwise: + getCustomOp(comp_node).set_nodeattr("PE", 4) + else: + getCustomOp(comp_node).set_nodeattr("SIMD", 4) + getCustomOp(comp_node).set_nodeattr("PE", 4) + model = model.transform(InsertDWC()) model = model.transform(InsertFIFO()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) @@ -306,6 +313,8 @@ def test_fpgadataflow_conv_dynamic(cfg): ctx = {"global_in": inp.transpose(0, 2, 3, 1)} liveness_prev = pyverilate_get_liveness_threshold_cycles() os.environ["LIVENESS_THRESHOLD"] = "100000" + # model.set_metadata_prop("rtlsim_trace", "trace_%d.vcd" % idim) + # import pdb; pdb.set_trace() rtlsim_exec(model, ctx, pre_hook=config_hook(configs)) os.environ["LIVENESS_THRESHOLD"] = str(liveness_prev) ret = ctx["global_out"].transpose(0, 3, 1, 2) From c7c15b7c9910c71a2bc5c147bfd55c30da649c10 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 21 Oct 2022 16:36:28 +0100 Subject: [PATCH 243/628] [Transform] Extend inFIFODepths for extw nodes --- src/finn/transformation/fpgadataflow/insert_iodma.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/insert_iodma.py b/src/finn/transformation/fpgadataflow/insert_iodma.py index 4b4eb6362f..28bcd9598a 100644 --- a/src/finn/transformation/fpgadataflow/insert_iodma.py +++ b/src/finn/transformation/fpgadataflow/insert_iodma.py @@ -211,7 +211,8 @@ def apply(self, model): # attached IODMA fc_extw_nodes = list( filter( - lambda x: x.op_type == "MatrixVectorActivation" + lambda x: x.op_type + in ["MatrixVectorActivation", "VectorVectorActivation"] and getCustomOp(x).get_nodeattr("mem_mode") == "external" and model.find_producer(x.input[1]) is None, all_nodes, @@ -259,6 +260,10 @@ def apply(self, model): ) fc_node.input[1] = fc_node_in.name model.graph.node.insert(0, dma_node) + # expand inFIFODepths for new second input of node + infifo_depth = fc_inst.get_nodeattr("inFIFODepths") + infifo_depth.append(8) + fc_inst.set_nodeattr("inFIFODepths", infifo_depth) modified = True if modified: model = model.transform(SortGraph()) From 83550c6470ffdf1915c16fd8fa5b7483374c2425 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Mon, 24 Oct 2022 14:24:38 +0200 Subject: [PATCH 244/628] [SWGG] incorporate Yaman's changes + small fixes --- finn-rtllib/swg/swg_template_default.sv | 36 ++++--- .../swg/swg_template_default_dynamic.sv | 73 +++++++------- .../swg/swg_template_wrapper_dynamic.v | 94 +++++++++---------- .../convolutioninputgenerator_rtl.py | 61 +++++------- ...dataflow_convinputgenerator_rtl_dynamic.py | 15 +-- 5 files changed, 132 insertions(+), 147 deletions(-) diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 15f1bd75db..f944b955fb 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -36,7 +36,6 @@ module $TOP_MODULE_NAME$_controller #( int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, - bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = 
$ADDR_INCREMENT_MAP$, bit IS_DEPTHWISE = $IS_DEPTHWISE$ )( @@ -66,7 +65,18 @@ module $TOP_MODULE_NAME$_controller #( logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; - assign addr_incr = ADDR_INCREMENT_MAP[State]; + // combinational logic for addr_incr generation + always_comb begin : blkHead + case (State) + 0 : addr_incr = 0; + 1 : addr_incr = $HEAD_INCR_SIMD$; + 2 : addr_incr = $HEAD_INCR_KW$; + 3 : addr_incr = $HEAD_INCR_KH$; + 4 : addr_incr = $HEAD_INCR_W$; + 5 : addr_incr = $HEAD_INCR_H$; + default: addr_incr = 0; + endcase + end // combinational logic for tail_incr generation uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; @@ -139,7 +149,6 @@ module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( int unsigned DEPTH )( input logic clk, - input logic rst_n, input logic write_enable, input logic [$clog2(DEPTH)-1:0] write_addr, @@ -182,7 +191,7 @@ module $TOP_MODULE_NAME$_impl #( input logic out_V_V_TREADY, output logic [BIT_WIDTH * SIMD * MMV_OUT-1:0] out_V_V_TDATA ); - // derived Constants + // derived constants localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; @@ -199,7 +208,6 @@ module $TOP_MODULE_NAME$_impl #( .DEPTH(BUF_ELEM_TOTAL) ) window_buffer_inst ( .clk(ap_clk), - .rst_n(ap_rst_n), .write_enable(window_buffer_write_enable), .write_addr(window_buffer_write_addr), @@ -234,6 +242,15 @@ module $TOP_MODULE_NAME$_impl #( logic [$clog2(BUF_ELEM_TOTAL)-1:0] Window_buffer_write_addr_reg = 0; // Control signals/registers + logic Write_cmd = 0; + logic Writing_done = 0; + uwire write_ok = Write_cmd && out_V_V_TREADY; + uwire write_blocked = Write_cmd && !out_V_V_TREADY; + + logic Fetching_done = 0; + uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done; + + uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; uwire read_cmd = !reading_done && ( // if there is still an input element left to read Fetching_done || ( // if fetching is done (e.g. 
for skipped rows at FM end due to stride) @@ -242,15 +259,6 @@ module $TOP_MODULE_NAME$_impl #( ) // (over-)write to buffer if oldest buffered element will no longer be needed ); uwire read_ok = read_cmd && in0_V_V_TVALID; - uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; - - uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done; - logic Fetching_done = 0; - - logic Write_cmd = 0; - logic Writing_done = 0; - uwire write_ok = Write_cmd && out_V_V_TREADY; - uwire write_blocked = Write_cmd && !out_V_V_TREADY;; //assign buffer control assign window_buffer_write_addr = Window_buffer_write_addr_reg; diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv index 96bd8cc591..c98985a28e 100644 --- a/finn-rtllib/swg/swg_template_default_dynamic.sv +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -1,15 +1,7 @@ module $TOP_MODULE_NAME$_controller #( - int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, - int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, - int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, - int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, - int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, - int unsigned CNTR_BITWIDTH, int unsigned INCR_BITWIDTH, - bit [INCR_BITWIDTH-1:0] ADDR_INCREMENT_MAP[6] = $ADDR_INCREMENT_MAP$, - bit IS_DEPTHWISE = $IS_DEPTHWISE$ )( input logic clk, @@ -36,16 +28,16 @@ module $TOP_MODULE_NAME$_controller #( ); // (dynamic) configuration registers - logic [CNTR_BITWIDTH-1:0] Cfg_cntr_simd = LOOP_SIMD_ITERATIONS; - logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kw = LOOP_KW_ITERATIONS; - logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kh = LOOP_KH_ITERATIONS; - logic [CNTR_BITWIDTH-1:0] Cfg_cntr_w = LOOP_W_ITERATIONS; - logic [CNTR_BITWIDTH-1:0] Cfg_cntr_h = LOOP_H_ITERATIONS; - logic [INCR_BITWIDTH-1:0] Cfg_incr_head_simd = ADDR_INCREMENT_MAP[1]; - logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kw = ADDR_INCREMENT_MAP[2]; - logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kh = ADDR_INCREMENT_MAP[3]; - logic [INCR_BITWIDTH-1:0] Cfg_incr_head_w = ADDR_INCREMENT_MAP[4]; - logic [INCR_BITWIDTH-1:0] Cfg_incr_head_h = ADDR_INCREMENT_MAP[5]; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_simd = $LOOP_SIMD_ITERATIONS$; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kw = $LOOP_KW_ITERATIONS$; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kh = $LOOP_KH_ITERATIONS$; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_w = $LOOP_W_ITERATIONS$; + logic [CNTR_BITWIDTH-1:0] Cfg_cntr_h = $LOOP_H_ITERATIONS$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_simd = $HEAD_INCR_SIMD$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kw = $HEAD_INCR_KW$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_kh = $HEAD_INCR_KH$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_w = $HEAD_INCR_W$; + logic [INCR_BITWIDTH-1:0] Cfg_incr_head_h = $HEAD_INCR_H$; logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_w = $TAIL_INCR_W$; logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_h = $TAIL_INCR_H$; logic [INCR_BITWIDTH-1:0] Cfg_incr_tail_last = $TAIL_INCR_LAST$; @@ -81,13 +73,13 @@ module $TOP_MODULE_NAME$_controller #( state_e State = $INNERMOST_STATE$; state_e state_next; - logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; - logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; - logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; - logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; - logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] 
Counter_loop_simd = LOOP_SIMD_ITERATIONS; + logic signed [$clog2($LOOP_H_ITERATIONS$ +2)+1-1:0] Counter_loop_h = $LOOP_H_ITERATIONS$; + logic signed [$clog2($LOOP_W_ITERATIONS$ +2)+1-1:0] Counter_loop_w = $LOOP_W_ITERATIONS$; + logic signed [$clog2($LOOP_KH_ITERATIONS$ +2)+1-1:0] Counter_loop_kh = $LOOP_KH_ITERATIONS$; + logic signed [$clog2($LOOP_KW_ITERATIONS$ +2)+1-1:0] Counter_loop_kw = $LOOP_KW_ITERATIONS$; + logic signed [$clog2($LOOP_SIMD_ITERATIONS$+2)+1-1:0] Counter_loop_simd = $LOOP_SIMD_ITERATIONS$; - //assign addr_incr = ADDR_INCREMENT_MAP[State]; + // combinational logic for addr_incr generation always_comb begin : blkHead case (State) 0 : addr_incr = 0; @@ -96,6 +88,7 @@ module $TOP_MODULE_NAME$_controller #( 3 : addr_incr = Cfg_incr_head_kh; 4 : addr_incr = Cfg_incr_head_w; 5 : addr_incr = Cfg_incr_head_h; + default: addr_incr = 0; endcase end @@ -170,7 +163,6 @@ module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( int unsigned DEPTH )( input logic clk, - input logic rst_n, input logic write_enable, input logic [$clog2(DEPTH)-1:0] write_addr, @@ -196,13 +188,13 @@ module $TOP_MODULE_NAME$_impl #( int SIMD, int MMV_IN, int MMV_OUT, + int unsigned CNTR_BITWIDTH, + int unsigned INCR_BITWIDTH, + int LAST_READ_ELEM = $LAST_READ_ELEM$, int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, - int ELEM_PER_WINDOW = $ELEM_PER_WINDOW$, - - int unsigned CNTR_BITWIDTH, - int unsigned INCR_BITWIDTH + int ELEM_PER_WINDOW = $ELEM_PER_WINDOW$ )( input logic ap_clk, input logic ap_rst_n, @@ -229,10 +221,10 @@ module $TOP_MODULE_NAME$_impl #( input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_w, input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_h, input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_last, - input logic [31:0] cfg_last_read, //todo: reduce bitwidth to $clog2(LAST_READ_ELEM+1) + input logic [31:0] cfg_last_read, input logic [31:0] cfg_last_write ); - // derived Constants + // derived constants localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; @@ -261,7 +253,6 @@ module $TOP_MODULE_NAME$_impl #( .DEPTH(BUF_ELEM_TOTAL) ) window_buffer_inst ( .clk(ap_clk), - .rst_n(ap_rst_n), .write_enable(window_buffer_write_enable), .write_addr(window_buffer_write_addr), @@ -314,6 +305,15 @@ module $TOP_MODULE_NAME$_impl #( logic [$clog2(BUF_ELEM_TOTAL)-1:0] Window_buffer_write_addr_reg = 0; // Control signals/registers + logic Write_cmd = 0; + logic Writing_done = 0; + uwire write_ok = Write_cmd && out_V_V_TREADY; + uwire write_blocked = Write_cmd && !out_V_V_TREADY; + + logic Fetching_done = 0; + uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done; + + uwire reading_done = Newest_buffered_elem == Cfg_last_read; uwire read_cmd = !reading_done && ( // if there is still an input element left to read Fetching_done || ( // if fetching is done (e.g. 
for skipped rows at FM end due to stride) @@ -322,15 +322,6 @@ module $TOP_MODULE_NAME$_impl #( ) // (over-)write to buffer if oldest buffered element will no longer be needed ); uwire read_ok = read_cmd && in0_V_V_TVALID; - uwire reading_done = Newest_buffered_elem == Cfg_last_read; - - uwire fetch_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !write_blocked && !Fetching_done; - logic Fetching_done = 0; - - logic Write_cmd = 0; - logic Writing_done = 0; - uwire write_ok = Write_cmd && out_V_V_TREADY; - uwire write_blocked = Write_cmd && !out_V_V_TREADY;; //assign buffer control assign window_buffer_write_addr = Window_buffer_write_addr_reg; diff --git a/finn-rtllib/swg/swg_template_wrapper_dynamic.v b/finn-rtllib/swg/swg_template_wrapper_dynamic.v index d6f839de43..ca870ace11 100644 --- a/finn-rtllib/swg/swg_template_wrapper_dynamic.v +++ b/finn-rtllib/swg/swg_template_wrapper_dynamic.v @@ -14,13 +14,13 @@ module $TOP_MODULE_NAME$ #( parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN, parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT, - parameter integer C_s_axi_cfg_DATA_WIDTH = 32, - parameter integer C_s_axi_cfg_ADDR_WIDTH = 6 + parameter integer C_s_axilite_DATA_WIDTH = 32, + parameter integer C_s_axilite_ADDR_WIDTH = 6 ) ( - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) input ap_clk, - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axi_cfg" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) input ap_rst_n, input [BUF_IN_WIDTH-1:0] in0_V_TDATA, input in0_V_TVALID, @@ -29,28 +29,26 @@ module $TOP_MODULE_NAME$ #( output out_V_TVALID, input out_V_TREADY, - // Ports of Axi Slave Bus Interface s_axi_cfg - //input wire s_axi_cfg_aclk, - //input wire s_axi_cfg_aresetn, - input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_awaddr, - input wire [2 : 0] s_axi_cfg_awprot, - input wire s_axi_cfg_awvalid, - output wire s_axi_cfg_awready, - input wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_wdata, - input wire [(C_s_axi_cfg_DATA_WIDTH/8)-1 : 0] s_axi_cfg_wstrb, - input wire s_axi_cfg_wvalid, - output wire s_axi_cfg_wready, - output wire [1 : 0] s_axi_cfg_bresp, - output wire s_axi_cfg_bvalid, - input wire s_axi_cfg_bready, - input wire [C_s_axi_cfg_ADDR_WIDTH-1 : 0] s_axi_cfg_araddr, - input wire [2 : 0] s_axi_cfg_arprot, - input wire s_axi_cfg_arvalid, - output wire s_axi_cfg_arready, - output wire [C_s_axi_cfg_DATA_WIDTH-1 : 0] s_axi_cfg_rdata, - output wire [1 : 0] s_axi_cfg_rresp, - output wire s_axi_cfg_rvalid, - input wire s_axi_cfg_rready + // Ports of Axi Slave Bus Interface s_axilite + input [C_s_axilite_ADDR_WIDTH-1 : 0] s_axilite_awaddr, + input [2 : 0] s_axilite_awprot, + input s_axilite_awvalid, + output s_axilite_awready, + input [C_s_axilite_DATA_WIDTH-1 : 0] s_axilite_wdata, + input [(C_s_axilite_DATA_WIDTH/8)-1 : 0] s_axilite_wstrb, + input s_axilite_wvalid, + output s_axilite_wready, + output [1 : 0] s_axilite_bresp, + output s_axilite_bvalid, + input s_axilite_bready, + input [C_s_axilite_ADDR_WIDTH-1 : 0] s_axilite_araddr, + input [2 : 0] s_axilite_arprot, + input s_axilite_arvalid, + output s_axilite_arready, + output [C_s_axilite_DATA_WIDTH-1 : 0] s_axilite_rdata, + output [1 : 0] s_axilite_rresp, + output s_axilite_rvalid, + input s_axilite_rready ); wire cfg_valid; @@ -70,32 +68,32 @@ wire [INCR_BITWIDTH-1:0] cfg_incr_tail_last; wire [31:0] cfg_last_read; wire [31:0] cfg_last_write; -// Instantiation of Axi Bus Interface 
s_axi_cfg +// Instantiation of Axi Bus Interface s_axilite $TOP_MODULE_NAME$_axilite # ( - .C_S_AXI_DATA_WIDTH(C_s_axi_cfg_DATA_WIDTH), - .C_S_AXI_ADDR_WIDTH(C_s_axi_cfg_ADDR_WIDTH) + .C_S_AXI_DATA_WIDTH(C_s_axilite_DATA_WIDTH), + .C_S_AXI_ADDR_WIDTH(C_s_axilite_ADDR_WIDTH) ) axilite_cfg_inst ( .S_AXI_ACLK(ap_clk), .S_AXI_ARESETN(ap_rst_n), - .S_AXI_AWADDR(s_axi_cfg_awaddr), - .S_AXI_AWPROT(s_axi_cfg_awprot), - .S_AXI_AWVALID(s_axi_cfg_awvalid), - .S_AXI_AWREADY(s_axi_cfg_awready), - .S_AXI_WDATA(s_axi_cfg_wdata), - .S_AXI_WSTRB(s_axi_cfg_wstrb), - .S_AXI_WVALID(s_axi_cfg_wvalid), - .S_AXI_WREADY(s_axi_cfg_wready), - .S_AXI_BRESP(s_axi_cfg_bresp), - .S_AXI_BVALID(s_axi_cfg_bvalid), - .S_AXI_BREADY(s_axi_cfg_bready), - .S_AXI_ARADDR(s_axi_cfg_araddr), - .S_AXI_ARPROT(s_axi_cfg_arprot), - .S_AXI_ARVALID(s_axi_cfg_arvalid), - .S_AXI_ARREADY(s_axi_cfg_arready), - .S_AXI_RDATA(s_axi_cfg_rdata), - .S_AXI_RRESP(s_axi_cfg_rresp), - .S_AXI_RVALID(s_axi_cfg_rvalid), - .S_AXI_RREADY(s_axi_cfg_rready), + .S_AXI_AWADDR(s_axilite_awaddr), + .S_AXI_AWPROT(s_axilite_awprot), + .S_AXI_AWVALID(s_axilite_awvalid), + .S_AXI_AWREADY(s_axilite_awready), + .S_AXI_WDATA(s_axilite_wdata), + .S_AXI_WSTRB(s_axilite_wstrb), + .S_AXI_WVALID(s_axilite_wvalid), + .S_AXI_WREADY(s_axilite_wready), + .S_AXI_BRESP(s_axilite_bresp), + .S_AXI_BVALID(s_axilite_bvalid), + .S_AXI_BREADY(s_axilite_bready), + .S_AXI_ARADDR(s_axilite_araddr), + .S_AXI_ARPROT(s_axilite_arprot), + .S_AXI_ARVALID(s_axilite_arvalid), + .S_AXI_ARREADY(s_axilite_arready), + .S_AXI_RDATA(s_axilite_rdata), + .S_AXI_RRESP(s_axilite_rresp), + .S_AXI_RVALID(s_axilite_rvalid), + .S_AXI_RREADY(s_axilite_rready), .cfg_reg0(cfg_valid), .cfg_reg1(cfg_cntr_simd), diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 49e2621ecd..1afd23d3a1 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -29,7 +29,6 @@ import math import numpy as np import os -from math import copysign from qonnx.core.datatype import DataType from qonnx.custom_op.general import im2col from qonnx.custom_op.general.im2col import compute_conv_output_dim @@ -574,10 +573,6 @@ def prepare_codegen_default(self): tail_incr_last_window = buffer_min_size - 1 code_gen_dict["$IS_DEPTHWISE$"] = ["0"] - code_gen_dict["$TAIL_INCR_W$"] = [str(tail_incr_w)] - code_gen_dict["$TAIL_INCR_H$"] = [str(tail_incr_h)] - code_gen_dict["$TAIL_INCR_LAST$"] = [str(tail_incr_last_window)] - # support SIMD = IFMChannels and k_w = 1 cases # for k = [k_h, k_w] = [1, k_w], no adjustment is needed # for k = [k_h, k_w] = [1, 1], do not use this impl. 
style (mmv_out=K=1) @@ -595,12 +590,6 @@ def prepare_codegen_default(self): code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_SIMD"] loop_simd_iterations -= 1 # -1 because state is initial state - code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 2)] - code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 2)] - code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 2)] - code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 2)] - code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 2)] - cntr_bitwidth = math.ceil( math.log2( max( @@ -613,6 +602,11 @@ def prepare_codegen_default(self): ) ) code_gen_dict["$CNTR_BITWIDTH$"] = [str(cntr_bitwidth)] + code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 2)] + code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 2)] + code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 2)] + code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 2)] + code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 2)] incr_bitwidth = 1 + math.ceil( math.log2( @@ -629,26 +623,14 @@ def prepare_codegen_default(self): ) ) code_gen_dict["$INCR_BITWIDTH$"] = [str(incr_bitwidth)] - code_gen_dict["$ADDR_INCREMENT_MAP$"] = [ - "'{{ {}'d0, {}'d{}, {}'d{}, {}'d{}, {}'d{}, {}'d{}}}".format( - incr_bitwidth, - int(copysign(incr_bitwidth, addr_incr_end_simd)), - abs(addr_incr_end_simd), - int(copysign(incr_bitwidth, addr_incr_end_window_elem)), - abs(addr_incr_end_window_elem), - int(copysign(incr_bitwidth, addr_incr_end_window_row)), - abs(addr_incr_end_window_row), - int(copysign(incr_bitwidth, addr_incr_end_window)), - abs(addr_incr_end_window), - int(copysign(incr_bitwidth, addr_incr_end_row)), - abs(addr_incr_end_row), - ) - ] - code_gen_dict["$INCR_HEAD_SIMD$"] = [str(addr_incr_end_simd)] - code_gen_dict["$INCR_HEAD_KW$"] = [str(addr_incr_end_window_elem)] - code_gen_dict["$INCR_HEAD_KH$"] = [str(addr_incr_end_window_row)] - code_gen_dict["$INCR_HEAD_W$"] = [str(addr_incr_end_window)] - code_gen_dict["$INCR_HEAD_H$"] = [str(addr_incr_end_row)] + code_gen_dict["$HEAD_INCR_SIMD$"] = [str(addr_incr_end_simd)] + code_gen_dict["$HEAD_INCR_KW$"] = [str(addr_incr_end_window_elem)] + code_gen_dict["$HEAD_INCR_KH$"] = [str(addr_incr_end_window_row)] + code_gen_dict["$HEAD_INCR_W$"] = [str(addr_incr_end_window)] + code_gen_dict["$HEAD_INCR_H$"] = [str(addr_incr_end_row)] + code_gen_dict["$TAIL_INCR_W$"] = [str(tail_incr_w)] + code_gen_dict["$TAIL_INCR_H$"] = [str(tail_incr_h)] + code_gen_dict["$TAIL_INCR_LAST$"] = [str(tail_incr_last_window)] code_gen_dict["$ELEM_PER_WINDOW$"] = [str(elem_per_window)] code_gen_dict["$SIMD$"] = [str(simd)] @@ -844,16 +826,18 @@ def get_verilog_top_module_intf_names(self): Each block must have at most one aximm and one axilite.""" intf_names = super().get_verilog_top_module_intf_names() if self.get_nodeattr("dynamic_mode"): - intf_names["axilite"] = ["s_axi_cfg"] + intf_names["axilite"] = ["s_axilite"] return intf_names - def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): + def get_dynamic_config(self, ifm_dim=None, stride=None, dilation=None): """Returns a configuration dict to re-configure FM dimension during runtime. Stride and dilation can also be changed. Certain restrictions apply (e.g. 
component must be synthesized for largest buffer size).""" # NOTE: For better driver integration, this functionality could be packaged # as a standalone function in the future + if ifm_dim is None: + ifm_dim = self.get_nodeattr("IFMDim") k = self.get_nodeattr("ConvKernelDim") if stride is None: stride = self.get_nodeattr("Stride") @@ -883,16 +867,17 @@ def get_dynamic_config(self, ifm_dim, stride=None, dilation=None): # each setting is mapped to an axi-lite register address template_path, code_gen_dict = self.prepare_codegen_default() config = { + "cfg_wren": (0 * 4, 1), "cfg_cntr_simd": (1 * 4, int(code_gen_dict["$LOOP_SIMD_ITERATIONS$"][0])), "cfg_cntr_kw": (2 * 4, int(code_gen_dict["$LOOP_KW_ITERATIONS$"][0])), "cfg_cntr_kh": (3 * 4, int(code_gen_dict["$LOOP_KH_ITERATIONS$"][0])), "cfg_cntr_w": (4 * 4, int(code_gen_dict["$LOOP_W_ITERATIONS$"][0])), "cfg_cntr_h": (5 * 4, int(code_gen_dict["$LOOP_H_ITERATIONS$"][0])), - "cfg_incr_head_simd": (6 * 4, int(code_gen_dict["$INCR_HEAD_SIMD$"][0])), - "cfg_incr_head_kw": (7 * 4, int(code_gen_dict["$INCR_HEAD_KW$"][0])), - "cfg_incr_head_kh": (8 * 4, int(code_gen_dict["$INCR_HEAD_KH$"][0])), - "cfg_incr_head_w": (9 * 4, int(code_gen_dict["$INCR_HEAD_W$"][0])), - "cfg_incr_head_h": (10 * 4, int(code_gen_dict["$INCR_HEAD_H$"][0])), + "cfg_incr_head_simd": (6 * 4, int(code_gen_dict["$HEAD_INCR_SIMD$"][0])), + "cfg_incr_head_kw": (7 * 4, int(code_gen_dict["$HEAD_INCR_KW$"][0])), + "cfg_incr_head_kh": (8 * 4, int(code_gen_dict["$HEAD_INCR_KH$"][0])), + "cfg_incr_head_w": (9 * 4, int(code_gen_dict["$HEAD_INCR_W$"][0])), + "cfg_incr_head_h": (10 * 4, int(code_gen_dict["$HEAD_INCR_H$"][0])), "cfg_incr_tail_w": (11 * 4, int(code_gen_dict["$TAIL_INCR_W$"][0])), "cfg_incr_tail_h": (12 * 4, int(code_gen_dict["$TAIL_INCR_H$"][0])), "cfg_incr_tail_last": (13 * 4, int(code_gen_dict["$TAIL_INCR_LAST$"][0])), diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 2a3413cb13..979dcbfab8 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -31,6 +31,7 @@ import copy import numpy as np import onnx.parser as oprs +import os from onnx import TensorProto, helper from pyverilator.util.axi_utils import axilite_write, reset_rtlsim from qonnx.core.datatype import DataType @@ -55,6 +56,7 @@ from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP +from finn.util.basic import pyverilate_get_liveness_threshold_cycles def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt): @@ -193,8 +195,8 @@ def test_fpgadataflow_conv_dynamic(): for swg_node in model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl"): getCustomOp(swg_node).set_nodeattr("SIMD", 1) getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) - getCustomOp(swg_node).set_nodeattr("inFIFODepth", 16) - getCustomOp(swg_node).set_nodeattr("outFIFODepth", 16) + getCustomOp(swg_node).set_nodeattr("inFIFODepths", [16]) + getCustomOp(swg_node).set_nodeattr("outFIFODepths", [16]) print("SWG initial config:") idim = getCustomOp(swg_node).get_nodeattr("IFMDim") print(getCustomOp(swg_node).get_dynamic_config(idim)) @@ -209,7 +211,6 @@ def test_fpgadataflow_conv_dynamic(): # loop through experiment configurations for exp_cfg in exp_cfgs: idim, int_dim, odim, inp, 
golden = exp_cfg - # model.set_metadata_prop("rtlsim_trace", "trace_size0.vcd") # get config for the new dimensions swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") swg0 = getCustomOp(swg_nodes[0]) @@ -220,7 +221,7 @@ def test_fpgadataflow_conv_dynamic(): update_tensor_dim(model, swg1.onnx_node.input[0], (int_dim, int_dim)) update_tensor_dim(model, swg1.onnx_node.output[0], (odim, odim)) config1 = swg1.get_dynamic_config((int_dim, int_dim)) - configs = [("s_axi_cfg_0_", config0), ("s_axi_cfg_1_", config1)] + configs = [("s_axilite_0_", config0), ("s_axilite_1_", config1)] # adjust folded shapes for I/O FIFOs # (since rtlsim_exec uses folded shape info to fold global i/o tensors) first_node = getCustomOp(model.graph.node[0]) @@ -235,9 +236,11 @@ def test_fpgadataflow_conv_dynamic(): last_node_shp[2] = odim update_tensor_dim(model, last_node.onnx_node.output[0], (odim, odim)) last_node.set_nodeattr("folded_shape", last_node_shp) - model.set_metadata_prop("rtlsim_trace", "trace_size1.vcd") ctx = {"global_in": inp.transpose(0, 2, 3, 1)} + liveness_prev = pyverilate_get_liveness_threshold_cycles() + os.environ["LIVENESS_THRESHOLD"] = "100000" rtlsim_exec(model, ctx, pre_hook=config_hook(configs)) + os.environ["LIVENESS_THRESHOLD"] = str(liveness_prev) ret = ctx["global_out"].transpose(0, 3, 1, 2) assert np.isclose(golden, ret).all() @@ -443,7 +446,7 @@ def test_fpgadataflow_slidingwindow_rtl_dynamic( # Generate config, also overwrites IFMDim/OFMDim attributes: config = swg_inst.get_dynamic_config(ifm_dim) - configs = [("s_axi_cfg_0_", config)] + configs = [("s_axilite_0_", config)] # Also update FIFO nodes and corresponding tensors fifo_node = model.get_nodes_by_op_type("StreamingFIFO")[0] From 5d8a0260063c3362a7232c441e255ff3f7a07a5b Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 24 Oct 2022 14:17:03 +0100 Subject: [PATCH 245/628] [CustomOp] Delete stream depth pragmas for in/out streams in MVAU and VVAUs --- .../fpgadataflow/matrixvectoractivation.py | 14 -------------- .../fpgadataflow/vectorvectoractivation.py | 14 -------------- 2 files changed, 28 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 69763fbea8..df9d1f1e70 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1227,20 +1227,6 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() ) - # TODO can we deprecate this entirely? 
this looks like legacy code - # that does not really serve a purpose - FIFO sizes are not typically - # allocated at this point; at best they are set to 2 as the default - in_fifo_depth = 2 - out_fifo_depth = 2 - # insert depth pragmas only if specified - if in_fifo_depth != 0: - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=%d variable=in0" % in_fifo_depth - ) - if out_fifo_depth != 0: - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=%d variable=out" % out_fifo_depth - ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" ) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 0375bdea68..a411d245a9 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -901,20 +901,6 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() ) - # TODO can we deprecate this entirely? this looks like legacy code - # that does not really serve a purpose - FIFO sizes are not typically - # allocated at this point; at best they are set to 2 as the default - in_fifo_depth = 2 - out_fifo_depth = 2 - # insert depth pragmas only if specified - if in_fifo_depth != 0: - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=%d variable=in0" % in_fifo_depth - ) - if out_fifo_depth != 0: - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=%d variable=out" % out_fifo_depth - ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" ) From 1c7d81f39849d16f104effbba96e1fa5ee7495b6 Mon Sep 17 00:00:00 2001 From: Hugo LE BLEVEC Date: Wed, 26 Oct 2022 17:49:16 +0100 Subject: [PATCH 246/628] [set_fifo_depths] Adding a new transformation to split fifos larger than the max allowed depth --- src/finn/builder/build_dataflow_config.py | 4 + src/finn/builder/build_dataflow_steps.py | 3 + .../fpgadataflow/set_fifo_depths.py | 75 ++++++++++++- tests/fpgadataflow/test_split_large_fifos.py | 104 ++++++++++++++++++ 4 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 tests/fpgadataflow/test_split_large_fifos.py diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index d3c4156d9b..2068d83f6f 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -253,6 +253,10 @@ class DataflowBuildConfig: #: for each FIFO. auto_fifo_depths: Optional[bool] = True + #: Whether FIFO nodes with depth larger than 32768 will be split. + #: Allow to configure very large FIFOs in the folding_config_file. + split_large_fifos: Optional[bool] = False + #: When `auto_fifo_depths = True`, select which method will be used for #: setting the FIFO sizes. 
auto_fifo_strategy: Optional[ diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 5da608c27d..72f3fb2255 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -98,6 +98,7 @@ from finn.transformation.fpgadataflow.set_fifo_depths import ( InsertAndSetFIFODepths, RemoveShallowFIFOs, + SplitLargeFifos, ) from finn.transformation.fpgadataflow.set_folding import SetFolding from finn.transformation.fpgadataflow.synth_ooc import SynthOutOfContext @@ -551,6 +552,8 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform(GiveReadableTensorNames()) if cfg.folding_config_file is not None: model = model.transform(ApplyConfig(cfg.folding_config_file)) + if cfg.split_large_fifos: + model = model.transform(SplitLargeFifos()) # remove any shallow FIFOs model = model.transform(RemoveShallowFIFOs()) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index f715aaeffb..3e841bf585 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -29,10 +29,17 @@ import math import numpy as np import warnings +from onnx import TensorProto, helper from pyverilator.util.axi_utils import reset_rtlsim, toggle_clk +from qonnx.core.datatype import DataType from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.base import Transformation -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames +from qonnx.transformation.general import ( + GiveReadableTensorNames, + GiveUniqueNodeNames, + SortGraph, +) +from qonnx.util.basic import get_by_name from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance from finn.transformation.fpgadataflow.annotate_cycles import AnnotateCycles @@ -414,3 +421,69 @@ def apply(self, model): model = model.transform(RemoveShallowFIFOs()) return (model, False) + + +class SplitLargeFifos(Transformation): + """Split FIFOs with a depth larger than 32768 into smaller ones + to ensure that they can be correctly generated.""" + + def apply(self, model): + graph = model.graph + node_ind = 0 + graph_modified = False + for n in graph.node: + node_ind += 1 + if n.op_type == "StreamingFIFO": + depth = get_by_name(n.attribute, "depth") + if depth.i > 32768: + n0 = getCustomOp(n) + fld_shape = n0.get_folded_output_shape() + dtype = n0.get_nodeattr("dataType") + impl_style = n0.get_nodeattr("impl_style") + ram_style = n0.get_nodeattr("ram_style") + shape = model.get_tensor_shape(n.input[0]) + split_n = math.ceil(depth.i / 32768) + fifo_depth = math.ceil(depth.i / split_n) + for i in range(split_n): + if i == 0: + inp = n.input[0] + else: + inp = n.name + "_" + str(i - 1) + "_out" + if i == split_n - 1: + outp = n.output[0] + else: + outp = n.name + "_" + str(i) + "_out" + out_tensor = helper.make_tensor_value_info( + outp, TensorProto.FLOAT, shape + ) + graph.value_info.append(out_tensor) + model.set_tensor_datatype(out_tensor.name, DataType[dtype]) + fifo_node = helper.make_node( + "StreamingFIFO", + [inp], + [outp], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + depth=fifo_depth, + folded_shape=fld_shape, + dataType=dtype, + impl_style=impl_style, + ram_style=ram_style, + ) + graph.node.insert(node_ind + i, fifo_node) + + graph.node.remove(n) + if n.output[0] != "global_out": + consumer = model.find_consumer(n.output[0]) + n1 = 
getCustomOp(consumer) + n1.set_nodeattr("outFIFODepth", fifo_depth) + if n.input[0] != "global_in": + producer = model.find_producer(n.input[0]) + n2 = getCustomOp(producer) + n2.set_nodeattr("inFIFODepth", fifo_depth) + graph_modified = True + if graph_modified: + model = model.transform(SortGraph()) + model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveReadableTensorNames()) + return (model, graph_modified) diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py new file mode 100644 index 0000000000..ab9230ad39 --- /dev/null +++ b/tests/fpgadataflow/test_split_large_fifos.py @@ -0,0 +1,104 @@ +# Copyright (c) 2022 Xilinx, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
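
The splitting arithmetic in SplitLargeFifos above picks the smallest number of equal-depth segments that each stay within the 32768 limit (split_n = ceil(depth / 32768), then fifo_depth = ceil(depth / split_n)) and updates the FIFO-depth attributes of the neighbouring nodes to the per-segment value. A minimal standalone sketch of just that calculation follows; the helper name and the MAX_DEPTH constant are illustrative only and not part of the FINN code base, which hard-codes the limit:

import math

MAX_DEPTH = 32768  # illustrative constant; the transformation hard-codes this limit

def split_fifo_depth(depth):
    """Return (per-segment depth, number of segments) for a requested FIFO depth."""
    if depth <= MAX_DEPTH:
        return depth, 1  # small enough, no split needed
    split_n = math.ceil(depth / MAX_DEPTH)   # fewest segments that respect the limit
    fifo_depth = math.ceil(depth / split_n)  # equal share of the requested depth
    return fifo_depth, split_n

# depths exercised by the test below:
print(split_fifo_depth(16384))  # (16384, 1) - left as a single FIFO
print(split_fifo_depth(65536))  # (32768, 2)
print(split_fifo_depth(45000))  # (22500, 2)

Because each segment gets an equal, rounded-up share, the total buffered depth after splitting (split_n * fifo_depth) can be slightly larger than the requested depth, but never smaller.
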
+ + +import pytest + +import json +import shutil +from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from math import ceil + +import finn.builder.build_dataflow as build +import finn.builder.build_dataflow_config as build_cfg +from finn.util.basic import make_build_dir +from finn.util.test import get_trained_network_and_ishape + + +def fetch_test_model(topology, wbits=2, abits=2): + tmp_output_dir = make_build_dir("build_fifosizing_%s_" % topology) + (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) + chkpt_name = tmp_output_dir + "/model.onnx" + BrevitasONNXManager.export(model, ishape, chkpt_name) + return tmp_output_dir + + +def get_folding_cfg(depth=65536): + cfg = dict() + cfg["Defaults"] = dict() + for i in range(3): + key = "StreamingFIFO_" + str(i) + cfg[key] = {"depth": depth, "ram_style": "auto", "impl_style": "rtl"} + return cfg + + +@pytest.mark.slow +@pytest.mark.vivado +@pytest.mark.fpgadataflow +@pytest.mark.parametrize("depth", [16384, 65536, 45000]) +def test_split_large_fifos(depth): + tmp_output_dir = fetch_test_model("tfc") + folding_cfg = get_folding_cfg(depth) + with open(tmp_output_dir + "/folding_config.json", "w") as f: + json.dump(folding_cfg, f, indent=2) + cfg = build_cfg.DataflowBuildConfig( + output_dir=tmp_output_dir, + auto_fifo_depths=False, + split_large_fifos=True, + folding_config_file=tmp_output_dir + "/folding_config.json", + target_fps=10000, + synth_clk_period_ns=10.0, + board="Pynq-Z1", + rtlsim_batch_size=100, + shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, + generate_outputs=[ + build_cfg.DataflowOutputType.ESTIMATE_REPORTS, + build_cfg.DataflowOutputType.STITCHED_IP, + build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE, + ], + default_mem_mode=build_cfg.ComputeEngineMemMode.DECOUPLED, + ) + build.build_dataflow_cfg(tmp_output_dir + "/model.onnx", cfg) + with open(tmp_output_dir + "/report/estimate_network_performance.json") as f: + est_data = json.load(f) + with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: + sim_data = json.load(f) + assert ( + float(sim_data["throughput[images/s]"]) + / float(est_data["estimated_throughput_fps"]) + > 0.9 + ) + with open(tmp_output_dir + "/final_hw_config.json") as f: + hw_config = json.load(f) + n = 0 + for key in hw_config: + if "StreamingFIFO" in key: + n += 1 + assert n == 3 * ceil(depth / 32768) + 1 + + shutil.rmtree(tmp_output_dir) From 3d002f7e168278eed6a76212fd84828906d67395 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 27 Oct 2022 13:35:01 +0200 Subject: [PATCH 247/628] [SWGG] improve HDL code --- finn-rtllib/swg/swg_template_default.sv | 18 ++++++------------ .../swg/swg_template_default_dynamic.sv | 18 ++++++------------ 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index f944b955fb..06e65e9111 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -67,29 +67,23 @@ module $TOP_MODULE_NAME$_controller #( // combinational logic for addr_incr generation always_comb begin : blkHead - case (State) + unique case (State) 0 : addr_incr = 0; 1 : addr_incr = $HEAD_INCR_SIMD$; 2 : addr_incr = $HEAD_INCR_KW$; 3 : addr_incr = $HEAD_INCR_KH$; 4 : addr_incr = $HEAD_INCR_W$; 5 : addr_incr = $HEAD_INCR_H$; - default: addr_incr = 0; endcase end // combinational logic for tail_incr generation uwire tail_incr_inner_condition = IS_DEPTHWISE? 
(Counter_loop_kh >= 0) : 0; - always_comb begin : blkTail - if (tail_incr_inner_condition) - tail_incr = 1; - else if (Counter_loop_w >= 0) - tail_incr = $TAIL_INCR_W$; - else if (Counter_loop_h >= 0) - tail_incr = $TAIL_INCR_H$; - else - tail_incr = $TAIL_INCR_LAST$; - end + assign tail_incr = + tail_incr_inner_condition? 1 : + Counter_loop_w >= 0? $TAIL_INCR_W$ : + Counter_loop_h >= 0? $TAIL_INCR_H$ : + /* else */ $TAIL_INCR_LAST$; // combinational next state logic always_comb begin : blkState diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv index c98985a28e..eb53978b58 100644 --- a/finn-rtllib/swg/swg_template_default_dynamic.sv +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -81,29 +81,23 @@ module $TOP_MODULE_NAME$_controller #( // combinational logic for addr_incr generation always_comb begin : blkHead - case (State) + unique case (State) 0 : addr_incr = 0; 1 : addr_incr = Cfg_incr_head_simd; 2 : addr_incr = Cfg_incr_head_kw; 3 : addr_incr = Cfg_incr_head_kh; 4 : addr_incr = Cfg_incr_head_w; 5 : addr_incr = Cfg_incr_head_h; - default: addr_incr = 0; endcase end // combinational logic for tail_incr generation uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; - always_comb begin : blkTail - if (tail_incr_inner_condition) - tail_incr = 1; - else if (Counter_loop_w >= 0) - tail_incr = Cfg_incr_tail_w; - else if (Counter_loop_h >= 0) - tail_incr = Cfg_incr_tail_h; - else - tail_incr = Cfg_incr_tail_last; - end + assign tail_incr = + tail_incr_inner_condition? 1 : + Counter_loop_w >= 0? Cfg_incr_tail_w : + Counter_loop_h >= 0? Cfg_incr_tail_h : + /* else */ Cfg_incr_tail_last; // combinational next state logic always_comb begin : blkState From ad6319d52540b834f5ab0a01f1f8a2b82d51fddf Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 27 Oct 2022 15:05:51 +0100 Subject: [PATCH 248/628] [CustomOp] Add dtype functions to StreamingFIFO --- .../custom_op/fpgadataflow/streamingfifo.py | 60 +++++++++++-------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index 40d016de43..c71e8ffe32 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -46,32 +46,34 @@ def __init__(self, onnx_node): self.strm_fifo_wrapper = templates.strm_fifo_wrapper def get_nodeattr_types(self): - my_attrs = { - # FIFO depth - "depth": ("i", True, 0), - # folded shape of input/output - "folded_shape": ("ints", True, []), - # FINN DataTypes for inputs/outputs - "dataType": ("s", True, ""), - # Toggle between hls or IPI implementation - # rtl - use the hls generated IP during stitching - # vivado - use the AXI Infrastructure FIFO - "impl_style": ("s", False, "rtl", {"rtl", "vivado"}), - # FPGA resource type for FIFOs when impl_style is vivado - # auto -- let Vivado decide - # block -- use BRAM - # distributed -- use LUTRAM - # ultra -- use URAM (on UltraScale+) - "ram_style": ( - "s", - False, - "auto", - {"auto", "block", "distributed", "ultra"}, - ), - # whether depth monitoring is enabled (impl_style=rtl only) - "depth_monitor": ("i", False, 0), - } - my_attrs.update(super().get_nodeattr_types()) + my_attrs = super().get_nodeattr_types() + my_attrs.update( + { + # FIFO depth + "depth": ("i", True, 0), + # folded shape of input/output + "folded_shape": ("ints", True, []), + # FINN DataTypes for inputs/outputs + "dataType": ("s", True, ""), + # Toggle 
between hls or IPI implementation + # rtl - use the hls generated IP during stitching + # vivado - use the AXI Infrastructure FIFO + "impl_style": ("s", False, "rtl", {"rtl", "vivado"}), + # FPGA resource type for FIFOs when impl_style is vivado + # auto -- let Vivado decide + # block -- use BRAM + # distributed -- use LUTRAM + # ultra -- use URAM (on UltraScale+) + "ram_style": ( + "s", + False, + "auto", + {"auto", "block", "distributed", "ultra"}, + ), + # whether depth monitoring is enabled (impl_style=rtl only) + "depth_monitor": ("i", False, 0), + } + ) return my_attrs @@ -256,6 +258,12 @@ def get_outstream_width(self, ind=0): in_width = folded_shape[-1] * dtype.bitwidth() return in_width + def get_input_datatype(self, ind=0): + return DataType[self.get_nodeattr("dataType")] + + def get_output_datatype(self, ind=0): + return DataType[self.get_nodeattr("dataType")] + def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") node = self.onnx_node From a8343b2d9e616624ff94e258aca67d80a25d17a8 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 1 Nov 2022 12:34:25 +0100 Subject: [PATCH 249/628] [Convert] skip streaming eltwise conversion if static inputs --- .../fpgadataflow/convert_to_hls_layers.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 7e4ab34af7..525af7ea92 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1165,10 +1165,16 @@ def apply(self, model): result = node.output[0] in0_shape = model.get_tensor_shape(in0) in1_shape = model.get_tensor_shape(in1) + in0_static = not (model.get_initializer(in0) is None) + in1_static = not (model.get_initializer(in1) is None) # skip if different shapes on inputs if in0_shape != in1_shape: continue + # skip if any of inputs have initializers + # (this node is meant for adding two dynamic streams) + if in0_static or in1_static: + continue idt0 = model.get_tensor_datatype(in0) idt1 = model.get_tensor_datatype(in1) @@ -1694,6 +1700,10 @@ def apply(self, model): ) if not dt_coherent: continue + # skip conversion if any inputs are static + all_static = all([model.get_initializer(x) is None for x in node.input]) + if not all_static: + continue # skip conversion if inputs are not integers if not dt0.is_integer(): continue @@ -1739,10 +1749,16 @@ def apply(self, model): result = node.output[0] in0_shape = model.get_tensor_shape(in0) in1_shape = model.get_tensor_shape(in1) + in0_static = not (model.get_initializer(in0) is None) + in1_static = not (model.get_initializer(in1) is None) # skip if different shapes on inputs if in0_shape != in1_shape: continue + # skip if any of inputs have initializers + # (this node is meant for two dynamic streams) + if in0_static or in1_static: + continue idt0 = model.get_tensor_datatype(in0) idt1 = model.get_tensor_datatype(in1) From c30839290e4f594aef4c7198a9f807c405a3838e Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 1 Nov 2022 15:36:39 +0000 Subject: [PATCH 250/628] [Streamline/Absorb] Reorder insertion/rewire of new trn node --- src/finn/transformation/streamline/absorb.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py index a983e67750..3af34eba8e 100644 --- a/src/finn/transformation/streamline/absorb.py +++ 
b/src/finn/transformation/streamline/absorb.py @@ -580,7 +580,6 @@ def apply(self, model): trans_input = mt_cand.output[0] trans_output = new_tensor_name # fix tensor shapes for Resize and Transpose - # n, c, h, w = model.get_tensor_shape(mt_cand.input[0]) n, c, hx, wx = model.get_tensor_shape(mt_cand.output[0]) model.set_tensor_shape(trans_input, (n, hx, wx, c)) model.set_tensor_shape(trans_output, (n, c, hx, wx)) @@ -591,13 +590,13 @@ def apply(self, model): [trans_output], perm=[0, 3, 1, 2], ) - graph.node.insert(node_ind + 1, new_transpose) # rewire nodes final_t_cands = model.find_consumers(mt_cand.output[0]) # rewire next nodes' inputs for final_t_cand in final_t_cands: final_t_cand.input[0] = trans_output mt_cand.output[0] = trans_input + graph.node.insert(node_ind + 1, new_transpose) graph_modified = True if graph_modified: model = model.transform(InferDataTypes()) From 64d131a3b355c40e787a6826d98f870da500b0d1 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 2 Nov 2022 12:01:49 +0000 Subject: [PATCH 251/628] [Tests] Remove PrepareRTLSim step from bnn test --- tests/end2end/test_end2end_bnn_pynq.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 5f787d1f88..ccdd8816ee 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -80,7 +80,6 @@ from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP -from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode from finn.transformation.fpgadataflow.set_fifo_depths import InsertAndSetFIFODepths from finn.transformation.move_reshape import RemoveCNVtoFCFlatten @@ -597,7 +596,6 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) - model = model.transform(PrepareRTLSim()) model.set_metadata_prop("exec_mode", "rtlsim") os.environ["LIVENESS_THRESHOLD"] = str(int(latency * 1.1)) if rtlsim_trace: From e4a1817c0fa08b0caa5f603970d50a0d9b6da6d1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 3 Nov 2022 09:57:40 +0100 Subject: [PATCH 252/628] [Test] add rectangular cases to dynamic spatial FM size test --- ...dataflow_convinputgenerator_rtl_dynamic.py | 129 ++++++++++++------ 1 file changed, 87 insertions(+), 42 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index d7085e8491..4cf7a902be 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -63,18 +63,33 @@ from finn.util.basic import pyverilate_get_liveness_threshold_cycles -def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise): +def create_conv_model( + idim_h, idim_w, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise +): np.random.seed(0) group = ifm if depthwise else 1 group_str = str(group) - ishp = (1, ifm, idim, idim) - pad_0 = _auto_pad_to_explicit_padding(pad_mode, idim, idim, k, k, stride, stride, 2) - int_dim = compute_conv_output_dim(idim, k, stride, 
total_pad=pad_0[0] + pad_0[2]) + ishp = (1, ifm, idim_h, idim_w) + pad_0 = _auto_pad_to_explicit_padding( + pad_mode, idim_h, idim_w, k, k, stride, stride, 2 + ) + int_dim_h = compute_conv_output_dim( + idim_h, k, stride, total_pad=pad_0[0] + pad_0[2] + ) + int_dim_w = compute_conv_output_dim( + idim_w, k, stride, total_pad=pad_0[1] + pad_0[3] + ) + pad_1 = _auto_pad_to_explicit_padding( - pad_mode, int_dim, int_dim, k, k, stride, stride, 2 + pad_mode, int_dim_h, int_dim_w, k, k, stride, stride, 2 ) - odim = compute_conv_output_dim(int_dim, k, stride, total_pad=pad_1[0] + pad_1[2]) - oshp = (1, ifm, odim, odim) if depthwise else (1, ofm, odim, odim) + odim_h = compute_conv_output_dim( + int_dim_h, k, stride, total_pad=pad_1[0] + pad_1[2] + ) + odim_w = compute_conv_output_dim( + int_dim_w, k, stride, total_pad=pad_1[1] + pad_1[3] + ) + oshp = (1, ifm, odim_h, odim_w) if depthwise else (1, ofm, odim_h, odim_w) wshp = (ifm, 1, k, k) if depthwise else (ofm, ifm, k, k) wshp_1 = (ifm, 1, k, k) if depthwise else (ofm, ofm, k, k) ishp_str = str(list(ishp)) @@ -120,17 +135,19 @@ def create_conv_model(idim, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise): return model -def update_conv_model_dims(model, idim_new): +def update_conv_model_dims(model, idim_new_h, idim_new_w): cnode = model.get_nodes_by_op_type("Conv")[0] k, _ = get_by_name(cnode.attribute, "kernel_shape").ints stride, _ = get_by_name(cnode.attribute, "strides").ints ishp = model.get_tensor_shape("in0") n, ci, _, _ = ishp n, co, _, _ = model.get_tensor_shape("out0") - int_dim = compute_conv_output_dim(idim_new, k, stride) - odim = compute_conv_output_dim(int_dim, k, stride) - model.set_tensor_shape("in0", (n, ci, idim_new, idim_new)) - model.set_tensor_shape("out0", (n, co, odim, odim)) + int_dim_h = compute_conv_output_dim(idim_new_h, k, stride) + int_dim_w = compute_conv_output_dim(idim_new_w, k, stride) + odim_h = compute_conv_output_dim(int_dim_h, k, stride) + odim_w = compute_conv_output_dim(int_dim_w, k, stride) + model.set_tensor_shape("in0", (n, ci, idim_new_h, idim_new_w)) + model.set_tensor_shape("out0", (n, co, odim_h, odim_w)) # remove all existing shapes del model.graph.value_info[:] model = model.transform(InferShapes()) @@ -165,7 +182,16 @@ def write_swg_config(sim): cfg0 = { - "idims": [32, 16], + "idims": [(32, 32), (8, 8)], + "ifm": 64, + "k": 3, + "stride": 1, + "ofm": 64, + "depthwise": True, + "pad_mode": "SAME_UPPER", +} +cfg1 = { + "idims": [(32, 16), (16, 8)], "ifm": 4, "k": 4, "stride": 1, @@ -173,8 +199,8 @@ def write_swg_config(sim): "depthwise": False, "pad_mode": "SAME_UPPER", } -cfg1 = { - "idims": [128, 4], +cfg2 = { + "idims": [(64, 128), (2, 4)], "ifm": 64, "k": 3, "stride": 1, @@ -184,7 +210,7 @@ def write_swg_config(sim): } -@pytest.mark.parametrize("cfg", [cfg0, cfg1]) +@pytest.mark.parametrize("cfg", [cfg0, cfg1, cfg2]) @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_conv_dynamic(cfg): @@ -200,21 +226,30 @@ def test_fpgadataflow_conv_dynamic(cfg): exp_cfgs = [] largest_model = None for idim in idims: - ishp = (1, ifm, idim, idim) + idim_h, idim_w = idim + ishp = (1, ifm, idim_h, idim_w) np.random.seed(0) inp = gen_finn_dt_tensor(idt, ishp) model = create_conv_model( - idim, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise + idim_h, idim_w, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise ) - _, _, int_dim, _ = model.get_tensor_shape("conv0") - _, _, odim, _ = model.get_tensor_shape("out0") + _, _, int_dim_h, int_dim_w = model.get_tensor_shape("conv0") + _, _, odim_h, odim_w = 
model.get_tensor_shape("out0") pad0 = get_by_name(model.graph.node[0].attribute, "pads").ints pad1 = get_by_name(model.graph.node[1].attribute, "pads").ints if idim == max(idims): # use largest model for hardware conversion largest_model = copy.deepcopy(model) golden = execute_onnx(model, {"in0": inp})["out0"] - exp_cfg = (idim, int_dim, odim, pad0, pad1, inp, golden) + exp_cfg = ( + (idim_h, idim_w), + (int_dim_h, int_dim_w), + (odim_h, odim_w), + pad0, + pad1, + inp, + golden, + ) exp_cfgs.append(exp_cfg) # convert to hardware and prepare simulation @@ -261,33 +296,43 @@ def test_fpgadataflow_conv_dynamic(cfg): # loop through experiment configurations for exp_cfg in exp_cfgs: - idim, int_dim, odim, pad0, pad1, inp, golden = exp_cfg - conv0_idim = idim + pad0[0] + pad0[2] - conv1_idim = int_dim + pad1[0] + pad1[2] + ( + (idim_h, idim_w), + (int_dim_h, int_dim_w), + (odim_h, odim_w), + pad0, + pad1, + inp, + golden, + ) = exp_cfg + conv0_idim_h = idim_h + pad0[0] + pad0[2] + conv0_idim_w = idim_w + pad0[1] + pad0[3] + conv1_idim_h = int_dim_h + pad1[0] + pad1[2] + conv1_idim_w = int_dim_w + pad1[1] + pad1[3] # get config for the new dimensions swg_nodes = model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl") swg0 = getCustomOp(swg_nodes[0]) - update_tensor_dim(model, swg0.onnx_node.input[0], (conv0_idim, conv0_idim)) - update_tensor_dim(model, swg0.onnx_node.output[0], (int_dim, int_dim)) - swg_config0 = swg0.get_dynamic_config((conv0_idim, conv0_idim)) + update_tensor_dim(model, swg0.onnx_node.input[0], (conv0_idim_h, conv0_idim_w)) + update_tensor_dim(model, swg0.onnx_node.output[0], (int_dim_h, int_dim_w)) + swg_config0 = swg0.get_dynamic_config((conv0_idim_h, conv0_idim_w)) swg1 = getCustomOp(swg_nodes[1]) - update_tensor_dim(model, swg1.onnx_node.input[0], (conv1_idim, conv1_idim)) - update_tensor_dim(model, swg1.onnx_node.output[0], (odim, odim)) - swg_config1 = swg1.get_dynamic_config((conv1_idim, conv1_idim)) + update_tensor_dim(model, swg1.onnx_node.input[0], (conv1_idim_h, conv1_idim_w)) + update_tensor_dim(model, swg1.onnx_node.output[0], (odim_h, odim_w)) + swg_config1 = swg1.get_dynamic_config((conv1_idim_h, conv1_idim_w)) if pad_mode != "VALID": pad_nodes = model.get_nodes_by_op_type("FMPadding_rtl") padder0 = getCustomOp(pad_nodes[0]) - update_tensor_dim(model, padder0.onnx_node.input[0], (idim, idim)) + update_tensor_dim(model, padder0.onnx_node.input[0], (idim_h, idim_w)) update_tensor_dim( - model, padder0.onnx_node.output[0], (conv0_idim, conv0_idim) + model, padder0.onnx_node.output[0], (conv0_idim_h, conv0_idim_w) ) - pad_config0 = padder0.get_dynamic_config((idim, idim), pad0) + pad_config0 = padder0.get_dynamic_config((idim_h, idim_w), pad0) padder1 = getCustomOp(pad_nodes[1]) - update_tensor_dim(model, padder1.onnx_node.input[0], (int_dim, int_dim)) + update_tensor_dim(model, padder1.onnx_node.input[0], (int_dim_h, int_dim_w)) update_tensor_dim( - model, padder1.onnx_node.output[0], (conv1_idim, conv1_idim) + model, padder1.onnx_node.output[0], (conv1_idim_h, conv1_idim_w) ) - pad_config1 = padder1.get_dynamic_config((int_dim, int_dim), pad1) + pad_config1 = padder1.get_dynamic_config((int_dim_h, int_dim_w), pad1) configs = [ ("s_axilite_0_", pad_config0), ("s_axilite_1_", swg_config0), @@ -300,15 +345,15 @@ def test_fpgadataflow_conv_dynamic(cfg): # (since rtlsim_exec uses folded shape info to fold global i/o tensors) first_node = getCustomOp(model.graph.node[0]) first_node_shp = list(first_node.get_folded_input_shape()) - first_node_shp[1] = idim - 
first_node_shp[2] = idim + first_node_shp[1] = idim_h + first_node_shp[2] = idim_w first_node.set_nodeattr("folded_shape", first_node_shp) - update_tensor_dim(model, first_node.onnx_node.input[0], (idim, idim)) + update_tensor_dim(model, first_node.onnx_node.input[0], (idim_h, idim_w)) last_node = getCustomOp(model.graph.node[-1]) last_node_shp = list(last_node.get_folded_output_shape()) - last_node_shp[1] = odim - last_node_shp[2] = odim - update_tensor_dim(model, last_node.onnx_node.output[0], (odim, odim)) + last_node_shp[1] = odim_h + last_node_shp[2] = odim_w + update_tensor_dim(model, last_node.onnx_node.output[0], (odim_h, odim_w)) last_node.set_nodeattr("folded_shape", last_node_shp) ctx = {"global_in": inp.transpose(0, 2, 3, 1)} liveness_prev = pyverilate_get_liveness_threshold_cycles() From 803eeb537c57531d32de2267d82a85510bbb0ec1 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 7 Nov 2022 16:47:50 +0000 Subject: [PATCH 253/628] Revert "FINN-13: Version control the installed XRT version." This reverts commit 81fefd999708220bd38b4021c6515115161b96ed. --- docker/Dockerfile.finn | 17 ----------------- run-docker.sh | 2 +- xrt_supported_versions.txt | 1 - 3 files changed, 1 insertion(+), 19 deletions(-) delete mode 100644 xrt_supported_versions.txt diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 78ec490b2b..d6ab67b42e 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -81,23 +81,6 @@ RUN cd verilator && \ RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb RUN rm /tmp/$XRT_DEB_VERSION.deb -RUN if [ ! -f "/opt/xilinx/xrt/include/version.h" ]; then \ - echo "ERROR: XRT not installed. Please install XRT" ; exit 1 ; \ - fi ; - -# Inherit the xrt_supported_versions arg, a file, and copy it to docker -ARG xrt_supported_versions -ADD $xrt_supported_versions ./ -ARG XILINX_VERSION -RUN echo "XRT installed. proceeding to check version compatibility" ; \ - xrt_build_ver="$XILINX_VERSION:"$(grep 'xrt_build_version_hash\[\]' /opt/xilinx/xrt/include/version.h | sed 's/";//' | sed 's/^.*"//') ; \ - if grep -Fxq "$xrt_build_ver" $xrt_supported_versions ; then \ - echo "XRT version $xrt_build_ver is supported." ; \ - echo "XRT Runtime setup Done" ; \ - else \ - echo "ERROR: $xrt_build_ver does not match supported versions" ; \ - exit 1 ; \ - fi ; # versioned Python package requirements for FINN compiler # these are given in requirements.txt diff --git a/run-docker.sh b/run-docker.sh index aab0df1f1c..8b8d0a509a 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -178,7 +178,7 @@ if [ "$FINN_DOCKER_PREBUILT" = "0" ]; then # Need to ensure this is done within the finn/ root folder: OLD_PWD=$(pwd) cd $SCRIPTPATH - docker build -f docker/Dockerfile.finn --build-arg XILINX_VERSION=$FINN_XILINX_VERSION --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --build-arg xrt_supported_versions=xrt_supported_versions.txt --tag=$FINN_DOCKER_TAG . + docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --tag=$FINN_DOCKER_TAG . if [ "$?" 
-ne 0 ]; then echo "Error occurred during docker build, exiting" exit 1 diff --git a/xrt_supported_versions.txt b/xrt_supported_versions.txt deleted file mode 100644 index 3828906123..0000000000 --- a/xrt_supported_versions.txt +++ /dev/null @@ -1 +0,0 @@ -2022.1:f5505e402c2ca1ffe45eb6d3a9399b23a0dc8776 From a1454a3133dbd4e2c8201ea76dfda5e4641d3500 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 7 Nov 2022 16:48:23 +0000 Subject: [PATCH 254/628] Revert "Exit Dockerfile.finn & run_docker.sh process if XRT_DEB_VERSION is not set" This reverts commit 1f17010edf8d256512ba8b9350e0a6321df16955. --- docker/Dockerfile.finn | 7 +------ run-docker.sh | 4 ---- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index d6ab67b42e..72a7c0ca03 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -38,12 +38,6 @@ WORKDIR /workspace ENV TZ="Europe/Dublin" RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -# Make sure the XRT_DEB_VERSION --build-arg was passed to this script -ARG XRT_DEB_VERSION -RUN if [ -z "$XRT_DEB_VERSION" ]; then \ - echo "XRT_DEB_VERSION is not set, exiting now" ; exit 1 ; \ - fi - RUN apt-get update && \ apt-get install -y \ build-essential \ @@ -78,6 +72,7 @@ RUN cd verilator && \ make install # install XRT +ARG XRT_DEB_VERSION RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb RUN rm /tmp/$XRT_DEB_VERSION.deb diff --git a/run-docker.sh b/run-docker.sh index 8b8d0a509a..381be35293 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -179,10 +179,6 @@ if [ "$FINN_DOCKER_PREBUILT" = "0" ]; then OLD_PWD=$(pwd) cd $SCRIPTPATH docker build -f docker/Dockerfile.finn --build-arg XRT_DEB_VERSION=$XRT_DEB_VERSION --tag=$FINN_DOCKER_TAG . - if [ "$?" 
-ne 0 ]; then - echo "Error occurred during docker build, exiting" - exit 1 - fi cd $OLD_PWD fi # Launch container with current directory mounted From b23f0eb93f544463482ba0630ef69ef40a2d181c Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 7 Nov 2022 16:57:04 +0000 Subject: [PATCH 255/628] [Docker] Remove obsolete xrt arg in dockerfile --- docker/Dockerfile.finn | 1 - 1 file changed, 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 72a7c0ca03..b3c669ec10 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -72,7 +72,6 @@ RUN cd verilator && \ make install # install XRT -ARG XRT_DEB_VERSION RUN wget https://www.xilinx.com/bin/public/openDownload?filename=$XRT_DEB_VERSION.deb -O /tmp/$XRT_DEB_VERSION.deb RUN apt install -y /tmp/$XRT_DEB_VERSION.deb RUN rm /tmp/$XRT_DEB_VERSION.deb From 9f8cf74b1347f253baeb8d696048b3ebb7c2b56b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 10 Nov 2022 19:48:29 +0300 Subject: [PATCH 256/628] [FIFO] also generate compilation script for C++ FIFO sizing --- src/finn/util/pyverilator.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 516063f7e0..dd22272e64 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -171,10 +171,10 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): "--threads", "4", ] - launch_process_helper(verilator_args, cwd=build_dir) proc_env = os.environ.copy() - proc_env["OPT_FAST"] = "-O3 -march=native" + gcc_args = "-O3 -march=native" + proc_env["OPT_FAST"] = gcc_args make_args = [ "make", "-j4", @@ -184,6 +184,14 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): "Vfinn_design_wrapper.mk", "Vfinn_design_wrapper", ] + + with open(build_dir + "/compile.sh", "w") as f: + f.write("#!/bin/bash" + "\n") + f.write("export OPT_FAST='%s'\n" % gcc_args) + f.write(" ".join(verilator_args) + "\n") + f.write(" ".join(make_args) + "\n") + + launch_process_helper(verilator_args, cwd=build_dir) launch_process_helper(make_args, proc_env=proc_env, cwd=build_dir) sim_launch_args = ["./Vfinn_design_wrapper"] From 8fe057cc009ca62975cec823a2581b30e62d9ced Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 10 Nov 2022 19:49:01 +0300 Subject: [PATCH 257/628] [FIFO] improve C++ FIFO sizing template --- src/finn/qnn-data/cpp/verilator_fifosim.cpp | 36 +++++++++++++++++---- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/src/finn/qnn-data/cpp/verilator_fifosim.cpp b/src/finn/qnn-data/cpp/verilator_fifosim.cpp index 565aab23e9..0993b1624f 100644 --- a/src/finn/qnn-data/cpp/verilator_fifosim.cpp +++ b/src/finn/qnn-data/cpp/verilator_fifosim.cpp @@ -26,12 +26,6 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/* -verilator -Wno-fatal -Mdir . 
-y /data/finn/sickag/vivado_stitch_proj_k9817tc7 --CFLAGS "--std=c++11" -O3 --x-assign fast --x-initial fast --noassert --cc finn_design_wrapper.v --top-module finn_design_wrapper --exe sim-semseg.cpp --threads 4 - -make OPT_FAST="-O3 -march=native" -j4 -C $(pwd) -f Vfinn_design_wrapper.mk Vfinn_design_wrapper -*/ - #include #include #include @@ -58,6 +52,7 @@ return main_time; extern "C" { //Open an extern C closed below Vfinn_design_wrapper* construct() { Verilated::commandArgs(0, (const char**) nullptr); + //Verilated::traceEverOn(true); Vfinn_design_wrapper* top = new Vfinn_design_wrapper(); return top; } @@ -73,6 +68,27 @@ int destruct(Vfinn_design_wrapper* top) { } return 0; } + +VerilatedVcdC* tfp; +VerilatedVcdC* start_vcd_trace(Vfinn_design_wrapper* top, const char* filename) { + VerilatedVcdC* tfp = new VerilatedVcdC; + top->trace(tfp, 99); + tfp->open(filename); + return tfp; +} +int add_to_vcd_trace(VerilatedVcdC* tfp, int time) { + tfp->dump(time); + return 0; +} +int flush_vcd_trace(VerilatedVcdC* tfp) { + tfp->flush(); + return 0; +} +int stop_vcd_trace(VerilatedVcdC* tfp) { + tfp->close(); + return 0; +} + } // end of code taken from pyverilator_wrapper.cpp generated by PyVerilator @@ -80,10 +96,13 @@ int destruct(Vfinn_design_wrapper* top) { inline void toggle_clk() { eval(top); top->ap_clk = 1; + //add_to_vcd_trace(tfp, main_time); eval(top); top->ap_clk = 0; + //add_to_vcd_trace(tfp, main_time); } + void reset() { top->ap_rst_n = 0; for(unsigned i = 0; i < 10; i++) { @@ -94,7 +113,7 @@ void reset() { int main(int argc, char *argv[]) { top = construct(); - + //tfp = start_vcd_trace(top, "trace.vcd"); unsigned n_iters_per_input = @ITERS_PER_INPUT@; unsigned n_iters_per_output = @ITERS_PER_OUTPUT@; unsigned n_inputs = @N_INPUTS@; @@ -143,6 +162,9 @@ int main(int argc, char *argv[]) { exit_criterion = ((n_in_txns >= n_iters_per_input * n_inputs) && (n_out_txns >= n_iters_per_output * n_inputs)) || (iters > max_iters); } + //flush_vcd_trace(tfp); + //stop_vcd_trace(tfp); + cout << "Simulation finished" << endl; cout << "Number of inputs consumed " << n_in_txns << endl; cout << "Number of outputs produced " << n_out_txns << endl; From c8b5368d03fbd70b7383f54b47bd2d27bc0070e7 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Sun, 13 Nov 2022 18:04:15 +0300 Subject: [PATCH 258/628] [FIFO] bugfixes+improvements in verilator fifosim template - move input token counting to after clock tick - introduce separate tracker for last seen output - introduce (disabled by default) vcd tracing capabilities --- src/finn/qnn-data/cpp/verilator_fifosim.cpp | 43 +++++++++++++-------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/src/finn/qnn-data/cpp/verilator_fifosim.cpp b/src/finn/qnn-data/cpp/verilator_fifosim.cpp index 0993b1624f..d0aca9efe7 100644 --- a/src/finn/qnn-data/cpp/verilator_fifosim.cpp +++ b/src/finn/qnn-data/cpp/verilator_fifosim.cpp @@ -34,6 +34,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "verilated_vcd_c.h" #include "Vfinn_design_wrapper.h" +#ifdef DEBUG +#define TRACE(x) x +#else +#define TRACE(x) ; +#endif + using namespace std; Vfinn_design_wrapper * top; @@ -52,7 +58,7 @@ return main_time; extern "C" { //Open an extern C closed below Vfinn_design_wrapper* construct() { Verilated::commandArgs(0, (const char**) nullptr); - //Verilated::traceEverOn(true); + TRACE(Verilated::traceEverOn(true)); Vfinn_design_wrapper* top = new Vfinn_design_wrapper(); return top; } @@ -69,6 +75,7 @@ int destruct(Vfinn_design_wrapper* top) { return 0; } +TRACE( VerilatedVcdC* tfp; VerilatedVcdC* start_vcd_trace(Vfinn_design_wrapper* top, const char* filename) { VerilatedVcdC* tfp = new VerilatedVcdC; @@ -88,6 +95,7 @@ int stop_vcd_trace(VerilatedVcdC* tfp) { tfp->close(); return 0; } +) } @@ -96,10 +104,10 @@ int stop_vcd_trace(VerilatedVcdC* tfp) { inline void toggle_clk() { eval(top); top->ap_clk = 1; - //add_to_vcd_trace(tfp, main_time); + TRACE(add_to_vcd_trace(tfp, main_time)); eval(top); top->ap_clk = 0; - //add_to_vcd_trace(tfp, main_time); + TRACE(add_to_vcd_trace(tfp, main_time)); } @@ -113,7 +121,7 @@ void reset() { int main(int argc, char *argv[]) { top = construct(); - //tfp = start_vcd_trace(top, "trace.vcd"); + TRACE(tfp = start_vcd_trace(top, "trace.vcd")); unsigned n_iters_per_input = @ITERS_PER_INPUT@; unsigned n_iters_per_output = @ITERS_PER_OUTPUT@; unsigned n_inputs = @N_INPUTS@; @@ -124,7 +132,7 @@ int main(int argc, char *argv[]) { top->m_axis_0_tready = 1; top->s_axis_0_tvalid = 1; - unsigned n_in_txns = 0, n_out_txns = 0, iters = 0; + unsigned n_in_txns = 0, n_out_txns = 0, iters = 0, last_output_at = 0; unsigned latency = 0; bool exit_criterion = false; @@ -132,11 +140,19 @@ int main(int argc, char *argv[]) { cout << "Simulation starting" << endl; cout << "Number of inputs to write " << n_iters_per_input * n_inputs << endl; cout << "Number of outputs to expect " << n_iters_per_output * n_inputs << endl; - cout << "Timeout clock cycles " << max_iters << endl; + cout << "No-output timeout clock cycles " << max_iters << endl; chrono::steady_clock::time_point begin = chrono::steady_clock::now(); while(!exit_criterion) { + toggle_clk(); + iters++; + if(iters % 1000 == 0) { + cout << "Elapsed iters " << iters << " inps " << n_in_txns << " outs " << n_out_txns << endl; + chrono::steady_clock::time_point end = chrono::steady_clock::now(); + cout << "Elapsed since last report = " << chrono::duration_cast(end - begin).count() << "[s]" << endl; + begin = end; + } if(top->s_axis_0_tready == 1 && top->s_axis_0_tvalid == 1) { n_in_txns++; if(n_in_txns == n_iters_per_input * n_inputs) { @@ -146,24 +162,17 @@ int main(int argc, char *argv[]) { } if(top->m_axis_0_tvalid == 1) { n_out_txns++; + last_output_at = iters; if(n_out_txns == n_iters_per_output) { latency = iters; } } - toggle_clk(); - iters++; - if(iters % 1000 == 0) { - cout << "Elapsed iters " << iters << " inps " << n_in_txns << " outs " << n_out_txns << endl; - chrono::steady_clock::time_point end = chrono::steady_clock::now(); - cout << "Elapsed since last report = " << chrono::duration_cast(end - begin).count() << "[s]" << endl; - begin = end; - } - exit_criterion = ((n_in_txns >= n_iters_per_input * n_inputs) && (n_out_txns >= n_iters_per_output * n_inputs)) || (iters > max_iters); + exit_criterion = ((n_in_txns >= n_iters_per_input * n_inputs) && (n_out_txns >= n_iters_per_output * n_inputs)) || ((iters-last_output_at) > max_iters); } - //flush_vcd_trace(tfp); - //stop_vcd_trace(tfp); + 
TRACE(flush_vcd_trace(tfp)); + TRACE(stop_vcd_trace(tfp)); cout << "Simulation finished" << endl; cout << "Number of inputs consumed " << n_in_txns << endl; From ea7518e2661d508a0a68d32fbf53a038cf671a43 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Wed, 16 Nov 2022 15:16:54 +0000 Subject: [PATCH 259/628] [fix-comments]: in/outFIFODepth renamed to in/outFIFODepths --- .../transformation/fpgadataflow/derive_characteristic.py | 4 ++-- src/finn/transformation/fpgadataflow/insert_fifo.py | 6 +++--- src/finn/transformation/fpgadataflow/set_fifo_depths.py | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 8226797210..f783f7ae71 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -127,7 +127,7 @@ def apply(self, model: ModelWrapper): class DeriveFIFOSizes(NodeLocalTransformation): """Prerequisite: DeriveCharacteristic already called on graph. For each node in the graph, use the accumulated I/O characteristic function - to perform FIFO sizing, setting the in/outFIFODepth attributes of HLSCustomOp + to perform FIFO sizing, setting the in/outFIFODepths attributes of HLSCustomOp nodes. * num_workers (int or None) number of parallel workers, see documentation in @@ -178,7 +178,7 @@ def applyNodeLocal(self, node): fifo_depth = int((prod_chrc_part - cons_chrc_part).max()) out_fifo_depths.append(fifo_depth) # set output FIFO depth for this (producing) node - # InsertFIFO looks at the max of (outFIFODepth, inFIFODepth) + # InsertFIFO looks at the max of (outFIFODepths, inFIFODepths) # for each tensor prod.set_nodeattr("outFIFODepths", out_fifo_depths) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 79bd717a5d..38c2d60c9a 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -67,7 +67,7 @@ class InsertFIFO(Transformation): between fpgadataflow nodes. Takes the setting for the depth from the surrounding nodes by extracting - node attribute 'outFIFODepth' of the previous and node attribute 'inFIFODepth' + node attribute 'outFIFODepths' of the previous and node attribute 'inFIFODepths' of the subsequent node. max() of these two values sets the FIFO depth. Constructor arguments: @@ -128,8 +128,8 @@ def apply(self, model): folded output shape of the second node. 
A streaming fifo can't be implemented in between these nodes.""" - # check if outFIFOdepth attribute of first node - # and inFIFOdepth attribute of consumer node is equal + # check if outFIFOdepths attribute of first node + # and inFIFOdepths attribute of consumer node is equal n0_depth = n0.get_nodeattr("outFIFODepths")[idx_out] n1_depth = n1.get_nodeattr("inFIFODepths")[idx_inp] diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index f715aaeffb..5b3ead6d67 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -201,7 +201,7 @@ class InsertAndSetFIFODepths(Transformation): Assumed input graph properties: - all nodes are fpgadataflow nodes - no FIFOs inserted, - - (inFIFODepth/outFIFODepth attrs will be ignored) + - (inFIFODepths/outFIFODepths attrs will be ignored) Output: - graph with appropriate-depth FIFOs inserted @@ -216,7 +216,7 @@ class InsertAndSetFIFODepths(Transformation): - run through rtlsim with stream of multiple random input images (to fill pipeline) - keep track of observed maximum occupancy for each FIFO during rtlsim - when sim finished, update each FIFO depth to maximum observed occupancy - and set inFIFODepth/outFIFODepth attrs to 0 on relevant nodes + and set inFIFODepths/outFIFODepths attrs to 0 on relevant nodes """ def __init__( @@ -365,7 +365,7 @@ def apply(self, model): fifos[node.name] = sim[maxcount_name] # Apply depths back into the model; - # also set in/outFIFODepth to zero for non-FIFO + # also set in/outFIFODepths to zero for non-FIFO # nodes, preventing further FIFO insertion for node in model.graph.node: # set FIFO depth, reset FIFO implementation, From abc4ccc10793655de53b77ea893982866078b946 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Wed, 16 Nov 2022 15:18:54 +0000 Subject: [PATCH 260/628] [notebooks]: ensure that the new (renamed) attribute is addressed correctly --- .../bnn-pynq/cnv_end2end_example.ipynb | 20 +++++++++---------- .../bnn-pynq/tfc_end2end_example.ipynb | 14 ++++++------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index a2747e3921..28155d6f3e 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -359,21 +359,21 @@ "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", "# each tuple is (PE, SIMD, in_fifo_depth) for a layer\n", "folding = [\n", - " (16, 3, 128),\n", - " (32, 32, 128),\n", - " (16, 32, 128),\n", - " (16, 32, 128),\n", - " (4, 32, 81),\n", - " (1, 32, 2),\n", - " (1, 4, 2),\n", - " (1, 8, 128),\n", - " (5, 1, 3),\n", + " (16, 3, [128]),\n", + " (32, 32, [128]),\n", + " (16, 32, [128]),\n", + " (16, 32, [128]),\n", + " (4, 32, [81]),\n", + " (1, 32, [2]),\n", + " (1, 4, [2]),\n", + " (1, 8, [128]),\n", + " (5, 1, [3]),\n", "]\n", "for fcl, (pe, simd, ififodepth) in zip(fc_layers, folding):\n", " fcl_inst = getCustomOp(fcl)\n", " fcl_inst.set_nodeattr(\"PE\", pe)\n", " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", - " fcl_inst.set_nodeattr(\"inFIFODepth\", ififodepth)\n", + " fcl_inst.set_nodeattr(\"inFIFODepths\", ififodepth)\n", "\n", "# use same SIMD values for the sliding window operators\n", "swg_layers = model.get_nodes_by_op_type(\"ConvolutionInputGenerator\")\n", diff --git 
a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index a6f05df309..c4fc92b97c 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -559,17 +559,17 @@ "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", "config = [\n", - " (16, 49, 16, 64, \"block\"),\n", - " (8, 8, 64, 64, \"auto\"),\n", - " (8, 8, 64, 64, \"auto\"),\n", - " (10, 8, 64, 10, \"distributed\"),\n", + " (16, 49, [16], [64], \"block\"),\n", + " (8, 8, [64], [64], \"auto\"),\n", + " (8, 8, [64], [64], \"auto\"),\n", + " (10, 8, [64], [10], \"distributed\"),\n", "]\n", "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n", " fcl_inst = getCustomOp(fcl)\n", " fcl_inst.set_nodeattr(\"PE\", pe)\n", " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", - " fcl_inst.set_nodeattr(\"inFIFODepth\", ififo)\n", - " fcl_inst.set_nodeattr(\"outFIFODepth\", ofifo)\n", + " fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n", + " fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n", " fcl_inst.set_nodeattr(\"ram_style\", ramstyle)\n", " \n", "# set parallelism for input quantizer to be same as first layer's SIMD\n", @@ -590,7 +590,7 @@ "metadata": {}, "source": [ "Besides PE and SIMD three other node attributes are set. `ram_style` specifies how the weights are to be stored (BRAM, LUTRAM, and so on). It can be selected explicitly or with the option `auto` you can let Vivado decide.\n", - "`inFIFODepth` and `outFIFODepth` specifies the FIFO depths that is needed by the node from the surrounding FIFOs. These attributes are used in the transformation 'InsertFIFO' to insert the appropriate FIFOs between the nodes, which will be automatically called as part of the hardware build process.\n", + "`inFIFODepths` and `outFIFODepths` specifies the FIFO depths that is needed by the node from the surrounding FIFOs. These attributes are used in the transformation 'InsertFIFO' to insert the appropriate FIFOs between the nodes, which will be automatically called as part of the hardware build process.\n", "\n", "In previous versions of FINN we had to call transformations to insert data width converters, FIFOs and `TLastMarker` manually at this step. This is no longer needed, as all this is taken care of by the `ZynqBuild` or `VitisBuild` transformations." 
] From c0cda8f78a2b1cc43e273d2b45e72148d6d073c6 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 25 Nov 2022 19:20:00 +0100 Subject: [PATCH 261/628] Make SIMD support independent from PE --- .../fpgadataflow/vectorvectoractivation.py | 16 ++++---- tests/fpgadataflow/test_fpgadataflow_vvau.py | 39 +++++++++++-------- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index da99da2e02..813b673b39 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -225,10 +225,10 @@ def get_output_datatype(self, ind=0): def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") - if simd > 1: - pe = self.get_nodeattr("Channels") - else: - pe = self.get_nodeattr("PE") + #if simd > 1: + #pe = self.get_nodeattr("Channels") + #else: + pe = self.get_nodeattr("PE") in_width = i_bits * simd * pe return in_width @@ -242,10 +242,10 @@ def get_folded_input_shape(self, ind=0): dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") simd = self.get_nodeattr("SIMD") - if simd > 1: - pe = self.get_nodeattr("Channels") - else: - pe = self.get_nodeattr("PE") + #if simd > 1: + #pe = self.get_nodeattr("Channels") + #else: + pe = self.get_nodeattr("PE") sf = k_h * k_w // simd nf = ch // pe diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index c54284dee9..ea4be47334 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -27,7 +27,6 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
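With SIMD decoupled from PE, the VVAU stream width and fold factors follow directly from the two attributes, exactly as in the getters changed above. A minimal standalone sketch of that relationship (the helper name and example values are illustrative only):

    def vvau_folding(i_bits, simd, pe, channels, k_h, k_w):
        # mirrors get_instream_width() / get_folded_input_shape() above
        in_width = i_bits * simd * pe   # input stream width in bits
        sf = (k_h * k_w) // simd        # kernel (SIMD) fold factor
        nf = channels // pe             # channel (PE) fold factor
        return in_width, sf, nf

    # one of the test points below: UINT4 input, 3x3 kernel, 6 channels,
    # simd=9, pe=2  ->  in_width = 72 bits, sf = 1, nf = 3
    print(vvau_folding(4, 9, 2, 6, 3, 3))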
import pytest - import numpy as np from onnx import TensorProto, helper from qonnx.core.datatype import DataType @@ -37,6 +36,8 @@ # from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.transformation.infer_datatypes import InferDataTypes +from qonnx.transformation.infer_shapes import InferShapes import finn.core.onnx_exec as oxe @@ -47,6 +48,9 @@ from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.fpgadataflow.minimize_accumulator_width import ( + MinimizeAccumulatorWidth, +) def _infer_sparse_weight_tensor(W_conv, k_h, k_w, channels): @@ -150,6 +154,11 @@ def _make_single_vvau_modelwrapper( model.set_tensor_datatype("thresh", tdt) model.set_initializer("thresh", T) + # Minimize accumulator width to obtain realistic HLS reports + model = model.transform(MinimizeAccumulatorWidth()) + model = model.transform(InferShapes()) + model = model.transform(InferDataTypes()) + return model @@ -158,27 +167,27 @@ def prepare_inputs(input_tensor): # input datatype -@pytest.mark.parametrize("idt", [DataType["UINT4"], DataType["UINT8"]]) +@pytest.mark.parametrize("idt", [DataType["UINT4"]]) # weight datatype -@pytest.mark.parametrize("wdt", [DataType["INT4"]]) +@pytest.mark.parametrize("wdt", [DataType["UINT4"]]) # activation: None or DataType @pytest.mark.parametrize("act", [DataType["UINT4"], None]) # PE -@pytest.mark.parametrize("pe", [1, "channels"]) +@pytest.mark.parametrize("pe", [1,2,3,6]) # SIMD -@pytest.mark.parametrize("simd", [1]) +@pytest.mark.parametrize("simd", [1,9]) # Input image shape @pytest.mark.parametrize("dim_h", [10]) -@pytest.mark.parametrize("dim_w", [10, 1]) +@pytest.mark.parametrize("dim_w", [10]) # Kernel shape @pytest.mark.parametrize("k_h", [3]) -@pytest.mark.parametrize("k_w", [3, 1]) +@pytest.mark.parametrize("k_w", [3]) # Number of input and output channels -@pytest.mark.parametrize("channels", [3, 4]) +@pytest.mark.parametrize("channels", [6]) # memory mode -@pytest.mark.parametrize("mem_mode", ["const", "decoupled"]) +@pytest.mark.parametrize("mem_mode", ["const"]) # execution mode -@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) +@pytest.mark.parametrize("exec_mode", ["cppsim","rtlsim"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado @@ -194,6 +203,9 @@ def test_fpgadataflow_vvau( if channels % pe != 0: pytest.skip("Requirement Channels divisable by PE is violated.") + #if pe < channels and simd > 1: + # pytest.skip("Do not apply SIMD parallelism before max PE parallelism") + # Generate weights in expected shape for ONNX and HLS node W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w)) # shape: [channels, 1, k, k] W_onnx = _infer_sparse_weight_tensor( @@ -251,13 +263,6 @@ def test_fpgadataflow_vvau( "outp" ] - with open("vvau_test_expected.txt", "w") as f: - f.write("-------expected:\n") - f.write(str(y_expected)) - with open("vvau_test_produced.txt", "w") as f: - f.write("--------produced:\n") - f.write(str(y_produced)) - assert (y_produced == y_expected).all(), "incorrect result" # if exec_mode == "rtlsim": From 0fb57af5ca46657970309fa5a9599adee356d933 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 1 Dec 2022 09:11:35 +0100 Subject: [PATCH 262/628] [RTL SWG] Rework parallel-output implementation style --- 
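The reworked parallel style keeps one shift register per kernel row (so a full window can be read out in parallel) and bridges the rows with BRAM/LUTRAM line buffers; prepare_codegen_parallel() further below sizes these structures as sketched here, assuming channel_factor = IFMChannels/SIMD (the helper name and the 3x3 example values are illustrative only):

    def sketch_parallel_swg_buffers(k_h, k_w, dilation_h, dilation_w, w, channel_factor=1):
        kernel_width = (k_w - 1) * dilation_w + 1
        # minimal buffer: one complete window must fit
        buffer_min_size = ((k_h - 1) * dilation_h * w
                           + (k_w - 1) * dilation_w + 1) * channel_factor
        # one shift register (parallel read) per kernel row, incl. dilation gaps
        reg_fifo_len = k_w + (k_w - 1) * (dilation_w - 1)
        # one line buffer between adjacent kernel rows
        line_buffer_len = (w - kernel_width) + w * (dilation_h - 1)
        return buffer_min_size, reg_fifo_len, line_buffer_len

    # e.g. 3x3 kernel, no dilation, IFM width 10, SIMD == IFMChannels:
    # -> buffer_min_size = 23, reg_fifo_len = 3, line_buffer_len = 7
    print(sketch_parallel_swg_buffers(3, 3, 1, 1, 10))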
finn-rtllib/swg/swg_template_parallel.sv | 406 ++++++++++++++++++ .../convolutioninputgenerator_rtl.py | 284 +++++++++++- 2 files changed, 685 insertions(+), 5 deletions(-) create mode 100644 finn-rtllib/swg/swg_template_parallel.sv diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv new file mode 100644 index 0000000000..432c374764 --- /dev/null +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -0,0 +1,406 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ +module $TOP_MODULE_NAME$_controller #( + int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, + int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, + int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, + int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, + int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, + + int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, + + bit IS_DEPTHWISE = $IS_DEPTHWISE$ +)( + input logic clk, + input logic rst_n, + + input logic advance, + output logic [INCR_BITWIDTH-1:0] addr_incr, + output logic [INCR_BITWIDTH-1:0] tail_incr +); + + // state and counters + typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H + } state_e; + state_e State = $INNERMOST_STATE$; + state_e state_next; + + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; + + // combinational logic for addr_incr generation + always_comb begin : blkHead + unique case (State) + 0 : addr_incr = 0; + 1 : addr_incr = $HEAD_INCR_SIMD$; + 2 : addr_incr = $HEAD_INCR_KW$; + 3 : addr_incr = $HEAD_INCR_KH$; + 4 : addr_incr = $HEAD_INCR_W$; + 5 : addr_incr = $HEAD_INCR_H$; + endcase + end + + // combinational logic for tail_incr generation + uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; + assign tail_incr = + tail_incr_inner_condition? 1 : + Counter_loop_w >= 0? $TAIL_INCR_W$ : + Counter_loop_h >= 0? $TAIL_INCR_H$ : + /* else */ $TAIL_INCR_LAST$; + + // combinational next state logic + always_comb begin : blkState + state_next = State; + if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; + else begin + if(Counter_loop_simd < 0) begin + state_next = + (Counter_loop_kw >= 0)? STATE_LOOP_KW : + (Counter_loop_kh >= 0)? STATE_LOOP_KH : + (Counter_loop_w >= 0)? STATE_LOOP_W : + (Counter_loop_h >= 0)? 
STATE_LOOP_H : + /* else */ STATE_START; + end + end + end : blkState + + // sequential logic + always_ff @ (posedge clk) begin + if(!rst_n) begin + State <= $INNERMOST_STATE$; + Counter_loop_h <= LOOP_H_ITERATIONS; + Counter_loop_w <= LOOP_W_ITERATIONS; + Counter_loop_kh <= LOOP_KH_ITERATIONS; + Counter_loop_kw <= LOOP_KW_ITERATIONS; + Counter_loop_simd <= LOOP_SIMD_ITERATIONS; + end + else if(advance) begin + State <= state_next; + if (State == $INNERMOST_STATE$) begin + if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; + else begin + Counter_loop_simd <= LOOP_SIMD_ITERATIONS; + if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; + else begin + Counter_loop_kw <= LOOP_KW_ITERATIONS; + if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; + else begin + Counter_loop_kh <= LOOP_KH_ITERATIONS; + if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; + else begin + Counter_loop_w <= LOOP_W_ITERATIONS; + if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; + else Counter_loop_h <= LOOP_H_ITERATIONS; + end + end + end + end + end + end + end + +endmodule : $TOP_MODULE_NAME$_controller + +module $TOP_MODULE_NAME$_reg_buffer +#( + parameter WIDTH = 1, + parameter DEPTH = 1 +) +( + CLK, + shift_enable, + shift_in, + shift_out, + data_out +); + +input CLK, shift_enable; +input [WIDTH-1:0] shift_in; +output [WIDTH-1:0] shift_out; +output [WIDTH*DEPTH-1:0] data_out; + +reg [WIDTH-1:0] data [DEPTH-1:0]; + +assign shift_out = data[DEPTH-1]; + +for (genvar e=0; e0; i=i-1) + data[i] <= data[i-1]; + data[0] <= shift_in; + end +end +endmodule : $TOP_MODULE_NAME$_reg_buffer + +module $TOP_MODULE_NAME$_ram_buffer +#( + parameter WIDTH = 1, + parameter DEPTH = 1 +) +( + CLK, + RST, + shift_enable, + shift_in, + shift_out +); + +input CLK, RST, shift_enable; +input [WIDTH-1:0] shift_in; +output [WIDTH-1:0] shift_out; + +reg [WIDTH-1:0] out_reg; +assign shift_out = out_reg; + +integer addr_w, addr_r; //TODO: minimize width + simplify + +$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; + +always @(posedge CLK) begin + if (RST == 1'b0) begin + addr_w <= 0; + addr_r <= 1; + end else begin + if (shift_enable) begin + ram[addr_w] <= shift_in; + out_reg <= ram[addr_r]; + + if (addr_w == DEPTH-1) + addr_w <= 0; + else + addr_w <= addr_w + 1; + + if (addr_r == DEPTH-1) + addr_r <= 0; + else + addr_r <= addr_r + 1; + end + end +end +endmodule : $TOP_MODULE_NAME$_ram_buffer + +module $TOP_MODULE_NAME$_wb +#( + parameter IN_WIDTH = 1, //bit-width*C*MMV_in + parameter OUT_ELEM_WIDTH = 1, //bit-width*C + parameter OUT_WIDTH = 1, //bit-width*C*MMV_out + parameter BUFFER_ELEM_TOTAL = 1 +) +( + CLK, + RST, + data_in, + shift_enable, + data_out +); + +input CLK, RST; +input [IN_WIDTH-1:0] data_in; +input shift_enable; +output [OUT_WIDTH-1:0] data_out; + +$GENERATE_REG_FIFOS$ + +$GENERATE_BRAM_FIFOS$ + +//Fixed interconnect between linear buffers +$GENERATE_BUFFER_CONNECTION$ + +//Fixed REG FIFO <-> output mapping +$GENERATE_OUTPUT_MAPPING$ + + +endmodule : $TOP_MODULE_NAME$_wb + +module $TOP_MODULE_NAME$_impl #( + int BIT_WIDTH, + int SIMD, + int MMV_IN, + int MMV_OUT, + int LAST_READ_ELEM = $LAST_READ_ELEM$, + int FIRST_WRITE_ELEM = $FIRST_WRITE_ELEM$, + int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, + int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, + int INCR_BITWIDTH = $INCR_BITWIDTH$ +)( + input logic ap_clk, + input logic ap_rst_n, + + input logic in0_V_V_TVALID, + output logic in0_V_V_TREADY, + input logic [BIT_WIDTH * SIMD * MMV_IN-1:0] in0_V_V_TDATA, + + output logic out_V_V_TVALID, + 
input logic out_V_V_TREADY, + output logic [BIT_WIDTH * SIMD * MMV_OUT-1:0] out_V_V_TDATA +); + // derived constants + localparam int unsigned BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; + localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; + localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; + + //main buffer instantiation + uwire [BUF_IN_WIDTH -1:0] window_buffer_in; + uwire [BUF_OUT_WIDTH-1:0] window_buffer_out; + uwire window_buffer_shift_enable; + $TOP_MODULE_NAME$_wb + #( + .IN_WIDTH(BUF_IN_WIDTH), + .OUT_ELEM_WIDTH(BUF_OUT_ELEM_WIDTH), + .OUT_WIDTH(BUF_OUT_WIDTH), + .BUFFER_ELEM_TOTAL(BUF_ELEM_TOTAL) + ) + window_buffer_inst + ( + .CLK(ap_clk), + .RST(ap_rst_n), + .data_in(window_buffer_in), + .shift_enable(window_buffer_shift_enable), + .data_out(window_buffer_out) + ); + + //controller instantiation + uwire advance_controller; + uwire signed [INCR_BITWIDTH-1:0] addr_incr; + uwire [INCR_BITWIDTH-1:0] tail_incr; + $TOP_MODULE_NAME$_controller controller_inst ( + .clk(ap_clk), + .rst_n(ap_rst_n), + .advance(advance_controller), + .addr_incr(addr_incr), + .tail_incr(tail_incr) + ); + + // Counters/address registers + // Add a sign bit even to (most) unsigned counters and Window_buffer_read_addr_reg, + // so we can use automatic sign extension and simplify calculations w/ signed increment. + // Alternatively, we could manually sign-extend and shave off a bit here or there. + logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] Newest_buffered_elem = -1; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] Current_elem = FIRST_WRITE_ELEM; + logic [$clog2(LAST_READ_ELEM+1)+1-1:0] First_elem_next_window = 0; + + // Control signals/registers + logic Writing_done = 0; + logic write_done = 0; + + uwire write_ok = write_cmd && (out_V_V_TREADY || write_done); + uwire write_blocked = write_cmd && !out_V_V_TREADY && !write_done; + + uwire write_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !Writing_done;; + + uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; + uwire read_cmd = + !reading_done && ( // if there is still an input element left to read + Writing_done || ( // if fetching is done (e.g. for skipped rows at FM end due to stride) + $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(First_elem_next_window) && + $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(Current_elem) + ) // (over-)write to buffer if oldest buffered element will no longer be needed + ); + uwire read_ok = read_cmd && in0_V_V_TVALID && !write_blocked; + + // includes waiting on W if W-only cycle: wait only on W no R/W to wait for + uwire advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); + + //assign buffer control + assign window_buffer_shift_enable = advance; + assign advance_controller = write_ok; + + //assign I/O ports + assign window_buffer_in = in0_V_V_TDATA; + assign out_V_V_TDATA = window_buffer_out; + assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) + assign out_V_V_TVALID = ap_rst_n && write_cmd && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) + + //write done logic + always_ff @(posedge ap_clk) begin + if (advance) begin + write_done <= 1'b0; //reset flag + end else if (write_ok) // successful W in this cycle, but R still outstanding + write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
+ end + + //main process for advancing counters + always_ff @(posedge ap_clk) begin + if(!ap_rst_n) begin + Newest_buffered_elem <= -1; + Current_elem <= FIRST_WRITE_ELEM; + First_elem_next_window <= 0; + Writing_done <= 0; + end + else begin + if (read_ok) begin + Newest_buffered_elem <= Newest_buffered_elem+1; + + //check if this is the last read cycle (reading_done will be true afterwards) + if ((Newest_buffered_elem == LAST_READ_ELEM-1) && Writing_done) begin + //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) + //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) + Newest_buffered_elem <= -1; + Current_elem <= FIRST_WRITE_ELEM; + First_elem_next_window <= 0; + Writing_done <= 0; + end + end + + if (write_ok) begin + First_elem_next_window <= First_elem_next_window + tail_incr; + + //check if this is the last write cycle (Writing_done will be true afterwards) + if (Current_elem == LAST_WRITE_ELEM) begin + Writing_done <= 1; + + if (reading_done || (read_ok && (Newest_buffered_elem == LAST_READ_ELEM - 1))) begin + //start processing of next FM if reading is done already, or completes in the same cycle + Newest_buffered_elem <= -1; + Current_elem <= FIRST_WRITE_ELEM; + First_elem_next_window <= 0; + Writing_done <= 0; + end + end + else + Current_elem <= $signed(Current_elem) + addr_incr; + end + end + end + +endmodule : $TOP_MODULE_NAME$_impl diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 1afd23d3a1..1ae4022b79 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -72,8 +72,8 @@ def get_nodeattr_types(self): "SIMD": ("i", True, 0), # additional parallelization parameter - not yet implemented "M": ("i", False, 1), - # alternative implementation style - not yet implemented - "parallel_window": ("i", False, 0, {0}), + # Enable parallel window output (requires full SIMD unfolding) + "parallel_window": ("i", False, 0, {0, 1}), "Stride": ("ints", True, []), # [H, W] = [Y, X] "Dilation": ("ints", True, []), # [H, W] = [Y, X] # FINN DataTypes for inputs, weights, outputs @@ -639,6 +639,281 @@ def prepare_codegen_default(self): return template_path, code_gen_dict + def prepare_codegen_parallel(self): + # Parallel implementation style for MMV_out = K: + # mix of shift-registers (for parallel read) and line buffers (BRAM or LUTRAM) + # compute a static schedule by analyzing access pattern (from im2col function) + template_path = ( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" + ) + code_gen_dict = {} + + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + simd = self.get_nodeattr("SIMD") + M = self.get_nodeattr("M") + + k_h, k_w = k + h, w = ifm_dim + pad = [0, 0, 0, 0] # padding happens in separate padding node for now + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + pad_h = pad[0] + pad[2] + pad_w = pad[1] + pad[3] + out_dim_h = im2col.compute_conv_output_dim(h, k_h, stride_h, pad_h, dilation_h) + out_dim_w = im2col.compute_conv_output_dim(w, k_w, stride_w, pad_w, dilation_w) + mmv_in = M * 1 + mmv_out = M * k_h * k_w + channel_factor = int(ifm_ch 
/ simd) + + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_min_size = ( + (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + + # buffer_actual_size = self.get_buffer_depth() # TODO: Move to this method + buffer_actual_size = buffer_min_size + 1 + code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] + + # compute some intermediate values, e.g., kernel "width" = k_w incl. dilation + # or cols/rows that are skipped due to imperfect stride<->dim combination + kernel_width = (k_w - 1) * dilation_w + 1 + kernel_height = (k_h - 1) * dilation_h + 1 + skip_columns = w % (kernel_width + (out_dim_w - 1) * stride_w) + skip_rows = h % (kernel_height + (out_dim_h - 1) * stride_h) + + # compute address increment values for 5-loop nest #TODO: simplify + addr_incr_end_simd = 1 + addr_incr_end_window_elem = (dilation_w - 1) * channel_factor + 1 + addr_incr_end_window_row = ( + ((w - kernel_width) * channel_factor) # remaining line + + ((dilation_h - 1) * w * channel_factor) # skip lines + + 1 # wrap-around of minimally sized buffer + ) + addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 + addr_incr_end_row = ( + -buffer_min_size + + ((skip_columns + kernel_width) * channel_factor) # remaining line + + ((stride_h - 1) * w * channel_factor) # skip lines + + 1 + ) + + # set certain threshold indices to detect when reading/writing finishes + code_gen_dict["$LAST_READ_ELEM$"] = [str(h * w * channel_factor - 1)] + code_gen_dict["$LAST_WRITE_ELEM$"] = [ + str(((h - skip_rows - 1) * w + (w - skip_columns)) * channel_factor - 1) + ] + + # default controller loop structure: # iterations (counters) map directly + loop_h_iterations = out_dim_h + loop_w_iterations = out_dim_w # -> innermost loop + loop_kh_iterations = 1 # k_h + loop_kw_iterations = 1 # k_w + loop_simd_iterations = 1 # channel_factor + + if loop_w_iterations == 1: + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_H"] + loop_h_iterations -= 1 # -1 because state is initial state + else: + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_W"] + loop_w_iterations -= 1 # -1 because state is initial state + + tail_incr_w = addr_incr_end_window + buffer_min_size - 1 + tail_incr_h = addr_incr_end_row + buffer_min_size - 1 + tail_incr_last_window = buffer_min_size - 1 + code_gen_dict["$IS_DEPTHWISE$"] = ["0"] + + # overwrite new loop bounds: + addr_incr_end_simd = 1 + addr_incr_end_window_elem = 1 + addr_incr_end_window_row = 1 + addr_incr_end_window = tail_incr_w + addr_incr_end_row = tail_incr_h + + # add init value for CURRENT_ELEM counter = last elem of first window + code_gen_dict["$FIRST_WRITE_ELEM$"] = [str(buffer_min_size - 1)] + + cntr_bitwidth = math.ceil( + math.log2( + max( + loop_h_iterations - 2 + 1, + loop_w_iterations - 2 + 1, + loop_kh_iterations - 2 + 1, + loop_kw_iterations - 2 + 1, + loop_simd_iterations - 2 + 1, + ) + ) + ) + code_gen_dict["$CNTR_BITWIDTH$"] = [str(cntr_bitwidth)] + code_gen_dict["$LOOP_H_ITERATIONS$"] = [str(loop_h_iterations - 2)] + code_gen_dict["$LOOP_W_ITERATIONS$"] = [str(loop_w_iterations - 2)] + code_gen_dict["$LOOP_KH_ITERATIONS$"] = [str(loop_kh_iterations - 2)] + code_gen_dict["$LOOP_KW_ITERATIONS$"] = [str(loop_kw_iterations - 2)] + code_gen_dict["$LOOP_SIMD_ITERATIONS$"] = [str(loop_simd_iterations - 2)] + + incr_bitwidth = 1 + math.ceil( + math.log2( + max( + abs(addr_incr_end_simd) + 1, + abs(addr_incr_end_window_elem) + 1, + abs(addr_incr_end_window_row) + 1, + abs(addr_incr_end_window) + 1, + abs(addr_incr_end_row) + 
1, + abs(tail_incr_w) + 1, + abs(tail_incr_h) + 1, + abs(tail_incr_last_window) + 1, + ) + ) + ) + code_gen_dict["$INCR_BITWIDTH$"] = [str(incr_bitwidth)] + code_gen_dict["$HEAD_INCR_SIMD$"] = [str(addr_incr_end_simd)] + code_gen_dict["$HEAD_INCR_KW$"] = [str(addr_incr_end_window_elem)] + code_gen_dict["$HEAD_INCR_KH$"] = [str(addr_incr_end_window_row)] + code_gen_dict["$HEAD_INCR_W$"] = [str(addr_incr_end_window)] + code_gen_dict["$HEAD_INCR_H$"] = [str(addr_incr_end_row)] + code_gen_dict["$TAIL_INCR_W$"] = [str(tail_incr_w)] + code_gen_dict["$TAIL_INCR_H$"] = [str(tail_incr_h)] + code_gen_dict["$TAIL_INCR_LAST$"] = [str(tail_incr_last_window)] + + code_gen_dict["$SIMD$"] = [str(simd)] + code_gen_dict["$MMV_IN$"] = [str(mmv_in)] + code_gen_dict["$MMV_OUT$"] = [str(mmv_out)] + + # prepare buffer partitioning into "reg_fifos" and "bram_fifos" + # use normalized ([H,W]=[1,W]) dimensions for 1D case + ( + ifm_ch, + [ifm_dim_h, ifm_dim_w], + [ofm_dim_h, ofm_dim_w], + [k_h, k_w], + [stride_h, stride_w], + [dilation_h, dilation_w], + ) = self.get_1d_conv_attrs_normalized() + + reg_fifos = [] + bram_fifos_depth = [] + + px_idx = 0 + for ky in range(k_h): + reg_fifo = [] + for kx in range(k_w): + reg_fifo.append(px_idx) + px_idx += 1 + if kx < (k_w - 1): + reg_fifo.extend([-1] * (dilation_w - 1)) + px_idx += dilation_w - 1 + reg_fifos.append(reg_fifo) + + if ky < (k_h - 1): + line_buffer_len = (w - kernel_width) + w * (dilation_h - 1) + bram_fifos_depth.append(line_buffer_len) + px_idx += line_buffer_len + + code_gen_dict["$GENERATE_REG_FIFOS$"] = [] + for i, reg_fifo in enumerate(reg_fifos): + code_gen_dict["$GENERATE_REG_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] reg_fifo_{id}_in; + wire [IN_WIDTH-1:0] reg_fifo_{id}_out; + wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; + {name}_reg_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + reg_buffer_inst_{id} + ( + .CLK(CLK), + .shift_enable(shift_enable), + .shift_in(reg_fifo_{id}_in), + .shift_out(reg_fifo_{id}_out), + .data_out(reg_fifo_{id}) + );""".format( + name=self.get_verilog_top_module_name(), + id=i, + len=len(reg_fifo), + ) + ) + + code_gen_dict["$GENERATE_BRAM_FIFOS$"] = [] + for i, bram_fifo_depth in enumerate(bram_fifos_depth): + code_gen_dict["$GENERATE_BRAM_FIFOS$"].append( + """ + wire [IN_WIDTH-1:0] bram_fifo_{id}_in; + wire [IN_WIDTH-1:0] bram_fifo_{id}_out; + {name}_ram_buffer + #( + .WIDTH(IN_WIDTH), + .DEPTH({len}) + ) + ram_buffer_inst_{id} + ( + .CLK(CLK), + .RST(RST), + .shift_enable(shift_enable), + .shift_in(bram_fifo_{id}_in), + .shift_out(bram_fifo_{id}_out) + );""".format( + name=self.get_verilog_top_module_name(), + id=i, + len=bram_fifo_depth, + ) + ) + + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"] = [] + out_idx = mmv_out - 1 + for fifo_id, reg_fifo in enumerate(reg_fifos): + for fifo_idx, access_idx in enumerate(reg_fifo): + if access_idx != -1: + code_gen_dict["$GENERATE_OUTPUT_MAPPING$"].append( + """assign data_out[OUT_ELEM_WIDTH*{out_idx}+:OUT_ELEM_WIDTH] + = reg_fifo_{fifo_id}[{access_idx}*{mmv}*OUT_ELEM_WIDTH+ + OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];""".format( + out_idx=out_idx, + fifo_id=fifo_id, + access_idx=len(reg_fifo) + - 1 + - int((max(reg_fifo) - access_idx) / M), + mmv_idx=(max(reg_fifo) - access_idx) % M, + mmv=M, + ) + ) + # reversal: out_idx=0 -> oldest buffer element -> highest access_idx + out_idx = out_idx - 1 + assert out_idx == -1, "ERROR: Not all output vector elements connected" + + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"] = [] + for i in range(len(reg_fifos)): + if i == 0: + # first 
FIFO containing newest elements -> input comes from input reg + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = data_in;""".format( + fifo_id=i, + ) + ) + else: + # other REG FIFOs -> input comes from connected BRAM FIFO (line buffer) + input_fifo_id = i - 1 + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign reg_fifo_{fifo_id}_in = bram_fifo_{input_fifo_id}_out; + """.format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + ) + for i in range(len(bram_fifos_depth)): + input_fifo_id = i + code_gen_dict["$GENERATE_BUFFER_CONNECTION$"].append( + """assign bram_fifo_{fifo_id}_in = reg_fifo_{input_fifo_id}_out; + """.format( + fifo_id=i, input_fifo_id=input_fifo_id + ) + ) + + return template_path, code_gen_dict + def select_impl_style(self): simd = self.get_nodeattr("SIMD") M = self.get_nodeattr("M") @@ -685,9 +960,6 @@ def select_impl_style(self): else: impl_style = "default" - assert ( - impl_style == "default" - ), "ERROR: Parallel window mode not yet implemented" return impl_style def generate_hdl(self): @@ -696,6 +968,8 @@ def generate_hdl(self): # prepare code generation by filling out dictionaries if impl_style == "default": template_path, code_gen_dict = self.prepare_codegen_default() + elif impl_style == "parallel": + template_path, code_gen_dict = self.prepare_codegen_parallel() else: raise Exception("Requested impl. style not implemented") From 462b747edafb62d0f02f8f3e857e6324d9de14fc Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 1 Dec 2022 13:41:49 +0000 Subject: [PATCH 263/628] [pre-commit] Update flake8 link --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dfc83ba618..5a7f70f8f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -61,7 +61,7 @@ repos: - id: black language_version: python3 -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: - id: flake8 From 02b9f6def0bc9d18c6391e01ec3d768f1dc9f957 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 1 Dec 2022 14:01:56 +0000 Subject: [PATCH 264/628] [pre-commit] Specify python for flake8 --- .pre-commit-config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5a7f70f8f6..e90e10d26f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -68,3 +68,5 @@ repos: # black-compatible flake-8 config args: ['--max-line-length=88', # black default '--extend-ignore=E203'] # E203 is not PEP8 compliant + language_version: python3 + From ad6ae42a698b90e1d93bb5e3a4f53619097c8a3b Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 1 Dec 2022 14:13:19 +0000 Subject: [PATCH 265/628] [pre-commit] Update rev version for flake8 --- .pre-commit-config.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e90e10d26f..6113923329 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -62,11 +62,9 @@ repos: language_version: python3 - repo: https://github.com/PyCQA/flake8 - rev: 3.9.2 + rev: 6.0.0 hooks: - id: flake8 # black-compatible flake-8 config args: ['--max-line-length=88', # black default '--extend-ignore=E203'] # E203 is not PEP8 compliant - language_version: python3 - From ebf54c4889fecdd98d907a91b0796515f842367f Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 1 Dec 2022 14:29:14 +0000 Subject: [PATCH 266/628] [GA] Fix python version in lintin GA --- 
.github/workflows/pre-commit.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 20f5b48f7a..f61af878ff 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -17,6 +17,8 @@ jobs: - name: Setup Python uses: actions/setup-python@v3 + with: + python-version: '3.8' - name: Run Lint uses: pre-commit/action@v3.0.0 From 087f6b34cc737ce1db28bcab643696df6617b99f Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 1 Dec 2022 14:34:06 +0000 Subject: [PATCH 267/628] [pre-commit] Reverse update of flake8 rev --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6113923329..5a7f70f8f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -62,7 +62,7 @@ repos: language_version: python3 - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 3.9.2 hooks: - id: flake8 # black-compatible flake-8 config From d222db36ee8e740931def8302a7b8e099fe18fbf Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 1 Dec 2022 08:01:06 -0800 Subject: [PATCH 268/628] Updating MVAU LUT estimation - Using accDataType rather than an estimate - Updated the estimate equation for case when accDataType is not specified - Adding logic check that thresholds are also using LUTRAM rather than BRAM --- .../fpgadataflow/matrixvectoractivation.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index df9d1f1e70..ed19b93bb2 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -350,13 +350,23 @@ def lut_estimation(self): # adder tree addertree_luts = (W + A) * (2 * Q - 1) # accumulator - acc_bits = W + A + np.ceil(math.log(MW, 2)) + acc_datatype = self.get_accumulator_datatype() + # if accDataType is not set, then it will default to INT32, which would + # be a large overestimate in most (if not all) cases. In this scenario, + # we would use the minimum accumulator as determined by the data types. 
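For a dot product of MW terms, that minimum width has a closed-form estimate, which is what the following lines compute (here W and A are the weight and activation bit widths used elsewhere in this estimate). A small standalone sketch of the same expression, with illustrative function name and example values:

    import math

    def min_acc_bit_width(num_terms, wt_bits, act_bits, act_signed=False):
        alpha = math.log(num_terms, 2) + wt_bits + act_bits - 1 - int(act_signed)
        phi = math.log(1 + pow(2, -alpha), 2)
        return int(math.ceil(alpha + phi + 1))

    # e.g. MW = 64, 4-bit weights, 4-bit unsigned activations -> 15 bits,
    # so acc_bits = min(accDataType.bitwidth(), 15) rather than the 32-bit
    # default that applies when accDataType was never set explicitly
    print(min_acc_bit_width(64, 4, 4))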
+ alpha = math.log(MW, 2) + W + A - 1 - int(idt.signed()) + phi = lambda x_: math.log(1 + pow(2, -x_), 2) + acc_bits = min( + acc_datatype.bitwidth(), + np.ceil(alpha + phi(alpha) + 1) + ) acc_luts = acc_bits # thresholds and threshold comparators thr_luts = 0 comp_luts = 0 noact = self.get_nodeattr("noActivation") - if noact == 0: + tmem_style = self.get_nodeattr("ram_style_thresholds") + if (noact == 0) and (tmem_style == "distributed"): odt = self.get_output_datatype() B = odt.bitwidth() thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64) @@ -405,6 +415,10 @@ def get_input_datatype(self, ind=0): else: raise Exception("Undefined input ind for this layer type") + def get_accumulator_datatype(self): + """Returns FINN DataType of accumulator""" + return DataType[self.get_nodeattr("accDataType")] + def get_weight_datatype(self): """Returns FINN DataType of weights.""" return DataType[self.get_nodeattr("weightDataType")] From aaf03f5738d2daada44851f39e2442db1d44f9a2 Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 1 Dec 2022 08:01:25 -0800 Subject: [PATCH 269/628] Updating VVAU LUT estimation - Using accDataType rather than an estimate - Updated the estimate equation for case when accDataType is not specified - Adding logic check that thresholds are also using LUTRAM rather than BRAM --- .../fpgadataflow/vectorvectoractivation.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index a411d245a9..a0b9268957 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -216,6 +216,10 @@ def get_weight_datatype(self): """Returns FINN DataType of weights.""" return DataType[self.get_nodeattr("weightDataType")] + def get_accumulator_datatype(self): + """Returns FINN DataType of accumulator""" + return DataType[self.get_nodeattr("accDataType")] + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] @@ -1172,14 +1176,25 @@ def lut_estimation(self): else: mult_luts = (2 * math.ceil((W + A) / 6) - 1) * (W + A) # accumulator + acc_datatype = self.get_accumulator_datatype() + acc_bits = acc_datatype.bitwidth() k_h, k_w = self.get_nodeattr("Kernel") - acc_bits = W + A + math.ceil(math.log(k_h * k_w, 2)) + # if accDataType is not set, then it will default to INT32, which would + # be a large overestimate in most (if not all) cases. In this scenario, + # we would use the minimum accumulator as determined by the data types. 
+ alpha = math.log(k_h * k_w, 2) + W + A - 1 - int(idt.signed()) + phi = lambda x_: math.log(1 + pow(2, -x_), 2) + acc_bits = min( + acc_datatype.bitwidth(), + np.ceil(alpha + phi(alpha) + 1) + ) acc_luts = acc_bits # thresholds and threshold comparators thr_luts = 0 comp_luts = 0 noact = self.get_nodeattr("noActivation") - if noact == 0: + tmem_style = self.get_nodeattr("ram_style_thresholds") + if (noact == 0) and (tmem_style == "distributed"): odt = self.get_output_datatype() B = odt.bitwidth() thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64) From 48095e47dd0b6f471a02f52223f8e244a9d763ee Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 1 Dec 2022 08:32:40 -0800 Subject: [PATCH 270/628] Updating qonnx URL and commit --- fetch-repos.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index b0f6400ed1..b7b616e166 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" +QONNX_COMMIT="13d777a2aa0dc449dc3de7aa369c1e155d6ce2c2 " FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" @@ -38,7 +38,7 @@ AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" -QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" +QONNX_URL="https://github.com/i-colbert/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" BREVITAS_URL="https://github.com/Xilinx/brevitas.git" PYVERILATOR_URL="https://github.com/maltanar/pyverilator.git" From a5ffaca3e3208a580153bac121443bd33ee95f85 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 2 Nov 2022 16:04:00 +0100 Subject: [PATCH 271/628] [FIFO] support FIFO splitting for pow2 size optimizations --- .../fpgadataflow/set_fifo_depths.py | 99 +++++++++++++------ 1 file changed, 69 insertions(+), 30 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index dccd97020b..9b882eeac0 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -39,7 +39,6 @@ GiveUniqueNodeNames, SortGraph, ) -from qonnx.util.basic import get_by_name from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance from finn.transformation.fpgadataflow.annotate_cycles import AnnotateCycles @@ -427,32 +426,80 @@ class SplitLargeFifos(Transformation): """Split FIFOs with a depth larger than 32768 into smaller ones to ensure that they can be correctly generated.""" + def __init__( + self, + max_qsrl_depth=256, + ): + super().__init__() + self.max_qsrl_depth = max_qsrl_depth + + def get_split_configs(self, depth): + max_size = 32768 + + def floor_pow2(x): + if (x & (x - 1) == 0) and x != 0: + return x + else: + return 1 << ((x - 1).bit_length() - 1) + + ret = [] + # trivial case: for small FIFOs, return as-is with rtl style + if depth <= self.max_qsrl_depth: + return [(depth, "rtl")] + # first pass: ensure max depth of 32k is respected + # (restricted by Vivado AXIS infra IP) + + remainder = depth + while remainder != 0: + if remainder > max_size: + ret.append(max_size) + 
remainder -= max_size + else: + ret.append(remainder) + remainder = 0 + # second pass: break non-power-of-2 sized FIFOs + # into several ones + ret_pass2 = [] + + for cand_depth in ret: + cand_floor_pow2 = floor_pow2(cand_depth) + ret_pass2.append(cand_floor_pow2) + if cand_floor_pow2 < cand_depth: + ret_pass2.append(cand_depth - cand_floor_pow2) + # finally, add impl_style to each split FIFO + ret_final = [] + for cand_depth in ret_pass2: + if cand_depth <= self.max_qsrl_depth: + ret_final.append((cand_depth, "rtl")) + else: + ret_final.append((cand_depth, "vivado")) + + return ret_final + def apply(self, model): graph = model.graph node_ind = 0 graph_modified = False - for n in graph.node: + for node in graph.node: node_ind += 1 - if n.op_type == "StreamingFIFO": - depth = get_by_name(n.attribute, "depth") - if depth.i > 32768: - n0 = getCustomOp(n) - fld_shape = n0.get_folded_output_shape() - dtype = n0.get_nodeattr("dataType") - impl_style = n0.get_nodeattr("impl_style") - ram_style = n0.get_nodeattr("ram_style") - shape = model.get_tensor_shape(n.input[0]) - split_n = math.ceil(depth.i / 32768) - fifo_depth = math.ceil(depth.i / split_n) - for i in range(split_n): + if node.op_type == "StreamingFIFO": + n_inst = getCustomOp(node) + depth = n_inst.get_nodeattr("depth") + cfgs = self.get_split_configs(depth) + if len(cfgs) > 1: + fld_shape = n_inst.get_folded_output_shape() + dtype = n_inst.get_nodeattr("dataType") + ram_style = n_inst.get_nodeattr("ram_style") + shape = model.get_tensor_shape(node.input[0]) + for i, (fifo_depth, impl_style) in enumerate(cfgs): if i == 0: - inp = n.input[0] + inp = node.input[0] else: - inp = n.name + "_" + str(i - 1) + "_out" - if i == split_n - 1: - outp = n.output[0] + inp = node.name + "_" + str(i - 1) + "_out" + if i == len(cfgs) - 1: + outp = node.output[0] else: - outp = n.name + "_" + str(i) + "_out" + outp = node.name + "_" + str(i) + "_out" out_tensor = helper.make_tensor_value_info( outp, TensorProto.FLOAT, shape ) @@ -469,21 +516,13 @@ def apply(self, model): dataType=dtype, impl_style=impl_style, ram_style=ram_style, + name=node.name + "_" + str(i), ) graph.node.insert(node_ind + i, fifo_node) - graph.node.remove(n) - if n.output[0] != "global_out": - consumer = model.find_consumer(n.output[0]) - n1 = getCustomOp(consumer) - n1.set_nodeattr("outFIFODepth", fifo_depth) - if n.input[0] != "global_in": - producer = model.find_producer(n.input[0]) - n2 = getCustomOp(producer) - n2.set_nodeattr("inFIFODepth", fifo_depth) + graph.node.remove(node) graph_modified = True if graph_modified: model = model.transform(SortGraph()) - model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) - return (model, graph_modified) + return (model, False) From bce67e959b3ace89840ff7b722902a042d98fcd0 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 2 Dec 2022 12:40:52 +0100 Subject: [PATCH 272/628] [Build] move FIFO splitting past final json folding config file gen --- src/finn/builder/build_dataflow_steps.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 72f3fb2255..359a137df1 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -552,10 +552,6 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform(GiveReadableTensorNames()) if cfg.folding_config_file is not None: model = 
model.transform(ApplyConfig(cfg.folding_config_file)) - if cfg.split_large_fifos: - model = model.transform(SplitLargeFifos()) - # remove any shallow FIFOs - model = model.transform(RemoveShallowFIFOs()) # extract the final configuration and save it as json hw_attrs = [ @@ -572,6 +568,13 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model, cfg.output_dir + "/final_hw_config.json", hw_attrs ) + # perform FIFO splitting and shallow FIFO removal only after the final config + # json file has been written. otherwise, since these transforms may add/remove + # FIFOs, we get name mismatch problems when trying to reuse the final config. + if cfg.split_large_fifos: + model = model.transform(SplitLargeFifos()) + model = model.transform(RemoveShallowFIFOs()) + # after FIFOs are ready to go, call PrepareIP and HLSSynthIP again # this will only run for the new nodes (e.g. FIFOs and DWCs) model = model.transform( From 5d95403745e3c22f1dbf97bd2e0e520afcffd0e5 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 2 Dec 2022 12:49:19 +0100 Subject: [PATCH 273/628] [FIFO] better documentation for SplitLargeFIFOs, FIFO in caps --- src/finn/builder/build_dataflow_steps.py | 4 +-- .../fpgadataflow/set_fifo_depths.py | 30 ++++++++++--------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 359a137df1..e9d2859756 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -98,7 +98,7 @@ from finn.transformation.fpgadataflow.set_fifo_depths import ( InsertAndSetFIFODepths, RemoveShallowFIFOs, - SplitLargeFifos, + SplitLargeFIFOs, ) from finn.transformation.fpgadataflow.set_folding import SetFolding from finn.transformation.fpgadataflow.synth_ooc import SynthOutOfContext @@ -572,7 +572,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): # json file has been written. otherwise, since these transforms may add/remove # FIFOs, we get name mismatch problems when trying to reuse the final config. if cfg.split_large_fifos: - model = model.transform(SplitLargeFifos()) + model = model.transform(SplitLargeFIFOs()) model = model.transform(RemoveShallowFIFOs()) # after FIFOs are ready to go, call PrepareIP and HLSSynthIP again diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 9b882eeac0..6d1202d861 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -422,20 +422,23 @@ def apply(self, model): return (model, False) -class SplitLargeFifos(Transformation): - """Split FIFOs with a depth larger than 32768 into smaller ones - to ensure that they can be correctly generated.""" +class SplitLargeFIFOs(Transformation): + """Split large FIFOs before implementation, for two reasons: + - impl_style="vivado" supports a max depth of 32k. Any larger + FIFOs must be implemented as a sequence of smaller FIFOs. + - impl_style="vivado" requires power-of-two depths, which is + normally handled by rounding up to the nearest power-of-two. + So a FIFO of size 8196 normally gets rounded-up to a depth of + 16384 and wastes a lot of resources. Here, instead, we split + this up into two FIFOs of depth 8192 + 4. 
+ """ - def __init__( - self, - max_qsrl_depth=256, - ): + def __init__(self, max_qsrl_depth=256, max_vivado_depth=32768): super().__init__() self.max_qsrl_depth = max_qsrl_depth + self.max_vivado_depth = max_vivado_depth def get_split_configs(self, depth): - max_size = 32768 - def floor_pow2(x): if (x & (x - 1) == 0) and x != 0: return x @@ -446,14 +449,13 @@ def floor_pow2(x): # trivial case: for small FIFOs, return as-is with rtl style if depth <= self.max_qsrl_depth: return [(depth, "rtl")] - # first pass: ensure max depth of 32k is respected + # first pass: ensure max depth is respected # (restricted by Vivado AXIS infra IP) - remainder = depth while remainder != 0: - if remainder > max_size: - ret.append(max_size) - remainder -= max_size + if remainder > self.max_vivado_depth: + ret.append(self.max_vivado_depth) + remainder -= self.max_vivado_depth else: ret.append(remainder) remainder = 0 From d6fe28ff031f6d8bdde72602fa184f2cbed09e51 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 2 Dec 2022 13:04:05 +0000 Subject: [PATCH 274/628] [tests] Mark ext_weights board tests as xfail --- tests/end2end/test_ext_weights.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index 9483ccf0b2..e230f735d7 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -113,6 +113,7 @@ def test_end2end_ext_weights_build(): @pytest.mark.board @pytest.mark.end2end +@pytest.mark.fail def test_end2end_ext_weights_dataset(): # make sure we have local copies of mnist dataset files subprocess.check_output(["mkdir", "-p", mnist_local]) @@ -129,6 +130,7 @@ def test_end2end_ext_weights_dataset(): @pytest.mark.end2end +@pytest.mark.xfail def test_end2end_ext_weights_run_on_hw(): build_env = get_build_env(build_kind, target_clk_ns) deploy_dir = get_checkpoint_name("build") From 82076f1014a47af4b6b65592108e98ff7b5ca2c1 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 2 Dec 2022 13:05:12 +0000 Subject: [PATCH 275/628] [tests] Mark cybsec board tests as xfail --- tests/end2end/test_end2end_cybsec_mlp.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index b6482dc96c..290afc3084 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -229,6 +229,7 @@ def test_end2end_cybsec_mlp_build(QONNX_export): @pytest.mark.end2end +@pytest.mark.xfail @pytest.mark.parametrize("QONNX_export", [False, True]) def test_end2end_cybsec_mlp_run_on_hw(QONNX_export): build_env = get_build_env(build_kind, target_clk_ns) From f5b8a2e883dd57c67e5e8232ec9e6ebad234cbcc Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 2 Dec 2022 14:24:32 +0100 Subject: [PATCH 276/628] [FIFO] cleaner impl of get_fifo_split_configs --- .../fpgadataflow/set_fifo_depths.py | 92 ++++++++++--------- 1 file changed, 51 insertions(+), 41 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 6d1202d861..f3e6a27736 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -422,6 +422,54 @@ def apply(self, model): return (model, False) +def get_fifo_split_configs(depth, max_qsrl_depth, max_vivado_depth): + def floor_pow2(x): + if (x & (x - 1) == 0) and x != 0: + return x + else: + return 1 << ((x - 1).bit_length() - 1) + + def decompose_pow2(x): + if x <= 
max_qsrl_depth: + return [x] + else: + r = floor_pow2(x) + if x == r: + return [x] + else: + return [r, *decompose_pow2(x - r)] + + ret = [] + # trivial case: for small FIFOs, return as-is with rtl style + if depth <= max_qsrl_depth: + return [(depth, "rtl")] + # first pass: ensure max depth is respected + # (restricted by Vivado AXIS infra IP) + remainder = depth + while remainder != 0: + if remainder > max_vivado_depth: + ret.append(max_vivado_depth) + remainder -= max_vivado_depth + else: + ret.append(remainder) + remainder = 0 + # second pass: break non-power-of-2 sized FIFOs + # into several ones + + ret_pass2 = list(map(decompose_pow2, ret)) + ret_pass2 = [x for dec_list in ret_pass2 for x in dec_list] + + # finally, add impl_style to each split FIFO + ret_final = [] + for cand_depth in ret_pass2: + if cand_depth <= max_qsrl_depth: + ret_final.append((cand_depth, "rtl")) + else: + ret_final.append((cand_depth, "vivado")) + + return ret_final + + class SplitLargeFIFOs(Transformation): """Split large FIFOs before implementation, for two reasons: - impl_style="vivado" supports a max depth of 32k. Any larger @@ -438,46 +486,6 @@ def __init__(self, max_qsrl_depth=256, max_vivado_depth=32768): self.max_qsrl_depth = max_qsrl_depth self.max_vivado_depth = max_vivado_depth - def get_split_configs(self, depth): - def floor_pow2(x): - if (x & (x - 1) == 0) and x != 0: - return x - else: - return 1 << ((x - 1).bit_length() - 1) - - ret = [] - # trivial case: for small FIFOs, return as-is with rtl style - if depth <= self.max_qsrl_depth: - return [(depth, "rtl")] - # first pass: ensure max depth is respected - # (restricted by Vivado AXIS infra IP) - remainder = depth - while remainder != 0: - if remainder > self.max_vivado_depth: - ret.append(self.max_vivado_depth) - remainder -= self.max_vivado_depth - else: - ret.append(remainder) - remainder = 0 - # second pass: break non-power-of-2 sized FIFOs - # into several ones - ret_pass2 = [] - - for cand_depth in ret: - cand_floor_pow2 = floor_pow2(cand_depth) - ret_pass2.append(cand_floor_pow2) - if cand_floor_pow2 < cand_depth: - ret_pass2.append(cand_depth - cand_floor_pow2) - # finally, add impl_style to each split FIFO - ret_final = [] - for cand_depth in ret_pass2: - if cand_depth <= self.max_qsrl_depth: - ret_final.append((cand_depth, "rtl")) - else: - ret_final.append((cand_depth, "vivado")) - - return ret_final - def apply(self, model): graph = model.graph node_ind = 0 @@ -487,7 +495,9 @@ def apply(self, model): if node.op_type == "StreamingFIFO": n_inst = getCustomOp(node) depth = n_inst.get_nodeattr("depth") - cfgs = self.get_split_configs(depth) + cfgs = get_fifo_split_configs( + depth, self.max_qsrl_depth, self.max_vivado_depth + ) if len(cfgs) > 1: fld_shape = n_inst.get_folded_output_shape() dtype = n_inst.get_nodeattr("dataType") From c04baacec361cd07bc72d2eaf362b2a4dc8aa512 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 2 Dec 2022 14:25:11 +0100 Subject: [PATCH 277/628] [Test] fix test_split_large_fifos, include pow2 behavior --- tests/fpgadataflow/test_split_large_fifos.py | 40 +++++++++++++++----- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py index ab9230ad39..eab8072fc8 100644 --- a/tests/fpgadataflow/test_split_large_fifos.py +++ b/tests/fpgadataflow/test_split_large_fifos.py @@ -32,10 +32,12 @@ import json import shutil from brevitas.export.onnx.generic.manager import BrevitasONNXManager -from math import ceil 
+from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.registry import getCustomOp import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg +from finn.transformation.fpgadataflow.set_fifo_depths import get_fifo_split_configs from finn.util.basic import make_build_dir from finn.util.test import get_trained_network_and_ishape @@ -53,7 +55,7 @@ def get_folding_cfg(depth=65536): cfg["Defaults"] = dict() for i in range(3): key = "StreamingFIFO_" + str(i) - cfg[key] = {"depth": depth, "ram_style": "auto", "impl_style": "rtl"} + cfg[key] = {"depth": depth, "ram_style": "auto", "impl_style": "vivado"} return cfg @@ -93,12 +95,32 @@ def test_split_large_fifos(depth): / float(est_data["estimated_throughput_fps"]) > 0.9 ) - with open(tmp_output_dir + "/final_hw_config.json") as f: - hw_config = json.load(f) - n = 0 - for key in hw_config: - if "StreamingFIFO" in key: - n += 1 - assert n == 3 * ceil(depth / 32768) + 1 + model = ModelWrapper( + tmp_output_dir + "/intermediate_models/step_set_fifo_depths.onnx" + ) + # exclude final FIFO node (output FIFO, not part of test) + fifo_nodes = model.get_nodes_by_op_type("StreamingFIFO")[:-1] + golden_cfg = get_fifo_split_configs(depth, 256, 32768) + for i, fifo_node in enumerate(fifo_nodes): + inst = getCustomOp(fifo_node) + fifo_depth = inst.get_nodeattr("depth") + assert fifo_depth == golden_cfg[i % len(golden_cfg)][0] shutil.rmtree(tmp_output_dir) + + +def test_split_large_fifo_configs(): + ret0 = get_fifo_split_configs(513, 256, 32768) + assert ret0 == [(512, "vivado"), (1, "rtl")] + ret1 = get_fifo_split_configs(1200, 256, 32768) + assert ret1 == [(1024, "vivado"), (176, "rtl")] + ret2 = get_fifo_split_configs(45000, 256, 32768) + assert ret2 == [ + (32768, "vivado"), + (8192, "vivado"), + (2048, "vivado"), + (1024, "vivado"), + (512, "vivado"), + (256, "rtl"), + (200, "rtl"), + ] From df8a70fda5cc7f1d4876c70f3cec2e34f59bcbd8 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 5 Dec 2022 12:45:16 -0800 Subject: [PATCH 278/628] Update vectorvectoractivation.py --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index a0b9268957..d5216a8711 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -1193,8 +1193,8 @@ def lut_estimation(self): thr_luts = 0 comp_luts = 0 noact = self.get_nodeattr("noActivation") - tmem_style = self.get_nodeattr("ram_style_thresholds") - if (noact == 0) and (tmem_style == "distributed"): + # TODO - add 'ram_style_threshold' node attribute + if noact == 0: odt = self.get_output_datatype() B = odt.bitwidth() thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64) From 8debc4a95bac16369ccbf242fa439ce08679b7a8 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 6 Dec 2022 12:53:17 +0000 Subject: [PATCH 279/628] [tests] set clk period higher for bnn end2end tests --- tests/end2end/test_end2end_bnn_pynq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index ccdd8816ee..79cfafa22d 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -102,7 +102,7 @@ ) build_dir = os.environ["FINN_BUILD_DIR"] -target_clk_ns = 10 +target_clk_ns = 20 mem_mode = 
"decoupled" rtlsim_trace = False From 5bfdc0e0e813ee8f615e197aee835c67e9c1b762 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 8 Dec 2022 14:23:37 +0000 Subject: [PATCH 280/628] [VitisBuild] Reverse check for node name in ipgen and add prefix for node names in vitis build --- src/finn/transformation/fpgadataflow/prepare_ip.py | 6 +----- src/finn/transformation/fpgadataflow/vitis_build.py | 3 ++- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/prepare_ip.py b/src/finn/transformation/fpgadataflow/prepare_ip.py index 7c2dfd9beb..2ebd6310f0 100644 --- a/src/finn/transformation/fpgadataflow/prepare_ip.py +++ b/src/finn/transformation/fpgadataflow/prepare_ip.py @@ -46,11 +46,7 @@ def _codegen_single_node(node, model, fpgapart, clk): # get the path of the code generation directory code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen") # ensure that there is a directory - if ( - code_gen_dir == "" - or not os.path.isdir(code_gen_dir) - or not str(node.name) in code_gen_dir - ): + if code_gen_dir == "" or not os.path.isdir(code_gen_dir): code_gen_dir = make_build_dir( prefix="code_gen_ipgen_" + str(node.name) + "_" ) diff --git a/src/finn/transformation/fpgadataflow/vitis_build.py b/src/finn/transformation/fpgadataflow/vitis_build.py index 855b30fe95..97da4d4152 100644 --- a/src/finn/transformation/fpgadataflow/vitis_build.py +++ b/src/finn/transformation/fpgadataflow/vitis_build.py @@ -411,12 +411,13 @@ def apply(self, model): # Build each kernel individually sdp_nodes = model.get_nodes_by_op_type("StreamingDataflowPartition") for sdp_node in sdp_nodes: + prefix = sdp_node.name + "_" sdp_node = getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") kernel_model = ModelWrapper(dataflow_model_filename) kernel_model = kernel_model.transform(InsertFIFO()) kernel_model = kernel_model.transform(RemoveUnusedTensors()) - kernel_model = kernel_model.transform(GiveUniqueNodeNames()) + kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix)) kernel_model.save(dataflow_model_filename) kernel_model = kernel_model.transform( PrepareIP(self.fpga_part, self.period_ns) From 0d8896cfd5d638825b2cbd7faf64840edadd2e6f Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 12 Dec 2022 11:09:54 +0000 Subject: [PATCH 281/628] [Tests] Fix typo in ext_weights file and set verbose to true --- tests/end2end/test_ext_weights.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index e230f735d7..0a92c74a38 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -90,6 +90,7 @@ def test_end2end_ext_weights_build(): output_dir = make_build_dir("test_end2end_ext_weights_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, + verbose=True, folding_config_file=folding_config_file, synth_clk_period_ns=target_clk_ns, board=build_env["board"], @@ -113,7 +114,7 @@ def test_end2end_ext_weights_build(): @pytest.mark.board @pytest.mark.end2end -@pytest.mark.fail +@pytest.mark.xfail def test_end2end_ext_weights_dataset(): # make sure we have local copies of mnist dataset files subprocess.check_output(["mkdir", "-p", mnist_local]) From c60c27fbb7f874543ce1c9f348974109df83aac5 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 13 Dec 2022 11:30:13 +0000 Subject: [PATCH 282/628] [Tests] Add missing marker and clean up topk test --- tests/fpgadataflow/test_fpgadataflow_concat.py | 2 ++ 
tests/transformation/streamline/test_move_flatten_past_topk.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_concat.py b/tests/fpgadataflow/test_fpgadataflow_concat.py index 8488a34dff..5fff286e54 100644 --- a/tests/fpgadataflow/test_fpgadataflow_concat.py +++ b/tests/fpgadataflow/test_fpgadataflow_concat.py @@ -72,6 +72,7 @@ def forward(self, *args): @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.parametrize("idt", [DataType["INT4"]]) +@pytest.mark.fpgadataflow @pytest.mark.vivado @pytest.mark.slow def test_fpgadataflow_concat(exec_mode, idt): @@ -107,6 +108,7 @@ def test_fpgadataflow_concat(exec_mode, idt): assert (exp_out == ret_sim[oname]).all() +@pytest.mark.fpgadataflow @pytest.mark.vivado @pytest.mark.slow def test_fpgadataflow_concat_stitchedip(): diff --git a/tests/transformation/streamline/test_move_flatten_past_topk.py b/tests/transformation/streamline/test_move_flatten_past_topk.py index 83d7a28c05..d1478088e2 100644 --- a/tests/transformation/streamline/test_move_flatten_past_topk.py +++ b/tests/transformation/streamline/test_move_flatten_past_topk.py @@ -47,7 +47,7 @@ @pytest.mark.parametrize("data_layout", [DataLayout.NHWC, DataLayout.NCHW]) # batch size @pytest.mark.parametrize("batch_size", [1, 2]) -def test_move_flatten_past_affine(data_layout, batch_size): +def test_move_flatten_past_topk(data_layout, batch_size): if data_layout == DataLayout.NHWC: ishape = [batch_size, 1, 1, 1024] oshape = [batch_size, 1024] From fd9a8541977450208818abfe1efd05ab2a5a4a42 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 13 Dec 2022 15:07:08 +0100 Subject: [PATCH 283/628] [Util] remove redundant code from pyverilator utils --- src/finn/util/pyverilator.py | 95 ++++++++++++------------------------ 1 file changed, 32 insertions(+), 63 deletions(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index dd22272e64..287f923530 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -65,9 +65,32 @@ def file_to_basename(x): # are identical but in multiple directories (regslice_core.v) # remove duplicates from list by doing list -> set -> list + src_exts = [".v", ".sv"] + all_verilog_files = list( - set(filter(lambda x: x.endswith(".v") or x.endswith(".sv"), all_verilog_srcs)) + set( + filter( + lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs + ) + ) + ) + + verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" + os.makedirs(verilog_header_dir, exist_ok=True) + + # use custom version of axis infrastructure vh + # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) + custom_vh = pk.resource_filename( + "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" ) + shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") + for fn in all_verilog_srcs: + if fn.endswith(".vh"): + if "axis_infrastructure_v1_1_0.vh" in fn: + # skip, we use a custom version for this file without recursive gcd + continue + else: + shutil.copy(fn, verilog_header_dir) # remove all but one instances of regslice_core.v filtered_verilog_files = [] @@ -85,7 +108,12 @@ def file_to_basename(x): for vfile in filtered_verilog_files: with open(vfile) as rf: wf.write("//Added from " + vfile + "\n\n") - wf.write(rf.read()) + lines = rf.read() + for line in lines.split("\n"): + # break down too-long lines, Verilator complains otherwise + if len(line) > 20000: + line = line.replace("&", "\n&") + wf.write("\n" + line) return 
vivado_stitch_proj_dir @@ -227,12 +255,8 @@ def pyverilate_stitched_ip( if PyVerilator is None: raise ImportError("Installation of PyVerilator is required.") - vivado_stitch_proj_dir = model.get_metadata_prop("vivado_stitch_proj") - with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f: - all_verilog_srcs = f.read().split() - - def file_to_dir(x): - return os.path.dirname(os.path.realpath(x)) + vivado_stitch_proj_dir = make_single_verilog_file(model) + verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" def file_to_basename(x): return os.path.basename(os.path.realpath(x)) @@ -241,61 +265,6 @@ def file_to_basename(x): top_module_name = top_module_file_name.strip(".v") build_dir = make_build_dir("pyverilator_ipstitched_") - # dump all Verilog code to a single file - # this is because large models with many files require - # a verilator command line too long for bash on most systems - # NOTE: there are duplicates in this list, and some files - # are identical but in multiple directories (regslice_core.v) - - # remove duplicates from list by doing list -> set -> list - src_exts = [".v", ".sv"] - - all_verilog_src_files = list( - set( - filter( - lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs - ) - ) - ) - - verilog_header_dir = make_build_dir("pyverilator_vh_") - # use custom version of axis infrastructure vh - # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) - custom_vh = pk.resource_filename( - "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" - ) - shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") - for fn in all_verilog_srcs: - if fn.endswith(".vh"): - if "axis_infrastructure_v1_1_0.vh" in fn: - # skip, we use a custom version for this file without recursive gcd - continue - else: - shutil.copy(fn, verilog_header_dir) - - # remove all but one instances of regslice_core.v - filtered_verilog_files = [] - remove_entry = False - for vfile in all_verilog_src_files: - if "regslice_core" in vfile: - if not remove_entry: - filtered_verilog_files.append(vfile) - remove_entry = True - else: - filtered_verilog_files.append(vfile) - - # concatenate all verilog code into a single file - with open(vivado_stitch_proj_dir + "/" + top_module_file_name, "w") as wf: - for vfile in filtered_verilog_files: - with open(vfile) as rf: - wf.write("//Added from " + vfile + "\n\n") - lines = rf.read() - for line in lines.split("\n"): - # break down too-long lines, Verilator complains otherwise - if len(line) > 20000: - line = line.replace("&", "\n&") - wf.write("\n" + line) - verilator_args = [] # disable common verilator warnings that should be harmless but commonly occur # in large quantities for Vivado HLS-generated verilog code From ef5aa8bc79defb0e1f8bd380bf1d44451f83b366 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 13 Dec 2022 15:07:41 +0100 Subject: [PATCH 284/628] [Test] cover both python and cpp mode for largefifo_rtlsim --- tests/fpgadataflow/test_fifosizing.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 28b2c4ac0f..116df98d17 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -49,14 +49,19 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.slow @pytest.mark.vivado -@pytest.mark.parametrize("method", ["largefifo_rtlsim", "characterize"]) +@pytest.mark.parametrize( + "method", ["largefifo_rtlsim_python", 
"largefifo_rtlsim_cpp", "characterize"] +) def test_fifosizing_linear(method): + force_python_rtlsim = "python" in method + method_key = "largefifo_rtlsim" if "largefifo_rtlsim" in method else "characterize" tmp_output_dir = fetch_test_model("tfc") cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=True, - auto_fifo_strategy=method, + auto_fifo_strategy=method_key, target_fps=10000, + force_python_rtlsim=force_python_rtlsim, synth_clk_period_ns=10.0, board="Pynq-Z1", rtlsim_batch_size=100, From 2c5d07fa44e6b67ac7745e1f181178493734457b Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 19 Dec 2022 17:21:04 +0100 Subject: [PATCH 285/628] [Util] refactor verilator prep into two functions --- src/finn/util/pyverilator.py | 38 +++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 287f923530..a00899cf78 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -41,11 +41,29 @@ ) -def make_single_verilog_file(model): +def make_single_source_file(filtered_verilog_files, target_file): """Dump all Verilog code used by stitched IP into a single file. This is because large models with many files require a verilator command line too long for bash on most systems""" + # concatenate all verilog code into a single file + with open(target_file, "w") as wf: + for vfile in filtered_verilog_files: + with open(vfile) as rf: + wf.write("//Added from " + vfile + "\n\n") + lines = rf.read() + for line in lines.split("\n"): + # break down too-long lines, Verilator complains otherwise + if len(line) > 20000: + line = line.replace("&", "\n&") + wf.write("\n" + line) + + +def prepare_stitched_ip_for_verilator(model): + """Prepare sources from given stitched IP for verilator simulation, including + generating a single source file and replacing certain Vivado infrastructure + headers with Verilator-compatible ones""" + vivado_stitch_proj_dir = model.get_metadata_prop("vivado_stitch_proj") with open(vivado_stitch_proj_dir + "/all_verilog_srcs.txt", "r") as f: all_verilog_srcs = f.read().split() @@ -103,17 +121,9 @@ def file_to_basename(x): else: filtered_verilog_files.append(vfile) - # concatenate all verilog code into a single file - with open(vivado_stitch_proj_dir + "/" + top_module_file_name, "w") as wf: - for vfile in filtered_verilog_files: - with open(vfile) as rf: - wf.write("//Added from " + vfile + "\n\n") - lines = rf.read() - for line in lines.split("\n"): - # break down too-long lines, Verilator complains otherwise - if len(line) > 20000: - line = line.replace("&", "\n&") - wf.write("\n" + line) + target_file = vivado_stitch_proj_dir + "/" + top_module_file_name + make_single_source_file(filtered_verilog_files, target_file) + return vivado_stitch_proj_dir @@ -122,7 +132,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): driver to drive the input stream. 
Useful for FIFO sizing, latency and throughput measurement.""" - vivado_stitch_proj_dir = make_single_verilog_file(model) + vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) build_dir = make_build_dir("verilator_fifosim_") fifosim_cpp_fname = pk.resource_filename( "finn.qnn-data", "cpp/verilator_fifosim.cpp" @@ -255,7 +265,7 @@ def pyverilate_stitched_ip( if PyVerilator is None: raise ImportError("Installation of PyVerilator is required.") - vivado_stitch_proj_dir = make_single_verilog_file(model) + vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" def file_to_basename(x): From e809dab507684302123d87e38b6b89f4291c9935 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 19 Dec 2022 17:21:29 +0100 Subject: [PATCH 286/628] [HLSCustomOp] single-source + prep util fxns for node-by-node rtlsim --- .../custom_op/fpgadataflow/hlscustomop.py | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index f307be95c3..d1326607aa 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -43,6 +43,7 @@ pyverilate_get_liveness_threshold_cycles, ) from finn.util.hls import CallHLS +from finn.util.pyverilator import make_single_source_file from . import templates @@ -174,7 +175,7 @@ def get_all_verilog_paths(self): # default impl only returns the HLS verilog codegen dir return [verilog_path] - def get_all_verilog_filenames(self): + def get_all_verilog_filenames(self, abspath=False): "Return list of all Verilog files used for this node." verilog_files = [] @@ -182,7 +183,10 @@ def get_all_verilog_filenames(self): for verilog_path in verilog_paths: for f in os.listdir(verilog_path): if f.endswith(".v"): - verilog_files += [f] + if abspath: + verilog_files += [verilog_path + "/" + f] + else: + verilog_files += [f] return verilog_files def prepare_rtlsim(self): @@ -192,13 +196,18 @@ def prepare_rtlsim(self): if PyVerilator is None: raise ImportError("Installation of PyVerilator is required.") - verilog_paths = self.get_all_verilog_paths() - verilog_files = self.get_all_verilog_filenames() + + verilog_files = self.get_all_verilog_filenames(abspath=True) + single_src_dir = make_build_dir("rtlsim_" + self.onnx_node.name + "_") + tmp_build_dir = make_build_dir("pyverilator_" + self.onnx_node.name + "_") + target_file = single_src_dir + "/" + self.get_verilog_top_module_name() + ".v" + make_single_source_file(verilog_files, target_file) + # build the Verilator emu library sim = PyVerilator.build( - verilog_files, - build_dir=make_build_dir("pyverilator_" + self.onnx_node.name + "_"), - verilog_path=verilog_paths, + self.get_verilog_top_module_name() + ".v", + build_dir=tmp_build_dir, + verilog_path=[single_src_dir], trace_depth=get_rtlsim_trace_depth(), top_module_name=self.get_verilog_top_module_name(), ) From 111c6ef02aee4baa715574914d20cbcee449b7ca Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 29 Dec 2022 12:07:14 +0000 Subject: [PATCH 287/628] [Tests] Fix ipstitch test for VitisBuild --- tests/fpgadataflow/test_fpgadataflow_ipstitch.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 80f2d724ad..325470a6d6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py 
@@ -348,6 +348,7 @@ def test_fpgadataflow_ipstitch_vitis_end2end(board, period_ns, extw): model = load_test_checkpoint_or_skip(sdp_node.get_nodeattr("model")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(fpga_part, period_ns)) + model = model.transform(HLSSynthIP()) model = model.transform(VitisBuild(fpga_part, period_ns, platform)) model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_vitis.onnx") assert model.get_metadata_prop("platform") == "alveo" From fe6dca926fa6f1ba1f044cb05a752c586ed3df3a Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 30 Dec 2022 15:27:23 +0000 Subject: [PATCH 288/628] [Docs] Fix docstrings in modules --- .../finn.analysis.fpgadataflow.rst | 9 ++++-- docs/finn/source_code/modules.rst | 0 .../fpgadataflow/matrixvectoractivation.py | 2 ++ src/finn/custom_op/fpgadataflow/pool_batch.py | 11 +++---- .../fpgadataflow/thresholding_batch.py | 2 ++ .../fpgadataflow/vectorvectoractivation.py | 2 ++ .../fpgadataflow/insert_fifo.py | 16 +++++----- .../fpgadataflow/set_fifo_depths.py | 30 ++++++++++++------- .../fpgadataflow/set_folding.py | 19 +++++++----- .../fpgadataflow/vitis_build.py | 20 ++++++------- .../qonnx/convert_qonnx_to_finn.py | 12 ++++---- .../qonnx/quant_act_to_multithreshold.py | 15 +++++----- src/finn/util/vcd.py | 16 +++++----- 13 files changed, 89 insertions(+), 65 deletions(-) delete mode 100644 docs/finn/source_code/modules.rst diff --git a/docs/finn/source_code/finn.analysis.fpgadataflow.rst b/docs/finn/source_code/finn.analysis.fpgadataflow.rst index b52e994ee6..57472cb670 100644 --- a/docs/finn/source_code/finn.analysis.fpgadataflow.rst +++ b/docs/finn/source_code/finn.analysis.fpgadataflow.rst @@ -30,6 +30,7 @@ finn.analysis.fpgadataflow.floorplan\_params :undoc-members: :show-inheritance: + finn.analysis.fpgadataflow.hls\_synth\_res\_estimation ------------------------------------------------------------- @@ -38,14 +39,15 @@ finn.analysis.fpgadataflow.hls\_synth\_res\_estimation :undoc-members: :show-inheritance: - finn.analysis.fpgadataflow.op\_and\_param\_counts - -------------------------------------------------- +finn.analysis.fpgadataflow.op\_and\_param\_counts +-------------------------------------------------- - .. automodule:: finn.analysis.fpgadataflow.op_and_param_counts +.. automodule:: finn.analysis.fpgadataflow.op_and_param_counts :members: :undoc-members: :show-inheritance: + finn.analysis.fpgadataflow.post\_synth\_res -------------------------------------------------- @@ -54,6 +56,7 @@ finn.analysis.fpgadataflow.post\_synth\_res :undoc-members: :show-inheritance: + finn.analysis.fpgadataflow.res\_estimation ------------------------------------------------- diff --git a/docs/finn/source_code/modules.rst b/docs/finn/source_code/modules.rst deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index df9d1f1e70..9f34eb1515 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -702,10 +702,12 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): of weights. 
Arguments: + * weights : numpy array with weights to be put into the file * weight_file_mode : one of {hls_header, decoupled_verilog_dat, decoupled_runtime} * weight_file_name : filename for the weight file to be generated + """ # convert weights into hlslib-compatible format weight_tensor = self.get_hls_compatible_weight_tensor(weights) diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index 91cd537bae..813f13e504 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -42,12 +42,13 @@ class Pool_Batch(HLSCustomOp): Output shape (BatchSize,OutImgDim,OutImgDim,Channels) Notes: - # The input shape was chosen to be compatible with im2col (only true when there - is not folding). - # The actual data layout produced by the hlslib kernels is different - for depthwise ops. - * depthwise SWG: (1, OFMDim, OFMDim, IFMChannels/PE, K, K, PE) + * The input shape was chosen to be compatible with im2col (only true when there + is not folding). + * The actual data layout produced by the hlslib kernels is different + for depthwise ops. + + * depthwise SWG: (1, OFMDim, OFMDim, IFMChannels/PE, K, K, PE) Channels can be folded using PE (SIMD from the input perspective) """ diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index f2cc64668d..1bb37c0fde 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -354,10 +354,12 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): run-time reconfig of weights. Arguments: + * weights : numpy array with weights to be put into the file * weight_file_mode : one of {hls_header, decoupled_verilog_dat, decoupled_runtime} * weight_file_name : filename for the weight file to be generated + """ threshold_tensor = self.get_hls_compatible_threshold_tensor(weights) tdt = self.get_weight_datatype() diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index a411d245a9..d5e29ca22a 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -410,10 +410,12 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): of weights. Arguments: + * weights : numpy array with weights to be put into the file * weight_file_mode : one of {hls_header, decoupled_verilog_dat, decoupled_runtime} * weight_file_name : filename for the weight file to be generated + """ # convert weights into hlslib-compatible format weight_tensor = self.get_hls_compatible_weight_tensor(weights) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 38c2d60c9a..0546643d12 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -71,13 +71,15 @@ class InsertFIFO(Transformation): of the subsequent node. max() of these two values sets the FIFO depth. Constructor arguments: - - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of - Verilog FIFOs (Q_srl.v) - - vivado_ram_style : the StreamingFIFO.ram_style attribute to be used for - large FIFOs implemented by Vivado - - create_shallow_fifos : Normally, shallow-depth (<=2) FIFOs won't be created since - HLS streaming interfaces already have a degree of buffering. 
- Override with this parameter. + + :parameter max_qsrl_depth: FIFOs deeper than this will use Vivado IP + instead of Verilog FIFOs (Q_srl.v) + :parameter vivado_ram_style: the StreamingFIFO.ram_style attribute + to be used for large FIFOs implemented by Vivado + :parameter create_shallow_fifos: Normally, shallow-depth (<=2) FIFOs + won't be created since HLS streaming interfaces + already have a degree of buffering. + Override with this parameter. The other node attributes necessary to create a FIFO node are taken from the diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 5b3ead6d67..5f3aed500c 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -125,14 +125,17 @@ class CapConvolutionFIFODepths(Transformation): constructor flag is set. Constructor arguments: - - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of - Verilog FIFOs (Q_srl.v) + + :parameter max_qsrl_depth: FIFOs deeper than this will use Vivado IP + instead of Verilog FIFOs (Q_srl.v) Assumed input graph properties: + - all nodes are fpgadataflow nodes - FIFOs inserted with InsertAndSetFIFODepths Output: + - graph with smaller-depth FIFOs for convolutions Background: @@ -188,22 +191,25 @@ class InsertAndSetFIFODepths(Transformation): throughput in the created accelerator. Constructor arguments: - - clk_ns : clock period (used for IP preparation) - - max_qsrl_depth : FIFOs deeper than this will use Vivado IP instead of - Verilog FIFOs (Q_srl.v) - - max_depth : how deep the "max"-sized FIFOs initially inserted will be - if set to None, use the tensor size as the depth - - swg_exception : call CapConvolutionFIFODepths to make convolution FIFOs - smaller where appropriate - - vivado_ram_style : the StreamingFIFO.ram_style attribute to be used for - large FIFOs implemented by Vivado afterwards + + :parameter clk_ns: clock period (used for IP preparation) + :parameter max_qsrl_depth: FIFOs deeper than this will use Vivado IP + instead of Verilog FIFOs (Q_srl.v) + :parameter max_depth: how deep the "max"-sized FIFOs initially inserted + will be. If set to None, use the tensor size as the depth + :parameter swg_exception: call CapConvolutionFIFODepths to make convolution FIFOs + smaller where appropriate + :parameter vivado_ram_style: the StreamingFIFO.ram_style attribute to be used + for large FIFOs implemented by Vivado afterwards Assumed input graph properties: + - all nodes are fpgadataflow nodes - no FIFOs inserted, - (inFIFODepths/outFIFODepths attrs will be ignored) Output: + - graph with appropriate-depth FIFOs inserted Background: @@ -211,12 +217,14 @@ class InsertAndSetFIFODepths(Transformation): necessary to insert FIFOs between them to prevent stalls due to bursty behavior. 
The sizes of those FIFOs are hard to predict analytically, so we do the following: + - insert deep (=tensor size) FIFOs between all fpgadataflow nodes - create stitched design - run through rtlsim with stream of multiple random input images (to fill pipeline) - keep track of observed maximum occupancy for each FIFO during rtlsim - when sim finished, update each FIFO depth to maximum observed occupancy and set inFIFODepths/outFIFODepths attrs to 0 on relevant nodes + """ def __init__( diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index e24e24f1f8..2301fccdd4 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -62,17 +62,20 @@ class SetFolding(Transformation): Notable exceptions and special behavior: - * When folding dense convolution/FC compute engines ("MVAU"/MatrixVectorActivation), + When folding dense convolution/FC compute engines ("MVAU"/MatrixVectorActivation), which have two attributes (PE and SIMD): - * first increases SIMD while weight stream width per PE is <= mvau_wwidth_max - (configurable in the SetFolding initializer, defaults to 36) - * then increases PE until the target is met or max PE reached - * When folding depthwise convolutions ("VVAU"/VectorVectorActivation) + * first increases SIMD while weight stream width per PE is <= mvau_wwidth_max + (configurable in the SetFolding initializer, defaults to 36) + * then increases PE until the target is met or max PE reached + + When folding depthwise convolutions ("VVAU"/VectorVectorActivation) or spatial reduction ops (Pool_Batch): - * the producer of the node is expected to be a ConvolutionInputGenerator - with depthwise=1, whose SIMD value will be set equal to the PE value of - its consumer node + + * the producer of the node is expected to be a ConvolutionInputGenerator + with depthwise=1, whose SIMD value will be set equal to the PE value of + its consumer node + """ def __init__( diff --git a/src/finn/transformation/fpgadataflow/vitis_build.py b/src/finn/transformation/fpgadataflow/vitis_build.py index 97da4d4152..e0a5666000 100644 --- a/src/finn/transformation/fpgadataflow/vitis_build.py +++ b/src/finn/transformation/fpgadataflow/vitis_build.py @@ -358,16 +358,16 @@ class VitisBuild(Transformation): """Best-effort attempt at building the accelerator with Vitis. It assumes the model has only fpgadataflow nodes - fpga_part: string identifying the target FPGA - period_ns: target clock period - platform: target Alveo platform, one of ["U50", "U200", "U250", "U280"] - strategy: Vitis optimization strategy - enable_debug: add Chipscope to all AXI interfaces - floorplan_file: path to a JSON containing a dictionary with SLR assignments - for each node in the ONNX graph. Must be parse-able by - the ApplyConfig transform. - enable_link: enable linking kernels (.xo files), otherwise just synthesize - them independently. + :parameter fpga_part: string identifying the target FPGA + :parameter period_ns: target clock period + :parameter platform: target Alveo platform, one of ["U50", "U200", "U250", "U280"] + :parameter strategy: Vitis optimization strategy + :parameter enable_debug: add Chipscope to all AXI interfaces + :parameter floorplan_file: path to a JSON containing a dictionary with + SLR assignments for each node in the ONNX graph. + Must be parse-able by the ApplyConfig transform. 
+ :parameter enable_link: enable linking kernels (.xo files), + otherwise just synthesize them independently. """ def __init__( diff --git a/src/finn/transformation/qonnx/convert_qonnx_to_finn.py b/src/finn/transformation/qonnx/convert_qonnx_to_finn.py index 967a127636..34f11d1e95 100644 --- a/src/finn/transformation/qonnx/convert_qonnx_to_finn.py +++ b/src/finn/transformation/qonnx/convert_qonnx_to_finn.py @@ -56,12 +56,12 @@ class ConvertQONNXtoFINN(Transformation): is not converted to a MultiThreshold node. :param filter_function: Each candidate Quant and BinaryQant node is first evaluated - by this function. If the function returns False, - then the node is not converted to a MultiTrheshold node. - The function is given the model and candidate node as parameters. - Per default a filter function is inserted, which disables the conversion of - Quant nodes, which have a bit width of larger than 8. - Defaults to: default_filter_function_generator(max_multithreshold_bit_width=8) + by this function. If the function returns False, + then the node is not converted to a MultiTrheshold node. + The function is given the model and candidate node as parameters. + Per default a filter function is inserted, which disables the conversion of + Quant nodes, which have a bit width of larger than 8. + Defaults to: default_filter_function_generator(max_multithreshold_bit_width=8) """ def __init__( diff --git a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py index 77025ecdf5..db3c5dbfe8 100644 --- a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py +++ b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py @@ -66,8 +66,7 @@ def filter_function(model, q_node): class ConvertQuantActToMultiThreshold(Transformation): - """ - Converts Quant nodes in the activation path to MultiThreshold nodes. + """Converts Quant nodes in the activation path to MultiThreshold nodes. The optional keyword argument `filter_function` presents a way to control which Quant and BipolarQuant nodes in the activation path @@ -75,12 +74,12 @@ class ConvertQuantActToMultiThreshold(Transformation): is not converted to a MultiThreshold node. :param filter_function: Each candidate Quant and BinaryQant node is first evaluated - by this function. If the function returns False, - then the node is not converted to a MultiTrheshold node. - The function is given the model and candidate node as parameters. - Per default a filter function is inserted, which disables the conversion of - Quant nodes, which have a bit width of larger than 8. - Defaults to: default_filter_function_generator(max_multithreshold_bit_width=8) + by this function. If the function returns False, + then the node is not converted to a MultiTrheshold node. + The function is given the model and candidate node as parameters. + Per default a filter function is inserted, which disables the conversion of + Quant nodes, which have a bit width of larger than 8. 
+ Defaults to: default_filter_function_generator(max_multithreshold_bit_width=8) """ def __init__( diff --git a/src/finn/util/vcd.py b/src/finn/util/vcd.py index aaeb3ab920..1f77276d5a 100644 --- a/src/finn/util/vcd.py +++ b/src/finn/util/vcd.py @@ -101,19 +101,21 @@ def get_stream_if_stats(vcd_file, if_base_name): : (, ), where is the combination of (V)alid/(R)eady values, - is the approximate number of rising clock edges spent in - , and is the fraction of to total + is the approximate number of rising clock edges spent in , + and is the fraction of to total amount of time recorded by the trace. Example: - {"{'V': 0, 'R': 0}": (5, 0.0006060606060606061), - "{'V': 1, 'R': 0}": (0, 0.0), - "{'V': 0, 'R': 1}": (7605, 0.9218181818181819), - "{'V': 1, 'R': 1}": (640, 0.07757575757575758)} - + { + "{'V': 0, 'R': 0}": (5, 0.0006060606060606061), + "{'V': 1, 'R': 0}": (0, 0.0), + "{'V': 0, 'R': 1}": (7605, 0.9218181818181819), + "{'V': 1, 'R': 1}": (640, 0.07757575757575758) + } Here we can see the stream was transmitting values 7.7% of the time, and 9.2% of the time there was no incoming data (valid 0, ready 1) """ + if_valid = if_base_name + vname if_ready = if_base_name + rname v = VCDVCD(vcd_file, signals=[if_valid], store_tvs=True) From 888621af1f052c4e18210f5e47b3511e0a170bc1 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 30 Dec 2022 17:09:16 +0000 Subject: [PATCH 289/628] [Docs] Update source code rsts for new features --- docs/finn/source_code/finn.core.rst | 9 +++ .../finn.custom_op.fpgadataflow.rst | 58 ++++++++++++++---- docs/finn/source_code/finn.custom_op.rst | 1 + .../finn.transformation.fpgadataflow.rst | 19 ++++++ docs/finn/source_code/finn.transformation.rst | 44 +++++++++++--- docs/finn/source_code/finn.util.rst | 60 ++++++++++++++++--- .../qonnx.custom_op.channels_last.rst | 41 +++++++++++++ 7 files changed, 205 insertions(+), 27 deletions(-) create mode 100644 docs/finn/source_code/qonnx.custom_op.channels_last.rst diff --git a/docs/finn/source_code/finn.core.rst b/docs/finn/source_code/finn.core.rst index 4e3de458e1..afa1ecffa0 100644 --- a/docs/finn/source_code/finn.core.rst +++ b/docs/finn/source_code/finn.core.rst @@ -37,6 +37,15 @@ qonnx.core.modelwrapper :undoc-members: :show-inheritance: +qonnx.core.onnx\_exec +--------------------------- + +.. automodule:: qonnx.core.onnx_exec + :members: + :undoc-members: + :show-inheritance: + + finn.core.onnx\_exec --------------------------- diff --git a/docs/finn/source_code/finn.custom_op.fpgadataflow.rst b/docs/finn/source_code/finn.custom_op.fpgadataflow.rst index cc56ea603e..fdcf44c6d9 100644 --- a/docs/finn/source_code/finn.custom_op.fpgadataflow.rst +++ b/docs/finn/source_code/finn.custom_op.fpgadataflow.rst @@ -8,7 +8,7 @@ HLS Custom Op Nodes Base Class ---------- -.. automodule:: finn.custom_op.fpgadataflow +.. automodule:: finn.custom_op.fpgadataflow.hlscustomop :members: :undoc-members: :show-inheritance: @@ -29,9 +29,25 @@ finn.custom\_op.fpgadataflow.channelwise\_op\_batch :undoc-members: :show-inheritance: +finn.custom\_op.fpgadataflow.checksum +-------------------------------------- + +.. automodule:: finn.custom_op.fpgadataflow.checksum + :members: + :undoc-members: + :show-inheritance: + +finn.custom\_op.fpgadataflow.concat +------------------------------------- + +.. 
automodule:: finn.custom_op.fpgadataflow.concat + :members: + :undoc-members: + :show-inheritance: + finn.custom\_op.fpgadataflow.convolutioninputgenerator -------------------------------------------------------------- +-------------------------------------------------------- .. automodule:: finn.custom_op.fpgadataflow.convolutioninputgenerator :members: @@ -46,6 +62,15 @@ finn.custom\_op.fpgadataflow.convolutioninputgenerator1d :undoc-members: :show-inheritance: + +finn.custom\_op.fpgadataflow.convolutioninputgenerator\_rtl +------------------------------------------------------------ + +.. automodule:: finn.custom_op.fpgadataflow.convolutioninputgenerator_rtl + :members: + :undoc-members: + :show-inheritance: + finn.custom\_op.fpgadataflow.downsampler ----------------------------------------- @@ -62,6 +87,16 @@ finn.custom\_op.fpgadataflow.duplicatestreams\_batch :undoc-members: :show-inheritance: + +finn.custom\_op.fpgadataflow.eltwise +------------------------------------- + +.. automodule:: finn.custom_op.fpgadataflow.eltwise + :members: + :undoc-members: + :show-inheritance: + + finn.custom\_op.fpgadataflow.fmpadding\_batch ----------------------------------------------- @@ -79,7 +114,7 @@ finn.custom\_op.fpgadataflow.globalaccpool\_batch :show-inheritance: finn.custom\_op.fpgadataflow.iodma ------------------------------------------------ +------------------------------------ .. automodule:: finn.custom_op.fpgadataflow.iodma :members: @@ -102,6 +137,15 @@ finn.custom\_op.fpgadataflow.lookup :undoc-members: :show-inheritance: +finn.custom\_op.fpgadataflow.matrixvectoractivation +----------------------------------------------------------- + +.. automodule:: finn.custom_op.fpgadataflow.matrixvectoractivation + :members: + :undoc-members: + :show-inheritance: + + finn.custom\_op.fpgadataflow.pool\_batch ----------------------------------------------- @@ -127,14 +171,6 @@ finn.custom\_op.fpgadataflow.streamingdatawidthconverter\_batch :undoc-members: :show-inheritance: -finn.custom\_op.fpgadataflow.matrixvectoractivation ------------------------------------------------------------ - -.. automodule:: finn.custom_op.fpgadataflow.matrixvectoractivation - :members: - :undoc-members: - :show-inheritance: - finn.custom\_op.fpgadataflow.streamingfifo ------------------------------------------------- diff --git a/docs/finn/source_code/finn.custom_op.rst b/docs/finn/source_code/finn.custom_op.rst index 20d90a7bb5..cdbe957c71 100644 --- a/docs/finn/source_code/finn.custom_op.rst +++ b/docs/finn/source_code/finn.custom_op.rst @@ -9,6 +9,7 @@ Submodules :maxdepth: 2 finn.custom_op.fpgadataflow + qonnx.custom_op.channels_last qonnx.custom_op.general Custom Op Nodes diff --git a/docs/finn/source_code/finn.transformation.fpgadataflow.rst b/docs/finn/source_code/finn.transformation.fpgadataflow.rst index b1e7075bdc..9f8ec07930 100644 --- a/docs/finn/source_code/finn.transformation.fpgadataflow.rst +++ b/docs/finn/source_code/finn.transformation.fpgadataflow.rst @@ -62,6 +62,14 @@ finn.transformation.fpgadataflow.create\_stitched\_ip :undoc-members: :show-inheritance: +finn.transformation.fpgadataflow.derive\_characteristic +------------------------------------------------------------ + +.. 
automodule:: finn.transformation.fpgadataflow.derive_characteristic + :members: + :undoc-members: + :show-inheritance: + finn.transformation.fpgadataflow.externalize\_params ------------------------------------------------------------ @@ -103,6 +111,17 @@ finn.transformation.fpgadataflow.insert\_fifo :undoc-members: :show-inheritance: + +finn.transformation.fpgadataflow.insert\_hook +---------------------------------------------------- + +.. automodule:: finn.transformation.fpgadataflow.insert_hook + :members: + :undoc-members: + :show-inheritance: + + + finn.transformation.fpgadataflow.insert\_iodma ---------------------------------------------------- diff --git a/docs/finn/source_code/finn.transformation.rst b/docs/finn/source_code/finn.transformation.rst index 6a28eeedb2..f42b595a50 100644 --- a/docs/finn/source_code/finn.transformation.rst +++ b/docs/finn/source_code/finn.transformation.rst @@ -20,7 +20,7 @@ Transformation Passes Base Class ---------- -.. automodule:: finn.transformation +.. automodule:: qonnx.transformation.base :members: :undoc-members: :show-inheritance: @@ -42,7 +42,7 @@ qonnx.transformation.bipolar\_to\_xnor :show-inheritance: qonnx.transformation.change\_3d\_tensors\_to\_4d ------------------------------------------------- +------------------------------------------------- .. automodule:: qonnx.transformation.change_3d_tensors_to_4d :members: @@ -57,8 +57,18 @@ qonnx.transformation.change\_datalayout :undoc-members: :show-inheritance: + +qonnx.transformation.channels\_last +-------------------------------------------- + +.. automodule:: qonnx.transformation.channels_last + :members: + :undoc-members: + :show-inheritance: + + qonnx.transformation.create\_generic\_partitions ------------------------------------------------- +------------------------------------------------- .. automodule:: qonnx.transformation.create_generic_partitions :members: @@ -171,13 +181,22 @@ qonnx.transformation.merge\_onnx\_models :show-inheritance: -finn.transformation.move\_reshape +qonnx.transformation.quant\_constant\_folding +---------------------------------------------- + +.. automodule:: qonnx.transformation.quant_constant_folding + :members: + :undoc-members: + :show-inheritance: + + +qonnx.transformation.rebalance\_conv ---------------------------------------- -.. automodule:: finn.transformation.move_reshape - :members: - :undoc-members: - :show-inheritance: +.. automodule:: qonnx.transformation.rebalance_conv + :members: + :undoc-members: + :show-inheritance: qonnx.transformation.remove ------------------------------------- @@ -186,3 +205,12 @@ qonnx.transformation.remove :members: :undoc-members: :show-inheritance: + + +finn.transformation.move\_reshape +---------------------------------------- + +.. automodule:: finn.transformation.move_reshape + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/finn/source_code/finn.util.rst b/docs/finn/source_code/finn.util.rst index 8dffa01632..7ba3b252ab 100644 --- a/docs/finn/source_code/finn.util.rst +++ b/docs/finn/source_code/finn.util.rst @@ -14,6 +14,15 @@ qonnx.util.basic :show-inheritance: +qonnx.util.cleanup +---------------------- + +.. automodule:: qonnx.util.cleanup + :members: + :undoc-members: + :show-inheritance: + + qonnx.util.config -------------------- @@ -22,6 +31,40 @@ qonnx.util.config :undoc-members: :show-inheritance: +qonnx.util.exec\_qonnx +---------------------- + +.. 
automodule:: qonnx.util.exec_qonnx + :members: + :undoc-members: + :show-inheritance: + +qonnx.util.inference\_cost +-------------------------- + +.. automodule:: qonnx.util.inference_cost + :members: + :undoc-members: + :show-inheritance: + +qonnx.util.onnx +------------------- + +.. automodule:: qonnx.util.onnx + :members: + :undoc-members: + :show-inheritance: + + +qonnx.util.to\_channels\_last +------------------------------ + +.. automodule:: qonnx.util.to_channels_last + :members: + :undoc-members: + :show-inheritance: + + finn.util.basic ---------------------- @@ -64,6 +107,15 @@ finn.util.gdrive :undoc-members: :show-inheritance: +finn.util.hls +--------------- + +.. automodule:: finn.util.hls + :members: + :undoc-members: + :show-inheritance: + + finn.util.imagenet ----------------------------- @@ -72,14 +124,6 @@ finn.util.imagenet :undoc-members: :show-inheritance: -qonnx.util.onnx ---------------------- - -.. automodule:: qonnx.util.onnx - :members: - :undoc-members: - :show-inheritance: - finn.util.platforms -------------------- diff --git a/docs/finn/source_code/qonnx.custom_op.channels_last.rst b/docs/finn/source_code/qonnx.custom_op.channels_last.rst new file mode 100644 index 0000000000..3ad10d94a6 --- /dev/null +++ b/docs/finn/source_code/qonnx.custom_op.channels_last.rst @@ -0,0 +1,41 @@ +************************** +Custom Op - Channels Last +************************** + +Channels Last Custom Ops +========================= + +qonnx.custom\_op.channels\_last.base\_wrapped\_op +-------------------------------------------------- + +.. automodule:: qonnx.custom_op.channels_last.base_wrapped_op + :members: + :undoc-members: + :show-inheritance: + + +qonnx.custom\_op.channels\_last.batch\_normalization +------------------------------------------------------ + +.. automodule:: qonnx.custom_op.channels_last.batch_normalization + :members: + :undoc-members: + :show-inheritance: + + +qonnx.custom\_op.channels\_last.conv +-------------------------------------- + +.. automodule:: qonnx.custom_op.channels_last.conv + :members: + :undoc-members: + :show-inheritance: + + +qonnx.custom\_op.channels\_last.max\_pool +------------------------------------------ + +.. 
automodule:: qonnx.custom_op.channels_last.max_pool + :members: + :undoc-members: + :show-inheritance: From 99a91584ed816eef6a9588f3c730bf53fb5b68a8 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 5 Jan 2023 11:03:17 +0000 Subject: [PATCH 290/628] [CustomOp] Fix setting of NumReps in Thresholding and N in Eltwise node --- src/finn/custom_op/fpgadataflow/eltwise.py | 2 +- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index d6284750c7..68ed6546c7 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -398,7 +398,7 @@ def docompute(self): "StreamingEltwise", self.get_nodeattr("NumChannels"), self.get_nodeattr("PE"), - self.get_number_output_values(), + int(np.prod(self.get_folded_output_shape()[:-2])), slice_in0, slice_in1, slice_out, diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index f2cc64668d..ec57423767 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -600,8 +600,12 @@ def global_includes(self): # TODO check and add whatever missing def defines(self, var): - numInputVectors = list(self.get_nodeattr("numInputVectors")) - numReps = int(np.prod(numInputVectors)) + if self.get_nodeattr("mem_mode") == "const": + numReps = 1 + else: + numInputVectors = list(self.get_nodeattr("numInputVectors")) + numReps = int(np.prod(numInputVectors)) + self.code_gen_dict["$DEFINES$"] = [ """#define NumChannels1 {}\n #define PE1 {}\n #define numReps {}""".format( self.get_nodeattr("NumChannels"), From a326bea974f8768d2d5959e63a606c3a27554dba Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 5 Jan 2023 15:17:38 +0000 Subject: [PATCH 291/628] [GHA] Update docker image testing to ubuntu 20.04 --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 4374111f22..2c91a0a83b 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -7,7 +7,7 @@ on: jobs: docker: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - name: checkout From c68110a35be161926a925c02e552fd7200dd4d64 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 5 Jan 2023 15:20:04 +0000 Subject: [PATCH 292/628] [GHA] Update quicktest to ubuntu 20.04 --- .github/workflows/quicktest-dev-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index ec92c84665..a726ab584f 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -11,7 +11,7 @@ jobs: test: name: Run quicktest on PR branch - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 steps: - name: checkout From b3282953fdd216dad3306beb021e5a796221381f Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Jan 2023 15:37:15 +0000 Subject: [PATCH 293/628] [requirements] Remove future python package --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9038a5e817..92601e7ecc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,6 @@ bitstring==3.1.7 clize==4.1.1 dataclasses-json==0.5.7 docrep==0.2.7 -future==0.18.2 gspread==3.6.0 numpy==1.22.0 onnx==1.11.0 From 
7654dea277cbafc2c0571b72f106cf77a1908dc9 Mon Sep 17 00:00:00 2001 From: icolbert Date: Fri, 6 Jan 2023 08:41:45 -0800 Subject: [PATCH 294/628] Adding new function attribute to MVAU and VVAU --- .../fpgadataflow/matrixvectoractivation.py | 14 ++++++++++++++ .../fpgadataflow/vectorvectoractivation.py | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index ed19b93bb2..6244bbc8e7 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -650,6 +650,20 @@ def minimize_accumulator_width(self, model): self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] + def minimize_weight_bit_width(self, model): + weights = model.get_initializer(self.onnx_node.input[1]) + w_min = weights.min() + w_max = weights.max() + if w_min < 0: + if abs(w_min) > w_max: + wdt = DataType.get_smallest_possible(w_min) + else: + wdt = DataType.get_smallest_possible(-w_max - 1) + else: + wdt = DataType.get_smallest_possible(w_max) + self.set_nodeattr("weightDataType", wdt.name) + return DataType[self.get_nodeattr("weightDataType")] + def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): """Convert the original numpy weight matrix orig_weight_matrix into a form suitable for passing to the hlslib call: diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index d5216a8711..665ff71810 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -169,6 +169,20 @@ def minimize_accumulator_width(self, model): self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] + def minimize_weight_bit_width(self, model): + weights = model.get_initializer(self.onnx_node.input[1]) + w_min = weights.min() + w_max = weights.max() + if w_min < 0: + if abs(w_min) > w_max: + wdt = DataType.get_smallest_possible(w_min) + else: + wdt = DataType.get_smallest_possible(-w_max - 1) + else: + wdt = DataType.get_smallest_possible(w_max) + self.set_nodeattr("weightDataType", wdt.name) + return DataType[self.get_nodeattr("weightDataType")] + def calc_wmem(self): """Calculates and returns WMEM.""" ch = self.get_nodeattr("Channels") From f353feffc71a7918c75b1b91e11e111dd7ced539 Mon Sep 17 00:00:00 2001 From: icolbert Date: Fri, 6 Jan 2023 13:36:28 -0800 Subject: [PATCH 295/628] Adding check for runtime_writeable_weights --- .../fpgadataflow/matrixvectoractivation.py | 23 +++++++++++-------- .../fpgadataflow/vectorvectoractivation.py | 23 +++++++++++-------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 6244bbc8e7..a1dff7a0ad 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -651,17 +651,20 @@ def minimize_accumulator_width(self, model): return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): - weights = model.get_initializer(self.onnx_node.input[1]) - w_min = weights.min() - w_max = weights.max() - if w_min < 0: - if abs(w_min) > w_max: - wdt = DataType.get_smallest_possible(w_min) + """Minimize the bit width based on the values of the weights""" 
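+        # Editorial aside, not part of the original patch: when weights are signed
+        # (w_min < 0) the positive extreme must also fit, so for |w_min| <= w_max the
+        # type is derived from -w_max - 1. E.g. weights in [-3, 7] give
+        # DataType.get_smallest_possible(-8), i.e. INT4 with range [-8, 7].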
+ runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 0 + if runtime_writable: + weights = model.get_initializer(self.onnx_node.input[1]) + w_min = weights.min() + w_max = weights.max() + if w_min < 0: + if abs(w_min) > w_max: + wdt = DataType.get_smallest_possible(w_min) + else: + wdt = DataType.get_smallest_possible(-w_max - 1) else: - wdt = DataType.get_smallest_possible(-w_max - 1) - else: - wdt = DataType.get_smallest_possible(w_max) - self.set_nodeattr("weightDataType", wdt.name) + wdt = DataType.get_smallest_possible(w_max) + self.set_nodeattr("weightDataType", wdt.name) return DataType[self.get_nodeattr("weightDataType")] def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 665ff71810..5d97244e5b 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -170,17 +170,20 @@ def minimize_accumulator_width(self, model): return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): - weights = model.get_initializer(self.onnx_node.input[1]) - w_min = weights.min() - w_max = weights.max() - if w_min < 0: - if abs(w_min) > w_max: - wdt = DataType.get_smallest_possible(w_min) + """Minimize the bit width based on the values of the weights""" + runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 0 + if runtime_writable: + weights = model.get_initializer(self.onnx_node.input[1]) + w_min = weights.min() + w_max = weights.max() + if w_min < 0: + if abs(w_min) > w_max: + wdt = DataType.get_smallest_possible(w_min) + else: + wdt = DataType.get_smallest_possible(-w_max - 1) else: - wdt = DataType.get_smallest_possible(-w_max - 1) - else: - wdt = DataType.get_smallest_possible(w_max) - self.set_nodeattr("weightDataType", wdt.name) + wdt = DataType.get_smallest_possible(w_max) + self.set_nodeattr("weightDataType", wdt.name) return DataType[self.get_nodeattr("weightDataType")] def calc_wmem(self): From 1b8bf757c112285c1f98b60ba2d475c53643aa6a Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 9 Jan 2023 13:14:01 +0000 Subject: [PATCH 296/628] [DataPacking] Delete check for np.str --- src/finn/util/data_packing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py index 65478d2540..797dad32a2 100644 --- a/src/finn/util/data_packing.py +++ b/src/finn/util/data_packing.py @@ -265,7 +265,7 @@ def numpy_to_hls_code( # define a function to convert a single element into a C++ init string # a single element can be a hex string if we are using packing def elem2str(x): - if type(x) == str or type(x) == np.str_ or type(x) == np.str: + if type(x) == str or type(x) == np.str_: return '%s("%s", 16)' % (hls_dtype, x) elif type(x) == np.float32: if dtype.is_integer(): From 065eb68f82d62d4d71ca0efd68b216f7875b0cfd Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 9 Jan 2023 14:35:51 +0000 Subject: [PATCH 297/628] [Docker] add ignore installed for jupyter package --- docker/Dockerfile.finn | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index b3c669ec10..dbafba2476 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -84,7 +84,7 @@ RUN rm requirements.txt # extra Python package dependencies (for testing and interaction) RUN pip install pygments==2.4.1 RUN pip install 
ipykernel==5.5.5 -RUN pip install jupyter==1.0.0 +RUN pip install jupyter==1.0.0 --ignore-installed RUN pip install markupsafe==2.0.1 RUN pip install matplotlib==3.3.1 --ignore-installed RUN pip install pytest-dependency==0.5.1 From 9df9d642adc73f528eadd07340421491df259a3a Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 9 Jan 2023 16:27:17 +0000 Subject: [PATCH 298/628] [Requirements] Fix version for psutil --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 92601e7ecc..348b1afab9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ onnxoptimizer onnxruntime==1.11.1 pre-commit==2.9.2 protobuf==3.20.2 +psutil==5.9.4 pyscaffold==3.2.1 scipy==1.5.2 setupext-janitor>=1.1.2 From 5ad48be547b87647562886f5bb843aef25d5d706 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 10 Jan 2023 14:48:55 +0000 Subject: [PATCH 299/628] [Util] Change np.int to np.int_ --- src/finn/util/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/util/test.py b/src/finn/util/test.py index bfe4aa0bb8..bd8bde2820 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -91,8 +91,8 @@ def soft_verify_topk(invec, idxvec, k): """Check that the topK indices provided actually point to the topK largest values in the input vector""" np_topk = np.flip(invec.flatten().argsort())[:k] - soft_expected = invec.flatten()[np_topk.astype(np.int).flatten()] - soft_produced = invec.flatten()[idxvec.astype(np.int).flatten()] + soft_expected = invec.flatten()[np_topk.astype(np.int_).flatten()] + soft_produced = invec.flatten()[idxvec.astype(np.int_).flatten()] return (soft_expected == soft_produced).all() From 9f8701643e491030fdcbc0bccb181ca1bfa2bc39 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 10 Jan 2023 18:43:28 +0100 Subject: [PATCH 300/628] [VVAU] Fix BIPOLAR/TERNARY compatibility --- .../fpgadataflow/vectorvectoractivation.py | 42 ++++++++-- src/finn/util/data_packing.py | 2 +- tests/fpgadataflow/test_fpgadataflow_vvau.py | 83 ++++++++++++------- 3 files changed, 86 insertions(+), 41 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 813b673b39..6d4b5fb9e6 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -225,9 +225,9 @@ def get_output_datatype(self, ind=0): def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") - #if simd > 1: - #pe = self.get_nodeattr("Channels") - #else: + # if simd > 1: + # pe = self.get_nodeattr("Channels") + # else: pe = self.get_nodeattr("PE") in_width = i_bits * simd * pe return in_width @@ -242,9 +242,9 @@ def get_folded_input_shape(self, ind=0): dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") simd = self.get_nodeattr("SIMD") - #if simd > 1: - #pe = self.get_nodeattr("Channels") - #else: + # if simd > 1: + # pe = self.get_nodeattr("Channels") + # else: pe = self.get_nodeattr("PE") sf = k_h * k_w // simd nf = ch // pe @@ -351,6 +351,9 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): ), """Weights matrix doesn't have expected shape (channels, 1, kernel_size, kernel_size)""" ret = orig_weight_matrix + if self.get_weight_datatype() == DataType["BIPOLAR"]: + # convert bipolar to binary + ret = (ret + 1) / 2 ret = ret.reshape(ch, k_h * k_w) # distribute rows between PEs ret = 
interleave_matrix_outer_dim_from_partitions(ret, pe) @@ -649,6 +652,12 @@ def execute_node(self, context, graph): not float32 as expected.""" expected_inp_shape = self.get_folded_input_shape() reshaped_input = context[inputs].reshape(expected_inp_shape) + if self.get_input_datatype() == DataType["BIPOLAR"]: + # store bipolar activations as binary + reshaped_input = (reshaped_input + 1) / 2 + export_idt = DataType["BINARY"] + else: + export_idt = self.get_input_datatype() # make copy before saving the array reshaped_input = reshaped_input.copy() np.save( @@ -664,14 +673,20 @@ def execute_node(self, context, graph): super().exec_precompiled_singlenode_model() # load output npy file super().npy_to_dynamic_output(context) + # reinterpret binary output as bipolar where needed + if self.get_output_datatype() == DataType["BIPOLAR"]: + out = context[node.output[0]] + out = 2 * out - 1 + context[node.output[0]] = out assert ( context[node.output[0]].shape == self.get_normal_output_shape() ), "cppsim did not produce expected output shape" elif mode == "rtlsim": sim = self.get_rtlsim() nbits = self.get_instream_width() - idt = self.get_input_datatype() - inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), idt, nbits) + inp = npy_to_rtlsim_input( + "{}/input_0.npy".format(code_gen_dir), export_idt, nbits + ) super().reset_rtlsim(sim) super().toggle_clk(sim) @@ -756,6 +771,9 @@ def defines(self, var): def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_input_datatype() + if dtype == DataType["BIPOLAR"]: + # use binary for bipolar storage + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits @@ -826,6 +844,11 @@ def docompute(self): ) ] elif mem_mode == "decoupled" or mem_mode == "external": + simd = self.get_nodeattr("SIMD") + if simd > 1: + raise Exception( + "SIMD parallelism not supported for decoupled or external mode" + ) wdt = self.get_weight_datatype() if wdt == DataType["BIPOLAR"]: export_wdt = DataType["BINARY"] @@ -853,6 +876,9 @@ def docompute(self): def dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") dtype = self.get_output_datatype() + if dtype == DataType["BIPOLAR"]: + # use binary for bipolar storage + dtype = DataType["BINARY"] elem_bits = dtype.bitwidth() packed_bits = self.get_outstream_width() packed_hls_type = "ap_uint<%d>" % packed_bits diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py index 65478d2540..f7ea2ff943 100644 --- a/src/finn/util/data_packing.py +++ b/src/finn/util/data_packing.py @@ -220,7 +220,7 @@ def unpack_innermost_dim_from_hex_string( if conv_dtype == DataType["BIPOLAR"]: ar_list = [2 * x - 1 for x in ar_list] # interpret values as signed values - elif conv_dtype.name.startswith("INT"): + elif dtype.signed(): mask = 2 ** (conv_dtype.bitwidth() - 1) ar_list = [-(x & mask) + (x & ~mask) for x in ar_list] diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index ea4be47334..a418de5728 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -27,30 +27,29 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
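# Note on the bipolar cases exercised in this test: bipolar values {-1, +1} are
# stored in binary form via b = (x + 1) / 2, and for two length-N bipolar
# vectors the dot product relates to XNOR-popcount by
# popcount(xnor(a_bin, b_bin)) = (a . b + N) / 2, which is the
# (y_expected + k_h * k_w) / 2 conversion used in the test body below.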
import pytest + import numpy as np from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.multithreshold import multithreshold - -# from qonnx.custom_op.registry import getCustomOp +from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import gen_finn_dt_tensor import finn.core.onnx_exec as oxe - -# from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer +from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.minimize_accumulator_width import ( + MinimizeAccumulatorWidth, +) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -from finn.transformation.fpgadataflow.minimize_accumulator_width import ( - MinimizeAccumulatorWidth, -) def _infer_sparse_weight_tensor(W_conv, k_h, k_w, channels): @@ -110,7 +109,10 @@ def _make_single_vvau_modelwrapper( if T is not None: no_act = 0 node_inp_list = ["inp", "weights", "thresh"] - actval = odt.min() + if odt == DataType["BIPOLAR"]: + actval = 0 + else: + actval = odt.min() else: no_act = 1 node_inp_list = ["inp", "weights"] @@ -167,15 +169,15 @@ def prepare_inputs(input_tensor): # input datatype -@pytest.mark.parametrize("idt", [DataType["UINT4"]]) +@pytest.mark.parametrize("idt", [DataType["BIPOLAR"], DataType["UINT4"]]) # weight datatype -@pytest.mark.parametrize("wdt", [DataType["UINT4"]]) +@pytest.mark.parametrize("wdt", [DataType["BIPOLAR"], DataType["UINT4"]]) # activation: None or DataType -@pytest.mark.parametrize("act", [DataType["UINT4"], None]) +@pytest.mark.parametrize("act", [DataType["BIPOLAR"], DataType["UINT4"], None]) # PE -@pytest.mark.parametrize("pe", [1,2,3,6]) +@pytest.mark.parametrize("pe", [1, 3, 6]) # SIMD -@pytest.mark.parametrize("simd", [1,9]) +@pytest.mark.parametrize("simd", [1, 9]) # Input image shape @pytest.mark.parametrize("dim_h", [10]) @pytest.mark.parametrize("dim_w", [10]) @@ -187,7 +189,7 @@ def prepare_inputs(input_tensor): # memory mode @pytest.mark.parametrize("mem_mode", ["const"]) # execution mode -@pytest.mark.parametrize("exec_mode", ["cppsim","rtlsim"]) +@pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado @@ -203,9 +205,6 @@ def test_fpgadataflow_vvau( if channels % pe != 0: pytest.skip("Requirement Channels divisable by PE is violated.") - #if pe < channels and simd > 1: - # pytest.skip("Do not apply SIMD parallelism before max PE parallelism") - # Generate weights in expected shape for ONNX and HLS node W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w)) # shape: [channels, 1, k, k] W_onnx = _infer_sparse_weight_tensor( @@ -221,14 +220,23 @@ def test_fpgadataflow_vvau( if act is None: T = None tdt = None - odt = DataType["INT32"] + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + odt = DataType["UINT32"] + else: + 
odt = DataType["INT32"] else: odt = act - (min_v, max_v) = _calculate_dot_prod_range(idt, wdt, k_h * k_w * channels) + (min_v, max_v) = _calculate_dot_prod_range(idt, wdt, k_h * k_w) n_steps = act.get_num_possible_values() - 1 T = np.random.randint(min_v, max_v - 1, (channels, n_steps)).astype(np.float32) T = np.sort(T, axis=1) - tdt = DataType["INT32"] + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + tdt = DataType["UINT32"] + # bias thresholds to be positive + T = np.ceil((T + (k_h * k_w)) / 2) + assert (T >= 0).all() + else: + tdt = DataType["INT32"] model = _make_single_vvau_modelwrapper( W, pe, simd, k_h, k_w, channels, dim_h, dim_w, wdt, idt, odt, T, tdt, mem_mode @@ -250,14 +258,25 @@ def test_fpgadataflow_vvau( input_dict = prepare_inputs(x_vvau) # Calculate output - y_expected = np.matmul(x, W_onnx) # Y is in [N, H, W, C] format + if wdt == DataType["BIPOLAR"] and idt == DataType["BIPOLAR"]: + # Simulate XNOR-popcount matrix multiplication, see + # qonnx.custom_op.general.xnorpopcount (not usable due to sparse W) + y_expected = np.matmul(x, W_onnx) + y_expected = (y_expected + (k_h * k_w)) / 2 + else: + y_expected = np.matmul(x, W_onnx) # Y is in [N, H, W, C] format + if T is not None: # Reshape Y, as multithreshold expects Y to be in [N, C, H, W] format y_expected = np.transpose(y_expected, (0, 3, 1, 2)) y_expected = multithreshold(y_expected, T) y_expected = np.transpose(y_expected, (0, 2, 3, 1)) - # signed offset - y_expected += act.min() + if act == DataType["BIPOLAR"]: + # binary to bipolar + y_expected = 2 * y_expected - 1 + else: + # signed offset + y_expected += act.min() y_produced = oxe.execute_onnx(model, input_dict, return_full_exec_context=False)[ "outp" @@ -265,11 +284,11 @@ def test_fpgadataflow_vvau( assert (y_produced == y_expected).all(), "incorrect result" - # if exec_mode == "rtlsim": - # node = model.get_nodes_by_op_type("VectorVectorActivation")[0] - # inst = getCustomOp(node) - # cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") - # exp_cycles_dict = model.analysis(exp_cycles_per_layer) - # exp_cycles = exp_cycles_dict[node.name] - # assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) - # assert exp_cycles != 0 + if exec_mode == "rtlsim": + node = model.get_nodes_by_op_type("VectorVectorActivation")[0] + inst = getCustomOp(node) + cycles_rtlsim = inst.get_nodeattr("cycles_rtlsim") + exp_cycles_dict = model.analysis(exp_cycles_per_layer) + exp_cycles = exp_cycles_dict[node.name] + assert np.isclose(exp_cycles, cycles_rtlsim, atol=10) + assert exp_cycles != 0 From 5428315898e1656653c543a704ca61adb4e3c9df Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 11 Jan 2023 12:29:54 +0300 Subject: [PATCH 301/628] [QONNX] make valid_predecessor_op_type a method, fallback to QuantIdentityHandler --- .../qonnx/qonnx_activation_handlers.py | 30 ++++++++++--------- .../qonnx/quant_act_to_multithreshold.py | 20 +++++++------ 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/finn/transformation/qonnx/qonnx_activation_handlers.py b/src/finn/transformation/qonnx/qonnx_activation_handlers.py index a50a585077..9819086d82 100644 --- a/src/finn/transformation/qonnx/qonnx_activation_handlers.py +++ b/src/finn/transformation/qonnx/qonnx_activation_handlers.py @@ -52,9 +52,7 @@ def __init__(self, model: ModelWrapper, quant_node, quant_node_index: int): self._q_node = quant_node self._q_index = quant_node_index - @property @classmethod - @abstractmethod def valid_predecessor_op_types(self): """Defines which op types the preceding node is 
allowed to have for this type of activation. @@ -284,9 +282,11 @@ class QuantReluHandler(QuantActBaseHandler): """Class for converting a quantized relu operation expressed in the QONNX dialect to the FINN ONNX dialect.""" - valid_predecessor_op_types = [ - "Relu", - ] + @classmethod + def valid_predecessor_op_types(self): + return [ + "Relu", + ] def _check_compatibility(self): if self._q_node.op_type == "Quant": @@ -391,15 +391,17 @@ class QuantIdentityHandler(QuantActBaseHandler): these are equivalent to quantized identity activations. """ - valid_predecessor_op_types = [ - "BatchNormalization", - "Sub", - "Add", - "Mul", - "Div", - "DebugMarker", - None, - ] + @classmethod + def valid_predecessor_op_types(self): + return [ + "BatchNormalization", + "Sub", + "Add", + "Mul", + "Div", + "DebugMarker", + None, + ] def _check_compatibility(self): # Gather parameters to check diff --git a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py index 77025ecdf5..e0f893f35c 100644 --- a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py +++ b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py @@ -30,7 +30,10 @@ import warnings from qonnx.transformation.base import Transformation -from finn.transformation.qonnx.qonnx_activation_handlers import QuantActBaseHandler +from finn.transformation.qonnx.qonnx_activation_handlers import ( + QuantActBaseHandler, + QuantIdentityHandler, +) def default_filter_function_generator(max_multithreshold_bit_width=8): @@ -127,7 +130,7 @@ def apply(self, model): # Check for possible ambiguity in handler selection valid_predecessors = [] for cls in QuantActBaseHandler.__subclasses__(): - valid_predecessors.extend(cls.valid_predecessor_op_types) + valid_predecessors.extend(cls.valid_predecessor_op_types()) if len(valid_predecessors) != len(set(valid_predecessors)): raise RuntimeError( "Two or more activation handlers declare the same " @@ -138,16 +141,15 @@ def apply(self, model): # Try to find a fitting handler for this Quant activation node for handler_cls in QuantActBaseHandler.__subclasses__(): - if predecessor_op_type in handler_cls.valid_predecessor_op_types: + if predecessor_op_type in handler_cls.valid_predecessor_op_types(): handler = handler_cls(model, n, node_ind) break else: - raise ValueError( - f"Quant nodes in the activation path and with predecessor " - f"nodes of type {predecessor_op_type} are currently not " - f"supported by FINN and can not be converted to " - f"MultiThreshold nodes." - ) + # fall back to QuantIdentityHandler here + # it may still not work due to its particular restrictions, + # but better than just erroring out without trying + handler = QuantIdentityHandler(model, n, node_ind) + model = handler.replace_quant_node() graph_modified = True return (model, graph_modified) From 1792ccf1a401c95bc1480e94c4c4c996a142404b Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 13 Jan 2023 14:13:59 +0000 Subject: [PATCH 302/628] [Deps] Update qonnx commit version to incorporate doc string changes --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index b0f6400ed1..5e668e0449 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="f702b17cdb9d5e57f85f43a5d33890647e063de6" +QONNX_COMMIT="7d50273a4dcccb445fb06f57f6bedc17b3707b35" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From dc26864becf174db1029e3695b3bb0858b75d645 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 13 Jan 2023 15:47:09 +0000 Subject: [PATCH 303/628] [GHA] Update repo checkout to v3 --- .github/workflows/docker-image.yml | 7 ++++--- .github/workflows/quicktest-dev-pr.yml | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 2c91a0a83b..00c25a4a31 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,9 +1,10 @@ name: DockerImage on: + pull_request: + branches: [ dev ] push: - branches: - - 'dev' + branches: [ dev ] jobs: docker: @@ -11,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 diff --git a/.github/workflows/quicktest-dev-pr.yml b/.github/workflows/quicktest-dev-pr.yml index a726ab584f..e2ba47ec29 100644 --- a/.github/workflows/quicktest-dev-pr.yml +++ b/.github/workflows/quicktest-dev-pr.yml @@ -15,7 +15,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: DockerRunQuicktest run: | From cd4af94805f6ee6fb4c0c391dbb3e874ed7df53a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 16 Jan 2023 15:45:39 +0100 Subject: [PATCH 304/628] [FIFO] use at least 2 samples also for convnets for cppsim fifo insertion --- src/finn/transformation/fpgadataflow/set_fifo_depths.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 80f5d9a094..8f766bcdb1 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -382,9 +382,9 @@ def apply(self, model): # nodes as # inputs to drive the imulation n_inputs = int(len(model.graph.node) / 2) else: - # convnet, single input is typically enough to fill entire + # convnet, two inputs are typically enough to fill entire # layer pipeline due to overlaps - n_inputs = 1 + n_inputs = 2 sim = verilator_fifosim(model, n_inputs) for ind, node in enumerate(fifo_nodes): From da8295794be7a970a78c22ff3efc49926962c548 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 16 Jan 2023 11:52:42 -0800 Subject: [PATCH 305/628] Fixing reproducibility issue with FINN_BUILD_DIR --- src/finn/util/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 4aba87216c..a252d323dc 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -121,7 +121,7 @@ def make_build_dir(prefix=""): try: tmpdir = tempfile.mkdtemp(prefix=prefix) newdir = tmpdir.replace("/tmp", os.environ["FINN_BUILD_DIR"]) - os.makedirs(newdir) + os.makedirs(newdir, exist_ok=True) return newdir except KeyError: raise Exception( From 4d5e0458dec18d833db8b4623351b27a33a0653f Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 17 Jan 2023 11:59:09 +0000 Subject: [PATCH 306/628] [MVAU] Update minimize accumulator width for bipolar case --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 9f34eb1515..72128fda4c 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -576,6 +576,10 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): def minimize_accumulator_width(self, model): weights = model.get_initializer(self.onnx_node.input[1]) + # since in the calculation the values of the weight matrix are used, + # for the bipolar case they need to be converted to bipolar + if self.get_nodeattr("binaryXnorMode"): + weights = 2 * weights - 1 if len(self.onnx_node.input) > 2: thresholds = model.get_initializer(self.onnx_node.input[2]) else: From 64306ba78b113ba3bd1ce7a641ba1a1e41c883b3 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 17 Jan 2023 15:49:17 +0000 Subject: [PATCH 307/628] [CustomOp] Update ImgDim and numReps in thresholding --- .../fpgadataflow/thresholding_batch.py | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 03cfbe0e09..d9745acf63 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -602,17 +602,17 @@ def global_includes(self): # TODO check and add whatever missing def defines(self, var): - if self.get_nodeattr("mem_mode") == "const": - numReps = 1 - else: - numInputVectors = list(self.get_nodeattr("numInputVectors")) - numReps = int(np.prod(numInputVectors)) + numReps = 1 + numInputVectors = list(self.get_nodeattr("numInputVectors")) + total_spatial_size = int(np.prod(numInputVectors)) self.code_gen_dict["$DEFINES$"] = [ - """#define NumChannels1 {}\n #define PE1 {}\n #define numReps {}""".format( + """#define NumChannels1 {}\n #define PE1 {}\n #define numReps {}\n + #define ImgDim1 {}""".format( self.get_nodeattr("NumChannels"), self.get_nodeattr("PE"), numReps, + total_spatial_size, ) ] if self.get_nodeattr("mem_mode") == "decoupled": @@ -653,7 +653,7 @@ def read_npy_data(self): npy_in = "%s/thresholds.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, numReps);' + 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, ImgDim1);' % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) ) @@ -675,18 +675,13 @@ def strm_decl(self): def docompute(self): tmpl_args = self.get_template_param_values() - # TODO: why put some template parameters into defines and not others? - # should ImgDim be defined or just filled in here like we do now? 
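+        # ImgDim1 (the total spatial size) and numReps are now emitted by
+        # defines(), so they no longer need to be recomputed in this method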
node = self.onnx_node - inp_vecs = self.get_nodeattr("numInputVectors") - total_spatial_size = int(np.prod(inp_vecs)) mem_mode = self.get_nodeattr("mem_mode") if mem_mode == "const": self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, NumChannels1, PE1, {}, {}> + """{} (in0, out, threshs, numReps);""".format( node.op_type, - total_spatial_size, tmpl_args["TSrcI"], tmpl_args["TDstI"], ) @@ -696,10 +691,9 @@ def docompute(self): # - for cppsim the repetition comes from the threshold stream reader+input # - for synth the unit runs continuously anyway (ap_ctrl_none) self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, NumChannels1, PE1, {}, {}, ActVal1, ThresType1, NumSteps1> - (in0, out, weights, 1);""".format( + """{} + (in0, out, weights, numReps);""".format( "Thresholding_Stream_Batch", - total_spatial_size, tmpl_args["TSrcI"], tmpl_args["TDstI"], ) From 1add5a6785ae4e2bab7df0b1206e223daf586715 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 17 Jan 2023 09:14:02 -0800 Subject: [PATCH 308/628] Create minimize_weight_bit_width.py --- .../fpgadataflow/minimize_weight_bit_width.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py diff --git a/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py new file mode 100644 index 0000000000..de16c65912 --- /dev/null +++ b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py @@ -0,0 +1,49 @@ +# Copyright (c) 2023, Xilinx +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from qonnx.custom_op.registry import getCustomOp +from qonnx.transformation.base import Transformation + +from finn.util.fpgadataflow import is_fpgadataflow_node + + +class MinimizeWeightBitWidth(Transformation): + """For relevant nodes, call the weight bit width minimization + functions to save on resources. 
May alter tensor weightDataType + if the node does not have runtime writeable weights.""" + + def __init__(self): + super().__init__() + + def apply(self, model): + for node in model.graph.node: + if is_fpgadataflow_node(node) is True: + inst = getCustomOp(node) + if hasattr(inst, "minimize_weight_bit_width"): + inst.minimize_weight_bit_width(model) + return (model, False) \ No newline at end of file From b4d66ed42bd258cda7b78ef5fa9eff2bc546081a Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 17 Jan 2023 09:17:12 -0800 Subject: [PATCH 309/628] Fixing if-else logic to make more sense --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 3 +-- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index a1dff7a0ad..2ac9ad2867 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -652,8 +652,7 @@ def minimize_accumulator_width(self, model): def minimize_weight_bit_width(self, model): """Minimize the bit width based on the values of the weights""" - runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 0 - if runtime_writable: + if not self.get_nodeattr("runtime_writeable_weights"): weights = model.get_initializer(self.onnx_node.input[1]) w_min = weights.min() w_max = weights.max() diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 5d97244e5b..fd74a7b0c9 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -171,8 +171,7 @@ def minimize_accumulator_width(self, model): def minimize_weight_bit_width(self, model): """Minimize the bit width based on the values of the weights""" - runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 0 - if runtime_writable: + if not self.get_nodeattr("runtime_writeable_weights"): weights = model.get_initializer(self.onnx_node.input[1]) w_min = weights.min() w_max = weights.max() From 74dafc8444b42d6a7cb2751f84a5a6d261557272 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 18 Jan 2023 15:10:21 +0100 Subject: [PATCH 310/628] [VVAU] SIMD support for decoupled mode --- .../custom_op/fpgadataflow/vectorvectoractivation.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 6d4b5fb9e6..72158ffcd6 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -473,7 +473,7 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): weight_tensor_pe_flipped = np.flip(weight_tensor_unflipped, axis=-2) # reshape weight tensor (simd_flipped and pe_flipped) to desired shape pe = self.get_nodeattr("PE") - simd = 1 + simd = self.get_nodeattr("SIMD") # simd_flipped weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape( 1, -1, pe * simd @@ -844,11 +844,6 @@ def docompute(self): ) ] elif mem_mode == "decoupled" or mem_mode == "external": - simd = self.get_nodeattr("SIMD") - if simd > 1: - raise Exception( - "SIMD parallelism not supported for decoupled or external mode" - ) wdt = self.get_weight_datatype() if wdt == DataType["BIPOLAR"]: export_wdt = 
DataType["BINARY"] @@ -1249,9 +1244,10 @@ def get_weightstream_width(self): self.get_nodeattr("mem_mode") == "decoupled" or self.get_nodeattr("mem_mode") == "external" ): + simd = self.get_nodeattr("SIMD") pe = self.get_nodeattr("PE") wp = self.get_weight_datatype().bitwidth() - w_width = pe * wp + w_width = simd * pe * wp return w_width else: return 0 From 2008d1544d58383fb275f4c02175c3f428997427 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 18 Jan 2023 14:56:05 +0000 Subject: [PATCH 311/628] [Docs] Fix docstring in SplitLargeFIFOs transform --- src/finn/transformation/fpgadataflow/set_fifo_depths.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index c6bf92578a..8699cd7001 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -501,6 +501,7 @@ def decompose_pow2(x): class SplitLargeFIFOs(Transformation): """Split large FIFOs before implementation, for two reasons: + - impl_style="vivado" supports a max depth of 32k. Any larger FIFOs must be implemented as a sequence of smaller FIFOs. - impl_style="vivado" requires power-of-two depths, which is @@ -508,6 +509,7 @@ class SplitLargeFIFOs(Transformation): So a FIFO of size 8196 normally gets rounded-up to a depth of 16384 and wastes a lot of resources. Here, instead, we split this up into two FIFOs of depth 8192 + 4. + """ def __init__(self, max_qsrl_depth=256, max_vivado_depth=32768): From 962a5585b5f03b4fd2ffbe128c0b4ab7179292c9 Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 18 Jan 2023 08:31:19 -0800 Subject: [PATCH 312/628] Fixing headers to minimize_weight_bit_width.py --- .../transformation/fpgadataflow/minimize_weight_bit_width.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py index de16c65912..147f8281a7 100644 --- a/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py +++ b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, Xilinx +# Copyright (C) 2023, Advanced Micro Devices, Inc. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without From 9ce9dfec374def40ec7ee2eae1acc738be5d4aa0 Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 18 Jan 2023 09:03:17 -0800 Subject: [PATCH 313/628] Update basic.py --- src/finn/util/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index a252d323dc..4aba87216c 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -121,7 +121,7 @@ def make_build_dir(prefix=""): try: tmpdir = tempfile.mkdtemp(prefix=prefix) newdir = tmpdir.replace("/tmp", os.environ["FINN_BUILD_DIR"]) - os.makedirs(newdir, exist_ok=True) + os.makedirs(newdir) return newdir except KeyError: raise Exception( From 3dc6d8dc9b89e910132f90291461109c2ec30906 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 20 Jan 2023 14:53:06 +0000 Subject: [PATCH 314/628] [Tests] Add jenkins marker for fifosizing test --- tests/fpgadataflow/test_fifosizing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 116df98d17..6b78d399eb 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -49,6 +49,7 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow @pytest.mark.parametrize( "method", ["largefifo_rtlsim_python", "largefifo_rtlsim_cpp", "characterize"] ) From 18b8d0591493bb5d595dafd82c7e4c9fca4bb015 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 24 Jan 2023 14:36:31 +0000 Subject: [PATCH 315/628] [Tests] Change copyright header and force python verilator exec --- tests/fpgadataflow/test_split_large_fifos.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py index eab8072fc8..ca6d0e981e 100644 --- a/tests/fpgadataflow/test_split_large_fifos.py +++ b/tests/fpgadataflow/test_split_large_fifos.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022 Xilinx, Inc. +# Copyright (C) 2022, Advanced Micro Devices, Inc. # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without @@ -74,6 +74,7 @@ def test_split_large_fifos(depth): split_large_fifos=True, folding_config_file=tmp_output_dir + "/folding_config.json", target_fps=10000, + force_python_rtlsim=True, synth_clk_period_ns=10.0, board="Pynq-Z1", rtlsim_batch_size=100, From e282f2936b2fb4c23b65065eb580cabf060ec505 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 26 Jan 2023 08:51:54 +0000 Subject: [PATCH 316/628] [GHA] Update setup-python in pre-commit gha --- .github/workflows/pre-commit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index f61af878ff..5f03379bbc 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -16,7 +16,7 @@ jobs: uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: '3.8' From 61ac5b62e4da00837542d814c656798930deb6bf Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 26 Jan 2023 14:01:41 +0100 Subject: [PATCH 317/628] [VVAU] update resource estimates --- .../fpgadataflow/vectorvectoractivation.py | 61 +++++++++++++------ 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 72158ffcd6..2e86d72d04 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -218,6 +218,10 @@ def get_weight_datatype(self): """Returns FINN DataType of weights.""" return DataType[self.get_nodeattr("weightDataType")] + def get_accumulator_datatype(self): + """Returns FINN DataType of accumulator""" + return DataType[self.get_nodeattr("accDataType")] + def get_output_datatype(self, ind=0): """Returns FINN DataType of output.""" return DataType[self.get_nodeattr("outputDataType")] @@ -1115,7 +1119,7 @@ def code_generation_ipi(self): def uram_estimation(self): P = self.get_nodeattr("PE") - Q = 1 + Q = self.get_nodeattr("SIMD") wdt = self.get_weight_datatype() W = wdt.bitwidth() omega = self.calc_wmem() @@ -1124,7 +1128,7 @@ def uram_estimation(self): mstyle = self.get_nodeattr("ram_style") if ( (mmode == "decoupled" and mstyle != "ultra") - or (mmode == "const" and self.calc_wmem() <= 128) + or (mmode == "const") or (mmode == "external") ): return 0 @@ -1136,9 +1140,11 @@ def bram_estimation(self): """Calculates resource estimation for BRAM""" # TODO add in/out FIFO contributions P = self.get_nodeattr("PE") + Q = self.get_nodeattr("SIMD") wdt = self.get_weight_datatype() W = wdt.bitwidth() omega = self.calc_wmem() + mem_width = Q * W * P # assuming SDP mode RAMB18s (see UG573 Table 1-10) # since this is HLS memory, not using the full width of a BRAM # assuming memories up to 128 deep get implemented in LUTs @@ -1146,23 +1152,24 @@ def bram_estimation(self): mstyle = self.get_nodeattr("ram_style") if ( (mmode == "decoupled" and mstyle in ["distributed", "ultra"]) + or (mstyle == "auto" and self.calc_wmem() <= 128) or (mmode == "const" and self.calc_wmem() <= 128) or (mmode == "external") ): return 0 - if W == 1: - return math.ceil(omega / 16384) * P - elif W == 2: - return math.ceil(omega / 8192) * P - elif W <= 4: - return (math.ceil(omega / 4096)) * (math.ceil(W / 4)) * P - elif W <= 9: - return (math.ceil(omega / 2048)) * (math.ceil(W / 8)) * P - elif W <= 18 or omega > 512: - return (math.ceil(omega / 1024)) * (math.ceil(W 
/ 16)) * P + if mem_width == 1: + return math.ceil(omega / 16384) + elif mem_width == 2: + return math.ceil(omega / 8192) + elif mem_width <= 4: + return (math.ceil(omega / 4096)) * (math.ceil(mem_width / 4)) + elif mem_width <= 9: + return (math.ceil(omega / 2048)) * (math.ceil(mem_width / 8)) + elif mem_width <= 18 or omega > 512: + return (math.ceil(omega / 1024)) * (math.ceil(mem_width / 16)) else: - return (math.ceil(omega / 512)) * (math.ceil(W / 32)) * P + return (math.ceil(omega / 512)) * (math.ceil(mem_width / 32)) def bram_efficiency_estimation(self): P = self.get_nodeattr("PE") @@ -1186,6 +1193,7 @@ def lut_estimation(self): """ # TODO add in/out FIFO contributions P = self.get_nodeattr("PE") + Q = self.get_nodeattr("SIMD") wdt = self.get_weight_datatype() W = wdt.bitwidth() # determine tdt with input and weight data types @@ -1200,29 +1208,46 @@ def lut_estimation(self): if (mmode == "decoupled" and mstyle == "distributed") or ( mmode == "const" and self.calc_wmem() <= 128 ): - c2 = (P * W) * math.ceil(self.calc_wmem() / 64) + c2 = (P * Q * W) * math.ceil(self.calc_wmem() / 64) # multiplication res_type = self.get_nodeattr("resType") if res_type == "dsp": mult_luts = 0 else: - mult_luts = (2 * math.ceil((W + A) / 6) - 1) * (W + A) + mult_luts = Q * (2 * math.ceil((W + A) / 6) - 1) * (W + A) + # adder tree + addertree_luts = (W + A) * (2 * Q - 1) # accumulator + acc_datatype = self.get_accumulator_datatype() + acc_bits = acc_datatype.bitwidth() k_h, k_w = self.get_nodeattr("Kernel") - acc_bits = W + A + math.ceil(math.log(k_h * k_w, 2)) + # if accDataType is not set, then it will default to INT32, which would + # be a large overestimate in most (if not all) cases. In this scenario, + # we would use the minimum accumulator as determined by the data types. 
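+        # alpha approximates log2 of the worst-case magnitude of a dot product
+        # over k_h * k_w products of W-bit weights and A-bit inputs; adding the
+        # small correction phi(alpha) plus one sign bit gives a bound on the
+        # signed accumulator width, used below to cap a declared accDataType
+        # that is wider than necessary (e.g. the INT32 default)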
+ alpha = math.log(k_h * k_w, 2) + W + A - 1 - int(idt.signed()) + + def phi(x_): + return math.log(1 + pow(2, -x_), 2) + + acc_bits = min(acc_datatype.bitwidth(), np.ceil(alpha + phi(alpha) + 1)) acc_luts = acc_bits # thresholds and threshold comparators thr_luts = 0 comp_luts = 0 noact = self.get_nodeattr("noActivation") + # TODO - add 'ram_style_threshold' node attribute if noact == 0: odt = self.get_output_datatype() B = odt.bitwidth() - thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64) + thr_luts = (2**B - 1) * acc_bits * self.calc_tmem() / 64 comp_luts = (2**B - 1) * acc_bits - return int(c0 + c1 * (P * (mult_luts + acc_luts + thr_luts + comp_luts)) + c2) + return int( + c0 + + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) + + c2 + ) def dsp_estimation(self): # multiplication From c60d955b51b9359e0af38c99a3ffe342b58faf17 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 26 Jan 2023 14:10:59 +0000 Subject: [PATCH 318/628] [pyverilator] Add arguments to cpp verilator simulation --- src/finn/util/pyverilator.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index a00899cf78..8d18858569 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -133,6 +133,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): and throughput measurement.""" vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) + verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" build_dir = make_build_dir("verilator_fifosim_") fifosim_cpp_fname = pk.resource_filename( "finn.qnn-data", "cpp/verilator_fifosim.cpp" @@ -184,6 +185,19 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): if which_verilator is None: raise Exception("'verilator' executable not found") + # add defines to make certain XPM src files work with Verilator + xpm_args = [] + xpm_args.append("-DDISABLE_XPM_ASSERTIONS") + xpm_args.append("-DOBSOLETE") + xpm_args.append("-DONESPIN") + xpm_args.append("--bbox-unsup") + vivado_path = os.environ["VIVADO_PATH"] + # additional SystemVerilog modules to make XPMs work with Verilator + xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" + xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" + xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" + verilog_file_arg = ["finn_design_wrapper.v", xpm_memory, xpm_cdc, xpm_fifo] + verilator_args = [ "perl", which_verilator, @@ -192,6 +206,8 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): build_dir, "-y", vivado_stitch_proj_dir, + "-y", + verilog_header_dir, "--CFLAGS", "--std=c++11", "-O3", @@ -201,13 +217,14 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): "fast", "--noassert", "--cc", - "finn_design_wrapper.v", + *verilog_file_arg, "--top-module", "finn_design_wrapper", "--exe", "verilator_fifosim.cpp", "--threads", "4", + *xpm_args, ] proc_env = os.environ.copy() From c3761e1b8b38cdddab2b65cadcad01aa845d66c6 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 26 Jan 2023 14:46:25 +0000 Subject: [PATCH 319/628] [Tests] Extend split large fifo testcase --- tests/fpgadataflow/test_split_large_fifos.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py index ca6d0e981e..85b4a2bfa8 100644 --- a/tests/fpgadataflow/test_split_large_fifos.py +++ 
b/tests/fpgadataflow/test_split_large_fifos.py @@ -63,7 +63,8 @@ def get_folding_cfg(depth=65536): @pytest.mark.vivado @pytest.mark.fpgadataflow @pytest.mark.parametrize("depth", [16384, 65536, 45000]) -def test_split_large_fifos(depth): +@pytest.mark.parametrize("force_python_rtlsim", ["True", "False"]) +def test_split_large_fifos(depth, force_python_rtlsim): tmp_output_dir = fetch_test_model("tfc") folding_cfg = get_folding_cfg(depth) with open(tmp_output_dir + "/folding_config.json", "w") as f: @@ -74,7 +75,7 @@ def test_split_large_fifos(depth): split_large_fifos=True, folding_config_file=tmp_output_dir + "/folding_config.json", target_fps=10000, - force_python_rtlsim=True, + force_python_rtlsim=force_python_rtlsim, synth_clk_period_ns=10.0, board="Pynq-Z1", rtlsim_batch_size=100, From dcf82a0c7b6a56c2a557b1c69712b203f3c19447 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 26 Jan 2023 15:23:28 +0000 Subject: [PATCH 320/628] [Transform] Add defaults, docstring and comments to split large fifos --- src/finn/transformation/fpgadataflow/set_fifo_depths.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 8699cd7001..9ac1000468 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -451,7 +451,9 @@ def apply(self, model): return (model, False) -def get_fifo_split_configs(depth, max_qsrl_depth, max_vivado_depth): +def get_fifo_split_configs(depth, max_qsrl_depth=256, max_vivado_depth=32768): + """Break non-power-of-2 sized FIFO depths into several ones""" + def floor_pow2(x): if (x & (x - 1) == 0) and x != 0: return x @@ -486,6 +488,7 @@ def decompose_pow2(x): # into several ones ret_pass2 = list(map(decompose_pow2, ret)) + # unpack list of lists ret_pass2 = [x for dec_list in ret_pass2 for x in dec_list] # finally, add impl_style to each split FIFO From 99c09dc69a070204f3134c0562d30cf36c5eaa8c Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 09:47:36 +0000 Subject: [PATCH 321/628] [Test] extend FIFO sizing test to cnv and premade conf --- tests/fpgadataflow/test_fifosizing.py | 39 ++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 116df98d17..d2d3b642d8 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -32,6 +32,8 @@ import json import shutil from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.registry import getCustomOp import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg @@ -52,7 +54,14 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.parametrize( "method", ["largefifo_rtlsim_python", "largefifo_rtlsim_cpp", "characterize"] ) -def test_fifosizing_linear(method): +@pytest.mark.parametrize( + "topology", + [ + "cnv", + # "tfc" + ], +) +def test_fifosizing_linear(method, topology): force_python_rtlsim = "python" in method method_key = "largefifo_rtlsim" if "largefifo_rtlsim" in method else "characterize" tmp_output_dir = fetch_test_model("tfc") @@ -83,4 +92,32 @@ def test_fifosizing_linear(method): / float(est_data["estimated_throughput_fps"]) > 0.9 ) + # now run the same build using the generated folding and FIFO config + 
tmp_output_dir_cmp = fetch_test_model("tfc") + cfg_cmp = cfg + cfg_cmp.output_dir = tmp_output_dir_cmp + cfg_cmp.auto_fifo_depths = False + cfg_cmp.target_fps = None + cfg_cmp.generate_outputs = [build_cfg.DataflowOutputType.STITCHED_IP] + cfg_cmp.folding_config_file = tmp_output_dir + "/final_hw_config.json" + build.build_dataflow_cfg(tmp_output_dir_cmp + "/model.onnx", cfg_cmp) + + model0 = ModelWrapper( + tmp_output_dir + "/intermediate_models/step_create_stitched_ip.onnx" + ) + model1 = ModelWrapper( + tmp_output_dir_cmp + "/intermediate_models/step_create_stitched_ip.onnx" + ) + + assert len(model0.graph.node) == len(model1.graph.node) + for i in range(len(model0.graph.node)): + node0 = model0.graph.node[i] + node1 = model1.graph.node[i] + assert node0.op_type == node1.op_type + if node0.op_type == "StreamingFIFO": + node0_inst = getCustomOp(node0) + node1_inst = getCustomOp(node1) + assert node0_inst.get_nodeattr("depth") == node1_inst.get_nodeattr("depth") + shutil.rmtree(tmp_output_dir) + shutil.rmtree(tmp_output_dir_cmp) From fbfe8942cc7aa7af56b631b20c00210f9ded2a7f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 17:22:54 +0100 Subject: [PATCH 322/628] [Deps] update qonnx to get ints attribute fix --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 5e668e0449..ee1923e3a4 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="7d50273a4dcccb445fb06f57f6bedc17b3707b35" +QONNX_COMMIT="f14d7dc92a6baeffa2bef811e902abb121a6f696" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From 049504d29884ccd9afdace830e0de01629995fb1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 17:24:50 +0100 Subject: [PATCH 323/628] [Test] correctly pass topology for fifo sizing test, set fps differently --- tests/fpgadataflow/test_fifosizing.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index d2d3b642d8..611a428c79 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -54,22 +54,16 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.parametrize( "method", ["largefifo_rtlsim_python", "largefifo_rtlsim_cpp", "characterize"] ) -@pytest.mark.parametrize( - "topology", - [ - "cnv", - # "tfc" - ], -) +@pytest.mark.parametrize("topology", ["cnv", "tfc"]) def test_fifosizing_linear(method, topology): force_python_rtlsim = "python" in method method_key = "largefifo_rtlsim" if "largefifo_rtlsim" in method else "characterize" - tmp_output_dir = fetch_test_model("tfc") + tmp_output_dir = fetch_test_model(topology) cfg = build_cfg.DataflowBuildConfig( output_dir=tmp_output_dir, auto_fifo_depths=True, auto_fifo_strategy=method_key, - target_fps=10000, + target_fps=10000 if topology == "tfc" else 1000, force_python_rtlsim=force_python_rtlsim, synth_clk_period_ns=10.0, board="Pynq-Z1", @@ -93,7 +87,7 @@ def test_fifosizing_linear(method, topology): > 0.9 ) # now run the same build using the generated folding and FIFO config - tmp_output_dir_cmp = fetch_test_model("tfc") + tmp_output_dir_cmp = fetch_test_model(topology) cfg_cmp = cfg 
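    # "cfg_cmp = cfg" above only rebinds the name (it does not copy), so the
    # attribute assignments below also modify the original cfg object; the
    # first build has already completed at this point, so that is harmless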
cfg_cmp.output_dir = tmp_output_dir_cmp cfg_cmp.auto_fifo_depths = False From 06f2d3ee0eade926eaae88f1263657f8edd6ff50 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 17:49:44 +0100 Subject: [PATCH 324/628] [Test] restrict fifosizing test to tfc topology cnv has unrelated bug for now --- tests/fpgadataflow/test_fifosizing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 611a428c79..b1655485a0 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -54,7 +54,7 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.parametrize( "method", ["largefifo_rtlsim_python", "largefifo_rtlsim_cpp", "characterize"] ) -@pytest.mark.parametrize("topology", ["cnv", "tfc"]) +@pytest.mark.parametrize("topology", ["tfc"]) def test_fifosizing_linear(method, topology): force_python_rtlsim = "python" in method method_key = "largefifo_rtlsim" if "largefifo_rtlsim" in method else "characterize" From b7e2febd505449e5e77372beb899d602e09bc3cf Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 17:50:28 +0100 Subject: [PATCH 325/628] [FIFO] set in/outFIFODepths for FIFO nodes themselves to 0 --- src/finn/custom_op/fpgadataflow/streamingfifo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index c71e8ffe32..522305327f 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -72,6 +72,9 @@ def get_nodeattr_types(self): ), # whether depth monitoring is enabled (impl_style=rtl only) "depth_monitor": ("i", False, 0), + # the FIFO does not need its own FIFOs + "inFIFODepths": ("ints", False, [0]), + "outFIFODepths": ("ints", False, [0]), } ) From fef84fe0d46fce63f79b4f1292452b1ba2d57866 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 17:52:10 +0100 Subject: [PATCH 326/628] [FIFO] make FIFO insertions act more consistently - do not set in/outFIFODepths attrs to 0 after setting - do not set in/outFIFODepths attrs to max() in InsertFIFO - special handling of first/last FIFO depths for characterization --- src/finn/builder/build_dataflow_steps.py | 8 +- .../fpgadataflow/derive_characteristic.py | 13 +- .../fpgadataflow/insert_fifo.py | 161 +++++++++--------- .../fpgadataflow/set_fifo_depths.py | 55 +++++- 4 files changed, 151 insertions(+), 86 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 956b4fd3be..ce0ffb4771 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -527,7 +527,9 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): model = model.transform(DeriveFIFOSizes()) model = model.transform( InsertFIFO( - vivado_ram_style=cfg.large_fifo_mem_style, max_qsrl_depth=256 + vivado_ram_style=cfg.large_fifo_mem_style, + max_qsrl_depth=256, + create_shallow_fifos=True, ) ) model = model.transform(GiveUniqueNodeNames()) @@ -549,6 +551,8 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): force_python_sim=force_python_sim, ) ) + # InsertAndSetFIFODepths internally removes any shallow FIFOs + # so no need to call RemoveShallowFIFOs here else: assert "Unsupported auto_fifo_strategy: " + cfg.auto_fifo_strategy else: @@ -575,6 +579,8 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: 
DataflowBuildConfig): "resType", "mem_mode", "runtime_writeable_weights", + "inFIFODepths", + "outFIFODepths", ] extract_model_config_to_json( model, cfg.output_dir + "/final_hw_config.json", hw_attrs diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index f783f7ae71..67eb96995e 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -134,8 +134,9 @@ class DeriveFIFOSizes(NodeLocalTransformation): NodeLocalTransformation for more details. """ - def __init__(self, num_workers=None): + def __init__(self, num_workers=None, io_fifo_depth=32): super().__init__(num_workers=num_workers) + self.io_fifo_depth = io_fifo_depth def applyNodeLocal(self, node): op_type = node.op_type @@ -161,7 +162,7 @@ def applyNodeLocal(self, node): if cons_node is None: # could be final node, will be overridden if so # need an entry in the list anyway - out_fifo_depths.append(2) + out_fifo_depths.append(self.io_fifo_depth) continue cons = registry.getCustomOp(cons_node) cons_chrc = cons.get_nodeattr("io_chrc_in")[0] @@ -182,6 +183,14 @@ def applyNodeLocal(self, node): # for each tensor prod.set_nodeattr("outFIFODepths", out_fifo_depths) + # finally, check node inputs to ensure FIFOs are added to + # any top-level inputs (at least self.io_fifo_depth deep) + in_fifo_depths = prod.get_nodeattr("inFIFODepths") + for (i, input_name) in enumerate(node.input): + if input_name in [x.name for x in model.graph.input]: + in_fifo_depths[i] = max(self.io_fifo_depth, in_fifo_depths[i]) + prod.set_nodeattr("inFIFODepths", in_fifo_depths) + except KeyError: # exception if op_type is not supported raise Exception( diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 0546643d12..50da9cdf16 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -177,14 +177,9 @@ def apply(self, model): for idx, inp in enumerate(consumer.input): if inp == output_name: consumer.input[idx] = fifo_output_tensor.name - # ensure created FIFO depth is reflected on both sides - odepths = n0.get_nodeattr("outFIFODepths") - odepths[idx_out] = fifo_depth - n0.set_nodeattr("outFIFODepths", odepths) - idepths = n1.get_nodeattr("inFIFODepths") - idepths[idx_inp] = fifo_depth - n1.set_nodeattr("inFIFODepths", idepths) - + # removed setting of node attributes based on created + # FIFO sizes here, better to preserve original attrs + # as they are. 
graph_modified = True if graph_modified is False: @@ -204,41 +199,48 @@ def apply(self, model): dtype = n0.get_input_datatype(inp_ind) fifo_depth = n0.get_nodeattr("inFIFODepths")[inp_ind] - if fifo_depth <= 2: - warnings.warn("Overriding input FIFO depth to 32") - fifo_depth = 32 - - # create fifo node - fifo_output_tensor = oh.make_tensor_value_info( - model.make_new_valueinfo_name(), - TensorProto.FLOAT, - n0.get_normal_input_shape(), - ) - graph.value_info.append(fifo_output_tensor) - model.set_tensor_datatype(fifo_output_tensor.name, dtype) - - if self.max_qsrl_depth is None or fifo_depth <= self.max_qsrl_depth: - impl_style = "rtl" + if fifo_depth > 2 or self.create_shallow_fifos: + # create fifo node + fifo_output_tensor = oh.make_tensor_value_info( + model.make_new_valueinfo_name(), + TensorProto.FLOAT, + n0.get_normal_input_shape(), + ) + graph.value_info.append(fifo_output_tensor) + model.set_tensor_datatype(fifo_output_tensor.name, dtype) + + if ( + self.max_qsrl_depth is None + or fifo_depth <= self.max_qsrl_depth + ): + impl_style = "rtl" + else: + impl_style = "vivado" + + fifo_node = oh.make_node( + "StreamingFIFO", + [n_input], + [fifo_output_tensor.name], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + depth=fifo_depth, + folded_shape=fld_shape, + dataType=str(dtype.name), + impl_style=impl_style, + ram_style=self.vivado_ram_style, + ) + # insert fifo + graph.node.insert(0, fifo_node) + + # set fifo output tensor as new input tensor of second node + first_node.input[inp_ind] = fifo_output_tensor.name else: - impl_style = "vivado" - - fifo_node = oh.make_node( - "StreamingFIFO", - [n_input], - [fifo_output_tensor.name], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - depth=fifo_depth, - folded_shape=fld_shape, - dataType=str(dtype.name), - impl_style=impl_style, - ram_style=self.vivado_ram_style, - ) - # insert fifo - graph.node.insert(0, fifo_node) - - # set fifo output tensor as new input tensor of second node - first_node.input[inp_ind] = fifo_output_tensor.name + warnings.warn( + """Input FIFO for %s has depth %d and won't + be created. This may cause RTL simulation issues. 
+ """ + % (graph_in_name, fifo_depth) + ) # insert FIFO as last node, except when last node is DMA graph_out_names = [x.name for x in model.graph.output] @@ -259,40 +261,47 @@ def apply(self, model): dtype = n0.get_output_datatype(out_ind) fifo_depth = n0.get_nodeattr("outFIFODepths")[out_ind] - if fifo_depth <= 2: - warnings.warn("Overriding output FIFO depth to 32") - fifo_depth = 32 - - # create fifo node - fifo_input_tensor = oh.make_tensor_value_info( - model.make_new_valueinfo_name(), - TensorProto.FLOAT, - n0.get_normal_output_shape(), - ) - graph.value_info.append(fifo_input_tensor) - model.set_tensor_datatype(fifo_input_tensor.name, dtype) - - if self.max_qsrl_depth is None or fifo_depth <= self.max_qsrl_depth: - impl_style = "rtl" + if fifo_depth > 2 or self.create_shallow_fifos: + # create fifo node + fifo_input_tensor = oh.make_tensor_value_info( + model.make_new_valueinfo_name(), + TensorProto.FLOAT, + n0.get_normal_output_shape(), + ) + graph.value_info.append(fifo_input_tensor) + model.set_tensor_datatype(fifo_input_tensor.name, dtype) + + if ( + self.max_qsrl_depth is None + or fifo_depth <= self.max_qsrl_depth + ): + impl_style = "rtl" + else: + impl_style = "vivado" + + fifo_node = oh.make_node( + "StreamingFIFO", + [fifo_input_tensor.name], + [graph_out_name], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + depth=fifo_depth, + folded_shape=fld_shape, + dataType=str(dtype.name), + impl_style=impl_style, + ram_style=self.vivado_ram_style, + ) + # insert fifo + graph.node.append(fifo_node) + + # set fifo output tensor as new input tensor of second node + final_node.output[0] = fifo_input_tensor.name else: - impl_style = "vivado" - - fifo_node = oh.make_node( - "StreamingFIFO", - [fifo_input_tensor.name], - [graph_out_name], - domain="finn.custom_op.fpgadataflow", - backend="fpgadataflow", - depth=fifo_depth, - folded_shape=fld_shape, - dataType=str(dtype.name), - impl_style=impl_style, - ram_style=self.vivado_ram_style, - ) - # insert fifo - graph.node.append(fifo_node) - - # set fifo output tensor as new input tensor of second node - final_node.output[0] = fifo_input_tensor.name + warnings.warn( + """Output FIFO for %s has depth %d and won't + be created. This may cause RTL simulation issues. 
+ """ + % (graph_out_name, fifo_depth) + ) return (model, graph_modified) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 8f766bcdb1..9282d399f8 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -224,7 +224,7 @@ class InsertAndSetFIFODepths(Transformation): - run through rtlsim with stream of multiple random input images (to fill pipeline) - keep track of observed maximum occupancy for each FIFO during rtlsim - when sim finished, update each FIFO depth to maximum observed occupancy - and set inFIFODepths/outFIFODepths attrs to 0 on relevant nodes + and set inFIFODepths/outFIFODepths attrs to that depth as well """ @@ -289,7 +289,7 @@ def apply(self, model): # insert stream infrastructure (DWC/FIFO) model = model.transform(InsertDWC()) - model = model.transform(InsertFIFO()) + model = model.transform(InsertFIFO(create_shallow_fifos=True)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) @@ -416,11 +416,7 @@ def apply(self, model): reset_implementation(node_inst) del fifos[node.name] else: - inst = getCustomOp(node) - ifd = inst.get_nodeattr("inFIFODepths") - ofd = inst.get_nodeattr("outFIFODepths") - inst.set_nodeattr("inFIFODepths", [0] * len(ifd)) - inst.set_nodeattr("outFIFODepths", [0] * len(ofd)) + # (removed setting of node FIFO size attributes to 0 here) # for every extw node we changed from external to decoupled, # change back and reset implementation if node.op_type in extw_optypes: @@ -442,4 +438,49 @@ def apply(self, model): # remove shallow FIFOs model = model.transform(RemoveShallowFIFOs()) + # reflect final values in attributes + for node in model.graph.node: + if node.op_type != "StreamingFIFO": + node_inst = getCustomOp(node) + fifodepth_in = [] + for node_inp in node.input: + prod = model.find_producer(node_inp) + if prod is None: + # no producer for this input + if node_inp in [x.name for x in model.graph.input]: + # top-level input with no FIFO + fifodepth_in.append(0) + else: + # FIFO depth attr applies only to dynamic attributes + pass + else: + # there is a producer for this input + if prod.op_type == "StreamingFIFO": + prod_inst = getCustomOp(prod) + fifodepth_in.append(prod_inst.get_nodeattr("depth")) + else: + # explicitly no FIFO on this dynamic input + fifodepth_in.append(0) + fifodepth_out = [] + for node_out in node.output: + cons = model.find_consumer(node_out) + if cons is None: + # no consumer for this output + if node_out in [x.name for x in model.graph.output]: + # top-level output with no FIFO + fifodepth_out.append(0) + else: + # FIFO depth attr applies only to dynamic attributes + pass + else: + # there is a consumer for this input + if cons.op_type == "StreamingFIFO": + cons_inst = getCustomOp(cons) + fifodepth_out.append(cons_inst.get_nodeattr("depth")) + else: + # explicitly no FIFO on this dynamic output + fifodepth_out.append(0) + node_inst.set_nodeattr("inFIFODepths", fifodepth_in) + node_inst.set_nodeattr("outFIFODepths", fifodepth_out) + return (model, False) From 7f4b20f62a964cfca9d04fca7ab08486b6da3998 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 27 Jan 2023 17:54:08 +0100 Subject: [PATCH 327/628] [SWG] Adjust resource estimates, set_folding --- finn-rtllib/swg/swg_template_parallel.sv | 68 +++--- src/finn/builder/build_dataflow_steps.py | 1 + .../convolutioninputgenerator_rtl.py | 207 ++++++++++-------- 
.../fpgadataflow/set_folding.py | 42 +++- ...est_fpgadataflow_convinputgenerator_rtl.py | 8 +- 5 files changed, 192 insertions(+), 134 deletions(-) diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv index 432c374764..767f9c6f85 100644 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -192,7 +192,7 @@ output [WIDTH-1:0] shift_out; reg [WIDTH-1:0] out_reg; assign shift_out = out_reg; -integer addr_w, addr_r; //TODO: minimize width + simplify +integer addr_w, addr_r; $RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; @@ -221,9 +221,9 @@ endmodule : $TOP_MODULE_NAME$_ram_buffer module $TOP_MODULE_NAME$_wb #( - parameter IN_WIDTH = 1, //bit-width*C*MMV_in - parameter OUT_ELEM_WIDTH = 1, //bit-width*C - parameter OUT_WIDTH = 1, //bit-width*C*MMV_out + parameter IN_WIDTH = 1, // bit-width*C*MMV_in + parameter OUT_ELEM_WIDTH = 1, // bit-width*C + parameter OUT_WIDTH = 1, // bit-width*C*MMV_out parameter BUFFER_ELEM_TOTAL = 1 ) ( @@ -243,13 +243,12 @@ $GENERATE_REG_FIFOS$ $GENERATE_BRAM_FIFOS$ -//Fixed interconnect between linear buffers +// fixed interconnect between linear buffers $GENERATE_BUFFER_CONNECTION$ -//Fixed REG FIFO <-> output mapping +// fixed REG FIFO -> output mapping $GENERATE_OUTPUT_MAPPING$ - endmodule : $TOP_MODULE_NAME$_wb module $TOP_MODULE_NAME$_impl #( @@ -279,7 +278,7 @@ module $TOP_MODULE_NAME$_impl #( localparam int unsigned BUF_OUT_ELEM_WIDTH = BIT_WIDTH * SIMD; localparam int unsigned BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; - //main buffer instantiation + // main buffer instantiation uwire [BUF_IN_WIDTH -1:0] window_buffer_in; uwire [BUF_OUT_WIDTH-1:0] window_buffer_out; uwire window_buffer_shift_enable; @@ -299,7 +298,7 @@ module $TOP_MODULE_NAME$_impl #( .data_out(window_buffer_out) ); - //controller instantiation + // controller instantiation uwire advance_controller; uwire signed [INCR_BITWIDTH-1:0] addr_incr; uwire [INCR_BITWIDTH-1:0] tail_incr; @@ -311,27 +310,22 @@ module $TOP_MODULE_NAME$_impl #( .tail_incr(tail_incr) ); - // Counters/address registers - // Add a sign bit even to (most) unsigned counters and Window_buffer_read_addr_reg, - // so we can use automatic sign extension and simplify calculations w/ signed increment. - // Alternatively, we could manually sign-extend and shave off a bit here or there. + // counters/address registers logic signed [$clog2(LAST_READ_ELEM+1)+1-1:0] Newest_buffered_elem = -1; logic [$clog2(LAST_READ_ELEM+1)+1-1:0] Current_elem = FIRST_WRITE_ELEM; logic [$clog2(LAST_READ_ELEM+1)+1-1:0] First_elem_next_window = 0; - // Control signals/registers - logic Writing_done = 0; - logic write_done = 0; - - uwire write_ok = write_cmd && (out_V_V_TREADY || write_done); - uwire write_blocked = write_cmd && !out_V_V_TREADY && !write_done; - - uwire write_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !Writing_done;; + // control registers/signals + logic Writing_done = 0; + logic Write_done = 0; + uwire write_ok = write_cmd && (out_V_V_TREADY || Write_done); + uwire write_blocked = write_cmd && !out_V_V_TREADY && !Write_done; + uwire write_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !Writing_done;; uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; - uwire read_cmd = + uwire read_cmd = !reading_done && ( // if there is still an input element left to read - Writing_done || ( // if fetching is done (e.g. for skipped rows at FM end due to stride) + Writing_done || ( // if writing is done (e.g. 
for skipped rows at FM end due to stride) $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(First_elem_next_window) && $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(Current_elem) ) // (over-)write to buffer if oldest buffered element will no longer be needed @@ -339,27 +333,27 @@ module $TOP_MODULE_NAME$_impl #( uwire read_ok = read_cmd && in0_V_V_TVALID && !write_blocked; // includes waiting on W if W-only cycle: wait only on W no R/W to wait for - uwire advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); + uwire advance = read_ok || (!read_cmd && write_ok) || (!read_cmd && !write_cmd); - //assign buffer control + // assign buffer control assign window_buffer_shift_enable = advance; assign advance_controller = write_ok; - //assign I/O ports + // assign I/O ports assign window_buffer_in = in0_V_V_TDATA; assign out_V_V_TDATA = window_buffer_out; assign in0_V_V_TREADY = ap_rst_n && read_ok; //only asserted if data is available and we can store it (allowed) - assign out_V_V_TVALID = ap_rst_n && write_cmd && !write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) + assign out_V_V_TVALID = ap_rst_n && write_cmd && !Write_done; //only asserted if we have data available and it has not been read yet (don't wait for READY from sink) - //write done logic + // write done logic always_ff @(posedge ap_clk) begin if (advance) begin - write_done <= 1'b0; //reset flag - end else if (write_ok) // successful W in this cycle, but R still outstanding - write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! + Write_done <= 1'b0; //reset flag + end else if (write_ok) //successful W in this cycle, but R still outstanding + Write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
end - //main process for advancing counters + // main process for advancing counters always_ff @(posedge ap_clk) begin if(!ap_rst_n) begin Newest_buffered_elem <= -1; @@ -371,10 +365,10 @@ module $TOP_MODULE_NAME$_impl #( if (read_ok) begin Newest_buffered_elem <= Newest_buffered_elem+1; - //check if this is the last read cycle (reading_done will be true afterwards) + // check if this is the last read cycle (reading_done will be true afterwards) if ((Newest_buffered_elem == LAST_READ_ELEM-1) && Writing_done) begin - //start processing of next FM if writing is done already (possible due to unused input elements at the tail end) - //todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) + // start processing of next FM if writing is done already (possible due to unused input elements at the tail end) + // todo: allow for read overlapping between feature maps (i.e., reading first elements from next FM while still writing last window of current FM) Newest_buffered_elem <= -1; Current_elem <= FIRST_WRITE_ELEM; First_elem_next_window <= 0; @@ -385,12 +379,12 @@ module $TOP_MODULE_NAME$_impl #( if (write_ok) begin First_elem_next_window <= First_elem_next_window + tail_incr; - //check if this is the last write cycle (Writing_done will be true afterwards) + // check if this is the last write cycle (Writing_done will be true afterwards) if (Current_elem == LAST_WRITE_ELEM) begin Writing_done <= 1; if (reading_done || (read_ok && (Newest_buffered_elem == LAST_READ_ELEM - 1))) begin - //start processing of next FM if reading is done already, or completes in the same cycle + // start processing of next FM if reading is done already, or completes in the same cycle Newest_buffered_elem <= -1; Current_elem <= FIRST_WRITE_ELEM; First_elem_next_window <= 0; diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 956b4fd3be..9a6966ac9b 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -409,6 +409,7 @@ def step_target_fps_parallelization(model: ModelWrapper, cfg: DataflowBuildConfi hw_attrs = [ "PE", "SIMD", + "parallel_window", "ram_style", "resType", "mem_mode", diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 1ae4022b79..eae9ffd6bd 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -81,7 +81,7 @@ def get_nodeattr_types(self): "outputDataType": ("s", True, ""), "depthwise": ("i", False, 0, {0, 1}), # Enable reprogrammable implementation to change FM dimensions, - # stride, or dilation during runtime + # stride, or dilation during runtime (requires parallel_window = 0) "dynamic_mode": ("i", False, 0, {0, 1}), # FPGA resource type for ConvolutionInputGenerator input buffer # auto -- let Vivado decide @@ -233,13 +233,13 @@ def get_buffer_depth(self): mmv_out = 1 channel_factor = int(ifm_ch / simd) + # compute minimal buffer length (assuming it holds 1 complete window) + buffer_min_size = ( + (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + impl_style = self.select_impl_style() if impl_style == "default": - # compute minimal buffer length (assuming it holds 1 complete window) - buffer_min_size = ( - (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 - ) * channel_factor - # add additional 
buffer space in case of stride > 1 # this minimizes cycle count as it allows an earlier pre-load of inputs buffer_depth = ( @@ -255,73 +255,89 @@ def get_buffer_depth(self): * channel_factor, ) ) - else: - buffer_depth = 0 - raise Exception("Requested impl. style not implemented") + elif impl_style == "parallel": + buffer_depth = buffer_min_size + 1 return buffer_depth def get_exp_cycles(self): - simd = self.get_nodeattr("SIMD") - ifm_ch = self.get_nodeattr("IFMChannels") - k = self.get_nodeattr("ConvKernelDim") - ifm_dim = self.get_nodeattr("IFMDim") - ofm_dim = self.get_nodeattr("OFMDim") - stride = self.get_nodeattr("Stride") - dilation = self.get_nodeattr("Dilation") - depthwise = self.get_nodeattr("depthwise") - ifm_dim_h, ifm_dim_w = ifm_dim - ofm_dim_h, ofm_dim_w = ofm_dim - k_h, k_w = k - stride_h, stride_w = stride - dilation_h, dilation_w = dilation - - channel_factor = int(ifm_ch / simd) + impl_style = self.select_impl_style() - if ifm_dim_h == 1 or ifm_dim_w == 1: - # 1D case - ( - ifm_ch, - [ifm_dim_h, ifm_dim_w], - [ofm_dim_h, ofm_dim_w], - [k_h, k_w], - [stride_h, stride_w], - [dilation_h, dilation_w], - ) = self.get_1d_conv_attrs_normalized() - - if depthwise: - exp_cycles = ( - +ofm_dim_w * k_w * channel_factor - + channel_factor * (k_w - 1) * (stride_w - 1) - - (k_w - 1) - + 2 - ) + if impl_style == "parallel": + exp_cycles = self.get_number_input_values() + 2 + elif impl_style == "default": + simd = self.get_nodeattr("SIMD") + ifm_ch = self.get_nodeattr("IFMChannels") + k = self.get_nodeattr("ConvKernelDim") + ifm_dim = self.get_nodeattr("IFMDim") + ofm_dim = self.get_nodeattr("OFMDim") + stride = self.get_nodeattr("Stride") + dilation = self.get_nodeattr("Dilation") + depthwise = self.get_nodeattr("depthwise") + ifm_dim_h, ifm_dim_w = ifm_dim + ofm_dim_h, ofm_dim_w = ofm_dim + k_h, k_w = k + stride_h, stride_w = stride + dilation_h, dilation_w = dilation + + channel_factor = int(ifm_ch / simd) + if ifm_dim_h == 1 or ifm_dim_w == 1: + # 1D case + ( + ifm_ch, + [ifm_dim_h, ifm_dim_w], + [ofm_dim_h, ofm_dim_w], + [k_h, k_w], + [stride_h, stride_w], + [dilation_h, dilation_w], + ) = self.get_1d_conv_attrs_normalized() + + if depthwise: + exp_cycles = ( + +ofm_dim_w * k_w * channel_factor + + channel_factor * (k_w - 1) * (stride_w - 1) + - (k_w - 1) + + 2 + ) + else: + exp_cycles = ofm_dim_w * k_w * channel_factor + 2 else: - exp_cycles = ofm_dim_w * k_w * channel_factor + 2 - else: - # 2D case - buffer_min_size = ( - (k_h - 1) * dilation_h * ifm_dim_w + (k_w - 1) * dilation_w + 1 - ) * channel_factor - cycles_write_block = ofm_dim_w * k_w * k_h * channel_factor - cycles_read_block = stride_w * ifm_dim_w * channel_factor - max_cycles = max(cycles_write_block, cycles_read_block) - if depthwise: - max_cycles += ofm_dim_w * (stride_w - 1) * (channel_factor - 1) - exp_cycles = buffer_min_size + ofm_dim_h * max_cycles # initial buffering - if depthwise: - exp_cycles += (stride_h - 1) * ifm_dim_w * channel_factor + # 2D case + buffer_min_size = ( + (k_h - 1) * dilation_h * ifm_dim_w + (k_w - 1) * dilation_w + 1 + ) * channel_factor + cycles_write_block = ofm_dim_w * k_w * k_h * channel_factor + cycles_read_block = stride_w * ifm_dim_w * channel_factor + max_cycles = max(cycles_write_block, cycles_read_block) + if depthwise: + max_cycles += ofm_dim_w * (stride_w - 1) * (channel_factor - 1) + exp_cycles = buffer_min_size + ofm_dim_h * max_cycles + if depthwise: + exp_cycles += (stride_h - 1) * ifm_dim_w * channel_factor return int(exp_cycles) def bram_estimation(self): simd = 
self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") + impl_style = self.select_impl_style() + [k_h, k_w] = self.get_nodeattr("ConvKernelDim") + [ifm_dim_h, ifm_dim_w] = self.get_nodeattr("IFMDim") + [dilation_h, dilation_w] = self.get_nodeattr("Dilation") - # NOTE: Actual BRAM usage might be lower in some cases. - # This does not account for the exact Vivado behavior yet. - buffer_width = simd * self.get_input_datatype().bitwidth() - buffer_depth = self.get_buffer_depth() if ram_style == "block" or ram_style == "auto": + buffer_width = simd * self.get_input_datatype().bitwidth() + if impl_style == "default": + buffer_depth = self.get_buffer_depth() + buffer_count = 1 + elif impl_style == "parallel": + if ifm_dim_h == 1 or ifm_dim_w == 1: + return 0 # 1D case (no line buffers needed) + kernel_width = (k_w - 1) * dilation_w + 1 + buffer_depth = (ifm_dim_w - kernel_width) + ifm_dim_w * (dilation_h - 1) + buffer_count = k_h - 1 + + # NOTE: Actual BRAM usage might be lower in some cases + # due to imperfect modeling of Vivado behavior if buffer_depth <= 512: ram_width = 36 elif buffer_depth <= 1024: @@ -356,7 +372,9 @@ def bram_estimation(self): remainder_cascade_width = math.ceil(buffer_width / remainder_width) cascade_savings = ram_cascade_width - remainder_cascade_width - return int(ram_cascade_depth * ram_cascade_width - cascade_savings) + return int( + (ram_cascade_depth * ram_cascade_width - cascade_savings) * buffer_count + ) else: return 0 @@ -374,15 +392,28 @@ def lut_estimation(self): def uram_estimation(self): simd = self.get_nodeattr("SIMD") ram_style = self.get_nodeattr("ram_style") - buffer_width = simd * self.get_input_datatype().bitwidth() - buffer_depth = self.get_buffer_depth() + impl_style = self.select_impl_style() + [k_h, k_w] = self.get_nodeattr("ConvKernelDim") + [ifm_dim_h, ifm_dim_w] = self.get_nodeattr("IFMDim") + [dilation_h, dilation_w] = self.get_nodeattr("Dilation") if ram_style == "ultra": + buffer_width = simd * self.get_input_datatype().bitwidth() + if impl_style == "default": + buffer_depth = self.get_buffer_depth() + buffer_count = 1 + elif impl_style == "parallel": + if ifm_dim_h == 1 or ifm_dim_w == 1: + return 0 # 1D case (no line buffers needed) + kernel_width = (k_w - 1) * dilation_w + 1 + buffer_depth = (ifm_dim_w - kernel_width) + ifm_dim_w * (dilation_h - 1) + buffer_count = k_h - 1 + ram_depth = 4096 ram_width = 72 ram_cascade_depth = math.ceil(buffer_depth / ram_depth) ram_cascade_width = math.ceil(buffer_width / ram_width) - return int(ram_cascade_depth * ram_cascade_width) + return int(ram_cascade_depth * ram_cascade_width * buffer_count) else: return 0 @@ -641,8 +672,7 @@ def prepare_codegen_default(self): def prepare_codegen_parallel(self): # Parallel implementation style for MMV_out = K: - # mix of shift-registers (for parallel read) and line buffers (BRAM or LUTRAM) - # compute a static schedule by analyzing access pattern (from im2col function) + # mix of shift-registers (for parallel read) and line buffers (BRAM/URAM/LUT) template_path = ( os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" ) @@ -674,8 +704,7 @@ def prepare_codegen_parallel(self): (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 ) * channel_factor - # buffer_actual_size = self.get_buffer_depth() # TODO: Move to this method - buffer_actual_size = buffer_min_size + 1 + buffer_actual_size = self.get_buffer_depth() code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] # compute some intermediate values, e.g., kernel "width" = k_w 
incl. dilation @@ -685,34 +714,19 @@ def prepare_codegen_parallel(self): skip_columns = w % (kernel_width + (out_dim_w - 1) * stride_w) skip_rows = h % (kernel_height + (out_dim_h - 1) * stride_h) - # compute address increment values for 5-loop nest #TODO: simplify - addr_incr_end_simd = 1 - addr_incr_end_window_elem = (dilation_w - 1) * channel_factor + 1 - addr_incr_end_window_row = ( - ((w - kernel_width) * channel_factor) # remaining line - + ((dilation_h - 1) * w * channel_factor) # skip lines - + 1 # wrap-around of minimally sized buffer - ) - addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 - addr_incr_end_row = ( - -buffer_min_size - + ((skip_columns + kernel_width) * channel_factor) # remaining line - + ((stride_h - 1) * w * channel_factor) # skip lines - + 1 - ) - # set certain threshold indices to detect when reading/writing finishes code_gen_dict["$LAST_READ_ELEM$"] = [str(h * w * channel_factor - 1)] code_gen_dict["$LAST_WRITE_ELEM$"] = [ str(((h - skip_rows - 1) * w + (w - skip_columns)) * channel_factor - 1) ] - # default controller loop structure: # iterations (counters) map directly + # re-use default controller loop structure + code_gen_dict["$IS_DEPTHWISE$"] = ["0"] loop_h_iterations = out_dim_h - loop_w_iterations = out_dim_w # -> innermost loop - loop_kh_iterations = 1 # k_h - loop_kw_iterations = 1 # k_w - loop_simd_iterations = 1 # channel_factor + loop_w_iterations = out_dim_w # now the innermost loop + loop_kh_iterations = 1 + loop_kw_iterations = 1 + loop_simd_iterations = 1 if loop_w_iterations == 1: code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_H"] @@ -721,12 +735,19 @@ def prepare_codegen_parallel(self): code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_W"] loop_w_iterations -= 1 # -1 because state is initial state + # set head and tail address increment values + addr_incr_end_window = -buffer_min_size + stride_w * channel_factor + 1 + addr_incr_end_row = ( + -buffer_min_size + + ((skip_columns + kernel_width) * channel_factor) # remaining line + + ((stride_h - 1) * w * channel_factor) # skip lines + + 1 + ) + tail_incr_w = addr_incr_end_window + buffer_min_size - 1 tail_incr_h = addr_incr_end_row + buffer_min_size - 1 tail_incr_last_window = buffer_min_size - 1 - code_gen_dict["$IS_DEPTHWISE$"] = ["0"] - # overwrite new loop bounds: addr_incr_end_simd = 1 addr_incr_end_window_elem = 1 addr_incr_end_window_row = 1 @@ -970,6 +991,8 @@ def generate_hdl(self): template_path, code_gen_dict = self.prepare_codegen_default() elif impl_style == "parallel": template_path, code_gen_dict = self.prepare_codegen_parallel() + if self.get_nodeattr("dynamic_mode"): + raise Exception("Dynamic mode is not compatible with parallel_window") else: raise Exception("Requested impl. style not implemented") @@ -1109,6 +1132,8 @@ def get_dynamic_config(self, ifm_dim=None, stride=None, dilation=None): apply (e.g. component must be synthesized for largest buffer size).""" # NOTE: For better driver integration, this functionality could be packaged # as a standalone function in the future + if self.select_impl_style() != "default": + raise Exception("Impl. 
style is incompatible with dynamic mode") if ifm_dim is None: ifm_dim = self.get_nodeattr("IFMDim") diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index e24e24f1f8..48e5d9f9e1 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -73,6 +73,9 @@ class SetFolding(Transformation): * the producer of the node is expected to be a ConvolutionInputGenerator with depthwise=1, whose SIMD value will be set equal to the PE value of its consumer node + * the VVAU also supports SIMD ("input window") parallelism next to + PE ("channels"), but current ConvInpGen limitations require PE to be fully + unfolded before SIMD is increased """ def __init__( @@ -103,7 +106,9 @@ def apply(self, model): "Thresholding_Batch", ] # these ops use SIMD parallelism, up to a max value of NumChannels - # ConvolutionInputGenerator has a special case when depthwise=1 + # ConvolutionInputGenerator* has a special case when depthwise=1 + # ConvolutionInputGenerator_rtl supports additional parallelism by + # setting parallel_window=1 mode after maxing out SIMD simd_ops = [ "DownSampler", "FMPadding_Batch", @@ -151,15 +156,36 @@ def apply(self, model): max_pe = node_inst.get_nodeattr("Labels") self.optimize_attribute_val(node_inst, max_pe, "PE") elif op_type in depthwise_op_exceptions: + # init/reset SIMD of VVAU + if op_type == "VectorVectorActivation": + node_inst.set_nodeattr("SIMD", 1) max_pe = node_inst.get_nodeattr("Channels") self.optimize_attribute_val(node_inst, max_pe, "PE") + # increase SIMD for VVAU once PE is exhausted + pe = node_inst.get_nodeattr("PE") + cyc = node_inst.get_exp_cycles() + if ( + op_type == "VectorVectorActivation" + and pe == max_pe + and cyc > self.target_cycles_per_frame + ): + max_simd = np.prod(node_inst.get_nodeattr("Kernel")) + self.optimize_attribute_val(node_inst, max_simd, "SIMD") # also set the folding of the upsteam DW SWU # which must be identical to this node swu_node = model.find_producer(node.input[0]) if swu_node.op_type.startswith("ConvolutionInputGenerator"): swu_node_inst = getCustomOp(swu_node) - pe = node_inst.get_nodeattr("PE") swu_node_inst.set_nodeattr("SIMD", pe) + # enable parallel_window mode of RTL SWG if needed + if swu_node.op_type == "ConvolutionInputGenerator_rtl": + if ( + op_type == "VectorVectorActivation" + and node_inst.get_nodeattr("SIMD") > 1 + ): + swu_node_inst.set_nodeattr("parallel_window", 1) + else: + swu_node_inst.set_nodeattr("parallel_window", 0) else: if op_type == "VectorVectorActivation": ksize = np.prod(node_inst.get_nodeattr("Kernel")) @@ -176,7 +202,19 @@ def apply(self, model): depthwise = node_inst.get_nodeattr("depthwise") if depthwise == 0: max_simd = node_inst.get_nodeattr("IFMChannels") + # init/reset parallel_window mode of RTL SWG + if op_type == "ConvolutionInputGenerator_rtl": + node_inst.set_nodeattr("parallel_window", 0) self.optimize_attribute_val(node_inst, max_simd, "SIMD") + # enable parallel_window mode of RTL SWG if needed + simd = node_inst.get_nodeattr("SIMD") + cyc = node_inst.get_exp_cycles() + if ( + op_type == "ConvolutionInputGenerator_rtl" + and simd == max_simd + and cyc > self.target_cycles_per_frame + ): + node_inst.set_nodeattr("parallel_window", 1) else: # depthwise SWGs are handled separately continue diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 007360a5fd..a66038ef29 
100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -140,9 +140,9 @@ def prepare_inputs(input_tensor): # input datatype @pytest.mark.parametrize("idt", [DataType["UINT4"]]) # kernel size -@pytest.mark.parametrize("k", [[2, 2], [3, 3], [1, 3]]) +@pytest.mark.parametrize("k", [[3, 3], [1, 5]]) # input dimension -@pytest.mark.parametrize("ifm_dim", [[24, 24], [15, 6], [13, 13], [1, 14]]) +@pytest.mark.parametrize("ifm_dim", [[13, 13], [1, 21]]) # input channels @pytest.mark.parametrize("ifm_ch", [6]) # Stride @@ -152,9 +152,9 @@ def prepare_inputs(input_tensor): # depthwise @pytest.mark.parametrize("dw", [0, 1]) # input channel parallelism ("SIMD") -@pytest.mark.parametrize("simd", [1, 2, 3, 6]) +@pytest.mark.parametrize("simd", [1, 3, 6]) # parallel_window enable (MMV_out = M*K) -@pytest.mark.parametrize("parallel_window", [0]) +@pytest.mark.parametrize("parallel_window", [0, 1]) # in/out MMV ("M") @pytest.mark.parametrize("m", [1]) # Flip dimensions From 1a44ee457137acb1ebd74cb1a1322b3a64920af0 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Fri, 27 Jan 2023 20:24:24 +0100 Subject: [PATCH 328/628] [Test] remove FIFO attr==0 condition in end2end_bnn_pynq --- tests/end2end/test_end2end_bnn_pynq.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 79cfafa22d..858363d6d3 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -564,12 +564,6 @@ def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, kind): model = model.transform(InsertAndSetFIFODepths(test_fpga_part, target_clk_ns)) fifo_layers = model.get_nodes_by_op_type("StreamingFIFO") assert len(fifo_layers) > 0 - hls_layers = model.get_finn_nodes() - for node in hls_layers: - if node.op_type != "StreamingFIFO": - op_inst = getCustomOp(node) - assert op_inst.get_nodeattr("inFIFODepths") == [0] - assert op_inst.get_nodeattr("outFIFODepths") == [0] model.save( get_checkpoint_name( topology, wbits, abits, QONNX_export, "fifodepth_" + kind From ac1cb729fab0aaef398fd5a02f4549976bb414f6 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Sun, 29 Jan 2023 17:00:04 +0100 Subject: [PATCH 329/628] [SWG] Add documentation --- docs/finn/img/rtl_swg_impl_styles.png | Bin 0 -> 103635 bytes docs/finn/internals.rst | 77 ++++++++++++++++++ .../convolutioninputgenerator_rtl.py | 34 ++++++-- .../fpgadataflow/convert_to_hls_layers.py | 14 +--- 4 files changed, 104 insertions(+), 21 deletions(-) create mode 100644 docs/finn/img/rtl_swg_impl_styles.png diff --git a/docs/finn/img/rtl_swg_impl_styles.png b/docs/finn/img/rtl_swg_impl_styles.png new file mode 100644 index 0000000000000000000000000000000000000000..265ff9b915e79f8e93ca4f987bb49e57f8a2bd3e GIT binary patch literal 103635 zcmce;cUV(f+a?}Fr8iMgkS+oupfu^dDN+QnP*r;Gy((2vs(?rdMLX|C`R*bPYU4i*mF27w}>Ie}02ek^Lrq2H%1 zEW8xF)fvlB3{+y=?CaEWYBnO8XVwMY;E_p~2{E)E%o5Q%FP>qKg1j-|$P>2f&lR%k z&l9qf@aRt$5qENO+F>Ersj~c_f>Qy~n}~(mq7tzstsB|fHNG215^MXGVy){ZoYI>d zg-(Ey<)!fJ!#do(R6Z)Hbww|G9_bIJUmbV8oh7mO-Z!hpxWrY?WXX&yZ3o&_cCN_7 zG}j|1mt}vr;pXCI@n&yF&`6z=Nz(^67xYj;)EiOr3FQ;(g6hoc6*p|Y_iPTbTYX*u0zvfah`wo@qJ&2$GJrDtL z0VFVTtN~wZXxkUfR_tD}viZxVbJgq%H?`UqHi!pOtO0pL_x2n4yY3wL{NswximA9w z^Bo2LRx7hCdZ&0O%f<})GKgyZNH7ep>ng=capRi%rXL&#GV zUT`EsXk9Yf@~A(|)(4SAJZ2Ibpd>Wzoaam=)(tYPQ))ktER~Ruc;9boxj3Ric;nAz zZ4V?|o3MWjoRZTV)4Dayx>aBA+1_);g^}y8!n=tt#{SS%=>>v*i6=o)x!3 
zRGw;8qdt+>yqE#Kjb;_;w{~wHuO#o%*$anbMaBoae0N?`fRqbN9xqcfQ&K5ChLLOW zY?bnlUPbJ*z2e$7Y&pGtdi2Q`$)nwF72BghC`%;yk-{MZh%^S84v66|X%MLD6|fEy zZohPo2NYlU08CM{hC<%q=9v`$q_wLxXv_9{NUiPcG!qFCSBH_3b2K%4@ht->5&m-BW)p*cSd)ugw zRkg{YgOa0^{-T$-`K2sf<(>! zjQpt=um@@aH%fn$(SUBfKUxB`o!}DkG0-1S^=P{)mUXOMjb{>QuT1&CF%ORpD6JrO zrY9kLpCPr0JKWX<6sfttT$Jy?t918Gb& zY9Q5-(CY;LEf4Z8k8B-PR@1JFCJ)J}{a{m1j<{@7Y_|OlSO)Ul5BG~QF8PfO*8qC8 z#N!~N6lZ-Gqr zEcq_ahDiO)mha@V_mg#K1-ate4Cds?Pv$l)AAjQkojxAVWTRJFj&VBX0&quF?OuhM z?u;5jPJ?vMwj~xEEs>|AM+M8VAb&dbHS@_- zd$dDB@tuoeWNipKRS;p~fvldM0GjXcqX&M;pKVN}5C=qP~f1bNFC_`yGfmnnrSzB};%jL)}={E%%c?h%(`4+N9 z7K)(AF8*V}00q7@zx$F`{3SpmW!BqPyPB0>pDI4tpYS_5yB>_n8pW#JebKki^|V4f6Np8s7R50li%+1-tOu&eMI zB5AtC0W7))Z?1x-SY;x_JX&Qm+L4WumFFJShgQ4_J+SkEX2)K_H6HT%fZgYZx>uB! z+6mQUb-@gP(_eMwCyxw-5~}uWEcyI=F;&F1MjUVWG`Iurhecw;dw;dTDp|^4A0SdL zK_zF!AN{E8b?)khAO*g;)VmH_`WZ zM^8h})_6~{o6|O-2V7i*H7QvI^w=8eI~;x>yB%Iv7A|%;&mjaJp$Z7a0l(@270vxm z)sY3zg`-_~fVU+Ay7PcxupBJS`P{%~sGJ{)Gt`o=iJD(M@9@jmS0(%?O*BTGHAZfo zxp(Cp##K0^>GS00;3CmmkTMfse~AA>KqF5Kvw(c%PcrB(OHm0vD0{r7R(|#V!AbL% z`n|?vgjMzaU$9fBIEhJ#WFxv+p4ug`{})+i ztaQlF&k(l~Zm{r6s(3-3>8vTfs);btYy6r&)OK}7|zbQ&Cq3h${Ys}*w zRax%3_>lKaF3b^MZK8ZhMEqc0I%vNBqQBdKcQ6z!7wFIz@l)<0unjCZj};Ta<)d5RUX$>nwOdL$4N9Ii5R|{lnq}wVJIqG2O6>!S(YvQf z*z+cd_JjfYto@m8i8WHpBdpN?=3KZKVn6BISdh4FNZWMMAQnlzcxa(1U)4uidm92K zdUX6MwdxNJ+iq_Uux8sPu_Y12m_O!Q^v{x2YziPF>lCEaVzx_UUDBj~idbxsq{F7v zo#w@8#0LGJKQavgE^vG0>lHvEWS_R>Y=5VZE$x0M>M1B zOoNYly45+4QkDtgf*10|+H2}X?Mibii6lLZOH&1$Gk2>vtDC`qkgwYC_cB~KWTM@f$oo+~FQ1m>fJ4iL@Y0Bd?TPmeUX zOS#+7U{>rI1bMQH-Npu!@@}5f<3K=2jhz|lh@_;&oMt2}W#FUtk;2ldNt6_EGC)PU z^YR?ls|(Xc2NJqK-mSZl`+6BZ9KJa5xH&eXgTB3|n8S|Bs<`l`ux+pj_FZQV9YZj5 z-*2>=>EbTaG4z9$tMaPaiW4eg+HY!MdLmZyP_DvK@*Q#&?|w}^nMw&BT@0|_KP-W- ze4uY$7Kcq2(2*)LO*~ZzrBF~nfZ58tAGlc-zgNeqq;y-*S#5RZ9Iam?CC|W?LU&=# z{IF5D#B$XPf4xS8{@N#)R>P<@5wpxOAV2+mP8mEfQa&HFGt(*cFrTxBk+gl_7tuD= zS{aO)DRnPJF4|N-!Ipfo-s|pMU_<_pDU9NRZXJ%+_*5dg3lXU9FlGK+?&${xc%_Q9 zQ{r-Np5OU8@@9pAzepIOxGVGNk!Je>6KdFQxd{NRt>@^Y} z@EtM!B?`)iJDldUVw@~rQ;>7_(t4FnmU++A2D982e~xBzCk(xutI8S?U%Cw)!_3=U z^gbII0kjAD@)~{J=wWn3L+=`);UN^ZqDyBmqzb)>#?#=|NVqlo;3K^?Mb-`W=dq`9 zK+*nEGlZ@RojX-?7B-#%Ph*+N)`kZ30^%UCLI~JpU+Xo4taNNhk%ofo{0+|7Z2!0Z zEgPGzs4JmF5Z36m)cc6=${Sro&Y7?o8#$cgXqYDJAyGx@U?86N?Yhs6g{GF8lB-@m z4KRH;?oL`EzZ?$UqVjvbXc%*!=n~#-aumEA?=QcW)o|tKt6h2Ffc>X(Dt#m~IlKkh z_gpajKz-BZxjwrPPS%YnbPxN$S=QSs33H>Kve;q92!`95!MOE7wQFmWt?dm@_bfD< zCyEf=#Z{zIx^~&$MEh1`i|VJhjFj=O$g07sC}I^e6!7kFf{qH80>bMhh7XUZZ07PJ z@p?wGB<~Mwg1+W$ej{_Lg=j4rK$^$tyojfQ5pcuo1Y3DnQT6}B-|)e`s{hrX z;5)f{WR?z^bb9!;07fhIes*Z!+eT5z zAvX6*^E=rc26Op4;G2q|f+Qa*=DJUu4O5d9gIT&$!ogdXF#C{jzn~<>63WQ}n58%S zG@m2h&odxe2cvm^4)t`^Bo3+5G`#h(s@NxdTW)iTrXqKiF)TvMxpXV<#rep1wlI|y zX=>C3d{BK3__~QzQ)&IMzaptGnLiKalw{>FJb3^8u*$)mDwd`sy(9e*0(c);V%ru# z0jeg`GokAYlFQ<9_$1Q9xU*xtNPrlvD@G14_=T(TsNnm{73zHEu}U?Tt|Q`*Ikp+) z+@mJZ7_sCg@kX=_zhxwu=!2jkJHu>0#lQQQ;dPkzijnEQZz$sqLFy4<6*<-~MX(Pi z20C=@@8sI(6|C8$JU`>kS>RTN>y&q@?Gh;A6-B8e<&L?8QEqlMjBT5M99U zRYFU^U?FNJHe9_T!r6%MfXOvyE|X2A4xOSW~?0YX*u7&!jt2i`@_O zz1oG4SFDd3Rch93T~_wN6E3Y#mor&#_Ey+5a-E&YYWE6j7S$?2-E0u% zd0Gssxs?QGQpWU52 z!TipDVo%=hmo_2uI;kU{jYevtVvZX1RA)DaHSEWJ|COxSXhyaR?|o&Bb*@=^PQN^_ z@HK4Hgwdcn!ePs@Lz>9uRwES{KdMd`CV_F4Y+ zOUd$EHI1DYdUxgtDwD>`RCj@~h)+b`Dnyc(i!7HtH%U)Xd6J`9{NfGbO2kvYV{RTj zIkPLOkYjV~DFIv(O*($BAj?dbXm3|BEPtSz!|SjH?bWv%{LF$Enz(nZyuU37bnBex z-LVXHV=tkwu%CQw8FLAlxRBoyuKe#D-!5f*#HD-`U$&8N^2t6Svy4!2bslO=6y>E} zvjhpzlU=Es*hN&yFXJ9JCd7p;@nyf3QftmLe&L4XsRAWW9kT?^rMwm1Gn{(`Jb0>0 zrY^ZpU39z+bLSpNJ3pu{PF@%0iu#bxFSM+C@~2q*(OChx;o;kO9c6iePm=W?M^g0b 
z(pu&(D%k=gyfpMe;b*$vA#5O8N&tByLx-&|HpV54S$lNJYg!nP`KhlX!t$8yyw*0^ zo`WxEGI9nR!7w8L8Sy^gE9}m<*7j3C-DqA`_6@7EHg1*MAboP(={Lb3f2)NT>VP-x0SE+xTDF3%c<>|zPEn~b0?;^#FC`pXD_adN3@Ft zSb(>rPn?Xl2(*hW>eRQtopN>6`W#DN2QFrMfba9sm6 zpRhy6!oULXwJTe8V@e+2k+9|9HtPnu29H5y^D~o}t2#y93!YQC+*4W8+a1dGdH3AF z5XsO8GnnvgUk)VxNJGz5$EM>15Ht-C%pkAeb-d7qQb9>mIp*ce5*(S35{BY(}6s#M_``=klT$%JtG8m$NSUaBymrB@{@RwxS_Iw*PIMuWo#=j}CRk|0!zY}Lh3BIiz zSC99ZBhj*3N$?QMM7WW_Uus}Oa$h>Hd{sTX5z1P5=-no|m)Y`q4(Y&8(?|Ub$Ma(e zUjaF*C@&Eg%Zq?9;=9j|Fk=&nrhYEgVqs!vE)b1Zwf#|^6g`B`9@k)KN=cz z66)3TIl(+trU($3A4cAi%3#Mnz}WS``7Gn9aRX1WfqC+4r?wXH=m&9c%_~|Is;Bwo z0@2Y=yR+dUm1rSD5{B33RaJ!4L>+wEKfs|m@g%dJPIHJRJt}88D0H!ng+&3sb$hVs zv@E`@5E}0}g{_+m8`h3uuD(Na*9cYToa%>j51G|&k?DH0cC8uPYkJkz995DOfP~eq zrSIf1w=sJXUMJbdJj7qZ7-c0XMl?*`XA^B1jByW%Kz84e6AW(8A=D{PT=c(7DAw?b zwdOg-EI?NV4<$Y&4HPG|j0PTjaAHwd+iuBf>GT3DQplT#ZA;`WmE5DCN`S!RnnDx4 znO{T3)w>ZbkzW1@lCJ2_>ub+AV5M+l+whCUU&5{hbG5GCONs*wTwA@vd@yG>tm%MS zd$2A1w9m-u^Hmk*N3F9y=C>2muI4)GRmWVn3U_}g`pvo=pdCEt$Y%XQ!j_A_>7?Jr zR}uEe!0^rMP9eASOkhT_8Pi)&SJ;w=4O^eO6wrxIZLA)i`A}DxJHiec`trT~mENg`6>9-JKvhxUnT!&gev&tF`I?0~k{`zN zn?+-^&0ol*zdC%YtwZNmX`@*Ldxdy*nbf*8wpT?!*VWRY#Hw8w>5f=Tt`%k8`{?&l zG)8kDv9hK7>!>&SWag!^9rA`IyPcBHq%qZU@wu!XNOLWN9HE;I>wVws9%CMXzb^ z)@j}~e|ex0A85rPw|_9|CakIRiT+4&d8?&V@~1vn4ff0U!GcsEYI{Hep z`N_6+%g%KNyh(RgwFen2wwSp(;_*%65XWo;I9ro?gbc)VWFY=Bie`U$vEu7=)+g)K zr$KkF#@o%@Y74{<)ke9B9{M)NBjlGm_Gr#>GJ*-RLp?6?{1C3^FmNM=JZBJRUI z_7i*{vhna`t*&!$FZ?~SpZ~Su2aNZ=UsL$e@G#G`8Z{QwR=%iD#_<~Y4}Nl0Qv&vF zr|IhWeZM(a!*y&45rzzP#mP|Qks*4hF=QfKK5w|wclIg+(V77Js4ti2yP}DDi*Lfq z<)YStpMYI+SDGyiRBV@rQ7>IlI@}-khr-ZuBR;=<=`mX?H};-cWqUSg!2?O(C>K`f z)ZGHkhq>Vo=!yNzKCWai;7(Cw7aJy|Lpn+AqY!j3A;dCDAtjoM74J3B?yJJC8_&jY zu?g8Q)oxzdEC}C{!%h$kuB~}7z?VN1GBOx0D^FKthM)F>;_ljY#!#{2rTuma8i-u+ zXN4L`bIOpjg@pj19d>o(H)CbjCMKcd%rbeJnJtJe4g^p9Tmj`x zLf8~9hw`cDk+o#@XzI0A*qRYhao~*Dl=MlG)%F;atMB}!ArvDr$)6sh8~W`f_%W=@ z5FoUDZ!j!qht0V?;&$e|TujrpazN=y>6S`E z@v0Q{4IH*qpQ;omsmNSVH)^^znmAInW!1A0RIg#GK}|&9!q6Mb%G&y29lm@n4i2xI zO|fl!f~NAr8Jh#7hoaeuV*p+zdI2=V$r1)V9R9^0uFL#_3j=s2Y!bUkA|axfjpaR0 z`Pf{8Fr|t0G8zOT_o(_(Lr8O6jyi{XFr(DljlK~6JMhlIs1cZsLbh~Q0bC0+>#tJU zB6_MAo3PV9y7kd*6m}a4Zfd(P|L!?Xq;<<8X}@4DU~1|K4kl3wyUOm?0udeAYJ#uM z1h`k_hKu31SkR%%;oE*=uv7ar-xjTKrIJ34A7T>TU(T|5f<2{#)0&&tkkVADGyKdlDSo zeqg*7OvA1CInNK#fHa-c_75Lgbv{hGF|oCF9<$K|&*3HtD2K+69@Md^;{N!$my~0Y zxqY4CniHR_?8rgH^?g5jDwrg&+O$>wcAM@Qt6Uqe$gR=+6yuX=OQ-7SY#Gs8SmuR2 z86ZA2$E}HTQZ%-c{nA`n_5klk6H(!9?)`5598k@1vDG=|jHNdf#xvCI+qP2x{uLtI z&SOx&{-e{zP1yIEcG`Kfu}Lx*8>|ToB-{ z23QXVEX+ZVcTwa`ZSmtkz=h$XWau+v`S>p)9G7cR{9JUA%XS#Wq2m0mBB+#%n`b2g zzIXp|1dv<*4Mh4tJV^KEEp}n6nzt1(u|yb;rhD$R^WN`tZ?b1UiUh86IkUF9W!O-h zj8NxbPaWEci4!Ovm;d%B5|mLhx|N@l)P0uPv@_2Kli>#Oz08^3=`kwixV(^v;eJV+ z8M@131$dJoS4K*F65{zS$DBO_SD2`}wuS6WhOinc z6ipXBSl-g1m2=c-gFs;d4gEO*sD21{z^@Q>=kY4B^z!?4(kbK+NMirbXvNBi9z|s? 
zyhRJxXVs)ii~g)*3wQJO8e~bF-K-J4sgOi1=GYd|un$C``nPl4gj?2m`loLy2X`Gx zoh;lRMQtT;xMKk>b=RaWY-%7!otKz5l5bS!_V*~hq#Uccg`E5Pwmm)plVz{iCc3pDCp_HHqcrb3mj7BAxsENjurLWBoD2=+K4@)Mgqz%+Y4~ z3w^r%fKbT~W&C{&t^u!|7Q$&Cb0ZvT%t=2$Im2h84eO?Z z-E({vPd-JUh|N$d(bw-}KG(0}NreOjycaPLwT&5SY@vC!9IQlzb8CCwJ%5NqSNZ^XwuPs!|T`x2Gdqk?fpJB^@sW<4o9u^;F2AhZL^Tz5fdJ@7`~@%(rZEE81SM8R#2lPzjQ@K~4+neb zP}$`i)VK{DcR{C1)S-U@4TbC+J<38$9(fb#S5cu=LcsW5bRWQ=qJ^cFHxCw`cn`!l zWHoayqco9Ba}?*?8fv4#Ow%%d z%H7-mQT5={np&93@;={4h#|Tie*rJK5Ngn&!hdywa7<>&Lygg&D!2oozkDyZdE1t_ zDgaqh!J0{@()eiqVh3*1X6Lz`?)lWz@AeZZU+>1}a=)rZ8UWEi7RdC~yBKSdsU)(CR&lwht7xDjwBh;8VO^by?FiLTopc0UUp;-L&2e^(6Ip2h znO@{C1hv}xSj-eY!%tGIwtlv3d_y$IZ$S1>8}H3On%m){V}Q1m6dVrs58SV}TWhNC zw`uLSjW?j=B!X~0kR^|3p2!{`x&Sg(-7}m?YZh0!=658m*(V01tNf#HK{0fI6PCL1 z3?@@AOPuZYRDsH6Y!Y&@>)TQK)9i^~A!K@bTp~N7(ASft?!?V8se$#9nucmH53v_6 z`iI{&?|1)#Xaf=rf$gp=do)u>B7bOu_oJjTZJ!+HQQR!))MdC+%S;cG20*2@O$#mTjfW(S^uHCOGxvLsxSV*#T$+Fm-=s zk0&fh?8o1;bXp!3^$bRn;>kH`UWX)qqIrHNVJ_&(lyrh3L09*~a1_+0gxa`b{DdKp z8ufS4bptdS&toaj-H@OiP}iW~z0U$El-~@hiA+QT(17CeT-~!PJ)d`eP;H)#^mJr?>Nel95ch7{wzI5Z3doW8tEf63Kkb?S z(2mviSNMpWzy6GyN>jRPxlisvu&Wc0X7zW`75yj6g#6S0ZdUDo*>(Rn6MFx@nx?oJ zv}t=wPfxNR$m9i5F^_g|lMZ_8B3AsGogqMgX!qP-c_fjM@H(e7(HFM1w!hBIMi6!` zUe%yt$oz>m$FlGKT*7k~hZVU?*i2n&IZZuBb~7Bj1eWMe8FWMiXKsazX06x|6C%RG zs5N*lfq~_k)%klmlRvsoH)o+`oX5-UV4w{xhw>wqSXHUea0(!e^zT#>)k=Ttl$|@Q z%G$&InP!3H_z_SMyD}6|6imnZg526Vr@RR%>H5jS<4&|{0%CStB0Yw)RwAN#nHi2- zJ_V4@j#w8oQmmPTzmMvzAPAp9r5fAbGcEV;G=&gwS8P9r5Zqg2B!HNnGI0m% z(m}5ffU6_Kf3k*JX}@L{kan4Pb5YX%S3PeN7M7Bd@ zqC0O}uM@>QW#G_qEA2{_-)(pizY%Ef->av}SIpUG+pnrVUS@7sGN^oza1QDOD^I!e zRJsGfzU!AbUma))dR2^aV4GY+{^GAYs@A55}G-YXf^Uj9H+?gFdSxf4=Y+WhmyKxppY6G&juutSK)Ef=6xBWE-u}0lPI;%Q0nGB@hR)#Gj%sOMD%HPA+XvBq5=~^s9f>+QhV-i9ZAiWlV*o< z+a3T4MeQqJ5mDZsnxHMQ$&?v!dCXMyu&LFO-`g^KrpEod9q`CF9LmdIqU`1}^Xs#H z8vW{C1mynpZ@15;!?{{}TkpH8h3ER?u@Q54`_T=r{co%(ag~K2&_O zPT|wJRVE?eR`K|`w`deu%d0c%va2&6e#xZr5GW@_^E}l!#uhTdQ1e{?w#a&sS-tZj z`*~_6bxm5e1#KM<$Kk>lVN4H005WVL;N?RN=4#J4g{N*+iI=>Fwb*(cz+v_IoqUc+r#8~TWe+ew<6oh#|Ji+^x;pnfPcX~dY1Iv%@ghPB2)hRe7{Z;a?qF$II(g^1yF;R(l3_Hz45x=V!GbkQ8OPnP+-J((bkTi1y zcAakhIRWYdAoNe=+Kqu|`-U$2a+XiK|+rTO_o0_T=6@42k_F$9*lIdxNw#kX-4*0aHKB8$5(>Kk++|mcz&G+Q}oLoxGjns1O z)hghPI%ZZXCb$doUn)1g0~wk^*s0_;|9=-jD z(&&b}iXQRBa9?UQ`1)pfP#kuH%%y-&`Ui}x9-COEc?%OnTAf*y4_F(}`$)W*l5JS$ zvl_^-B@LY_SxR&(M-3Ey0*;&@rde&mm>@y z51j2Dkei}zJ|l4ZS8C5lv7Vdo+7*KwM?F5Wf+_&aW3^fAvtr0yRC`TGpdtOib)PwLKR0J+Tj4PY%%RC6+}zaHy7JwfxLq)7P# zaJq&yT-9PYh@C!~o0*0Tc*%GEhT71T40HDz4FI;hq2dbs@@86UcBnuj?$fN=WgJfj zAfB%1>(kxX2Bf=7^&^ODmMG_~(l17m4g;?)AcoL#ToDIe=9Jz#D5vYp;i{L95gJHV z2dPG$LslF2l@2OCm2*Ej-*O5VaU>6ilw-2!i&yN@eXkbs)lM^B8lKBxi6W$&NTu(#dldB2N_fu0k}-ag;XJpktE_05^L_Y# zW0j>J&ApxaeT%3&Ue(PUPhO4_cXYho0;^i_;!c)?mb~AJ?emiagijI;>#`$D0-HF* z{8AYJ36t7yhK~Cth+Yptfex;Y;e&~p|P z0#I(RFm2UfinBS<@$V&=r2>|KNA+q|m_BE#f77wv^Hit4tz+?4C8GV+CApEY-2Inq;_mL#z8nl(jZuuvZ6Vyn zfbE6R%edy`-U%FJnTX~_U_E#4Lc_yM9yCwyuR+BP6*n{?ELVNs(T9CH&5wWU?JQ0U z1i7Q0tyA*-;cC%jIvstQ=W129kXAU{0~UV}j;E=qvc>YBzd7B$QqJ~sp3J^an#_NH zt#9Tur<%TunZ{bPgRi+C{8ssXBO(DYO2nK>%W zE)?5eyp=k#&xJAR_vfZ5yDQ6|>%9lIw%sca9Ue`1e_FkU{87mw=QK)heoPR*gvd98 za!U_Os+fQ(I{v9*z3CAbc<#IdJNfvdfO+Up^8{OFb8N^=%7kH>F zmb{Ez;@x0Jy}d0=b@5NP6y#2}`QJ5w{&eo2%m3D)L5jOxozUXBBlmuSu`}jri{L-6 z^GE}E{E?Be;nvM(r`u$Z{Wqu@;7b28(p~`F*+qzVXVe$(#uE;^3XtT;c_jt~cD+T>BYYIc0|Ie?cz_0%*|DDs0$^nQ&QDfyWdqf7XM0FwmG2@Ks zc;jW4VO8Mw#l^P(`UjlWiU*%gz@@T_Jr*J+~eX# zPeL?r^3P|5>2?@lPTE%2O8=~BexQ+R|Hacl;zfAYO_vnQsON#Zb8(9RFUj>y-Z_sw(LzN>BqoJ8^;s#_&=LR2V00Mnbr{?4Nk8O1~ zkU91XQ9GaySUG9CT=t@afVE@&*$ie#*+fhJ3X+}z&(zEF^=O|0#P#p zP^-!uE7auBCT 
zFDKIiC_Tc5f;3q38{x3{(cZ9kVDo2XyPpYIl)cYS(XF0ug13jYka?H9?{cy<^pjY# z;dH?JC-(&n^K$B^e`LkQwFXSc9UH((aZYewsD8H0Pe_F_6vy^e2k1|q(5UKt@OJVi z*|nfa3vA7&GKrT55bHj-LtQeyNbN3gmF@nF_(7f)I$&BBO)v0qA)&t9WOEQ1fdH}( z_J`99m7UtbF{3D7h|e^Z&CH`Lon&3+m$bkrd-jK1fJX}dT z5f1$}LkUt{;RjZxmtLc5em~Q_X#WBLevs=;BWpTjgxm{1&!vg1wuuwfLLG{gN+nl* zr6MI3yZJUeYWS(wLIL`^rSn~FFAPRu9%W718J+pThDrW&=8NZK3MJ`VqSX&?gm=HF zM8siet>YuD=FjI12nB3k08|wPj8Ettu=4b(h6ix&*Q4P~6WInO@s;Uiiai>H0o0-8 zehsi%dnmBS8q39-?iTk$e33;uNdr^37lL`$-Hd`a3?nFF#?&qpdM3!2eg~8i8C+9G zRaON2R5|a00+Wr?4pNR&gIa#vR`Qgj>+mz7C?zkYe#!jPiq6N)dMx^5{{u-aisqn5QyG2y4%~pQ#JJ1`ULB*{* z5YK20cqninJG;he$hez%l+539XTwJB~Av3F8;2edYGNJX_a2_ z)5T4HeceB{_g5_S+N9i;UodcP?v!E8R29EUPThv6a7)lL8udnWJPZ5V9;SbL9NU!! zQ+j>CwbBc5r`e@`7%@9Yc?dmlI1C%BvGi%V((=dE{xuN%kE=L{u-?hkpUkV_DjIQ` z@sd!(w14)!kN1C83cn2uJ@STJl=FVSmv_!U1NHNrbU&?_D_~~-4v@+nPhLSa2vrFHZ)Q5u6@sYPFHvq?sv>oUx`FtBA)M9Y`qstUNrD!3*JOK>NeV{#M zsxhCi`Pd+z(4F5w}Z{Mz`|)(sd~-Uwk|V4TlEB6kMGLnB8}_UPx4ZG z1PzCCQWOHdTUSxr;ct_r$QS4S~FUqGl1&!q?(a*l}z|t?hf_FLRdKVU^e4DyM zqt`iNgHvR&i`!nWtRf-l0*!iU-^0*hDf51ge@juv+2;>iyrbwj@t;x$l9ydDcHLjpqz?T!PdCUchYEtZtpn<1@uMO z4OY?%TeU{F!H^O=_PV`i_B=4pD5(|zUJ9w-71F)HmxY7^&=L=LG65WqJ|Z)TU4Tmg z-jcl8M1l0jEdgr~ffT?S2g~@a5%+j}_8&LFkhNYD*buC8{>57q+f0_S8{|j!Im9~U zQxoN|l8bwDV}_@1?XArWo{!vJ{8du}@XVUuttc!IbNanDsbeLjM$I56n~?bXy8G_s z7Ofs&4DW!%U#IIG`bV)@n)qqK(a8 zGBNPsF)%~Gi@?9wmoCY#BAc+4P#QR5Z7l6JHFmJF3<%!fPN-f+*N#Z6Rae^6AfYQl4nhI`p{S;Aq5UTz)wHP2!U0KgXEOJjvy9#$HU5$ok zJ;8Kxvw2J00mmwLkt%G75__3KwmHPWU6L&i@&Gi-dSysr}(HjV9nh{PGs5lLNJ9v z!)xi~CAwF*+sV^2eXAt__#h-$GMIWs z;l3`wp5gVp@++M?FH!wTrs4@M%g?sfM5oA~iu?wZ@zTyAVG)_qBcXb?$7(w6Lw<=b z(+wp|EEPkGC$tXd9xVAsrZGH zOn^VD(FC|ae}1d_{+c7(udhvCO@kSS#Rr|HzrX{i-!vc$@Xqh*MA$bD-uPcavDOI_B9L4GWXQV&6`!tDx}y{JE6CVq(yXb?lGWO`FgRf+1o((7V-2^N zHILj0+4;{8u{V77Y+Gd7eUFHWrxqDiqK_)hzM-Qk&u3%e3g3NlQNlL9V$m9Kp(F-| z9R&k^_{F>s+zmTUir*d)``I~RJCnvm-{)Ag5b(2?PZm(%r|MlCPVu-mEuKdY7j?7j zCrBGne2D<`6F|2Qk^qC_iwCtDa58)II;Zwo0lf?$?p)PQ?9146|K0Tha#_NAL6_Cj zlZxl}EmX6g*y)v1Ds5jTs%^SmDX8CzwZ*UH?ErDB6JJe3yKk3me`6h4pFcDXn(hK_ zb8bKJsqbG%ptDLHcTjrs83cqkQXB(KC~0r&M0oav7tK+T%+VXwQG4dXj8X^d07L0V zH2Kh7iZt3F9!mSKJ~XF&?O$N>w2_i|<2}C<@I?$0FJ1r3E=i37fCH5mQYrePb>lk+ zIg#XWjD}0I@axGqt4O`~+~K=lGc<{Kw2hvwUo^L|HQ>qUDFf4|dS|Qj$zWp=iJuGR z+-k@+)x)EuMgt&RsTywASn_q9vV&>k+}e5+zZDWXK@Ohyjm2mJPP1$ow;F*P{HlL+ z2l=mXXcjc3a z-94YA;_lM4PX<1;u4(zbYJn6$A_p@VM^S5lO-3e)K;d}=Fkw|xk{{Vg_`~EQ+fU!& zghyl++zFO(8tHQPdq0q?08j0ahFRDwfq@Lg{n2i*N2&V3T`SEP7{E$LS0(R4RzSK`A5lo3Vyxny|- z_LvEmjO*J;=%^I~7_*fGxW5og$D@i|-~||P+m~XW7I}O}hLzOlx2Ct!P9v9tma-h~ z$fW`_&mOBGo<9ws(WCt#5?a2^I?mF6rF!a{d`(j{I<=P+(69XCYXnp{~ z09Yuv#`(lg$6t z1QBsW^8ZWmUIxrki)F0}^k}KWzE;X<8vO4ZtFaL}zi$a_gJ#?>a2n!uO@NQ80_Ssh zu(xz*Tzorg8`zUl4Rhr1O9vCNbzn<&|ARHOz3d~Pj~v`k^rt@Jf5TY~6?uW^P5RQ$ z>}QHR0k=_;t{hx(YR=R_V%Qo1(L|V2oS1wdo13Qr{n!LA*r#TaT5iw5BCx}2;Y3bP;P0C}8nu^2?j5fV zDEZ<4bS~WoOzaf-JCWpFKywb#T&C1Rnr%IVVO1=*x$ZUQ+W`O^>BFq&r_9#zt2uk} zZ7qn2=YmkII9Pc!gBJINrc_ru>og?eX3X}%{$E6L2#)+aGa>e1H*}a5F#kQN>SDRg z>Rislpy_`S0#DC}O_aH@KB^5)uUMS!WKy}N5ldfPZ?vnX^KhMS7s%pX{EPYuxn2#E zHGNt2KZtt^s4BazZFH*$h!P@-bW4eJcOyunhzLjtNO#EwQ9%JgkdSVWmfAEZs32X^ z-QB&3f887FdEW2+&N$=zW1KaV+IOy)bFMkBc}+e2MkMo(P(`v_UgN#`Q&SG7m^*k? 
zTaSh4hcZNs5@J1HJ8fsa`x$YA13-H0?82~sb6vbFTourwi0QK9qm6NbNR;6e2C2UJ z^p36e28kCGUcJ6gRb5HGf4VebuXrW!yxJ;-Cbe$Off@Em3s`b2bTTg+=`X`y(O?VV z!NJ;Dh4b(E-_+YEt`u?)JKSD=)4AJXJ^hm3e0o#4I`kY8j?n&WuFY%x&b5%R$&kV- zeC1tl(-CLabgWi_8A3_JDvs8B+|W)gvs$Kg^So+dQz%U6E5(R2a$jo!-U zy@|S!vj7+Y9LluG;mhigl@`8tbn?~7Bg^j`I9DkT#JPo4t_Afj^{mxG0gx)*BgI_4 zSysC?7Uc%@&60-fVt1qFbkXl)^|}#pgxl{{a*&O+WykRgkZ6ViR^Xdh_406`@=VJE zqecN@0Q-lq404F=tm?2qx8wK#w~4D2H_9X8qp3Y|uhZ(?GHzWoayOqq+F2rxIZkTg z`?!NG2iB?}kT%i!oIi@q<wv1%HvSr6{`k-Hb^kZk;z&6=JO9hJtTetpbsP$1-bB04>L z4r4j$?K_3T+qZJH)fVDhVsz~LLT#%crAPG-- z_cW^i*7~agK{4>3d3AH_f8{mqkc|gPxy$H3j1QY$~4sMe{GyDY&7AqHf;nQB)&Vr=_^b4K`l9? za&M;#ac#IEL)nPr@&w2!Y^bh6T4Jy7jMRv-(TMZqu6h<1tvs#Hk-k@`%T2WQ=?&r< zhi$W`#RYJkpSLUTS`EHBD?(Bty^8?|P=tA$K0?N4T)a>-8WlLq+p2EijoqWsHh>e| zU+|v-3_^(F7#+ttIzap})3l9v;iA3yW87XlXNR%Vq`eqK%4P`T=p2=^%*?5?G_wmq z7s67!|0|BGxcf)Wj~ouSS_$ooadEMndg;y)gAdl)p-A7)f7{Ehfp_ANF*%>}{!;>- zf#H9KqH^D>FsUmX&eDFy{s-?`};ZNQT@h+4|=%rv6{XE zQ4PB}dEo&I>HSd+&Ac;>G$j<|!;{VSB=W)3(tyIoYp5AGU9ssG*CD(kg-o zfdDO%$L=yNLZg(cX6&Z6pHAV$)*?FECaqZ>L~7sM7SUbvdk9GWW@$u*oW~J^y$TEl zq0<1gaa`~}=MZsHdZeB5CD7Pz?1egQ@||-PJ91y0vqPU`eUs}LHV3m>FOY?K-Z3{g zCv0$#d%uqL`_D+{)vcZM13A1&ALY{r}X7!Wp zKt@7VRgI`=8d5y2^lm;atIDeM9J261IXqM*=AtH9oZ7k9_PT?Gsl&#EeGlZUXv9CT ziX5o{${TI?(X=K2LV0}=%xqA7Oq~9hb-{L&9&R`Y;XZvZu2S}VLC|6lT z(?E7Ef(|L)6W;RU#G-K=gfN>2l;&UIGDizjEh^R22arO$+Cp7Pe0s~LO`$(^AUIzG z!mvHfor~L^aZ;W~pWz9y8})poK&`DjZ}TOT5DF5(MGw%_Z{^#k{%0J=#LxfS*R04v z8!*nz#OEbIuuF;LQRBFS+a<@L>eUty23%Lkw{tE00Li9hbnZJ4P!(#GD0tnxtfS>^aNRJR%Srvge1KwymE)YcHEK&LZY1z{z$BjB0anFhs8b@<#;HG9%e`Lx~8?2ip z8<;E>svOT)Pda4Ph?MW<0)$OpngY!1Qvgcccxdf4O1DB6a-msPxuK1YpQ$wL#^JGp z51ebih_F~39nBrH0^)3342th5;S{ki1W#b3v89gbS6IqW`A zKhEq|4*>x>#@W02A}4S$6xsl+!QEfNGc_FYR)qlWMXkHq-_3?pWqkiE< z#kI?&mZE)N{fUkL0U*A$L-1HKBb9P*VKm08&4r7;KL$fjVDQuQhQ}*#M%tUYW&A<* zHk$ zz}?n5%q;}2imMJVBjJDL-cX(oJv7VbI^4U*?>HW-X3_hk0&tvuVZN92K?o@H3nG_X z*1mm0r$qGwP{$DV?(Nw|PhJJT6)o1U&K@klu~Yi4LOjSW#C}DLu87eqQ!h)Yj|lKG-`D37D2PM;ABz<1*k#6-r{S_w1H| zL%k*+EI=e}umMjc(l!-KnRr>t+=#{77pA zj_b|T)6sQh-Ofvv zQvd1U);@=CqBlLTp@THHWN`{JQ4TdAk@RF;$x&18WL?m21hyysPx}FEfWVu3J%xr8 z*k{8-2#a2!k7pn$`YFgSEicSqG$$YEao87cprgQ%XMxY!xA~!n7E0-Yo{_U8y1USi z{}#>j&*VMlJ!WGl5J1CDCgW7U zGQ?EBbnv~$Yma}S)E}@J zra0O;Anv_@3y%g^EFsAapO4g##r2k`gs0!S6Yv;{f&FY4BWsnh)lZ@SN#S%35I{lW z##gesp(Fx0r}U*zACR1WIu#vw{1K>9^$|eW2hF};TJh7xe@!8||C(v1pfmhnm?@e5 zW>N9jUlKTJK+kji5o6i1z4{1z<7EM2eK?KOI>z&l$>8B0NAqr0#Kb&9TGo3G#MZU1 zxLPbdGV9L;5v>1Lv!v~$SM^r5$4iY2wQoJ%v%f<`{=JM9)EK)8%e~|!+`(IjVPoqb z2Z(FYjfM2``EL?y2zT#g(9aWYvl)Z%9|-*Xm9?-3I{x3_%dkc&%-FNZ03VHz!KBtI zbpZr}vX~Qc;+Mo9_NJ9&N83h%&|6NR_4h3MZ~YGzCL9|B!R(^E_bY1wthef`%&Ho0 z*h^N8Al!Dn4+c#>(7d`&<*L}TE%H9e$G75dwkeYX#O7)9;stRJ&=}arF8|W5l`_8& z(Wqm7Wu0Idt;Ttvuy(&-5+e{Q9-1O6%bXeq&J}tCybJ$v<%SXf056nz_DJfNp`i;I zRY0FuaqKLiHYLz)tP`+YzKVzBKX~;UC%SB{s0|f#rBBRYHd4Z#upleA_O?X$ z&6Ri=i;wIpc$|#khxen5M0UERpQj8oc0ib`b$gjjA2dqnJkVUA5!ibeJc=8I_nh$r zp|^0;l|fk?)59V*Rg$hHka~RSrxOeu{L@~6v)W}p9avG+ZaVZu294gqOPWHYIw?9R z!D=ndvetoG6+6t-f@^(5@G1mB4W#_VBQD zU$AWZ0AXi+!mP+|>fLv|M)`%xeGyX)=Di#AJCOx0$^b;JM)pIItf!ARebP{yDsT~9 zfE1#HyOEk<8(VrVVkYjT+QT9FP`&c@9`xo+M;_!k-zfvElpu@nxF+x~fOxS$HCC2wRdBPRfB-z&~$!F@q+3QB&LAt9s=Rw zRSK*#M48I?P4Pb{ePk8bN+7=)8e%B)>PA3Q`SE#bZ~~UZ2@lPsacdM-YD^Qnz?ZM( zVWWL&J9htZ-^655V1~*->hLEq?(f?|@W9ii2d)N^VZbkAgulB3?W{f~$X9{HcOGh# zt}@3PDfBmqi~mj|IDNpExSrHLAzEH@n{pruT&v0z~T zPa_L??H*3S&Gu*oz!Qk6UG`Z6`6}RiImCbUU!iO+2|<#g$gj zZuqjT_LbY#gE~9+a;(ksGyd1j1fZBk4TjhP71}s9pqUn%xa`drB8WEbtaN=MZgKUZ z(d@y8+svt>bpZGO#LkxrQyi7LkH`GW^8xw`Xj0A8#?T_VlV}K<(D2av$+pihBD5SJ 
zePhWdH>H^YvJo640u9T>Slp*zcl`JQH7b1lJBU4s>leYm^p%6qmvf5?jDc`$UbWl< zXAwQ6Y!x1>rue}9cf;;a=SJzEHS5WGoxTbKeK%s&v#cQS;XrvNsmGS;BOB|#$Ee+vp=M9S%U&Rd#w zp@?FMVyk z%O$qJP~Z~m4~69E`Uq(t^sg>T9ZNu^Vg63WJd$8FSj^kQ35JDB?^5CgFO@5qq>d>K~_-;`HS?|MtVSm=PlL#ZyAud z>4FP%I21L2Y_cyA$2l^4pHEKDg>u$j`TJKCg+p^N6=VxJS=#*j3Ln8v=*mAfa;so{ zGVs;nLa29eO?U{ZRL5Ve;L5*XUp|MOfp01G-u~=sL;}{;XSO>cldFoEmI4cnYdBRra)bo}VO(U7xmMlE~Pg^jr zQY<$N{C|oLFa~d#wpKZ0;A^5XAif0h0)dP=ZDWkTyF(nN@X8%(`CBmZ{Yx{|%ndAZM3y}*nE&$g z!C{a#{Hy&>u;wo>-B(&ETPJV-O9+1*2qQ6SRIvUI7}Nf^yq-gv#k3>0tn3F4Z93_H ztTPC372q-@U6wcr>j(BCs>%lR1m7e6*Lj4yEQiJPFTtT8a3BfNItQZ~AcfWBg)V;@ zH~25K-BU~DJXOZS0ZkzF`-$UI1>IQA)*dy&|GRSp)tQ*5TbExQiz;?wy9pAB9?LBy zoJxV;vY_f-j4BX&sJ4d*j9*)J>mF(dkj+V78vgwER{a#%vU@I89&)Uv4FLrNxx=9L zQ|;}hwdAKRO(*E?O?ne!aPOBD2|E{jFh-N4fOCtpWCuvXoSf9Ifv~n)B;I(fdPX zWD4S<=Lnt6Vwc}1d!SR3`6Gwskb0w{5e}Aw>qC{i%g<>7`42hAre}}W-Q(UV&#<(( zon+k?*=6c!6H74+>^1qczgu|!wSnuwkS~S5W;~MEF|!dcTg$ha{rHmm`p7ZRu|f*I z61!Vd?YXg0hU*#oe5cG=N|4uef2d-#5bSHim-)O?{=1+abCYS!HSf~yX=ym#HF(>- zuoqq_U=ov)dP_Hb$&Yl1wOG%6s7I=E&?%zo?jpeNFtx=oeC6>u)*aGLjZ4uBJwmd> zD7pV~9aCBr)kzB4hIUaUk#V*rkq$scc6 zM$l?#a6YLmIrcV=hsB-~#?Hx2&e6m6rMhu=w!8N5jmj(YspD5@$P06wyKoA?Xe(GWq5FY;>Rp}KO}79^VWGM|F>X#e+j66U$tkruL)p9|E>2>3)Osel1#rObIj8bF=lry+RX0dwcjDt&Mcfm2muTwxzLH`bw7zmS(prc|$(qSUfx z*fIOYSkT1twaDY6^ZD+jTc-7-^Fv>d?;Qm#(+TUdCCh!S2^ZNH+2>eeHr<=^Ez$=G zra1B7%^e8;?f2W92{l~1o9y-FIw^)T_7wPko!UGrLfd1gG&QRm&)MnDCI=CFi~(+q zjAv%oyr1LvPDwHt1%a^jx)0PZHDoycZfdhX64k7So~v2S zV*ss+1`2Ea`pgTBPrvB~WAWn*(s%h?x!)@cNNQRa-iw%ZxI0-M&jDt=^*%ziE`Yn? zDyg~33|y*|uYig7t9^+?$H~0osmj#ssh(NI-kIA{k&H{;u<%|9xlCvutyj(}E57@( zTSfn~_Kobxy-DTz0kv1?E)LP&Xs)0F(?6~xXfyWWI-cHutLMg0hT8HnyB5vExg^3(?89ZlS8qfK(<47a&I3zNY znmiG0SQ*55Zcl4Xw(zUggff{}?P_LQwOUU{PPXvS--^=jnGLgFavS3BK=9RP%#y>f zPMzPVHxr9FQcH?Cbds=DKi>Z$Kf|M1WcW}p$XxNDLg(E|FCg?sMVU5B02Cun2kt#>~N$`ZjVx zk%yA@7VA-+QiB@DU6K8@7+;8~G`DkvriSc>! 
ztv;@~;L*!W-3U(YCZ8;8UgQr$2Ml>Gg2cAOS+#gk)m42m%cM%?=KRSbdn3q~j1Jp} zg2$(AV=U;Xrs0{fXv1+^*YvKi=wR_$v>xwjaMdPn!{=>Z&u7?_+@bf80Mxi7v`xBy zoD^4(JnmYM)f$;*sJ960>gdaSG3{dFOIi0@h88}cccc}(N@*@|MbNsE4~qqTt(sf6 zB7m}oRpVZ4Tq_y z{Pqk`;ufhNee$E2n$1HreC8JI2M_2GZxyP?v+~yCVolhn^HPcVnUu4!_!4G#WA!tH z(RO@fiej>?uVm8?tTZtbOkd1%C)vs*vgrpHq~75t-+B)(9cx2e!;+Yyiy6O$MO|r} zR*gw(4lmh4WrHUo|WB(tpPT*lQlQEw-=y=N^VHdtlJ8`QypJ>*|^E&rZw13zY)h zhw>htFP{`-sIvkv#IjWG&bNkFgI_mX?gIIucGJVMM+5D^|F9~(JDBW^cH@QU8ur)@ z`E-_cy=7*P0Hq7TB)jf%f`xUvcP%D+)i~@uI$B1f5DWYr!MIL57W!#hYsqetK;F2; zIT*dD>{L2Y%M#@@-sN|+kCU7ay}_OKreWb@FC(o3+RK`pugiB8E?29=7h~Vcx^hBV` z_dxs!iZEeLrJzF9ZiBY!P#%d*6=R)f5lE_!CmSuPe!!7AYsugSs;$Z49644TyXl&l z~Tr5>hpTC%v+msHSb`EEL&L=Dfw0(+iipYdzt1bk$asZ^>g zi1pt71@WmZ0oe&O%a{b26qtNzr4QxM8Pej6t~ah&xqS60dE0?q66&gafB+TAE{KrZ z|9Zl`s!Wz4p8l;NORj!OB%&ttCjIOjLW`-rkP7k)0wmU2=<^XCmZ&PvK*mNG=!G$EU z?>Hv0g;EzWL;KcJFPt^SC=G-q3_hWhI!7We*u#9ds-xcOZx`Rf6MN*U1xz+03NjM1 zj9OKAhx>)-+|QQE%bd++YF}&iJBMVFlg;^ne1Ap$I_dMX_$G4l?U@%8J^gAHTw4jx zE@4;jb@`AMGH=1VFUxVgPdamcSVk88HHI*+arLFD^zEnkUSQ830Ls9vBUu>+N(3Zv z=pMO9!7#l?;mDRDa*wT!nQJhtWG+T(&RLo25A(c--e+O>K>);bMaCQK$h{xYzPis> zqY}iE>g|4T?&@;HTI&ZiVyV7%V=o<&kS;mAmEbc)*wF%`E>b2R-M?U6wM08J+lypM zzl=82mXY*kXG~$z@G#BX zIkY<;Jr!n4(~@M2nq^+Kk7~^2UnWIM>SOwH=6D6RCy+JC3;Gma8OrH%Hn+GfiRXH8 z0O<4d;A=Z{a}gT@Bc^Af<8{<;67YZQ4L_6~%%!oOTTFh2;c*&%HzuJzO5sY#KGCNcnj!YKJ3ryOq z3QbR=aS3dFo^~wS`22A5omPwV=R0C$?~JXB(bKYr5-}WW!{5x@tHbX&d%lBDoqvwu zGJH}bTy&ocZM;1gw`fvV_F8-I=R|!>vf@M_29~4tY8NK5!c)-_in=EPz+rJ2CWQuI5N*qm;KA{)s4lLw^RB`m?cXf z1F*qK#-C56+OOCPvL6HsV7Qrfs}6Uf{e?Zj`Qj2 z{!iVdYqAr(14~s8vZT{UaR-8H+LTh0Rq1Re z2fI_8TVT#>jlIzdTkH&&7esX-Ms-yL>&5d9O4PH7>5n@+&YCQFYHsCkg)_YH@d(sC zva_CjK@r2qSyZPjII?Nr`E>yi>gIQdA2mb68j~SDA^jYyVlf7QNM%iyKBn&3tPX+< zR~H#s0#$eA3`46cigo8&?U{DFS>QW{oBo`l+P=?efmj<3JwvIBnjCU3gU*gW;yN{x zRqoH_b#gBy#H>BPVr8Rjn>ff%bCGXXaIoSWetxHtTRnS)g!+>dKTDJ1Fcy|uZCwzu z!NcM3@IlcFmap;E6hrD6F&~ZJg+46o?*5T`&|<-u-^dbasUT3LP;9ZvTr=3Y;g{VO z7gvXMafHJd_6i_90d`2DL*|(c8}X^h$8a-7FJ;jbZKn5-XUgWIg|z*mO)Uw_;MiPU*4(5?wq;{f^$d4)elV@nVweMU(R_r7%tU#Q}0n4 zk4={wG-<9pjNQ*~_mW`}1bYlM^UhI4OiJRawhMG00-mG;D$TjYl>nRxcy86*g#k5# zIi!TFBE#;!uan|q?I&DZ`XLD&YfT8wTSdjqy)2vW9W=0E3=F9L;wXFb*O*;5oz3rx z%&FU8j$NR9`s9pr-{%WA2#m12Elm)uQ8;7ohX+$On%C1db!A_+4>GM*n+o(6TedoN zkTQ6>Og-E1KQV`^<{>-qq%&VqcfyghGbS2@Dpy?C)6i! 
zJ<`zs91UV<=OL$=m&A<{RZ7wT^wZrnmj7DoQasHa+r-!r;y|0GRgLR7u{fVDJTk?T z5L{($_UsyZB3mOeTB5o24q(#Q2Biu&yJz}$)WKHbu_$p8-@(|KLXo?3{I-F21zWh^ zCMCjj9NaL4tpj7z<7l*7Ynpo|teyMjE)xB~RSs5FX=N`9YPm4afcVD9>=Ov z$}>DMN*D=L7_qINga@8GVVYy?`rJBqXRp@vFfloZy=2@WddsOdO0U~Q*kmRrP$R#_ zz)@YZhRnT^MS3)%uD>m>o{+Nyo=C$o$KU1koLp~@fk-s|{`Z(R#aOFQifBEH-p8{z zg;K)lP9(dC@vZHyS#`H+t^j<+o3kTCm#K(7ZVf+P>|Zw4*{x`1z=F@lh|V4nysq{v z;i+OUk4}o-)XtCv;^gOA;{f0#lfNt*^SY4kHxIXgNw^jOn(Q`h8>D#q^J9@r1pJl zAN6<7>VCK7@AjSpMp^vF*#8RIq%@F@lACfQz{E)Yz2Ejq00CMcAlKtF^1jCjR{-SK z!F7+Dr$QX~Y)2u_djej=>R+-Q_+yPWroKZgfZ?EiG}z@_K`ocF`+p54h(qvlrwKC~ z-}884-thexS7tEe#B#u4592uz5fK7S6mmX``7zJ^ePOS@BQtBC(33r#jNQ(<7gTn( zHXXG6>tm4xCsm$(++xSj(@=iB>hSkFh>4IMJZF;%fa~%-W0Okziy@Jc6%xH~YbUt> zNEmQ_Jf`HA?2o=wf2cfyd7;@Tr@v18XEuV)5_>^q@T~!_0T+6`ln*X{NpJwBIaiCv z4rmK2Q61>#xBHJ5;iJBExzOkZ6;!SpR3cB|vK-I=1#p{_roX?VA7rshM_%WA9N;k5 zE)G(U-xbFL!nxZJyBDbI?$lK0aMJ1osHBQeG=ev^5zQm^eSo8*89r>;BONcwx~y~+ z^}wo@gO1_r>W`<&V+PhPAu)sPTPxTmc*dLhTeSORe+}Yxn)%4ip~99d3P}SV89jcPWqp$(k(lCIqlaHJ%b@4DBp} zl)8_pDt!`z-AU3H3-z3%!$fKj+I5#-0z(n-uV@^pliV(}e=t=ZdQ+=WL?Xqa>`59-<)lNve_VJu|#kEI-f{na!32K z70HsClVfBdsH;$}To0u%#&vc4&n%0!li!Tr5GNwZh20!_oRp%&d}tgT{A>Sm!9eNk z(Cul!?a$J~I*QFTT~Cg7QvrL`4F{XuVdo~0G+X%cN)q5_LdSDJ0=6%ZWM~b_G+L*& zZGWeae_z0)jmYj-A_Nt>99t*u6&T&_x`8b`S8ZLrKa;&&)BPe~27ml$X8j$dZ3i^GgMNg)1<)QqOR3|~P2vKS6|wdyQ4~wj#7X>UWBVcJT^TEgOXhZp<6;kjfI`6S z1v*8{bXHz1>7>MVCiv8&&=vRLSadzN-B1U3epeEHWXgl z-4xM<3+BEjWwI+G708quyS8~zZ*y_n`zAn(Zw&&pc+RY2_;5PUJ4N^7x1$Pi6Uga` z07k27s;d?iGJhg2dG~V-mq?wSksj=?nO@pY9rvAVoxfEdXPc2i zMp$k|dU2P45F&J(;vxGs_5EEcAb|rJTHZ>&iOE89h7{JVJt;lW=iLW%VMJI!DoAdEG7a}6Rj{3RvS5Efd6xojXx0nj}1>5(oj zYs(w6t%UE*m}63Ri!~dzO49pt?9!0e_j*ZE%<9ye4Y%*@?hmI~FJH^=nC+T%9B4;K z)41w)+w>RP`wml=f4`#J=w_7~)WO4t?40!PA@|UAM#e51x>^mViv%HlxMqhtdQy1c z9h~EhSyA6z7MVR5a$9mE9S0I3xL79zSr?>pb?TvyD9=Dr{Z9`(!HpLb23PXdp4SPd ze$Cb@Z05QWJa+(6_Xh@ox!#XK^jIu(-*80_K?;t?!B##U%GRZ%D&l-7SYDymuXH2O zQ?>eGoUpLqNpioP`$-L`nh;|O6We8xy|yyJO_7= z75@i3JX9~_w)zHQM%#tJ3nv;6k0uOS6`A97dCZGAj;`3}w&m0xgGEiY{uH^hT%1~0 zKYtCr)XFX`5aFil5G|Kfh0|fQ)NiS9I>KU4Xb$nBU3@8qAnpVE3j0|l*Rf|9fDq># z4w-<%+wH~PL2lVh)np%**I*6QfpmHgh%N*eXOJnQ=JfA${T|bw%Iz(lI(<#lkB;F9 zZ^TA}VOwL0XB$V8&aN;Mw4!&-WUB%x@m}XTd!v_Ei6~K=)HZcBixjKt3U=Gu&w7n$ zJ@aC^o7KeGcG9#YYh9d6wnvQfD$HqX@wiCQ1BuJwzmjg+VE{!5Tlq24xdk{EU5k%| zbFt4~-@2Z=Jh1+1%qtK!P8ux|ggi7Wo21i}|E`wWa%6R6y%Kp9(eA__)G^yL+wF0c zCH#aHy_J1_UBJ?=HMw(><1j;3kv}HaSr^0OXiJE%eo{!6KX0>6GCm)IrGe(vEwx9* z^ZJq~A38R%kpeZ)`W!U;rf>B>d!OH!dZ#Q8$exV8htSRNME&6sAWskKHC`M0tc7f7 zxvKe4UxAu;#KrKQAmDzExa!yb>1Efo;dFGanB^PFaTW`ZVX;fd^Ut*L6P0tYkVPNz zh{ls^&D_(i5ju3rqRvwenEQ$943#4DSBVbWC++Ta*)I*$IdFL^2*#K<665W)RZv70 zNJ^d&NP5Vlw`bH9*`mE zP;_DU1J1w&&n}j8pvuHKB7x3ll7ok8Gxjf6o5n|U>o9@^wuYB+M`haLf%{)fEXDqM ztA`Q|nIzYWedwrRZ@W)m^LV9h{H;NkFGm8IFa5Rz%Jf#{Rxx1xze$`xz7l4mW5BNj zDs0fbaJAj(n%e`bHGL19h~a&Rg0z3V03HZ;CeXB#FDgBpl0&{E1#t(yvzTm$SSUVw+mMEP0Vw#75@vtCxXkYfBB9 zN$8Z_p|2C!URw#;vC?GyS6;^iWng69o~<2U!9K14Ojt85#R79s2#lC%N`b%c6P^}Vr(DQzHL z(-*j0u*4WZ>nU2GQ{Iwt`&|l{R8ESf?zV0hO=OdM|Ymy zX!l(}#94^Q(kcWLyn)B`bZATo&+s zhX_6_oyO3{vZb)!+weG$skplnOxkMRF>q%z(IGPs!9pC#$YF=YW6G8c#7$H|U|K?# zg3!?v9_LkQj?y-N!L0wD4T?Op@{7F%f$|I-d6O-IMII0{ZG^YTk% zS`(j1J@n^BJzAa;Rs(lH-Una%3+e_i`C%{*v6~(Q4yP<`Fh&~Wsf-n zIo`+<`?7PM?s#)WcB6Uoq>(4&B|1G<-&ov2UE=ub@sp=oH;&xHw@Pu_|JgkWHh`np1#(Ej@F!Z& zR2A0Z4m^YO#8j6tiE&F>KsH@EBhv&su~^FkP?5PzDOaOETxz9!wZo#G)KaT*LXb07 znl|>?&dit(4B^%lZO!^DQ>yU_(UyfW2_%jZA!23nG(Qr=6| z8Jux`aB+9@tD)PQ`*%Ow{~j*I!QnKh_SsBM;yHGZ&`!1v%|^$N7O1=!_dME)h+#6l z`J=hiNc6D}3te0H_q-^T03UWPyvYEpM2FZO$1Oj>6?g;SwEQ_XtB_^pRXTVu`B)o& 
zejqu5Wm<95p7kSLY@>c4#Hg&+@S1brMIaZRWj7l4@iaR^!$tt>Ip>CVpbMV$up7BZ zx?av+ZMa=pmnz>ZwP@Eng6^af4a9RIn^Ied74d@p~p{uCGX?Ibf2JFMiN@A=c) z1^T!M8>sfKP{;0PBfYv*6dr6{4sc~p_o$57Yr5?o(7aObQ6&ZXz5}=eDau)QMPg?C zr0OsQ=zKd7Ta8a+5TDHyA{1CdUr*|B(eUyOey#f?j%)M=Jhig~HsFcbsQoa`v6jc% z(Elc@Q8dn_&tkE!$~>LFdaYgh2(G5~>OSZ$1 z3`rgJs%Y77=W(bhi0#f$6EZg&SR>eUTS!{Q!F}=TZ6I(pq(6Lw_n+Rw6q9{gI`4BU zZ4$Gt0?EPj@LEFpH57z@-&sE+ZdJUTv^no!bwZ<*u@Z+?-D0FKAO zu_9^NC+)L)tChxT#Sat<6@@~XbY#9MM+X0ZdyuCewQlUzlQ$4QqZabAovo5=w%nVP z5k1kk_kKKb_QbfBI?&v@l%Lr&VGvk9^^5RObcGmIrjXm|<*#-s2XsAz9Xp&#_-hg` zy!!R%KPS6Hw|TqDF{elMyo0mznmNN*E|Q!(@p)I9>60G*GcRq3X;D_j#~1EJ7{l>} zxE^s6bn@@Fzbb?RP96u6VTARGxNMR1bF>4}gNLn?u}5WPy;oTtG>6+gZWW)Y-sTt3 zwVS?NpV6ge8m;T%8Z~6QE9qn@R}xoc)LCOfW_w(ey*P98zOB>;vFGS+t&Z_4o2hMC zZFxv{mpM+SW&M>&#)kZgRlzbfV{NPsmFIzKi7|*$LJ#5CV9`)?DF=d|5(O0Rrv5u? z$I0}T9n8yfsC`mxf7H7yZD_wBYifR0m&2G+*TL-~*|50Tnjx@{?(pNsE=b89s1OH~ z&WArsyF^*>H>g>>EU@EOnko=h32gKIkG-0s0B6skKU6>gqD+8^Nmw4)7P7p4xEoQ8 zAkfO6kPVaq2cw!;Hfqa7Y1Wf{JP=OpKHJi3Ip4qtWhbCo1H{n`+(8;Z(4`uES4%qt zN@%ba`PO~`P+6ZLUdCs!D1J0G67o;&_ zFC=B*bGFNGHh?fTg8Ih>x;)gpDpB&WCOEoR z{C+XlQx=@lbN~M>R`q|OY|h%r($R%df*<{>N35;(t@b1Iehg9H&(ITaV|_?aB{IUp zYkx!#iKdEo<~7z?B~@CPx3^V;RSCpze3k>B(O!B&K>CTnTQ2lNq=p#Ve!Tt2=D}=< zTh2(9yC$yt>Wc8zF+4Xbw>qo5eCEx)oQjoSdI2QzyjjJyWq+%g-YZylQ?Kyrn=>#b z|B>N5t~~1pCX<0Ra~9;T6J{jmU`%;9IV6#>f+BW?ARqYyQycpLg0J_f^>H4_Ij@Cj z&y&0p%i~SY<8LXY&TP_Rw{#mNq5f0aDFbK3?<$ru_wq6D|x~(xUatvOOHdLAjQOu)E6Gn-q_K zlsgY!$I|lcdDGlVMp4EN6BAh@1)Y>W$`WP1Ql4Bx!rWPIDa+~l?I|xh?86H8lotE$Ey|$yU4^P~R%70|* z)((s&SAUgcsiVM++f7cB(*)j=5a;QCgxuhPI@)XD4ecwY2|%661MQ8tpLDYUudkyz z>d1yWyTRr0-5#kctl;-dzYvXQYac+^Ol(^QU+Th-9PM5azJ-C}tY|e6UDxTWh0d$G zB)u8x^>EZ%WCnuzK7GjB*xDslj_OVITk~DZ=^>PXA_t~mdcri$rz)BzK%@GD@HQUZ z7H}i?wSkRGf7=F!ek~f(wOds-%>69NKuK8IS?ZspHpC(28%sLW&Nq-g)O7+SSav`@ zUIG(?;#JI`CqVIP=+TO6oB_UJ9&Ln0KKljmC(uP9kUP^qplqq2=^?nA%6=Gk@H5om z%FyAUQiD@4JJ?J*Uq@)1m4XK(?y%hoxe@kcGN7lRktp6E^C+ zn2{d$B#O(-zz}s7hwF5hX7fmuyX8=Yvo#W=u&P|0VtRIZA5m~0JJ1Vnc83Rmj~@CI zj1f=xO|1#)@>=loUD;ciVlEob!P^{AT}TQ6#EMb=0l-riEZ;8HFtU#-`96jLMHRn_Zvm3e8)sN554b(YJ>UG&4MV_q8a``YdU zJ)YecNyer+7Ld?afj5VI;YY@OHQQ zEsmMJ$slJ{Tepw1CCS#K)zCx&S(5#L_xB-zzhitFlHb!hW##P2R=Q|^pZdnL5W;fD z#c&%@`thSjnP}&R>*kTiN610c%f_!N`=HD9z=I4jdaR_5(VJYP~`)F0C&AnkD20a@%fvu z?Dcj@+tLFD022jCpV+dVX&TjynMcevHYO=46?go!re|2%8%eny}QCEgK zJy1Mgl(siOjk#3IPw}AG+flc0&vRT6(@CT9_^zr9DZ2NIaJ#*=@!$)19H#A*gpme1 zwy#Ffp-Ol<8d6Pn@vBCC81x-K1LtuWgDT(AfO(#yDp$NpV&f}XF)-CKr)vG99+ML5 z*yKkGrmf7I$h1ZXy@f&A$NsB%|R91MTakdg3!933M z!QfG*WxGQSwrWwlzUJFZ^5ABA8zXrOm9LobbD|b~Y^Q=5rlKFYrX*dO$R}-tyb|N~l-LaHA6A4tcG6?KupbQidpeyxLLt+GmWF=!sh6VvGJx z>C|_*<&WO?;oTDmz`qwz`crE`ml1{@>)l6XjcsIZ}l6@G)Dr#1Ezr!O2JVQI4LH&SFe$?xym^!Gi1)O z*^G&)S2$VPH?!B%9wIzXj;(ShI_cy>HSX)MW#9ilY`tY%)9?E}jEN{EiUA zQ@W-wT2d+L97uO4B_Q3>qlbVrjF^IgfOIoLq;vFW?rZAj`};q*_rRCFhGXw5&N$BF zI8Sy1Ln#zT2<+F!5i8aedfQ^UA zpE)BxOIz+cE5$x~`L0nLzcF`R2IhX1w$iV(7!GtwgK9o}OI#L9hQm%_5oOm3uPrw| zTBT@q3CP6uvk3UN?WYPl+5h#(;=3x-vgD23st2b>*m4?r_r-g-&_qzTX_<2D-J0)C zx4l8ygrAGn;?3|~%((s1?)Gxb9ksb896zmd!1cz782KCxuQKUeiah*RS56%SF$%P0 zaytO$TM!6J)U5Y{#c{7EP)_Bg6wDPVz>IKDL-1yH>iN^7)pFffS`e2wMe*Q=I)RgO z+hF%Ad%0<6S^5~&ANLJDbWkOLO=182d?m|Gmo7WB$!d;FpGp`_sAag%V9yiu37ziG z=&But5KoJYfSO?E#&olsS`_v@Q}~-G9&G(~cqoj!o@{6EQgiX6J{E!1$p*})SC#E8 zC~i`$6Mo4-v=Vw%>0Qk-vQQ^htE!yYbIu=MWoAG4YBgGx6DoiFRwWipt*m8lz`LSq z8|gMO@OabOA}VDM?XD68%wzSB`=Kp&j`xJ9&JEJ~P!UAxfszNsdIL-<_(B>5Aq8+tKO;VF!$^tvbhReWWfgW z`;CY3z(JdS`bNInfPEoFM65!VqBglR9mL>l^gY0K%)Bw*iTxY`e}W&BpSy5*M2SUg z986ghh^tdE15i$g~b2!hE zWhFNpS^Ut;vR-)!D}7Y(7M%p9itdSFV7CNa9P%1}<(?O}+5dKF!ec3omu`YPP?n5> 
zHN6lMZ2Zv&_)nV$AJJ$1C*xpNgV{`Ed@u0luJ)d+22K)dF=(aLfr6M5c@q$k?Wy8^ zz`6jwFMhDUUgrlc>6hv$RQ~Kh&E(p$z9imU#tX0gWsKTct~cPe;KwfP5zt)?LVDr-cV$q*ODynA9c z$ZXS=&Nsy!7{i&tyP$uxA(r;!B!!_RoX#qS1wU2*w6w;JxHWt>Y;qdbHCxz4_j~29 zpHq*zmkJ0eDE2CnR|)96!L4K7vW9fx!fS^|vSE3JYS%UH5@Vylbx?NNZ*h{1lP@*@ zy6UpzNU@2$amO~`+AdxER1Mkj`@sXh6m zzVaiMh=iNlrx)5Hyn%WC^B|17tT}R;Nq##Xde*&M=dGMT;uv#|Am3&y`l8)z)blZ= zvw4_6?3K(Qb$3p-r*7h81+ljuIW4ORDcQgCZP6^&vAA)+X=>TU{Z01N;@haHG{CPK z>yC`iBz*De`4(@*c!b#L2&({|Eu)A#N>l`ksCyGQ%jVzTLv>@d4l57`vq3RRqNDOD zPkO_%EI;yZ%l7~NogPK3626mLKIyvGmGAR0sm%O?oA-(1_K(#lXVjIJC{>QH5unlsH^DxL4?6VwVmiA-2!GDw&Y~lG$PfFgTvpBc=ZiX{o)1>p-PD(r z)D?)$SpoJ1iJ+Nc_-#0w*dB#EBJK5ibk@TO1LZ%so0KN_`@_;I{yie1mIJ&q#v2e@5EB)o)GxI+V6g`LCCaGcBN?E7UI^~aul?`dQUSM2h+nzkRp zu1MM)>t>yjkc zS9}iU%luyW;Bcj6O^Q>+iE#IEG7Lnw(1ube@Uc){=~I7}Qy=%^2moe?&q-}vKtvL& zlsX@`B|%QLEf!K``Gs3ZpgN9rqV~DhU&r$% zbe6n!rjO>|dJtl!4yLS*xjkN-p)2FphV!O8B0kVjr>_@KyiL!Q*ft;a9@i8mS1-7- z(CN$&UGO_#FVm_2aMR>|mowUtxPFfNVO8s6o$&D5a4*3_y^em{lhRISA%uCl4LZjq zDd7gRqsL9QF}y;w2m}yfjzLjuus_4-!Ul2HYG^R~66Mqh*d@`W-z-GgNADpqp=yhy zS(YKoq)c&r&M^`$x4&0f^>p4DVOX)nemKlaO5zqVoQbaS>Cq~89(_iOL(RR?e#l5W zEPyp@++Qsb@ZOtiRhW3JR8_i<9e}pB7j3UqoOqBWI&q^-=`{t=7jY-UTk{G&y zy@W*2e9HLK=j)YdjDU~1sYq#0eeJ!WAVj-1}J+ErINCBHf!a4Pb!DfP5tmM`F zOAIYr;+!I+DU2@T{a#6CIUr?y4Q z#CA4qhjnitH4*0e`g7OgJ~yL@Tiat}omISYH4TD|X7n^%7K0-?1Bj^PkFOffY+l?~ zaJgNJ(rZxj>0e?#tGl6hR3s?Ck51wnZ&_M|&}y9Y77t^a?buESQtTF|qD+S)WQjbUTpa)Q6a*IEB-4PSOSX%U;QRRT*A^)0oCKs7>(at9VbQ z8kgF zUD%>lR}UAG@)qjN!-bJm$uFdXZwTI6>I(arSUJLHJ316bNmMPhUBBPtS#G#=%^8gR z*^UJ(F=}1dHF|+eWVyw+>6y!Ic3U9aiy^wfTEl*#Ewp@fCfU|7DGa19<>_;8+sR}C z+R1CP0+p3Wf-HMD@AL1wm8W}73ZP68&-BwD+!cGWiq^R~gWe7c`H1ewIkBE`9Qjg! zjq~Nn*iJPat9fc;>He|Ap*(rDiWL&iF)iM|>m{3z7l?h6USZ8oJTlg$Ftv*zzqS?<@tu3haPa0fXywW*?M%D%dEYf4TOT=zjE-F@bjx9vXOoqP^<9#T% zGAuv_qBoJMHcX+N!Z7Y*Nj7M5d1hmOKVi6Ve-3)0ke9W>dsFX-u>6S&Ot7~y+lYGI zugfoYNvJbsss0{6hgb$*KCz0oX;;b;qY)TYy@D{O75H|_pelRAF#sQkIs4hHU$?DU z7cxfOUazf-n4aQ;W9;Cs$qF~CqlWk*r&#IqWZO{NpgiMq zVhVoqDt-+N!&UBR>JzP|`qa8T`zd2KdEXxIfR27q%a6qLj#4dG2_dM9?*!S>?&yby z7q!eOVJ3NQw6;>nMq3;RdY_Ig*DT|vH{&v^cRRIIB)8emF^;FtZf6@O%69~poervR z!s9js`%uS?9Hr1;YS06YiHMPGXQ(UwVJYO&SQXp~pi-}A*bfzD$vJ+g(CZpR9{XB@ zK*Kpff1(e{V_DIkyus3oG`d0c-FkATIP^uoHa2b2SqWEFO>utU$|@~&oMUEnXY=ee zl?(YaUj4n=hab7KcdgtGi7gr(!xE6ZkFtl~`_EZCrIEBGr}J940Wz8m_>Q%C`oJQ? zR;!HPx@PQfPYcoDlF7W%OJtikHZeCzkAp&x-_E}~1)O;!(e^5&CHW*H%Y)=|g4I(MlV z&pdIrMwtcV(}YJSm5|6^z0VoeasjrO!KZe~u-mM3;Q})Lk@5Z&?Jx;HjysD*C!N@{ z?{?{#x&Ef*eyivtlfdx_t@V;?*#4JJ6fkF9lh|R zhLOI9iT21LHX5hJGb+GmB+Z$ON*B#Eoc5 z*~r+wURj&7E|knq@f>+wXW09S3aXk_j^z?ZO$@X_dp#w6X&`wIXpGYI#U|o0!wdUn z&zSk1h#r1%q@v7g>3h>KX6s~}9DFmZ!Is{QDk1Y$eP|C2J+bWnpyAT=@>Y zW@qx+=FU;!3C+Q`N84k0a%)>R(;ZHe_JovdzXTm+U)Eal6MIH8@bF!@_Q-zGlySL; zBk;vSi!TeHLmH z%tF*`V(}xl&Y%5|c4NZteU-MZ*`m&o$%^~H-g1q{NBv;h(I12Vj*VT4;QUL7sdb-U zd=8hJ`wn`H_+*Wse1ok2MsfeE=R+S#I5EWnlCSc*BwW`W8v=$u8qyh^VGVPU6zq~) z7q;FSzdu%y^i#6#Ibi|eOM9!!6s0*XQZRrVa}<(v%SN+Ww-88h$(EhKckT*II>uo6ksu_<}#%_oxw$ z>#lEn;U9<#?6ku2VS&n9;#-)lw&F#a6(!}^+mXdqezUOaVaYe0+M+^eZkRntQeG|c z;Mc;Tx7{JPFzfJSF-Ie4G_71E+VYE{&BehQ8l`lv13OH{$4>22O9J&QtD>wB7%|6? z9SN=dW0oNQfzN^#U)hsy-+h(#`;CX=hi1sH!JcHKfhM|t^BQeqUW88 zGIla%DX=ve%EybX&~8IhMIVHk?!9cw{dm=tA%`d)eb3XRP>yPRSeU-tBoRma!eSD! 
zNImaifqaw=%Z}WrIaw@+Y*VVkPqM62E%t#_7KMl0GOS zN*8W6=y6MYRVi!!5yiB2p-qQXG{tG?<@|_Igp(1=Hi(jw0~d4`Ta?pc-44GVCDp00 z2Tyw0@C@Uq&BKKn!|81M5sgW8dv>&0Pa1MW%-VnZx4cvei?ZYkv&^Stp{@|CuA_at zSG1?LJt<~xUGH&;-~U;If=`<)G%V2ahu7?A6cNcr=B!g;uL{Y;J${-XUHL_YZEK1< z(Mz`jNNRyau_V~d4NH#c(G8s3iJ(|7F)sIN>lmWlRUv85oSn(29vsx7ZEqDtzV#Ac z5Sq|ho*{>ZEkN{mo_Om%OPz|oic=xFZuuc1+Ver#sEt$9?;8S$D*gj^^)dxyc6cO3 zwC7CSDeJ(OvB1v_c_?fv!n z?3fn`S1$V^f9<%NCC*v3mv}rT5Z@!E7k=F(RJizb*GSbgxkE^2uBCl`Puo42QcnMK z>AH{FbcVz5O26~lOfDvrE}h(YWfu~pMr8YvlVSIbfw^}my(^uGyuHp5cLCNym*g0^ zj?1);j|-+b<2EN>(U6L}5&I;vy;Z0>`3T$C+0xc}7X!cFW_J^=uf@YSAs{0gE(>76 zq_7nu9IS0SaB6KlX8#=qpUmibzxPfEpBk25pkTj9_X@%*l?&~b)!8w`l zY*OPrHAg`-!9-t1-9pfO=f1@`dK9Qw^b>aB{Br>5}lt5TYq%*XVi0g z3q|2gX^OkbT;Wd;@da+*t_KCavbox)V(&*oF&NQ^eCn@D_nzi@kd9cBfFf(%@p3#@ zNZfD)$5RI9fXrnI6(aHja_4TlmZwg-YEM&vcj1C{Ll)cJsD@os+y&7v=8ef>-{QM4 zNump)=Ok_FxQA?wsvJoysV&DzgF?o!6G9>FGRwAilP_K=m}1{V{cyoi*2YJy?0#k< z1-7E3*tHY-eF-b3b4Esb#!VIXQ`1N30jJxGi5oipefxVDFJ|$Krlm${U86VpAdK)u zKGS8E_q30P@2^jmFS-TOGz^NDwAf|4xHqh(eT|EyU?{eBDOFIENg#;2_B_^z@>_ap ze-D(vS0|G~`dso8I|Q}xlj<`mXMlfxR8@TXJuL1g7+HDauvYTm;IPo=BSi1WkCs!2 zMJ)fYdDqLPw6urr*^dBZq`> zIofg@eY&MRq2-L;Y%-#(Bs&;s?NfWFj8I6BJIZNDLoB)=O}CB?3V%=BGLE+Nh%Py? zt`h36ynRYy?BLljYjoSGi_?nby{B$TTO2iFPh!}QqS})}8U=flw3o#DTeJE?PN?L@l8vX_vqDwfeK6e@UV^-MR%#V-LF8fYqy~(&GnkZ`Gv%h01>9V_$u_o72_E=gUpSv0A)?>OeHKaX-v}nojtc|2& zQGQ)mv(@RBQ|Kfk;=!FbG@Dv{s}9_@1I`NOuWWzx!j^0Wq+`d z0cN6eafn1S5Zq+Mms{u5(`3ohPzK7B=bzS{y3Mtk2Urf3f{p+g0})VmqV7=$d?)!f*whn=2Gjy4PJ-^td=QO9I=i>&}Vq zW<7IAC%EUwu$aJePE!3`)5R-J{Fzo7Baw!0uy8KY zTrCNTQX=N8=5kf$J+$3xzLER!q0iy?ahU@dgz^U+iIt%~7W+^CMw%r7mxs8Fv?qwZ<%0dsbj|4=N zIY1S%^f$f|d$WyfZUG_J?V@4{v%Ye);R9bY1?4Wsua;EQ+o3jdm^Jwt1$Uu(q)_QP z5m(6Q$tFFONtKVj$~>;r$@@2=U5W&QrW5Q9yI-1Xp+Pddor_NF`6U8E9gNqrDoVn3 zxdw-2)Q?26>UBpI(+giyoq1G~Ytp6=mYq>Yj%gbHR&EwFq``|G`II|cpyprzbh z%FgkDzNa~p!CWCKze(&vjjznowBZ}?vP&ZcG9+|gGxt}(vETJzTW#d(-+{-e%b(@4 z!T|hwTwvqyhtZ7}-#>Uh$?THl3UMM!l$WI-3#;!4Gs(3ish;Yq8#y%>)=^9cp{I&7 zm&I5gv~=Zu|K^Y3&GWv?;b2+b8(%S&+>)Aef(~mvg{a(jS24=8{S}&G0Ae5PUqvahuqikxPYHfy&jP$b|>$ID?k&8(Y z-`32G_adWFj6fM&W~wS@QNlM|&P4vTEQ|i|3U+}?L|$2wJ{476uB+v*F2+~9l*AEi zJoCBP=NBTeTl8QY`h!3Ep0WlrzSD3S$=sws+0*1B_O#rFA6WN-Fi{( zb0#XJuU?aGIaVJR`XOm!<4XB)&Ua`10T%z?)&~Ecl9)_*ZweTG!qdK{d{MFh%L=TJ!$F7EZtP`!x>#>{RC$Lk>(RO2OHaASYO-x;-gpRKmvpqYS@u%!^r)nV zm!osNxB6#R*+3y(<{i5qAaf-Hj4crq7g6_vt86AL1pu?F1!e|dWoJ5?ih9qx)aRMS zW+$WFI6EslkG>c3XUB{`DWcdepr9TwFXAGuCn-El?4f*Coh(?Jf6NR?3qRYv3G&qF zZzp~&8q3DP9VJ{G+Ihw?)Hl)`jCvhO7v%4^XDGRla<(rDrN2?JKH9nLQ&qOM#eDE2 z+o9{g48_h$H7sSHz%zCWt|};4-B?Iwu^l?kg%sSfYX~GFI!xw@Ko37U2MxXtK-0w* z#M0Dgt*ZVTBld^U_abGTgdpvzt|^?EJ=NHhQ9okf;&vE$!QR&L(mr=}Ba#x9t8z zX)7HT!6>-*>UrW~q^HfXU9cdT_ebxnnG(2ysOT7#Lchxxmff4~R7YBc@!OaJmZ~mZ zdZl0B%yh@dIZbWl96>C-EV;>Nz=}MowblQ0QA38TY{&OAD7fE`VJ<5*Q&O3lBqTNV zE-!x|6PWKUpUy@Gn~E^p~KIH6SdxxDr$i`>T;(wa%k0 z{?$9Qko)dT_4WCiJIIxg_VI?=>C_+0gbk_1mpf=nZd+F$6{7R7VxJjx0n!V+O5})tRyDdW-3X-@NGC$&G0J-XPNH=$$P)q_o-z! 
zo!t3GJ1gIItzg_)e>IGtx5_*j5gp~o<)qlmLEd0fJEXy!Z;9VupZ-^R3#O1HIYkts z=uWNud+I(bf4T}T7Uo!f6@&-6Q0M03daPg6g5sI8Wy8$6TVMUjp#X%^1j>6cT799N z?DVv23L6kx|7N)$Z|tbPUy<_=YXoxpE^biJKl#JZi}Ryt4I*YR@UFIQ7_D9Kr4vA( zQ6*o@U=C(SqJAx`jmcVW!tdb z#0+sV>>Sk5pJ9}4-1UyCI;O{Q%`ct%V4QtuCx81_wG=uke_PwsV)l%rjAK?w0`T8H*h6%4_>-T21+s2Ys zCnh(jYV#Y{lo*_>RNdtXYlK{l(xI+N;rZ*0yCq(I$tT_nh``zEmBA-;^w(I{^QX19 zEqZvU-J4}R?jVXg*^y&!#)HM2j7kHYS%@u84hIBbF;_Ct3@ndb$5>ODQaRB+V|uA_ zsj5oDw!@8c`m!vV_FfH>!k^5V?BsK^X;_3UdQh>E8N2tX5fv45hmf97L6?wS+q|LB zV_;SsJ)-my6Y1la|B_66L!MJer{O29&*x`_m)Uj1Lv7)jEX-0j>7yCil|nnXWnE(g zZ}4A^@G+H^e7f(5h^Nzd&u$BAbj7FFwppRJ>?DQcGU~1~dL(5^++KWhq83+ZW1L6< zI@eQ6YHePrzl<7hcw3rgFJ~9uVcf?c`TPFh&t4oB8N)4 z%akviepabwejqfC&0n#bmy1x-qEO+QJ#<%}i?0@F1O%jQt5NSB*PAR-u`JgmFgq<|^4J3ejN#LK8oODl}O&Q?1-P z_G#_k1_7bQ0@n@qWa`A)jtbXQn+w7N!h>l5Lu9{(0YpH}{hwO!yyRGxq z$`4FSYYem^g<6hv#N=N`7}BCdjY)C5AGl!h$HFdQJ%!-PwJ60cbd5_ymI=4 zB6>lVD=W1T^T z`v}OJ%cOVAX&^Rt^m8vaoMi3B4gsp`Ob4twOyB3EvS^RISl{mD5J|>H!*Np~NXUa0 z!mJ2tw?Lh;_DAA{BQg|w!vq7?PUVyP#WU7(>|ubu-ii%ItWQLPYEa=(gYm1O2Hd0- zcIB9te^E~kr;j4zEONvp+?(hRSSbO7mZV|402}1fKas{1-!4(<{9-gpb6>ts1qJN- zfYCaak}xHaUA3e8?i<;JFE}IwP1m#Sm=Qj?B16TD7EGDHcrsfJk`#KOw)%{4MO4l}s|0y>$m(ZC0 z2P~HFJ~s^Bht0>TDh5xLKh&vt1TXV#>=&k(TnLV4-~;?~l}($6!V^|Yr*b$0SS&k< zk58@bl;}~CJSO)@2LDiJK#sqP!ZU)mzY;>#UHbT47Vl611hBaIJVf?j9f8~T}gDCyZ#le^-f z&m$RUA?SRM=bHGE(^B7eXv+h=`Vr{&hAmj{8QJa{1_ZD$pHq5RFgdDv3+J}I&>g`h zzbGnCd2T!Nc?A6?vkM^SlMI&_zsEDq@zXoNE7Al1U`>R&-C}3;p%nLfzqoK_W(R{d z1tVIPgGiISC4&Lb*rc-)GG&cdDFjV72PFeyf9=z-mlftwq4v*ZZ-W$%?686acn|U@N0NLHs)P8Ha)m;ADuZ4eN z#R&wpQD)D#Y$<$siEa${tDhwETg3yd#r#LaRY0Q|svw$!JsGLAN@&18+H&J>@O?h8 zNwO~q>sAK$DN_V0RSmlXUcfMji1}WBAvhBGyKh@r?W__y&ZEF$#=*i6)qb7`iA278 zF!zyqw{oxBlu)7@$gaDm57Tr$0%~&Thrg`jKSQsBuQN*$Tv;1CUD&4`w#=xXKMWVR z?5l(Hm~Wdu*zNBGrIy6PVk=uNYv;71-1ny>eLDtOHawvMKOaHJR%Fj#^yTgMN>qB$ z2b021%@Pu?L$H`V3fo>n4etotP4RF>t5>WfCw$x92;-aRSsN*QP4B7&ax= ze}nSwMj)*z{88UFa$cw_@*(9u01b5wJC6kdVWC~wiI5J>Cp1DVfre7fMg-(miJpJV z>jjgO7PDGgkPmTL_Wr;ZPVh9PO&^aYA3weZRh)B``$dA;_Rj{*IQTgD7*er?<>GJE# zIY9L;Gm-m4^(JV#xR=kxbIj-D5v5R8$MM`B=a%k$Sa8IO^JJ0KSN|iW0T4h5j7=hY zg@VabC?5dFTpTvQxaEQlQ@H?G$pzi1azS*vd5>Y@^$$V@&%gFiT!v`1QTxj*7thV3 zBR(g$C?s+cFC4+4x>a#B(_WLH3D3ArEv0(u|NBc8-zH$?_-o}p$m<&9xBEpi9OAI) z)y6?;fa!?+V`s(7gw|`Cp}|D!2IHkhHwfP7`CCsV3a`oQxV=jq>zlJQ$@iTMf< zgXaMkPgC(Dl;aY`fwOW1y~yu2G7fbVo`^N5sh@;JpNWdj7WyxZnx(J(`*<~7Lw{X^ z3frm4mP|a`m{TPG{5L>lfH!%%rh@};`^c*})oniMk8}c1vY^NEf&mkB&GEtU;9JDK6N5FktvwsoyqO+1I{7DnErGC3h=3I% z1fbWbPB0N@wey*_qXDR~_h#N2TYQ*?{gBiC?db_3Q!do|50EagXWw*e+xFPXrcKG1*6A zEH3*8_=f<-R-3<3?Av$sF}zO&2Ys%^(3Su+jOz%atae(_=tn?9W68ekC|m9<_HG)# z&u>=y6X8VX0SQV)X85^zZV4QQ%rd2W{dPtzG7*LDrG?RjIw1A8wsI2ge3kbUzD zo4j-qXo7Mq`e2z}4vkYkC+E(|DO(>EZW;d7l%YISGpp+q}3vb#~-a&Et#wpDdI(ARi?RluK&Q7{rF7AGz!`;rMo|?+*QY$U!%jri08Ot>v;m&& z9~epRxl|o(qs^C&n(V=zR9U(CT_y<;`>?&@257S{;=PKly1FWl;1!@5zoRayc&X(D z5r)x1!rAzxCp=hG8NdBZdJG>9I(30X+X7uz(T}`~A&xjLHGs5khtA zoSjtLTkGAnb&P-F#S@J9x5d>fz_G(?HavWu!pHa^m7cM?r_ukEpuXpU1m}NR%8lJ& z{p9gXlvw4lp7g_M1beTBdlhn#H(}nPe@!JH<_(oPI)!s%2iCYv}rsrEjRpr)nTQmdMxPSvdd0!-Fk@Um#n@N(8-doP_qQcRllv< z@%w-i@iRlhPafn!s}*WH*qjQbxaE#phj)0HB245z^0nFc?MbW*V*^R) zEER!B^#}76cr`tvYyy|=PtXn1Iwd5&`pvXt-6{~bv?x5ZSs5zAJ79`4FHE7WH%RN$pcDy(v4#GPWz@9mM0arL)I~lK1*0^U$56hG>hD7L(Z9(?79-P zBaz=qqyE~}A~cYa;cvMqXgn4rOMa+OZ8KrLihD}u=OZceF=gj+vJQtYj&7fz8>%aq7+R1E#+wkK94L`@dw-Vc& z2I6EvAWK@zWjzE{eg$xhR)tk_6%~L?JkL&Wid~`V^s;Wztxc8d>bl z#!sMssD_2eKru+SZ)dD=K%q1V6pkKim48mM?{XF&4&-efw#TPE-JF0whchf#$HqR$ z2q8iR4K!%)A8nkft;j%eUZ}GHUOeUQB@l1r)owIh>m+25M)SFSy+l>>BM}(wS|Hz& 
zJl?$DeI$p!)%h)Imwu1tvDNVyrMWuB;+-`z2*@eEPrX)|yVx*+s8$UZ-jWgO?DwPX zTws`7EQ$q2ZA`+paX;v(yB5FmsupdKnn;tFR-gM0?U_fBs_n}AXeNX=_s8P7(D`BP z&Jj>^VoWSC$Bojx-fjkBg>6mPzx+CHk67Q{42cW)V+=ff?Al3A^o>{@ZdCO(ESXcu z5|*s;fqJM+F7u`I{YN0qe(ZC4ypQEyx1du&CxFJF-(t3%#8whAmBOqJPL6QZQ*5fy z4eRHi+xPD?#r;w6IT%WDVY!z`FU$qPib|tufhGfQ`IfQ5AvSja?`f~bn6&`~WGuCaV_c@bX zbfAgY?w98iCjS4Y<9wL;2;^=Po-Vg3oP)1dk}BC=I5^n{YR{j5R0f|hh7tGDDn=_1 z4Il#-ZU;pCIdXRZad?CB(BVV$xIV{y33VE2oC(TIu5qF7!~n!Bt0(E}(;x|-YYg7( z(1?~&OQ1%b?EoRs1k4|kF^4COf3K2IzvA`co&SV^jO$qva^>noD?QN02`IXdanv@V zv+&5|R#^WtHZ9%@lUm!#6h88Tr(>AKzH)6mP4o9+f-nN~paMUWf z(^1s=J@u+K3j9}4)-^_<2Sn0KHu5}*H@qLY?f>#4TlK_S8>*c6<0HpofqhH=oI!lL zKwWM<%1ia#KbYi|66#x^=U?X~&L4~Xr}Xz_G{D_lhVV~B1|OQj$7@R09#;V)N`wwu zzTE5qjJb}QK%4D~zll1~->IoEPBhm6n(&B{NKJ>D7jSR{1h4<(h&SuHiLrz+K_Gjt zdzy8>?boCSwlbIsg_b|{;%L*giVZ-@u}$q?j}7q;KIPCx&|COZU=Y3gj}yjR zJP1K3#Bu4rWQpaNRrh`$h=XhMSHUJsevZP&7fPGD2IfBfyVvUQBH|n@(=GfLQJ%%; z3o5IPkLnrZA^tcs*k{{b1XNf>FYZ*p22ZgoNWrI2?O}fsl9r!9knSA_KIy1Wo%-kS zzw^N~s`0n;1OfTV@L+#)N_1cH!$OUsvQi3S4Imt1FwF5FkA08=p_-HPO1OOrPu*Cf zJ!4hk0FvzL^sxk1g^z#45CoBv_w!PIBl;$^Lpb&Wh{!?0&eiMn($zL8)P{ufsh!Q{T>Wf334>7PPwv-&4wJ`T zvC%zoAM{>$?5*0VEOD~!bNIt@M*u<@mW6lq(51f^k%uYBp*YBM3waa&b2zN9SN}FG zf4%8c4&ZT#r4&+IID@u+;CE`-qoS$$SRjAY{k-!)B^f?``C--{b!5W^JQZ~3V0Hv@ z+ZRMJ4-R*hW*7 zA?9uI-7hj7J!MTM`%WgmfeqdA)V%$NkY8Z#0bRKwS9_ja9_qpb6Ze zF_>B>r4@V|JyLnnJr7j2Yn6O1%hzly4f;+k#46EawrX^?pD>2qsh7VKL|%gtZ^Yep zWB_+3Z`4E9$F@KM=Z;zc9TKY=aMyI-e7#iF^bSNS?Ojx->rL0V`9NS&I+cfjE1$35 z_fVTyv7kQO>F9Ak_FT#jCDCuYC%=w&ExI=!rvHucl}Tc&{+Su&2@(cT;WQvPyo(nc z_Jd?p$Z=(;Q`C1WM;MPQ9W52~z4KAd36eamK1^-dod8hf8m>SX*tX4!ge+X_OA=r; zj(LGu5$UgWFcVT@1x7(nYnL%8`eB70QAeuAB@m}MTt`x{7-ty*%skBdg5Z>eo~KQ~ zfjOpiJvk8qsW-w4NAL`_$MEqxfI2cc+gtu~%!C%K)4}x1OUZXKq#4)J_f(IBj@lMO zl=D*pCm;2t36U__rvJ)CAg9mRu6E?5doLr9@b&T9&Fpg%MoW5qQ`(k24QcC7cXjP4 z-bG(xRpN3VDmiGHT1r#Hh2eWjCo_#*-=oO=FaNo68 zkskXXcyqDfCN&^T=^${fDo6A4scei`Yi*dn7ErF+^_%URudlohv&s`3@k z0miUP1gC?UlQenJcO*9vT5z=qpwOWcOd;!?d|s!=2c~oFaW1C+mjx>xx=r?zn&(Px4bxZN9BZ-KPDP8mEKwCo2c_lK)7-ykO1 z73go!-sLp6GWf5esr?6#O9Z}K(5(xgBzmXmTOjC=j$(i1_AgR8*P;F|1||qF`V*w) zYL)i}&#DPCj{@T)=nbx>CC=cSC%aa_=j$8f3Qfw6xDv z&OU*Fv3$E>r%x^yAeZaOK*>tDOLz>z4wKaVjRmo@(GoA9VbJu$2F=?@J`hw#G+0+)cXe&F&BE>IbLJB&Z}%Z5w-nF>?%4RofM__e(!p5BY&hufT|S~}iN9kIg}t|Dgkcr{RJj8!r&wB)g{ zcY!$&i@OigVMZZtz$>eJCPETVV*-s|AFaOG3{y~Yvv5t2C@E%95A^o%=W;JF|^vAMcs z@;!tS4>t&C#7Rs@<-X9vC8zY z*t+l{lULd%#mh#7>bvmnqA(_3^M$4IZp8=d{L)DXAA_B-Vb(FQ`9h}?Uv1Il>7Yl% zYuYw5u>()Z3weh8uO?l;zsB^W1@9@5RN-B(4;7c(ikeg|g%s$aQ1Y!qmC0emwb-O! 
zT7N1}sMzQ1cRFYWQ73g|mEd-AH%^!^?|rcOa3`t>g+yA}&jiLZ#4Ee8){n$vF zgV-9qw5fsc1~01mK%7uajl%GV_#5=9y*R+S{N2(g zsK`!)zR&_Zz#5rl`pmq1LT6o z@0)ehq=Rp^kzfS#PH`$2;pxWT3s1(wm1`b|IW|sT#lhQ_?kwu~V^$+d0yo@mrW&QL zSkn$T?7GCrHNJvw?XKaT9^A1I?M&yoF1hfi{?4z04 z6MET_F1OW{WKqpBEV>YC$Dr#9VWI9F!b$DNX4)M3VP$LGYwIn%3C+V8tmX0UEqG$o zXZpCxVcLPVftvhc!X~%fT#u0++Ch=GdDY*WTVBA;TDjm9sGy$B#FJUXzGsOSYQWpw zy%57D;%ce3?z6B5J$-P-t?zX30z96%hx4;XBiNj{bE0lS^=c%Lu#!N8i-jGOuX&Z;BC3;|SfZ+7_+CCLq}t5gTVLcsa&vm1ThnVSN1Nv4S-pzP`-RjJ7pZ zAAjfj4K=TnG-QTqMfwRy#xyVxVAjGVXZy(Z*BfRYUX`R|az2NZ-K+tTMv}kGe#933 zt!l{RVryWEM#`{A8g{cUVP$rF0?Se~y=>WW*OIkmt9J5|!Gp$+p8xOh~0G73g6(%O7%H7a+W>OlcAdEVFh(`ssRCY5Bhyo0Z73m7ch- zX6xBM)5N$2BgvpX`b6`pK!N;hMC`4#+9y`;^wyRuo;NUJ@t>CtoxAH>{WMOoHK@?2f!|c6mqfkuDI-b}=pRe-H0y$+eYXI54z~+TK&cZMhMr%v<%|4oKg)ltTCG|# z28xS2R9{UDCkz`Gl)_d&nu>?CKPu`!>90aw%WD@tbsu-)$JNG5Z|5+jN?<`$g=5_` zHXZt2TVs%L_gjJq5VB&u?oH|2=m&P+M(tOyN=$$TiQD!S-~S%ty549Eicw~X8`l36 zqr&?hN7$p8)SLd)rM-8ueU$m0vZ>T0P#>wf-SDSo=#^OFiY)Jv_Ul=tR*x~U!Le|| zxKzMZ|@K^rX``JnB4{)-Pih zMN(Yev(VU^Tmf=4Id(y|#jDS1YlUrXmmgYV;xt3Vu|<}H%cucdTPP7DOzbJSXYs@V z0{kv2@SIs6eoYak!iX!~H-r1#6Y&m5#Xu#` zT{QalN21s3+hEHZNn=&=z0jny+I4R(&58ppL>RCsKNk~}XyRN0ZMZ8%Pk1VODt3Pb zK;tZ{b01#KQ|)Vbt?-j6eKtEB_L%52;ooE}QS#{+9%}44c!-vI_wrD6qoT+GOub2C zO07B+>o5x3$aWYuUH@a7*-IDo24+F$@kP4@z|AV4W^LO6dp*+iuug(1I_r&;9L=&C zon$|X1MXcj^L3PhIc0ZflwgNnDvo{; z=Uh*yaKU>otC1*86b4GRnAwX=ylO%vN6W(&5ohhL=A_$?il^(GQ#1PT%u(C~$ji$E zH%#k8{ftLu@r!q_9}TFaIyVw;1Ws0NdrtL*cU$&$FI_yAAJ@YG*+we z$bMaC04QwO`-sw=32Nr6h&Zvub4+RiM2$uVDq%TfDGP6II6LERp)UvZ!k#5#G^t*7G}gygH8XAn@b5c?w!rzJ`VvoSjD~7WS`0vi{ijL-lt)gb38Y` z_F>X4ZUGY=^25G$MBF4xtK1?fut4-EWX@V+M$=DAuSaYan<9x=@nhJqN7xt+1+U^m z7SH&rI9|)_uS0(Um8I@RZBi20XRhQ;7;Skm&>FKUy7vVpqC{SqbBI8eOaP__b-YtJS>=65ZwUYHh-XI}*|YtsBVw>f~7xD3pL z!ze-18y%p_`e+~5?!nzzqX4!h6>z4Ne51VPn;f|Bz`|%zEh9e%$*TZ2W>oVd;63@V z$ukS^qdDIv3e-}Q_NG_bF1XUL0WN`EjalmHHGA|!a&MsQKkyf@=;2CL*=a$rGYj~? 
zU>e(&SQ^x9GflQcpo=Jc7=*n(y0{Y z)8TPcygz3r;Nu^XIdhGIP4PaVs7`?hG!m_ZZa-L;U{wL{fuyWc;WSK&lzx8uS8u=g zgH30pypv}`N9o8|s|4x7H^~s4eBocXASl=(Rwq<E*}=+eI; z9^PM030r~$$SpIDK56`%Z(Q4>Gky( z)?23E;G{~~nI-Nmfm06ihC0K5#X57$$SPas6rw`D{W56zH*y5|> zSjJwka`Ea?0qw@b5VwD%h>Lnf^m!yE=^Wqqn%b1FI$iWojw9IjSGM#&EMbUeW6=lp z4j&1vkNe7^AE9CTx)tt8Kr3M3@dvgGekU;WdRjeGUyyGN_bDt4o6euUNp}=|%FY1c zx^L8;c8v4E8>MImO>b@N=G>7QRotDU-p?21QtIBi#o)GEHL4+wiI20uj#)3}S3C3V z=Ns6Sw+_X*`U~+NKF9?w!syI*MZN85Zt~JIJkIezXg(3*N{e*u;qnXeJqr9~k;Aw& z%w!-dL$AZ7(#(AxAwJZ$O1(A1HJS%rFN^HGlTR6OiR(}Ey{YLcfycx|-0hxs`ibH~ zqn#&@hOg{RCbjFTA`e#CWXsv7yAd8>abu_|u93dW#=`PW^*++;E>a$@e>fjuril73*o1D+88Ry_2-Vf5(#zIdX|Z7Auj>qADxc$Cnrko_jl7`teL{JZ93(|5tEy zgez?>@M=ZBM@@|1>RS)9_~0MHP-v~f78op2q@5b%X8K9K&A(eYtnZ|KB2msi_vX(nru|l zEJ}7%#Hn@Ya70?IW^ks@4&G8mnOn-vcMzS#jjWiI^2t5aE25%>O;k_&^}ej0vTuY3RCwJ$wVg&a4L?)`pMsAFZH zFTv0&-Tx9A@D|Uk&3s#l2Cw8%gBaqbG>3LtAJ>b1ah`GN-m?2kKc3PG6A{@OLCOp; zNptuNh~Lpsk2fra3L68%+JK1T*Fxud6Gh{tUaCeiTxX7?051Tn<-2gcs&-S+1>8H~ z^OYaz_&YMc^U8~qK;8ll@n=bS)NwP4TybCMpC)Yp7gt$c(;RwXsQkyk$uMA$Ey*+4 z6O%W@Nf#G!qiM%9i&@5na9g{>1v%5oA>2wK!Hq zd2ffeyEK~LDyXsu?!f%$h89x6y`xgE44;>aIUE|>SOUOOgv#^n+n20U+qq8!0Ir~=Wzr6SMFk5O`u5zQV?EUZygd8G)IPuvG<{q5 z;=u*>fOAc=SqH0ghX5MDDju%&qT2^p!l4@$AN?9AK*8mH7Pg=YH;?>ACtUw(_ zd`#Gv)3<{c2vhFdq?nh&hI%*Lt+;3}(jczB+3{j}lTB9@ zcmpmLFLbVL5Uz-%rB%O6XmWVh#9kBHXj^5$NoWpS6OgA#)B`+LLy6D$(^s!x<28|T zn_seii)xKrK74)APEW;>Qy>C5B5uHDp>OtgPjp{w^r#`te(wWXRw?$rLQp8knZ&5q zyi8rbU4445k*wVo)bELN=*N=*V>iiYc)lvYydPVA>k-R*W(Rl_oNjgRa@>Sjn4{nC z_Llr`>w+ZfN;8SFN13CVb=~|y6_x4vaXIlJ_K_$*&>}LkS zruXPnK-*9K^(-iH>T-N1YyE9@L`;X`hevg7yC~P5J1%$lOOo#&7cO{ zlZ70$`xRoB??8mD#WDC+`Oge_@T^4U?q3>PkRA<=1>?HyxRo9iB`%e&Jphz@F21wG ziT2TnJbscsZ_7dzldjOMy)?}CM04$1;bgjV+&aC?_!96|=X)e#(U-L_q?q&lRMMp5 zX32_ur3+i259hUmwS!x+>lK7@WRDp|Atqpoi_$`~0)zUw1o$1L!Y>)fMgd zkD1IOA9+wwc7FZ2)615goa7cP6a>p!mAe3M?&Qk{JJn>7oS`7@QEPhx-$PkEVRgvCU3)c)ll7XmZqckXKwv77OCCSQgu;R`M;q%ZE6jq z8>cb~=Lc$~{c>ib5Id)FcOU~4K1=E>U(}vV7ii5E;A#1*)Q!!~YIr%ZJICKQ(d`zc z^D1xHZwgeqxS;3GHJ*L61iNr{o6+pCqQtd*32~}Aw3~ftVZ7z8`Y*4s`hMJ217pR- zE)vdnCV0?t?AzT#>h`+HnzEl@vG~@H{Y-9M80e1k=i}PPtJuEd$X1nxJy*Wxl{S&# zp|AR}ex-msXH(!B>y5bwx5n9;nA(Hn)B8TkQ{LQ9z?f$%^-HUEJuC`Uret-Njuk$@ zXCUWcH9&Uiigk zIG4vCeL30eZ2z8{+F!`Z4^NWn6>Z`7_2nc~85v_ag(yX@IT;zjf9AYF$% zMA<%WU(A<;0X4z9ks|x_FnXgM<7Ol_Qq~<ResC#l5w&&8lu^Zzc5YTAS=f=$ zQnrDcGGbqi4xDq~TLibSrNJm7gS-~CZdKLwpAt_2Y)z$3iJ^|n`N2GEKn9*O3 z-MzH;YhxtTeSdI}6fCLYhjF;Ta|ot+p*k|7!Js=fMym-l@__a7@dALxJ_YgDgbB~a z!45VWcvs3Z-m=L1bksam<;otizky4P$>`kuZRZ#`Ey~yTw#PHwbk7QK4~xcgnT>YK zM?*h{_Mi>Av)ps54&5e7^6tpDcZEk&H?BQ6Qgx;~!P66(00Tj(0<@m7aErkV(ku|E= zRwgA8%ZYsD3ESdK1Fa=sw&r`}6a2-7zvshZd5vcI+`;bfc82#xyT>!+!~Ac6_9GAm z3;irOBTXYq5|L-8v=;I=Z>s!#$P+&?iDYgr%=&;8V`0o1ENQKu+r_cyIyhDMjvgeG zno8`Qqizg!vTa|LtSbvu7P#VORmHVVn=H(#Jr)L9JvI=inIir+M*#;+Y8O=6MdoB` zw!5SGTg%Zm2k9Fu+FJKCG$N_=KgSnozmqx;>vk|1d}O^qv(hJ}s+ElWCM-XbQ#s2@ z?A)D*WGb+0khQu%gt7#6*7gAhP~|u3plU$CWGfUOG64jcQLGB6Cu16XA`R@WmfAf6 zcKu?@^~&@;h}W>;SFoaVbjtn-RqpxuT1P}uGBFE{A<7D_N+}iUn#fF|`u>={WaQ58 zgASOvhEvhoF@QAQ@#x!$wf;>(&^-A@u36t?V0M$D8hhxwRlpV@=P*ydCzZ)M6IxRC zIy`Oi4E)!32!7u97pzY?j07M-ib+cw+^3r%+K<3i6|2EtQl@->U|Es>EECr{+whB$ zuh1ISQj(?R<=w;z?R$~M_T!w~{(KKYh)&$l7#rHn0dX>}HI&@2mDMQmmzRkWItXb% z>+`|6R36RZI)nDsmtY!+iT5~71!$k49&#f=-KBH)5#uhB6 zkWKKY^xp*d9`m2yUT-U0KMn;-ntLU_QfS&e(sL6%n=l15y${6VeSqR85{Q#ZN@3jH zE|pNNScSOq+YsrpZfzO>t0$Joab45u7{Z%Y=Sj={7jU!L=sycl=X5qS#-V`EHWoWp1*6*_sdD`Z0G$zPOSvy(d5wjYG)ws z7vbFjUG|SF25IK{e$4LiVQ2HUMqBwo&K6H=6y;X;!Tv_xz2*#WZ2Ze=08$Y>+x_Sc z()8|cFJ}J*Ac4CFjlW4qQfz)oz%`VP&6uJwAO@$~!nyZfGp2+Im?E!L?WL$SRL=^X 
zk(f&0z9WJR%(L9^waO|1Y}|pIJ79!7n|jTzb&C#hl4#Jpo3&4R4R~`}{VAmzy{r3FC z#+=H66;L3C$$PRv!vD#-!7T%Uc=Xv1IsTsH*Q*7llyV(Y0lXA2jX+2Sq2eZ|9?JA5 zfSc;GbgO_Sp)Z?j{zbNQrUQ2qkD9=$nA4FcmCU zbS%h)NE;H<^PkxDLgW5leAKRzhVUXOcZ6vA4XK<;K{Az z{Jv%~?8`&hqN2*;6sb{@No^-OX@tj65x_>3&ag{* zimwjJS0)4Sup8l(qfEr7FMNsD%hvw7s3YJ)^c6gd>^52jq9H}*GhnHJ&_q^>23NVX zCs8AC;0N{Ws4}4mO&;$*T5Dh!ULy9Ccd$GvfOKjgyN0u@ek|&wr*3JwG!rkVtx}D% zYeuga9F4*SL;yH}k^yKb6e~CarvbL@D(_~o-x1kqqQS0%k4lOfwy)64Z|ef6KoO`{ z=~BvG&qhCcIG^ZC?~k>cQ`z}@+-)k_`l_g1%PGs18Zz<4`uCL$J(YSThY(H6I`b+t zSCG&vG{@f3BYlWq(zTmPDau0x*3S<~l3dUzDj#rFJ(1 z4p2J^?c7V28pP{hsbAkq^#I)qVCM1mm8~r{0v&jMZZ6A>FXasO2KDr0bX0Wls}D%& z?Y$245OVTS;Z;I1a!c6aetLuW3zz994h8fUriJXqgzatN#>CciR>f*FX<#1IGKVy) zKf`iy*43;-=pK^z>D6b*O9+p{3fu(Ra-&~a&dqUE!u6~6;j^x<1NS|11AsctGcA6LuV6x zp97=qk4dE!NEd!i3Nk|*5PcBN&NVT<3Tj*hACum^jDsE4NQexa!_C4A!>YY zNncIgo8YFJtOJC5eo*QAV*+z?NO}fxWG13r^eJ*J5~TblPdo5YbLL5s&Y=WwVP_Mt zW}k&jD=a8%R$Y4cLbKbzbBauD}e`vj3Hmy6~k_8hF&7agA?rk6zjKrJ*|r?{R|mA&(3 zFR`G)3P>md35+IYh6D@nXIvLkY9E|$Q~7@ssFzkagOS zPvXdGaPJ2=L#QLL`HjBulO|s@(7dclDS~MH6T2BbyxBeV7iNcq2F@xX)~#-NcBWJ8 zt=i36p5|O(8c+bM=q|~Y|s>q=m z|FPkLu2P9R{Hd4H$qwhf^%}_rc=L9sJc(nAbg)`ym$m>0w{gDE3Tt<| zMofhL@ZGgWt!?T)F(0fsLVJdjrf6Ek^c0cu)~>HK9P!*_1cU4b25fw!AALf`c~bGo zBqdEj;AIPVZwh7Ffa-^ch>@j$GXnvv6;6k^8HAxcWIhOua%ULZa3f?bjSLs(ro*g& z(qB#Ozw$Ic;n!n`JoD#94`3U6shS>LbRM09BI~u?{HI}$Ms=WofY%1oF;tec(g@jf z+c)f{5^8se4AcH&Jo9Xl_qzyunP1jCPY|~NwT9(w5cEXQ2j`7l5Q6dqV?dA3bQ;75 zHVsTkuQSn$-Z6(z6uk{uuq>Z0w_GG}IAiPK7}7vG;XXoq%~*+z!FLgn2|<~0^FNKD zuz!uAsD~awHo!d@1y7P0)I`X7hf&P;Wt*?;gtF&>%3px*W5_6yEEV=z3@!%isnklv z=RswbTBiEo16&;R=d~>(*!6VCHK&>)feg?bgcF>Lf*Zk@qh9Iy%`(Qmmmy8#d{5NB zg4js()O`MczfB_ns^_A{vytRCY%Kw?=h!)z3}k)`R4fj6FFU;L zgu)La<#*l^jG^mu>>lNy%hxROc zn-xvBKjRCXK}5uw^=3=HGcJ#m|K%4-;1voCT$Xj`UsK~`M@|#kP0M~H;i_P)M zTeKjBL|*38ukzlBKx@MvAD@&qT4zF>nRxdbMy@WevH2xqyvE$CT;6o3 z;wy9uftoQvd5%$ifW>F*+&Zw#!=zN7*Zk=Tr`TpH2afdJXfWz?1A2e33jzl^t(m(V zkat_6_&5_e->dc>h3+qdVz^g&Dh>F)xbLko@3RA|mL0rTl3@R|g{jf$4Swm}2b3%R z#_w!WPU!uP$`|^)27S+Y3EXaQbvzV+($q4z?^u;ig*$0n16f}A@r^;E+nT_3W72t( zilxgd-JT5HWcJPT#J0+?3)XFC@Imq!a2b9L5Fda{Y@YKl#Pi*pOUQVBmrWCzC!cRz z?y?raBA-9r(IpHcOR3aZTzAKPnKC73s{q07&?;;G*la79R_g=FM&vF=V~+v=CrZK< z+Aab|(?&RMu7(y2XW3n6r=FnD?fv#-HLgclP+Vx^X&3fXF#|Q}hZ8vWOMLdT${_6p zRN8ps5UdZd{|^B#Ii;<5K6KVTWZ!8V#qNBy9Oh>nWb7f>iR?y^{e$@1qt>P+z+O19 zql-f=o%@8ZKD;;=HKy#7U6eoL?~WE4AJmkz5`c2eC%-JQb_ z|GidYLb!-LyMC|LNdD#BfVSz6Y;i?*i4xZT5VZd|6K8Ox>OUf1aO0wm!yT=nX&x~W z;*P^^Kriz5A@P6f*(Q9$|HofW|1ZgY5v0yH>ipkYGZ4D8_uqEL2QospLMO(&O5ms; zIQsn$caHY3)8a=n9%Uf4Snb~kWMFJo9((*oqI7&5R40nSa4yjcxtV{66L_fGOT~Do zUTv+JNc+s|p^jBJMWiYNtpCnQKv(knP)^ALxNeK1zoVT&MlleH6WxI1TmO93D4mc| zT$HEe1O921u#g>pO-;u37cmt2DFL_+zrUSGnZ^4bTL+*`{PV8~u*YGmgfQcugzo+E zzjIWk|BemyB$$_v|22K{9*YnmHu~{@T_GSHJF#Pbu_vk31{q{$K!gq<#?s%>Kic4= zIhth?zX#Ge?$8jYyO@1Gx|7*gxlphCL#WWo&|!J^2k^A7zn-~9q+ANv2!_7*pI~SrI}kks#bH7R2aDQkr|r2$tJ{_G z2xD;1A6vJ7zxzU1)WB+OZ71}=?Y;cRDSXNy_gkT_3J|@=-wpmVETfQw^f+u7k}3W^ zRE1K02St0%2mJBr%!U3Dm;BE+d%{v~{}bGMtW!At3P+eO%bnv~Y2QD7hUp~gx5^sL z@Xvw+sWUz<)Sg01swzStQpH@A?u$^nIXX%ozwK@?Hn#7BYmQSI4XsWxm9LA^IXd{OZdIh1u|buNnE+ERQW#wj1y z8SG&O?!WPj|LdK4CF;*b=AQ&v^4xzDWWmjR96MeQP(t=_v!lZuj67znA1sRyFxc*U zdoNM5x;Ho=4Fb)|h}}1Lb=Lp%RB=36{4)YoKkC_|p)AGn{eN{` zul)&e>>Pi5;N$;8)T(OQNpK6IdH?-9z@Tz`&zcMxq1#}qy| zdoLBk4d%%meO~bD3xULHOhd%SHqLyiikJUhlo?g<&3@eycL`D@5qO{&**}e1BsH zHaP5#MvGaP$rrJhc1SHc=YXUcen1EC`_SGkRgv2IuyBb^fKR5GT3}Di4v=wgeL7c# zUrq>ko=2VhRSb-gsodH?-G>8h=Q`>=GTPlf={c6%29+$gZil_CB@oC6^_KXzdmiW1 
zyIDpTNpCpq#cm_PD8qu-UG?5WZWG~8!GW_S=FzEQg5MQx+5Q|p*yfE9PqysEbsn-et>J^Tj>M)|QO|>4rmCmLql2aYEvwiI*cefzh@bY|pwXYKl1oOKVUu6AR~ZkCk_}Dui2OP$ z+^Q1tj(R7h&5tb_``-O_l|@|?t5DETy{^VpWlCa9?2s58fI?teW$B$m52~08v&EiC z;#VRvCUQ4^;>=NY{d!g~R9B3!HE4;hO`dFJpx0T7#$Ke;VHpkW0kVqv+?8z&T{V+3cH?NUg9P0amt_8h=pxll0M|m@8I7UiLTZDR* z6}{6eoRFut5}pkW`e&!#sgj955H5hbZlTJS-m=?oDb^oiqDZxjaAw?*(QPJ^dDimCzW0VJlS@zj-X=aQKt3;rqseUFKq+O!UF*S$DQH^{Ouk^E zI00tz&W9z_{wsZ`zK^mg@>1*>t=3i3Hz~BnCH?+79C2Gce6`IRmLDU`BFgUfXdwV) zwC7JQc0GmJH6?&zKE_b|M8c&%9>-`=z+xe-h1oyuFNqr|S;FRMOgN?%&z9TspwVA! z4z#H#`$3}0txuN~vvu0R8oh>l!ObCz3{LPT%3wFG(Rk8PF-P6Cg%~*S3bM}G-7?eJ z&*RC2))whJ<{);qID2$JSLQpr2(I`ai4)-&Pp%RHPV<|89z#UEV-3OqV@=}AyA+7f zNa}XCb*TfNz{M9rGkj$o(ka#_$!G>@>hbw|(9rS9(kFzlZm6>!4p-L-T2R=-y|)2P z+>3DrzekT@g4a>gm%r+U4grY}aEWuama62x33*PvEgClB*rb;7M#}a%C5riNln-(-hozrJnSR$ z>PA2`U_hR-3n<%P1TfGlq&pw>T|pKrV&=v{HYfEJKX4iiM`qTu>rmu32{P z2)xc~-KOFY30k}DT56<3=_Z8bB(yr>)#3C_gDN~o_+=sJjX&N!qtXKHz(Cm%Ut>-` zep6L_zGxzhRHxNiM*Ql^FUej~C>T+5Om4A_82jr-jbuYU`OLz}ggXpylB2?craaoN-bhm=h8iLFhZSqkRRs{jQ5 z0=xFtyFYgdZWoe4{Viz22rZ*)Cog5|3=34yezE~}@ z$0OE$C^!Ik)F6*2-fhn>;HeJA>T3XOEJaG1bMCH-GcEwA3}k@@Fkt=B%rnq8QUJru zq6wJyz}~Vk%?kq{zFx8p{W~GqvtS{9rs^)gWCB=qK?;FD6lgMVJe>3jG3oj0YZ5!e zpjM%rFDqsm4+UB|-SjR3^$QXD9`wkQC{@uQ2SLUKlro#w9k`AwM&;jk+-(n2n<1H> zlm28?-*pk^((&H*>bKp#1J*}ZVM+knCxo6!zf+=tExwoHq3A2@;!OjG<$+~X3t*X` z?bvVq1c3+*oIL4WQdNLqEPklB`A|DzS)eT46|^sPXV}UeKwE?0a{(^{Kfi*7;G{#E z@zfx76ddV~%}!p-JS$Iolo0p{3aEe{q$G_O-S!Te0C!(-ayqo}1lfr204wYr$N5P- zsmDqZ8P*X#X!7doD9T{-TdwcGgUIcpqSyW9d9@QN>aZLr5c<|9h3Xu6SobMkv_@#5 z&L>@xOe1f#`wu}GNvAZxev2gsh*~#7@jDIu{pIX=)g(l!e1ASYyN<$z!M;^?$O(ux zA`ktG@12B<*(%>qeE? zp+!Bk41zNTn2F(CzSPqPRUF&B+MbmO3U!7)=T?IfGpS;lbD@x*(6 zkjE!Mul-h|G|5SzM_NqwUTBXO@F1NNj{gFGIwb2cW}g)j!8P?UftUy?jm!Tix|AbWkU+A%%HiGxXh;-4yYGdZ+l^@wW}8KUObWnc0k& zRa^e>3G(Q%>BCVw?08Lsm2HWiHPcy&3xEBvz8qQ~<|%TiTv!WxM^j#G16-(jrZINw#dMgYV;fg{f}CCb(77D#T7Cp#Qfm{2vyac`iU{3)tO!FJe^s@0;W8x=R{Mj|T#g zZlyEU(|^Q@nqFKCHp=e@MtH%@?{leIve4Ec2{E6u1DxRg@76ANA7Obd)02HzyowWxc#sagbORIL_r#E zF(Lb`1KY`Jpmjz^T>M#qO%J^`SCZ3$@4{;~#yE>`UYn4kmuCF`AN4^v1y(*VDA z94B1d8kqRuKkR2>qDkU*s3|1mwCX9il4UUZ`EIa6q$T&t%P<;ZnBu5pI`td&&Y!6# ziiaQgmkGkd$l$=mQ#L|`=7hHSP$OKKyNBK5C?T)tvg`JzcDDOs0n}f56LOfnxg)lD zt+Se!pCwU83Ja7sLV@Vz@<6iz&|-f?bNc@>otPfLqQ@V|GXt3r0Uc32^Csvcb>A8L zs)7M<);rhaiWTX=sLbFx>n$hkptjd-Z`=I(qMIONvAaB(J3jVcvq86+z42~mKJ2Cl z1Nm1jv&DXQpW#He!}*muyirR(-{&WXF5TOJY=g3)(+7UtE_gLi#g6-&Cd3-WWAQG+z+G3kif#|VUU7B519iZ% zJZz#ZJCGi)Se5f#_vss%^{u{zaC6Hr5;vb2#SNoZT)>s9RPp9CVeug9!W;Hq1fhw` zJbHSEC>mbyxK(uR1F!%<0wb`>-a<)O8r)?yXUj=1@y$9c4QFUN5mcA;AUA;8SEOqH$T_(th99Hh)T*euLN-yAoMb{9Q#P{FKeS;5|iPyo4L+$RV+ z9IT%|Y2*TL5XSAMXs0ssI5wf~))nz;p)KM0-z;~wzGC{TGR#9KmzCCSNT>6K-@g=N z34QN=XdXyVQigB}B%>yM(kHSl`$|{yv%8e9!aSjrfVG1j{<#tF;qYMd zI}stdGhpx-;yN0(seLh*GusR5WI%^Oo3-ml1+&%ZR_7*=1M8*mezgWX@8mp*JUQNg z29ShaYkXd=l9dJDe%S7wiumeJ4Vsx@@4Byo_=rFRNsJx=g`Yf+Z_cq_az zO<=xe(jP2TnwhJ+r6GDtc3P97f#z2v@ZkaL!o90Xps-hjfpHfR1uPFTzh(1V4J8+NfciWtEM^D4uO|1S$0VzM=X}&vE`gDpkh30XNMo~};CzBpCWJWObhXb$ zC8_+k4HNN~x1rg$b`~{@ANE=6NYd%9Oi6O7<{RG`!zLFF3Y5eg;r9{Ea0HU@l8J6~ z0_T1{$wgieu#A14bl##m zp#PgP$^1FwX+Ikkzj*sGAr(4r(1*$HMev%{8Fot(|BTh#IZ0I~4>BDpw&%LCe1WO@ z1!DA{t}GM=5BK8Mt$CCtv5jr%5oDUZY0!6j6y&HLus2`L0WS5$^a3@1a&tdYPT@z0F5E6|#L5vNk7OZYVpqCQ%>AW-X zu;I@+EBAxbq+Zu6z#?kl)LS(1Q>f2f4#XjWuDV{OJ=0XJh|tlk|4dn)LYa^sGyot0 z=0u_Icgk#1$tt0r;LjL+vN`MVm4?$69w5X2@%yW_QcC_?5@FXvG9+Iq_rJ23$T!h} zcte2&iA$0ER*Kcz9P~#Ew@B1z3Pn@0uXgPD8Omvr51u(aL)6UDkUI zggt}s-nYJt{lV|D&L*1facBK?_xUfny0YmX#j9nX#SP@D56$#H?O+O_CsYXeLsZTS 
zmKCW^hbpjZue=)GqBD2g@PN+n|5J+`s)X&|2ITK7HV3mYpG^PmFiXk=tgc_i1zb1u zdQ3nZTzV*Y*UXqT|L<#)r=+F-w zccn-uU}dYQzR24Z#;LcdRkG&2(qvruc3%kvz2JK=x`=iBwp;`9WIlt)lv3*_#aw#e z_F=4)sr5)Y8JZ3}F;QJ*^pOZx8Yw^0M!#F+al&#{sVZE&(Uy;%9iym5`9YLP-+xc* zz#6&m^E>f$ML`hgQ;6^z6_r-hsDYWh^57~h@l+rX3VX)jL3(>RyG$5O&oz30#N_MR zV@&oL1+gu0Mfq>YFNQi@;(s1M{qf*lP5tw1oyO}K|Oj+2j8%~_@J&FkMPpAF{1>MLv z@@=vT$YkEt4cswH?~7g&98gb z^SJ@O#bj2ZZgV8?)nt-%!W?gxrnb8%y875r|s7J1vck^1=Y_|gG$J^4S0Iw~y_ zrf!g^igiIMR=AVjY*=knaw7%3`k}Sx_-PDAK=1x29+l7LFO`927-v~cx)?hX2@wvI zN+2u-dOUI<=yE?k{$+d<{x0!GWrZHCfs_IY7p+@Grpc|ApQGWR?nmSu{9oy?oCQHV*8r(6yy3_~9f#+y}j0mvIEz z`TraVH~yH!fPX#r*S}+C@YnzPvNEaea7Fwv=+qn^KiVTT;UUT*)(&_M5dRx}@sCdz z{hCJ%p9!?zgq5!Ik7o1#b3S8M;$J|I zaSPXr#Sj4Z=re!(mU~VHqgQpFNj-|^^eq95sf2t+K%AM!uV4rZnrGL~qYQBCd-~_S zYIp$((IiU-s4i0F2RQ&Ev~HVsp}a3!1L&8*9vvyU*)Ju5VLjpS1d;`I0rCCuyT&U5 z`sD<+e#%;FzkZ~F`|cwsz5zOJNC7ZVVD`y1K7!K3Ghg}n+T8})3GgWr4sSD8CEt8@ zHbBJ872K{ZkRRU0yi|d{6x!27`J^Qx-rYndn0_i1?9RrIVU9o%6or!xO;e5VPvik2>K=D-GAQ8w4u+Rhz0}v%?`%CsE>sacI)?W%6;EYgbx25x^8E{F7-zgc@p^4TS?ext;O(*6ohLb z;r_Pko?nK_%t`34o~}=8?0_|3@buQ#W`0&?iFefg02)_O3YuoEz`i}??I(Xfc)Hhm za~QxcfLrXzLQmOOut}9f^f>M3dL0!tYcLg&OBT{%Z*ZB3#P6@(wgY@jizGX46TyCd z9wC`=sIMe4*Cz;Ax2bjo(H7OOgXBtR>!MOYDLSn%v!cJ>4M``K9M=qT3{bURA+!`= zdRw)J7vT0^n~}A5FAM@XoT=##~zOTaS%E zo1+{0o2SdD!)s&^m4i9+WYnA6?vDct9_286Y^%?0Kti9|tf$YmT4sLr+O6&T?T|MvPs(gj{pQ2R4FYC@DCezc z3tExY+~*A=JOar}G;T8#R#`u8Z7IHQk9Tc5uoq8DH(6ck-o7rK!Y zU9M4>3UWqOEhdp8Atj;HX!=CSyqVlmi(;;<1ds$G*P9@GAvEoYPT~EXAlIH4`}GVk ztp}g2gr}<9jqStz)JHajV;D&{(`GTuw!$hzVkG5L}jjf4= zParcoV9}V-IZ|(Ztxrt8#2WVSjTG7lwr;Brnpgo(o2U0@uK~}1KFLCF(XGTfePW@i zgO!RZ&4}w!*56g0Sg2iA>R$i_UwNKX>HI||XME`D%LXrEe?;>hO{sOqfYVJ`wB-}v zLk*pU=xIw1W?J*yEYaL><*y)5eH@6%pk`BoE@G%6a&Y}BqzMCBzJYyKHRw-GUev>) zd_!%T_Zai@G)eQ!dNgV()0n-|&z(n4U*h*610c*&XU+EKf%Pb#LEc%)ued&f6IYmwK70C_x(g|i zL|ua1N77fLFLgJF&!AdDM?Sb#P-c#cWvzaB-l>Q!CB){k_rKU+2u&r&M~%H#!ns}Rq@}0ZH32E- zxwNX6!(&nmy65MuXOh;~v?L#6zVLq04MTyHk}=HuFVwY4Eq!WCJf=xHZ=Mt`caxEC zFEdX()HP<0Y-I{%B1ATx*XP`YYhp$q&v5ZB7BQmD9&z8&V(cw)5^8Ux6z+;u>0BfI zBv`$`)nDlL?B*lv_ffS>U zObms5sSqwz0qakO$$P&_<&$BD$5EdZMZ4HqoSxm2b*h|-D&om{`Y~pNd@8Ns=!bN@ zMf>w%RW0w6pu1uU>G=9Vyj#n!w*`a;yQjr%?bWR74}KaSFViFx*SJlh27bmVKCrgs zdp$Y%wxB&5aDdy)K0mb%bDT)yXV ze80!RDd-T_YIf`{kh5h(QMzuAP?=D0>d&qds>!!)dA;*D;&qkhQc45ltw7Y{e3|hh(zY8osF)7T6lb$ z+gdG?&GDgWSNEyxp}ru7lPrOrk$_Umzd*sS0l0mW|n8eT661KbJi8_YR z3y2mZQ;Un(&wjvQU~g;PPg*!PQ&2o8gRDa~f928r1n&W#s5(*r;s-oeSdvrm7x@%G*ZnfWrgp zb;}s^!`~PiJr!dMi}!?6k6vjGpi1XzLLdR`Z?CrV zEu{&@0+lDo%V!sj)PaY$QNNm5cwP(s&1ZocDVne5~EFslBHaBfi64M<$$PGq;vFaihy1DMwmkdU0MRid@4x8t6$ zm;zM$#ie5asq0d%P78l@fSVW04>xX&GRxRedCXd25)k353x8BLq6~J5Q`#r;-BidU z-5m|J-5>Q;whtKlgY*#-mHpi|;O^wJenVvozK6}&*GzjHv>MwYN&vIRV5G8UmksFo zZcZ3Ga^p8rXaB^V>#5N;oNn>~0>@yT`Sva);@urwC4?n@c`>EO<(~tmu!DaoG>5WZh_iM9t^KU!^YdnZs4xi3Sx=!wPGi^%Wo! 
zZ&m9Z>fW^-#>Br}7@ThV;gdze(-;fSCC{Uc(kO@J%+da9=+C5opNpMZ4tj$;V&CyT zbf47&|1P;h!4);%1O_p%7NU|kKBN9wwZ%_v+;Hh|ICy6>dSjADK@`hbY{%rIPo%P>ro>4q3xqkC5=XnM5yK4$5CoxD(TG?(FurPjL`s;V@fZr26z+WtVL`y2BZ9+Dk2F31mRAb=9LT?QX14 zanY2bm0jW5OIX{}0LG2_Ncp|ya^;d@Q$g1v&{K)H^X)xwTL(^(fIhf%-RvS38YREa zX4P$oJySUzlS0Ry_Sd;Y@RD}MC(nPZkm**Vbx!1a0$&eXD+u}RwqI6x=ewJY)QZ$Z zv~-ix9&w+%Und88IJl6|KUUw7x!s*Kq}P2-LLy-ND6fCL-|T#zRxtQJpARIKWWKg- zG~_knujUGs3IVz~iTAStbAKK#3`}QuD3ubrdwqYOFc$FX{@_O<`E#A|yoxvX@653_ zqp8W5AE)WwCu^Fuzm^m~eEIeVflpt&J)}>|^cS3xNVmLu*}{z3w;(ef+xs-i@cA2| zWyurbWYubx`5z@2gN=6%|GYIlqVVkG$i4c7l+@y{uEp1^z9%`4kNY$(LWus9phCOO zKV2J59#L|$iN-PbkB_&$l;=HM9fA)hCeoagn%Z<74q-yUQ$K8;J$4IjuhXc6>M~+` z(Pe=rZa;Qn!rt>T- zl2&kup~=hoPjPcz_u%&)=^D|h*Uob(Ztw<;B7>$DIZZWTq@tHDZU`EhIj)Gq&kMbm zN9D>vVO}|2g@T*?G0AVY4&9hdu;IUh3qmIbQIJxwQ7&p1OYb1e@!be1e0Qo8XBVP; z<_2%tbxl@Y$rAYP%k9l-6!d#ExoUf;x(>hJKu?qXSl|t&P3uIZEPb6?8GGvOYj(oXUR68F$O*HvfK z{x8(o_s%*mYpOS;#=dNp1avf|x^lee(c!6?Pq=A^L8TgP7O=%&wD8w|uRd$@(HJ@W z?h4njhu!d9&HiV#T{bJ0UZ%-Mc|AOkzeC+ZXaxgz$@tSF&)D<^C-N{_i!5oWn8|p^ zrmXT!HN1RK-}&7-@AS&_Y3e=HSH?ZLDwQH7sb{Td2~y*88Q5S17rApdAZOS)7}EkeBKkd_fg{-C z+G^Y;UyPvOD7m01>kjy~#QxweNWZWCHByeKa|K;ZGnnIwXv)P&p%b0OCD2w@O*@0! zPglR9_RtUo6$zU3HK0xm19!UbX;(*2lg}V}egjxKRPWjglt^bvI`PVxaq{q^fg18UI{S6yLSB=3))`>@~?`hR@?|8n#HFMgfwlq&1qa!%ImONxR| z7j%9E)Ga< z0)UXlAinD9$fMV+ye7#;rL42zuYI*MM^0u!|s|7TN@Ed8S(L zPH`e_JE-Q&1)IWV83hIHN6*Rj+)0-1ghF+LA5^=fo|Su+{ZYm>4V#2(f{~Z%K-FHZ zE3T=^(nY~%S#tV&WckK=_sx+yucB>WXFi%oZRTKK-=nB=a*ux5#I#e@IDkgQ(```f zS`LEVgKLI*bWFcYRkVr6Xc)}RgX%sd)9ZF~JsHRl^PFr>-if#(;9C`M-~!4E%bW!d z4O|v{B?e5$6E>c%w=-jq-)Q|NUvVzgJpdJ~Rg1k@npm$|krgAp$#A@ZfJw@>??f)g)B_BLjH>9jf?)bK{2P zPi*JlrE^RZxydfi1U+BVewpDaQJzcYSaWS;R9s&o+BtV&8@5)Oi}bJyY#w7 zC2;E#MD*(3?W1peUdfF6evOZ)-P>7&6gZ3gxfPcZs{z{t&d*h>rjOIBi$rRcHCui- z6?OUUQ%H~k>AaR6VP+p3toqo`qVv4KG^oW}PvQc`%IO5eSjrbv8TsU_GHd3~F)=~? zc1*_c!MTV2@Iyn|4Y;MB3g3C3arpFJ@?VXkiQ}XK>n6dhzFvdx)~wZ=^f=zE_tSai zNVMeB&9E_`U2A7fn_fnN{|ybANHvuz@K#*wxbez12KcSB?*M&+Uo5a|qgKVhwz4fg z;_P+XH=^+$Q2N|lTuXJBCmRI~-G@|;y_p+@U5^Xe2ZFhnRbV$w`0)?i|MQ^ut8Vx? z9iwHz;_HS@%pz zG0{nQZRNq`qAuBng2g5LnnksX*zWys)P2JRR{6WWl#LP6DV1(im7l*e77kzi z-R@F!4OHYxoYU8>XlhCW)(>;T@-Du|Oopl)_9Nfcv%V!H`>A6)iKh7C$Nqa5Cr2lz zXBLrXl>>ck>Q56So|>jQbHUThGhu7Mt=u~`{z&XV7ski;_@KJiCjW5s)_m0M0woFwtVm<`AD@=6vfL_3sKMV^ubm;%TU)~^ zQ&B}wNAjj8&ra0B86l)T2l-r2V>&(tnVd`a2YnZxCyn-|G_vsz|2^QW)v5bUVHefS zzWu=8E*?2tZXXSC<~mGYzy7*{_G!?tmtnB>Az5BU<4?xT4;pV0=)%nx*NV-`m+-QC z5BN1AgwN?dF9k0_^y+E7>nkn6qk1x9zjX*A4${D5sos@0aotlK12_7#RZ=}(=3E&{ z`;Pgdw6t^;HhLi++7xBJ=*xVce3C=GjZ4PG(&=H1rPpBPAFn~8;-Dt_hWN>gPe}sq zYm19JMJjW$lN)GzsA<4fRqe)*1;JbytduRJe(K>q9z&bkl&1asK0MX)=|=MkSAH>c zCwU=Npn;Wr>F#3kPJoE#`7FXEOeykx?vs@k$HRS=!0cs{Yn!qiAQ{HHq56D!r89s! z9u95GI@!}V#$wDU)-X9S2OdrGz2wv7>;XrW$evxRKlr1ED?_@!k(cYcq}Zw=q-*0Nh5bgg_l#a%LgFVnF zSg$z~jKTU6)450svoc%l$w-gRk|3%AL}}oRD9D1`F}!f1n7H)?v6=U93fP3YDDQ1O z=6<*Oykxp9y6rfWhewy(L)sWxXN7O61r!5k;4TJ6!Gb0;eAYzT3wS2+3h-4Q^+H27 z*MK#9C-6=-%eVLN@Cdzt;ZQW0Fo32{3mfF8PrhVTc6j+52r(3v)Nc{?eC3D0cF+lI zcU|awn#P7otzBn3lkI#tOYY|{?Wz@A04_!HgJPXVRV~qnMyxLo$6a>ndBZ~ z-rNadksNn_;w5!fxUq+L_~PFJ|clO)6R zB5El>zPxe`H)o>BiBZOk1_v66qn>`u4@4Z!GK{xO_*$mRDJHz|WoGzJ&jNdBJ+&E2 z3Y=OHwwbzl=IQ$GUSrKf8tVyvW#NzP4bcjBa~8N5)MI0eR}U)+n%h^xC4IRVB5so- z(@gKLNU~pI{k9RhvKzCRlR<%9ylF|RmPS^6ZlMCzGS9rp7Wrs0)?^Cv0NMxat&bDV zZa%Lex3UnEYtpMi(3dcrWf-|pxRH^*uQmIc{v(`ptFYk79QH|AN_Dp;_C8FL-*)cY zbQ%ZS_$h5|0mp7-iH0b5lA7d4L8QuSm{Lh)xdK)^u%5`7S+f{BiILO~Z? 
zW?uFOW85c4bluKPSo{Y#l!!Xy&{%rb)7Ch)JQfsG)q2qFKq>3e;7_L;y*=!zX)`99ZvhC~S&9TPzf&aL^hLHZEDgF+TxYvd zozaF)c{Q7X_*5Uu0;oxpz$9e?SH?OoPgZS2>Q{{QW6YXefc~*}ly305IoSITY&vn0 zh8;#>hmZ6MGpDh|$Lut)&gxb`5x_XLWdlAHCL~```fW@oqu3rHT<}Hce7bFa1V{T_kLSIz_Z|&nGlcX%o=sF|+blD}|-t6NeRnTP7vkDxV zCguvEVKmx=V(IlTHk(_j`lktNj$zDu0ShV@1TSr!d^S*}%_P88@-1B9uCJaHysyiTMXH@xR{<j-0io!} zTj}DLk0y$I;pAM3DGq*+_W1T~x_w9d+IMc(nwV=IvmNd^(*EJ*H#Hgykkz%fpVvBk zQV3jB7JD=Ia+kSDGkPY6p6wUJv@tBr#8hRVk+$C**Ur5%+61c?{rF848G;s`kK8D3 zni2_K!lk1tI5V!QmQ-oW?<=-6eUwZJS(v)L7<0RwMfsYl7tam5JIn8n20pqeS;NB- z#l@z_6Ej$3imlvP3Sm0+bVS$L9?}i4s;BmpyXc$!H;?ivk3yyFfJg^&pu7>-<00cv z-O*>RwZvnV#_G}W@|O^EJ`0RVX)XHb%*C}HHP(@5!ipMwZ}|57GGN()tG0qB=M=m< z;`=mtwRN9qEgD;m|Jz&Cb7y+t)&%m!BX?I$^24SV542t7LT|3FM6To)R|l<~(^cMG zQ8QhTJ+F@0-l9nto+#d4qZnCRyS|K=#A4h%7D8ReH0fHcWUs&^h0p1*2;473`sLRB z;9gn38Bhtsyg6*scV~2tKW5Ohp<;3s=f5ZhLQSyHBH1O*`CSnI$fu86TI!(yeMtX$ z^*7(YYXGGogf~BpdJa80tA+2`8OzfU1sp36un9d5lO~J?vb}bYkNi}+5O_&H%`(wW z4W?!sMs?T?TAtrr8hMOvUEj8FscntNfP);$P4IE4-*Ax5x!x+b!^iYWPN5k=4m2(- zU^b-vGSv|tBGj&vzd};$`z#>ZexaKZLjsUlzF5)_mHkzn-;$B4qdSOfr(T-5_p^mG z?-F6e0OWw-838o+l6x8LBfn_DNsH~?>X)%yLZfgt`t`Vqp3%{#vCLC10wyg;+RdmH zu>~rl0YVIBf%|3D?h%%r_$b->iZkljNtqq3G?{OWpl`kkysMcG84n^uL4OabjyH{r zWRN+I+*gu>y?wEe>21&wu3_{h_OxYmma}lsIxBHL4^icOUdH8jOod1IC6jN4yq3U% zd>fc$U(aI;>w6<)^GG! zPv^`JkJ13;43Xuu?pRsD$s?TZUR`z82TP1on-tp8xhsv*NTwnj>FoF}nA6Ti{dXAc zWx&<|;gKUR>p|E2PGz`ov`LV@y?lJ9^9G_(_-X?ClCA;*_*##~M;yB@>c?jpcfN=w zD_35N%Uh|au>tf;cJ{jp)klJhNnVj_42mk-zfYO!sH7pSWd-1)+=SMM{CCopL9%sf zeRIdGWaHban>NHuonlGK!fid2Iylf9w2h_rBq&8@pRh8PNLdwt#vRvZqMs@)5S}0q z3u+Ac1oRa|AMy~2V0b$v}OHoUJESKG_CWTj}grw`Bg^qGO4vFpWieT zlvl?RmqyBG!7*`@D@V{f1jaAYJV`wo$BjpOT|oq%@ae|u<*xJ>X<5do4N>|=-ru94 zQR=TmTRC?jC<6zU;RbI#5PRc$Sgw457(P&b%6~`kO>zQXx8xCmW>$`2o;t~6%Tvx%Cai=8byu+x5uK)eSTx4Rj5-Lu#&I4XjwyHEK>ytf z%@;AIyQh}W*) zgc_9)KI*TE!ITItZ%S6b>4f5q3^@^xSN>6`ThMYifvWu2$1xcv4^g)qR%_^Pl zU3fs*nvQNuvmUA~fYin49}%`|PgXP#*MyN`-R$NAu;72 zJ$UA?LOkE}bWcRBypu^pY*x7pS)i1Au1qHPr!8bnPQfOY!i3W1PR!j=KwQ1LHR{n_ zYvkUyNt`*GVAZF=nQsDFvwAEc;&#~-;uSCYvBi@U_*Y0uOV{Z``!w=bOxgs@zRrZ= z&qwjGVLsWYUC=6#?wMoBXn>k5^iVKx^B3QNAJ%W3T(v%Z;gZx_Xlr<*XeA-w&s1ww zvB2Bx`O{@xoz>xN*IOP2mi$^+(+REtM}sA$xv^)z#E=6Tuz`IT0IF(BAIG6^{cK(> z0!eN=ocdubHZ8Bgj-ifVs@ntKiz?;`innJBlXC{V57DeE;ObA`&W9`HY9eC&pj&LFf`w1yWo&TBqHz_}4KSmL<@A-SYqOo@ zGX|J#;hx!-mCO(BrpSK&1Q%m|uu$Sl|AGu4LKJ6w6N3d#$`sS*dZe4QMf=9Cq+oeW-Ys@((v~0P$WX+zw=4xC7nW9De z7u5n#rK&hMt4pSk&8yQRGJbA?uF!8VpyG80rVP|d$<6^z(R2AH zGkckV6x@vR*<^usYlAR2gd!H_Jw{PlY#ocu zP{b#gMCW%V%V2Gvn;YIie5l#%UsHuHojTk1P04t5oS3QpDmV8MsX$54K=`DV*Wkuo zf9ZlIASQP=P`co_4$YdEe9*3MXfKbv2T=n-M-z-$bKOV7?Z4|KIxXi%Bjk^PE>tap z`!Di#j9Wu*sE}1;YQFskY@vc9loXp}av3Xy)HJ$HJEBM)nAkvb?bM_Yi06 z>M`$RvCx(a>l{n>t{ObIhT;16I703+GuWp_3!fl0Gx82sFJ!si5$!2`EX=pNiFF01 z``;s|t<9dj*zr+Tl9bniiK$cRK`(k8SMXkPFlCJQTFM8u$0N?{6y%Fcc}~h0?-~#r ztXPy~j#-qkS$#!|0E@W?0<-m-l_D0lnIm7I@`~IqM^?@WcIx}5H|0ELhriKK#FvPL zMGGlI_!+7PfKTbx8c5koqMXyzK9SaxoIrEIuS?I%M5TAHW$gszF69BuPvFW=hhz>?H;i)2y}IhcUSr3eb%du7a)Ge#_X)XE%?GS`@+oG(9DJ6-D2hovnf z0tAFmxoM)Gide`OII5!ex=QmDp8&Bj8BYzQlzjFEeU_3BK<>9bb7;m6>qvX{BPKlj z0IoWbkue;G$mIcsYZmg$3xici$3((=8_pwh$g+QKVNO% zZq%ovyc=cP<-+dk=J)6 zk{yp)!6{y}vr-Sc)0DhRvQJa*czO zvVMh&ClG8@(tb||3-3vPzU@z1h5||J$G>Q32Hv75&9}-o;yJ0%0jSuhYFah+*l*dM z)wSM)$|o!8oe_CSziK^t643>v--#=3= z!Y3q9yVs3vXlR_e1`3Qj^8r76YXM{8ix#w{Mte>FmiU(~^QGt&m zHKWdHpJ1J%Repdx{Dul}joK@vAhWFX5z1rdRDwpSfw&7DP5O|eRLRwS!c^X@{k}~j zmKNNWZ=H9)RBTp8OsM0AeyO)b@bceC81;#0w9@C(?YipP?rblb*-d2^xQK!egY_ho zUA>|3vE9H$yBls3s?{8GJP`mNIkVdxM{U>o(sf2#5y$**^IGe#gsj_@lku?$T#)>fM?iNF!mGyU+hD^%rRe 
zs}BAJMu0DboBQ=upx%2<(c9x4aKR4_wnL5Bj=qqrC6BKXxZMsI)dKsDVr!Yz6@bBX{waiWD(L_UUQy zM0>lgl=(ec6M%qFe;Yi?T})#@wOMsgmYn-;&BuU!HM`$)*iPVk@%0d3xYF_DY2JN@ zgS{ccn)fPs^Qp`lXNL@ngoO1@--l6|;DuXYNjtawdi|rjx!e5`u#rHI z--Oxk+>A-%u$?4j82O_zOkCmv8vM9wrQz__Xsv!{tv(!ArA#jHBc;NnL-*$O);e8- z`gaF?zJIJ=uKQ&fK&V{iRQ39w`rI5Xv5Aayba9ck?+UctTO_JTA%b=ad#H_s=!g zOP!bhUQ*A&3FP0);MD&gzD_^Fc>LTaT`^hrsgQ!AqViv_k8zLQA1DJSW9(j$R9zR~ zO_yMUK#{jPM>-Wys&`Ukd((>2fM{kkmsH``O^LeVpbWxJeG{6a43r>Dw%nq49&41) zfol-{H?l!t8arQMX=$~tn82~6@kZ?5JQP!;PZ0c|9Wq~EA1lWOKKk~aUz5rwGGpJ5OT?vkPPWinG*J=#x|G!=I8cT6CZL4; zUAFu~vCp0UqZT5!P4j+zVenVHKk{)~RBg;Pwbw`81qTYD*s5A1EcfIb*W?dIKuNge zV6w8W3aYLzk6{(}ch>-C1N8f=0b*O@H}RZt^uFyzI9OA=0YgTGzT)+V_P(Bs@+9nR z5}bb_0$0NzEbYc~BL9vGJ|X(o*%=wlcM`8QCj?xlzbtsvH^ZZ4^0(-dp7fiwuX~6% z6bd5hKs`%fS2@F8^aRNVBhR(xvE zF#Jp|hhS}Nr3g5es^Wc~E&NNQm$7lInCLb!d}~y-8)IWu9Ui|uPlWea05EpY4bE^ET?#rSX4A5~tX zF14|nu?ME8(frxB3z>>XYYi9~vL!o(<%4FrzfFDE&9Wn`JJfe~us8kWPhzB9x>y2aN zlp)M%!l{@+iT{I-g-hQiH+9`u-u^8}u*R<+@Xmm8VQII(H2MB{WD&GI^TGL<`l*4M z`~M7~caFd-rep^-SQ&NW*>5c}Nl-VTf=4B&VTDpN1A>oDnH`r=T&1L>Af$L5-wF6k$XYD%5Vm6?Yw_&udQv7^Yro2XVU#hGb8cjr63GX+;) zr<85!bqrOcnp;}5K&aNl>XO&mR&l+x+t8pe=57_mgaw`tZ%zzh8S7yaX5kAwJQA4u zlU{F}HK8n6bMl&3pVyICrWlMN?iidmVWZjs7r|h)c4YD7X5Oc$$?bw-ROkj1IrKCy zJ4jaciTNa!b#tn#%EOn?&vtDpgxp!5-+B2a9v^MC;Gx^HdC#JtGZ6a<_EiGo8ixyw z!bcZW^?MHrVJ=}pafl{7LLbAkDT;DXF$61UvF5o{#C{`aclzxOrqaf$+fN9*`Tr6A zdUx}tAn(JsEO%bRNxG7zIbD zmHYyntU||am4{zwOr5ClF~~(k9>)f3EX?Tq0hvCtI#8+$flbpU$WuxJ8A0{>1bo(q zinU(Ll>=n1D2hrkjad&w%8bwHoWED7bon0I4Z$gH>6WuO4&R5FUN2a~hk6jm@|yF$ zPAskYs|UL>q~I|k=94RM0aU1R z4Vn?Pb{BjMYUpm5{%k|M>`0O8kH_dbk-vtrVk;Lo2#+t|l)>@h^I|>-X_A1+`^YNr zPhFCa8|vy-T)UHzitwFcy5YU_z~Xq$pjc#AeI;QApPAsdHe0LYS@KM=+76JhI^P>s zT39}lcBqRINw=@^IQe~3QGl3>by?u{syDUA=Ys&z9!6$|OXO~ZgFpu`K!4GULF-w( zAo%cWOYPr4TN8E8QwQGdfAjd>T{t??N?+6Ra13PHJbw8D9M(K?q?4tgI!7CFm|m7^ z->l8o{UBazlw7gKL^lN(=&+Kkt|tISTaLX(wf+&UEHDX(t4A^$ToR=5)n;O%hRdR7 zsZKieMw${$TbZI~*FfAMl;)wU8xFo$EU5;VsAe>eSI+vk@Rf6#5oacKpmA}ZsVB#w z?L7L{uZI~Z;+L0_qk)gS9WG)>P&e_!+5y<8+~i{OPU-HMhfxfZGdUluo(Vn%r^#s@ zXc?v!xelPrzM^I5i=mIaR()v3wlIM)C3l!Hh z7Jp}f_%-BZjq`acenI2xv7$+-0AOPP5g7-4v?RZ5b!U;2ZD^aCDwiYe+VJPKsMno3 zL)87g4v@6OOfh1T?#1h1_MV&nC-qaLg~qB+m`1rk6|V0eppf!nDpX}RL)7c{OGo!M z=9i9KHWxa8VL{GX{c~tQfD( z8-uqoUTf?T>rgmgsxe+MSKU5J?Pv|XVRr1d14Kt#JVxm3x9#1vZZKPe)B?3km4 zA-sB~nTHP)7YU<5f5NT7?1_wwBv%v7|2vVq{sPwfHxos%*eo&Sa7TsJ~8=u3hCt=nS1Bsl+K@)!y#Cg`p$THqh}HD zu5V-EW-w)F;c3Ju-vXx%$`EJT7Xi-|KE$7*(e_J$g#d~W8V%ldmaYcu}K*~bPEQa`olKxWgkoG7(rJjngZThYp_9=&JZ z*h-w=xy-kOy;7JcGx@cb@qiL_s{bCeUzazfaY9Q$1~KtY{g9L35#e^VFq+ni$c57k zZENC_1Kf}TP-3>S_cN(W;Ev^+`k_fFX3glZY2q>MdY;KQC*4Vp?juNTVQCiTkD~L) z_er-rJj(Tg-_(c3Zn6#>=z{(=qqI&ktW4z!{@AI<(-|^%2+=2(Yn(fBigES$G!N{C zTuBqBgq5e@V~}fOL+2bM7qnaB2!&Rt~7BGU0Th8B74B6yw7jEm@bARHuGu&CT}tD;CZw1));v zFuM(ng!q^E4Id_)+g)xEKId=Gue$Sf=IVjd*0kcP<3?nYV9A#x>%JG{4Dh@;3zyA| zA-0g}N3U*fW}oCQ8y6?_<#ry?jQx@Mun=X({9BW*!!M8SPg(BD4>d1?sbE%dQSA%M zDQ{tb8|p^HzE}jN-sMkH>vtZQjC&7l93>Yf6rYqW1Bt*TYQc)#F;Z-|YU8GB<27X= z+M|`?KKrLORN&TH>sB4|b{G?}Nsrsw#=SwYn%9h*X)1wky-B1Bxm@K29V4uxbH%UNVZFf6Eh zt@cMq@agaK^cUEhX3qCHFid*g<+^r}$W5@Z&iMq^lU|URcW#SJcv0;{^r33` zp=Ar)mjs>;e`=0jEwT+T0ciwzbt@G(fkyuguy<9tQez(BbS{Qxg3u6VZG|*jz2uC; zCk!n@nCQ|rQDAq!)$#)%5%Pz7)~XDz8T*FB*}nHo!z|~i_S9ydR}l&3@tTg5sf)Qr z!olGeWJ}2+BC-f?mz~I5`**q8?APp40msBvm z@bbc--)N2dtKYm~-18R}6CI5!LvZlrTD)a}Z0j9*x&gL-p)0Ku|4diTyY=tZ8N7$E z>r@tOPR4yooy<`A0u4MLojBvL4SpV>!EX0qLU$4kj_h`VGdsf!We-H7`O|YX=9!mA zc;XA4KN~}YZNBiT1nOFv*z30+FGTDky9}|Ocaxsoq>_c~>G#;)Y-Uw%p8c65{djq_ z7Q~2v6@`?0&iCEoc7g-X7ZCwFVYA=Q8F|3QFfRR5;gx0_n*;+Z&r_^1l%oWpZ)a9+ 
z7YPyS_1jX;Tr-ioE4%ZWwQg~g6A={_L8=5qxJAI4n2J2+WTPm z=XBeY9pF)}m8|hF+7Q>rVcZ+(!+UwKfI6YCO*aC!+$``biSGPLw&(kWI9v5_8AS47 zu?@9Ki{bryyp@QB5peP*H|B$JBvrzZ&ql`(RN~dvyr0)5Ua0c-+{csw!@*;RoIlLY z+Es77Ek*@U0V-Utd^-qonaSoL>jCTE3ZgO=xHU+k%s5y56lC^M3L6zS23^0x1jPW| z=!UAvj3Hcz&v#Z+Q=1IXj#c>6?G>D_LjY%=uRdOozAd6)YBJ_7T4vO~FLN6G z7VLcjfjsITK&C$yMGi1WOm7kK1bE$X%!hr5XYZeZ&|Q3u^3?umpN^I4Uh+`Ca{t1_ zhZ=jwG%%aqzUTXU>d@B6V^JZ{bYq}r!=H@hHFo_WZE3q; zFS8Qk0ld#P%R|(*qN$nu@{%8l`_%WJgQS)%o$vOTD1Y>+& z5KmftzIb8s&H&aGMX0iRZ@;fvQfPcBM3ipKX(GKe8ATpZsnb{L#_e@hcNWB=+(Av( z!Id5eES2oR)7$@I{?banaVRb{_iQ34FAx*`Q7m%AKrzjNrjCV5U;T9%%$S>-TZ-T; zH)(XTs!7CJJdedt@+Qc7X%NfuRjj7*aC%CDgtf~-8HuPV^0(EZz(Hd` z+-*RA{G9AVP*by%dA+V1)WQ(FdZ>kGfF>~C`mzK}p$J^6e$bJ!)?$n5t^{yi>!8NQ zXLF`&>C@|}AjG|iK~rll!NELB5-{5(p^9KG#gcR;vGiiBkDqI4rxM0+I2p+U1cscLkBWLF# zv(JxSVQJG-JmHRVeyiE@z{wB$wrk?=#+ZDh`rl=2fh2(Gs^8@Rz-}Q@$*ro~e$p;G zKpUUBSwxDF2{~6LnVI~^E}Xi!8nW?eQPx&O%uqhHW( zkiQ(HV(zKjI$ZBlDth+D0S%kwY!bB@UUkp2_nIW-B{H?iqWDv{gS8W`ArL`A9V6>s z%Y^2viEUa|xr{lXe{S+fKTQ{zX6(7vVPQY@3Cp3{Yk_=9rFzNIBVNSu#?FIu!Ty1xGY+VP zgamIWN6zE92OybT22kN$san$|R*jf=%qB-v{yUB5UO4=1)V=d>wKG?ifi=o;vYlf2H{~BgUXz;+_C5b?!#(4jEGMZLEAgD4P zJ9f-&=4bq8UEyV-`8rvan$7{hxK~fwqR!ia6bk7r3Fba0$rlZ2K|7A7b+?+akG7{p zdy-|emR>dQfhD)jCfdpG;ppeV2fjXdKTyivysk%) zWDT8-WmHsY)V#Lqr!_9XEK#f`g1%`5+qeg&1z3lWCUP@PDpQyh>LE90w{C6ke~lHU z@}lxRk~Dx}eG9qe4o9=yNsDEPl1v58q~;*{Pr4t4#8a3b|CPIl2{O+lm;o-d-zfFjYZ71UT>$!c|1)w zWfu_lzRXCEpb+EL-}xLqEO>QGgbpx?j_B6ael20Gl5`)Dfgqdox9 zUuA#08zO=V zAU2}oSM^&zfum#n(2qm@9;gR8Lo3gncx}bwX$QxFp)aJs7sM&n$NnNE4;EjVdj@a&akaX~rU;MQ#6S5!DaR z_da{v*?tIjL-XHtH`(wtqjMntRuFry;5T6|1-s({$UPbhYU7a}L z1cc5_6zTbf0EmWDz_7`QIydpM-QRdz=tSJDLr~st#HFTPKkfUUApIA&dM~zo3w4tW zHPSeLB=7F;oRVre$y#{(GfC6a^SFd!HR8inbDbO_>pwFh*a$53 z>DaC6Eh+k!aA(V~qmF9$w#J#|S6Lzr2g$|OXBu!7jTydCto3T$H593+vRkp>B{T^{kgF8W$~} zzL(*gW(%q%yvII@a0VueJS?TQP=plvEwbx^e05huf>HZ_y2=D}RemBE56N|PbuE3s zBDQ~c*nzHrxeE#EF^WMV7EHma&tf?%Ip`hC3MP<-||#X>p}~?XTphbOwXpWc5Ew3>EdNOQmgK z9u)`0B2evw^@Ms(hOes53;)8a!RqbO2*b zx+g83=Hw;5JE`)6NHs36FVV9-BZiE-4y=YcJ0~^}MyJlS9F7D{s1%+(XaY+to6cFW z9Jc@LHTKk3`e^;MKY$J(z)$ii7XV|u=~p3|iSo;dNa&0I?=S#$LjxeiRRbV$2>4#DXpVyKO1fxeWO2`V_6AnE++LZz#;ae< zkE%Snd5ztK^%QO3#C9z{t=yT3uDSIHu7OokUN{uAZX_NPBldO!PgO<=R7td!r7wE9C#rmu3V@RN!- zWP{5827o-MMoriFtGW+$GR^Lw%0@ZZ%8@y7hwpRD>8TeTHY=vj(L+N+1Fib}=?`mj znS}43u|$84H<{4P>}ldi5r3sG-h-uZ&8H`U0I>LK{2Poca%|wZ@~2YoH_l`?rcK5abtX+{PsN3tpB~gVU7$CGyj`<3 zum2k_2zvA0|D!kEWZ{h`(>A=TX)BZy$Hor%(ck%6a3PbQA;h&d`kdT#^g5$)?j68t zSozNjn*P*p-f+A8nmVcyo&Xzmq{%tRDFpijE?Xkc%{JQ2_T8fprbF(8(^ko!B4g*w z=aaNWvX1AwZiSq#9HVBJL5IN-d9cO(RN>GV*8IBn?)RTIUZ0#mfVGz`vodMoe|RWQ z`cPK@(dB;^^=BX^%Rk91O|IVB(AXOD!KI#YyHER#3IUJ`Je{ zEGB!5cXIBW4zk@tc_&K$n}}CM+%w$2;p7aEO)zM^Ut%6#r%p!(>#^A3Pa9Rc08H)# zCf3Y2)1LbXu#B^4(=*eWeoe-5g|(o}kaeNP|5tC<8VKdKhPA6*L_4<>3fsM++)5OZ z$jGHsVn-3>PK<7cK}^vFdv-JBK4@H0BbON>biq#cJ`tiRX(UYsqjJj(_Pa(*=lnS5 z&pG@1XTF*5vaI#4^*-C zhnXqb&LuZ{&9IF8g+?R=yb{lj1Yv%~m{^gXqqastLz7+#KJQ$9Ks7K*5d$D~_$^0= z)&ym?WgiN-pCDn~c#rDFm=br(0l|LdKKgJKjJth9-%VKZXsO=`;ws1MB+|<9KR1iq z_ItX(bSjEfI#Y9=J~^OCF07096;Igx38D@oY=ZQU;>LyUQCz%?TQG%(y&cRDSzLBm zc>3ZLL;=n=PwmhEjTxjkH?*#IFiXd+`j293S7KZc@!_SVWn7i1%6-dv6=8h077WUeH{Z2A zq-zddkH{V*zOMk);pk^*_v0`vjRkyV$&IAw?u-tpwsphYeZ8b~Do)b*!tymh_TZ1= z9!(AQINQqB5X(w(wJ8otzUb&NAuf9VuABST{w1ap(GX-oUR$y6eqm7!gZhr47T#KO z!$%%lUBYnbP+_OIfn*my<*7>0@a7bij(yBK02p$jYoWb#>b4bXxbTq{D{5Pf8&H7m zvJHLpX13F;T|9Oaxm5$&yFEMfQ@mNFDJE-?DD5&qi6^C>!LS^M{UTCPeMC=E5hA&U z^^@^I53Apo=G7%7nof5T*F_2f`869?wii_tTNoWv6F5C|Vuochx-F8srF(Op-82YW zsLgU?P*W-OTAUssBuC6E*r^|K0r-vg{d4r^gBsZgQHHU_h_gh{fr4e2Dt3vjtCDPS 
zH2BedJE9m!k%ldnaXa&nY9RIis9XN4iRF-3g(9vL4r)e&v-vgp0SymyHkLUkpf;mY zsF#FdWyh?fTUb)9LSpWsLcYQRzMCIyDN29a^ z*#o_e=GOPrtX$TR#qQ(a#uA4dpM5Eh{1On_-y1YbD4o5q8AV6H{DF`DlZnay;547M z-h79zkX4v&4y}5ZIqj4Fbuj47>CE{z6%rzT$C%>Z6{$l7hws067^?{w73fQU@9A$X zJy~kXEm=L>UK^Zwj$FAhmDHUUA{ckGuXL*+si^Cy3%&b2biJLqyr*TnG39Y#!Hdnx zUEgRl}%~iY+LxErr9TnqC&wkN*hfS2-|f8B_J3_FzIuE6@VWd0AJGVJ7Vt7#LW7`$+8@pb}No%A&jh zN+=&q4qOLneKH^;sV`m}L{=l^v@hDOmA#k-NTC~ua}NQ8TX>3FSJxQRyL5xzY~%Yn z9ew!2xEOdqKUSy|j6R|8VpwdAm?7>2RO$QMG(*7$>ihp)O$E#f=g5e4fxr&?hO)A< zihWjSTmwVEAjk5mZdh2@DM+O00mZzOQSsW`Gaw5Kk< zb8s(E&2=Qz?flCXA`YfM?oEI_-Cx$W!=89r4IA9-4soZ#KsfhbG9XcW>Lx1MI3DsHs^r)mSuOyf>NZSN1Dcm|k1gf&Rli-c=W zHi6wks{e{WW)VkT@z8*~6rI^_(|{%XTuau<;0E9+WB5qR~8BeUUfC6**Nip*JR94(ma%Vo0rWkzF+g9Wq2lB_5$k2llaMbR z_BL)m_TsiPJ*^i+e5He$lU;y?4^C5SCVi`rR%G@t8!z;${(~4DXwd7o3a18q_Ak8p7B&k zL?a4jiFeyVg%PI*kuBM~=mS3M#7qtFgp~(xA98PUn;5U?_FE_14noGNQ-dMDlzI(r zx%uTwj;Y2+rNBzjtFg>idPBy{(WIMGa}Ni8WH{i(@7G!U_OaK*FcTkDO<2-Hkn;qf z5HBu|#Iv28k-=9TJU;%zz)7A1(ur$hUBWO~(~##a0UQ$=+hbx94|{Fj+^ZP=Lo8#R z=1C?9NB|DuFG(-KMo$uo^EZs@hd9u@P;*sLizM7gMYPWz9FmAeDf&JH=wVyL_en(O z#l{-~EE_`U`MOBts$rJ6_EW7r{WpN(2k=7ZmsStNKcK$LulZ@)a1>u$I(vDtEDW#x z#eIE!kFx8Q*aj$}9R93gd@g+$d_p9R@fOc}B2Wlv>w_iLDT=4MeFm1&vz<2#6jyO2 z{YUs*%2IZ#^ND|^!P*=`PABvJ<6Vw1aSG`_CrFqO2=m}Qc=UjeP}I6yO?4iUw-~#` z*vESN`|pw8!34~l>bWN(BBGz*33WfHsR0f9@W)HQPFhLOvb?g&y`%mHxIj7I3!!j5 z2lN$gB@)D*OKgMek97n3|HUuMuivenZ!n>e!vJk;kISt+15*Fj>d5tF0Fy3b>)EU? zi+d`fDh{Yh*`|>5Ptiz;bliuclmER#NE{L`#nh0QyuBz9U~y?n@BC{hs0iXwR7M;c z+$~E*Ox%x-4|VlHC5$frxpCQZ1|!9@m|O ztGmC`=mjjEmW6e&L`#qVS@M2gY}j+`&nG{~yDnES8Ndl5EmE7gkIhlh*y|2O!_9l$fuIrS_rd1~;`J(tn_I7?yqEzb z0IDD+GrUkI!NlRvghGL$?E&T;s8JB96YSb)KgdR)4?`-Ys`zB$)4PC>^On}b%4*2( zsa4rb1r-jGA%)?3?m?6(X{$!LH)z2mm`C{}0#@|3@V7BM**oqaqoNPk0{NSG#knFl zYb#Lfxl$VkIYpox8n`Bxo;8^uYq0KQE$Ahn=Qkv*VPn<+9rX{Qk?dywL+%#Bs+TNY zX<))M!`cM*5SopyDX3?!F7;_ubMnnmi6^diVw4b6kn`AYN-z`opc0LY>SBbR*`;UI zT1hG)UzLuO&*qJbU^>9>s!awjB-hRUoetQ~45kxxBv4D7^@KIsVDr1gpu zl6fve$r0z}g5&{~q3ff!Z4|E1T)TnB}UnvC8J)G;{z4D?dHBnlK6g z>#Jkjh%=J_ZXXYYc-RT18)rWnGq)DVCnPX63y|GS+;$RSOPTG55Z=V6c4MV3YS`{*X>0Rr!X!0gk-6@-qj}9LR2G>HIDDGj zr|ca5&K$E};Ft8wZb5?R)W5s+$o(;f-yA1UFt*IzL)at7K*6$$c%`7IXbZUCl{tEX zZyx!sx9k;Y;v33V37s4*=aM?nRhu4=1V1Gk&#m!4)8zIvja9wQ%Q*4m+L2^ZH?G%Y zG#nG+4J?n$=b8&V>MuzTNzOet2-T3`iUC+6nxMrA;vgaf|0M{IUNO*Ez5-Sg;WONB zB=`uOGy8&{leu7KeT#2*K6f#)-gMm^!7~X7o#Ttc=S~Zm*$Tub=)<-}fYJ+&|9U!q zZvJI2yy`O**_F38f2b+<{g%Af^MyB9eNvzi1s8?&f2;dq`9@Mfa7 Vn=i#rf3QIhVZF=Ff@*&B{9oD0mgoQg literal 0 HcmV?d00001 diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index 0b33affc76..e89fb99750 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -205,3 +205,80 @@ Disadvantages: How to set *mem_mode* --------------------- When the nodes in the network are converted to HLS layers, the *mem_mode* can be passed. More detailed information about the transformations that prepare the network and the transformation that performs the conversion to HLS layers can be found in chapter :ref:`nw_prep`. The *mem_mode* is passed as argument. Note that if no argument is passed, the default is *const*. + +RTL ConvolutionInputGenerator +============================= + +FINN implements convolution operations by pairing a ConvolutionInputGenerator (or "sliding window generator (SWG)") with an MVAU or VVAU (for depthwise convolution). 
+This RTL version is an alternative to the original `HLS implementation `_ and aims to improve on it in the following ways:
+
+* Support a wider range of hyperparameters without the fragmentation into 16+ separate HLS functions
+* Support additional degrees of parallelism (i.e., across the output window or multiple input samples) that are difficult to implement in HLS
+* Support additional features, such as dynamic feature map sizing
+* Improve resource efficiency
+
+The component is implemented by generating (System-)Verilog code for each individual instance, realized via the template + replacement dictionary mechanism found in other FINN components.
+Despite the HDL implementation, the component is managed by its own HLSCustomOp (!) named "ConvolutionInputGenerator_rtl". Naturally, HLS simulation & synthesis are not supported.
+
+The RTL SWG is currently disabled by default and can be enabled either in the corresponding HLS conversion transformation (:py:mod:`convert_to_hls_layers.InferConvInpGen(use_rtl_variant = True)`) or in the build configuration (:py:mod:`DataflowBuildConfig.force_rtl_conv_inp_gen = True`).
+
+Implementation styles
+---------------------
+Depending on the amount of parallelism requested, one of two implementation styles is selected. The following table defines the folding parameters (marked in bold text) and the supported configurations.
+
+.. list-table:: Parallelism configurations
+
+   * - **SIMD**
+     - **parallel_window**
+     - **M**
+     - MMV_in
+     - MMV_out
+     - Style
+     - Notes
+   * - < C
+     - 0
+     - 1
+     - 1
+     - 1
+     - default
+     - depthwise-aware
+   * - C
+     - 0
+     - 1
+     - 1
+     - 1
+     - default
+     - depthwise-agnostic
+   * - C
+     - 1
+     - 1
+     - 1
+     - K
+     - parallel
+     - depthwise-agnostic
+   * - C
+     - 1
+     - M
+     - M
+     - M*K
+     - parallel
+     - Currently unsupported
+
+(With C = #Channels, MMV_in = input samples (or "pixels") per cycle, MMV_out = output samples (or "pixels") per cycle, K = kernel_width * kernel_height.)
+
+The following diagram shows the operating principle of both styles; the "parallel" variant is pictured for a 2x2 kernel without dilation.
+
+.. image:: img/rtl_swg_impl_styles.png
+   :align: center
+
+The main difference lies in the buffer structure. If the output width is equal to the input width ("default mode"), an addressable circular buffer is used, which can be implemented either in LUTRAM, BRAM, or URAM resources. If parallel access to multiple window elements is required ("parallel mode"), the SWG generates a fixed structure of registers and line buffers to avoid memory port limitations and exploding multiplexing logic, while still featuring LUT-saving BRAM/URAM implementation for the line buffers.
+
+The "default" style also supports a dynamic mode, which provides an interface to change feature map dimensions, stride, or dilation at run-time. See `this pull request `_ description for more information.
+
+Folding
+-------
+The RTL SWG is supported by the basic automatic folding algorithm in FINN (:py:mod:`SetFolding()`). Consider the following implications:
+
+**MVAU:** Although it is recommended to unfold SIMD first, SIMD and PE can be set independently. Full (and balanced) parallelism is achieved by using the SWG in parallel window mode and setting MVAU SIMD and PE to their maximum values (SIMD = MW = C_in * K, PE = MH = C_out).
+
+**VVAU:** While the VVAU HLS component supports SIMD unfolding independently from PE, the RTL SWG requires full unfolding across the channel dimension (SIMD of the SWG = PE of the VVAU) before enabling window-parallelism. Unlike the MVAU, the VVAU can't accept datawidth-converted input from a fully-parallel SWG in this case due to the depthwise data layout. As a result, the VVAU should be unfolded by PE first (up to PE = C), followed by SIMD (up to SIMD = K).
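As a quick illustrative sketch (not part of the patch itself) of the two ways to enable the RTL SWG mentioned above, assuming ``model`` is a ModelWrapper that is ready for HLS-layer conversion and ``cfg`` is an existing DataflowBuildConfig::

    from finn.transformation.fpgadataflow.convert_to_hls_layers import InferConvInpGen

    # option 1: request the RTL variant explicitly during HLS-layer conversion
    model = model.transform(InferConvInpGen(use_rtl_variant=True))

    # option 2: force the RTL variant for a whole dataflow build
    # (normally set when the DataflowBuildConfig is constructed)
    cfg.force_rtl_conv_inp_gen = True

Both paths end up inserting a ``ConvolutionInputGenerator_rtl`` node where the HLS variant would otherwise be used.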
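In the same spirit, a rough sketch of the MVAU folding guideline above (not taken from this patch; the ``MatrixVectorActivation`` op type, its ``MW``/``MH`` attributes and the ``getCustomOp`` accessor are assumptions about the surrounding FINN/QONNX API)::

    from qonnx.custom_op.registry import getCustomOp

    # assumes `model` already contains a ConvolutionInputGenerator_rtl / MVAU pair
    swg = getCustomOp(model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")[0])
    mvau = getCustomOp(model.get_nodes_by_op_type("MatrixVectorActivation")[0])

    swg.set_nodeattr("SIMD", swg.get_nodeattr("IFMChannels"))  # SIMD = C
    swg.set_nodeattr("parallel_window", 1)                     # switch to the "parallel" style
    mvau.set_nodeattr("SIMD", mvau.get_nodeattr("MW"))         # SIMD = MW = C_in * K
    mvau.set_nodeattr("PE", mvau.get_nodeattr("MH"))           # PE = MH = C_out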
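The convolutioninputgenerator_rtl.py changes below fill a code generation dictionary (``code_gen_dict``) for each template. Purely as an illustration of the "template + replacement dictionary" mechanism mentioned earlier, and not the actual FINN implementation (function name, file handling and dictionary contents are hypothetical)::

    # minimal sketch: splice generated code lines into placeholder tokens of an HDL template
    def fill_template(template_path, code_gen_dict, out_path):
        with open(template_path) as f:
            hdl = f.read()
        for token, code_lines in code_gen_dict.items():
            # each dictionary entry holds the HDL lines to insert at that placeholder
            hdl = hdl.replace(token, "\n".join(code_lines))
        with open(out_path, "w") as f:
            f.write(hdl)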
diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py
index eae9ffd6bd..10eb604a6b 100755
--- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py
+++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py
@@ -188,6 +188,7 @@ def get_outstream_width(self, ind=0):
         return self.get_instream_width()

     def get_number_input_values(self):
+        """Function to get the number of expected input values."""
         folded_ishape = self.get_folded_input_shape()
         num_input_elems = np.prod(folded_ishape[:-1])
         return num_input_elems
@@ -198,6 +199,7 @@ def get_number_output_values(self):
         return num_output_elems

     def get_1d_conv_attrs_normalized(self):
+        """Returns normalized spatial attributes, where H=1 for the 1D case."""
         # normalize FM dimensions so that:
         # [H, W] = [Y, X] = [1, D] or [D, 1] are always mapped to [1, D].
         # The dummy ('1') dimension is the Y-dimension.
@@ -218,6 +220,8 @@ def get_1d_conv_attrs_normalized(self):
         return (ifm_ch, ifm_dim, ofm_dim, k, stride, dilation)

     def get_buffer_depth(self):
+        """Returns total depth of the internal buffer, depending on
+        implementation style."""
         ifm_ch = self.get_nodeattr("IFMChannels")
         k = self.get_nodeattr("ConvKernelDim")
         ifm_dim = self.get_nodeattr("IFMDim")
@@ -488,8 +492,8 @@ def execute_node(self, context, graph):
         shape doesn't match expected shape (1, ofm_dim_h, ofm_dim_w, k_h*k_w*ifm_ch)."""

     def prepare_codegen_default(self):
-        # Default implementation style for MMV_out = 1: addressable cyclic buffer
-        # Computing incremental addressing scheme directly..
+        """Fills code generation dict for the default implementation style by computing
+        the incremental addressing scheme for the circular buffer."""
         if self.get_nodeattr("dynamic_mode"):
             template_select = "/finn-rtllib/swg/swg_template_default_dynamic.sv"
         else:
@@ -671,8 +675,10 @@ def prepare_codegen_default(self):
         return template_path, code_gen_dict

     def prepare_codegen_parallel(self):
-        # Parallel implementation style for MMV_out = K:
-        # mix of shift-registers (for parallel read) and line buffers (BRAM/URAM/LUT)
+        """Fills code generation dict for the parallel implementation style by computing
+        the loop controller configuration and partitioning the fixed buffer into
+        shift-registers (for parallel read access) and line buffers (for efficient
+        LUTRAM/BRAM/URAM implementation)."""
         template_path = (
             os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv"
         )
@@ -936,6 +942,7 @@ def prepare_codegen_parallel(self):
         return template_path, code_gen_dict

     def select_impl_style(self):
+        """Selects implementation style based on folding configuration."""
         simd = self.get_nodeattr("SIMD")
         M = self.get_nodeattr("M")
         ifm_ch = self.get_nodeattr("IFMChannels")
@@ -984,6 +991,8 @@ def select_impl_style(self):
         return impl_style

     def generate_hdl(self):
+        """Generates HDL code and wrapper for the IP, depending on required
+        implementation style."""
         impl_style = self.select_impl_style()

         # prepare code generation by filling out dictionaries
@@ -1186,44 +1195,53 @@ def get_dynamic_config(self, ifm_dim=None, stride=None, dilation=None):
         return config

     def code_generation_ipgen(self, model, fpgapart, clk):
-        """Normally: Generates C++ code and tcl script for IP generation.
-        Here: Generates (System-)Verilog code for IP generation."""
+        """Generates (System-)Verilog code for IP generation (instead of HLS code)."""
         self.generate_hdl()

     def ipgen_singlenode_code(self):
-        """Normally: Builds the bash script for IP generation."""
+        """Not implemented (RTL component)."""
         pass

     def code_generation_cppsim(self, model):
-        """Normally: Generates C++ code for simulation (cppsim)."""
+        """Not implemented (RTL component)."""
         pass

     def compile_singlenode_code(self):
+        """Not implemented (RTL component)."""
         pass

     def global_includes(self):
+        """Not implemented (RTL component)."""
         pass

     def defines(self, var):
+        """Not implemented (RTL component)."""
         pass

     def read_npy_data(self):
+        """Not implemented (RTL component)."""
         pass

     def strm_decl(self):
+        """Not implemented (RTL component)."""
         pass

     def docompute(self):
+        """Not implemented (RTL component)."""
         pass

     def dataoutstrm(self):
+        """Not implemented (RTL component)."""
         pass

     def save_as_npy(self):
+        """Not implemented (RTL component)."""
         pass

     def blackboxfunction(self):
+        """Not implemented (RTL component)."""
         pass

     def pragmas(self):
+        """Not implemented (RTL component)."""
         pass
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
index 525af7ea92..d1f6eb4608 100644
--- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
+++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py
@@ -140,19 +140,7 @@ def apply(self, model):
                     k_h > 1 and k_w == 1 and ifm_dim_w == 1
                 )

-                # Ensure that RTL variant is not inserted for unsupported configuration
-                is_rtl_variant_compatible = True
-                if is_kernel_pointwise:
-                    is_rtl_variant_compatible = False
-                    if self.use_rtl_variant:
-                        warnings.warn(
-                            """%s : RTL ConvInpGen requested for unsupported
-                            configuration. Falling back to HLS implementation."""
-                            % n.name
-                        )
-
-                if self.use_rtl_variant and is_rtl_variant_compatible:
-
+                if self.use_rtl_variant:
                     ConvInpGen_node = helper.make_node(
                         "ConvolutionInputGenerator_rtl",
                         [ConvInpGen_input],

From e17ef0cd8f2115abebc3a8dbca072ced90a9a888 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu
Date: Mon, 30 Jan 2023 17:34:37 +0100
Subject: [PATCH 330/628] [Test] use stitched-ip rtlsim for DWC test

---
 tests/fpgadataflow/test_fpgadataflow_dwc.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py
index bcf2a1fe3d..db756c6f7e 100644
--- a/tests/fpgadataflow/test_fpgadataflow_dwc.py
+++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py
@@ -35,9 +35,9 @@
 from qonnx.util.basic import gen_finn_dt_tensor

 import finn.core.onnx_exec as oxe
+from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
 from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
 from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
-from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
@@ -87,7 +87,8 @@ def prepare_inputs(input_tensor, dt):
 @pytest.mark.slow
 @pytest.mark.vivado
 def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype):
-
+    test_fpga_part = "xc7z020clg400-1"
+    target_clk_ns = 10.0
     # generate input data
     x = gen_finn_dt_tensor(finn_dtype, Shape)
     input_dict = prepare_inputs(x, finn_dtype)
@@ -96,9 +97,11 @@ def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype):

     model = model.transform(SetExecMode("rtlsim"))
     model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(PrepareIP("xc7z020clg400-1", 5))
+    model = model.transform(PrepareIP(test_fpga_part, 5))
     model = model.transform(HLSSynthIP())
-    model = model.transform(PrepareRTLSim())
+    model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns))
+    model.set_metadata_prop("exec_mode", "rtlsim")
+
     y = oxe.execute_onnx(model, input_dict)["outp"]

     assert (

From 8652bfb235370388fb323606ef67dfaaf8e615c3 Mon Sep 17 00:00:00 2001
From: Yaman Umuroglu
Date: Mon, 30 Jan 2023 23:55:27 +0100
Subject: [PATCH 331/628] [DWC] avoid running HLS for impl_style=vivado

---
 .../streamingdatawidthconverter_batch.py | 25 +++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
index a3aa9d570d..33ba6450f0 100644
--- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
+++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py
@@ -466,3 +466,28 @@ def lut_estimation(self):
             cset_luts += outw

         return int(cnt_luts + cset_luts)
+
+    def prepare_rtlsim(self):
+        assert self.get_nodeattr("impl_style") != "vivado", (
+            "StreamingDataWidthConverter impl_style "
+            "cannot be vivado for rtlsim. Only impl_style=rtl supported."
+ ) + super().prepare_rtlsim() + + def code_generation_ipgen(self, model, fpgapart, clk): + # no codegen required for impl_style=vivado since + # that uses premade, configurable AXIS IP + if self.get_nodeattr("impl_style") == "hls": + super().code_generation_ipgen(model, fpgapart, clk) + + def ipgen_singlenode_code(self): + # no IP generation required for impl_style=vivado since + # that uses premade, configurable AXIS IP + if self.get_nodeattr("impl_style") == "hls": + super().ipgen_singlenode_code() + else: + code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + # set ipgen_path and ip_path so that HLSSynthIP + # and CreatedStitchedIP transformations do not complain + self.set_nodeattr("ipgen_path", code_gen_dir) + self.set_nodeattr("ip_path", code_gen_dir) From d95e8f3c694a41b33f71c9a3c0fee18dc49746ec Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Mon, 30 Jan 2023 23:56:18 +0100 Subject: [PATCH 332/628] [Test] temporary DWC testcase for nondivisible widths --- tests/fpgadataflow/test_fpgadataflow_dwc.py | 23 ++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py index db756c6f7e..8fe1f35cb3 100644 --- a/tests/fpgadataflow/test_fpgadataflow_dwc.py +++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py @@ -37,8 +37,8 @@ import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP +from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO from finn.transformation.fpgadataflow.prepare_ip import PrepareIP -from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): @@ -46,6 +46,10 @@ def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, Shape) outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, Shape) + max_w = max(OUTWidth, INWidth) + min_w = min(OUTWidth, INWidth) + impl_style = "hls" if max_w % min_w == 0 else "vivado" + DWC_node = helper.make_node( "StreamingDataWidthConverter_Batch", ["inp"], @@ -56,6 +60,7 @@ def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): inWidth=INWidth, outWidth=OUTWidth, dataType=str(finn_dtype.name), + impl_style=impl_style, ) graph = helper.make_graph( @@ -76,13 +81,17 @@ def prepare_inputs(input_tensor, dt): # shape -@pytest.mark.parametrize("Shape", [[1, 4], [1, 2, 8]]) +# @pytest.mark.parametrize("Shape", [[1, 4], [1, 2, 8]]) +@pytest.mark.parametrize("Shape", [[1, 24]]) # inWidth -@pytest.mark.parametrize("INWidth", [2, 4]) +# @pytest.mark.parametrize("INWidth", [2, 4]) +@pytest.mark.parametrize("INWidth", [6]) # outWidth -@pytest.mark.parametrize("OUTWidth", [2, 4]) +# @pytest.mark.parametrize("OUTWidth", [2, 4]) +@pytest.mark.parametrize("OUTWidth", [4]) # finn_dtype -@pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"], DataType["INT2"]]) +# @pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"], DataType["INT2"]]) +@pytest.mark.parametrize("finn_dtype", [DataType["INT2"]]) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado @@ -94,13 +103,13 @@ def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype): input_dict = prepare_inputs(x, finn_dtype) model = make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype) - - model = model.transform(SetExecMode("rtlsim")) + 
model = model.transform(InsertFIFO(create_shallow_fifos=True)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, 5)) model = model.transform(HLSSynthIP()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.set_metadata_prop("exec_mode", "rtlsim") + model.set_metadata_prop("rtlsim_trace", "dwc.vcd") y = oxe.execute_onnx(model, input_dict)["outp"] From 47aeb173d73a904c1b3deb27d7fed4538f36e9a2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 08:41:54 +0000 Subject: [PATCH 333/628] [Tests] Modify copyright header and delete print statements --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 979dcbfab8..1e64880ea5 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, Xilinx +# Copyright (c) 2022, Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -197,9 +197,6 @@ def test_fpgadataflow_conv_dynamic(): getCustomOp(swg_node).set_nodeattr("dynamic_mode", 1) getCustomOp(swg_node).set_nodeattr("inFIFODepths", [16]) getCustomOp(swg_node).set_nodeattr("outFIFODepths", [16]) - print("SWG initial config:") - idim = getCustomOp(swg_node).get_nodeattr("IFMDim") - print(getCustomOp(swg_node).get_dynamic_config(idim)) model = model.transform(InsertFIFO()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) From 9c800366a6f697194f59ed0576c85df9308da700 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 08:58:27 +0000 Subject: [PATCH 334/628] [Tests] Add Jenkins marker to new tests --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 1e64880ea5..cd20b305a1 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -156,6 +156,7 @@ def write_swg_config(sim): @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow def test_fpgadataflow_conv_dynamic(): idims = [32, 16] ifm = 4 @@ -361,6 +362,7 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("m", [1]) @pytest.mark.slow @pytest.mark.vivado +@pytest.mark.fpgadataflow def test_fpgadataflow_slidingwindow_rtl_dynamic( idt, k, ifm_dim_series, ifm_ch, stride, dilation, dw, simd, m, parallel_window ): From 7faf316b42e382c8490459fdbddd64aafdb65851 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 31 Jan 2023 09:34:04 +0000 Subject: [PATCH 335/628] [DWC] handle indivisible widths via LCM-sized intermediate stream InsertDWC now uses hls mode, and vivado mode is only used for 8-bit divisible stream widths --- .../streamingdatawidthconverter_batch.py | 79 ++++++++++++++----- .../transformation/fpgadataflow/insert_dwc.py | 9 ++- 2 files changed, 64 insertions(+), 24 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index 
33ba6450f0..940b4f4ab0 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -78,24 +78,33 @@ def get_normal_output_shape(self, ind=0): def check_divisible_iowidths(self): impl_style = self.get_nodeattr("impl_style") - if impl_style == "hls": - # when using impl_style = hls must have the following - # if inWidth > outWidth: inWidth % outWidth = 0 - # if inWidth < outWidth: outWidth % inWidth = 0 - iwidth = self.get_nodeattr("inWidth") - owidth = self.get_nodeattr("outWidth") - if iwidth > owidth: - assert ( - iwidth % owidth == 0 - ), """DWC InWidth is bigger than OutWidth and is not divisible by it. - Please adjust PE and SIMD values so that InWidth % OutWidth = 0 - or alternatively use impl_style = vivado""" - else: - assert ( - owidth % iwidth == 0 - ), """DWC OutWidth is bigger than InWidth and is not divisible by it. - Please adjust PE and SIMD values so that OutWidth % InWidth = 0 - or alternatively use impl_style = vivado""" + iwidth = self.get_nodeattr("inWidth") + owidth = self.get_nodeattr("outWidth") + if impl_style == "vivado": + # the AXIS IP we use in vivado mode only supports + # stream widths that are divisible by 8 + iwidth_d8 = iwidth % 8 == 0 + owidth_d8 = owidth % 8 == 0 + assert ( + iwidth_d8 and owidth_d8 + ), """DWC impl_style=vivado requires + stream widths that are divisible by 8: (%d, %d)""" % ( + iwidth, + owidth, + ) + + def get_iowidth_lcm(self): + iwidth = self.get_nodeattr("inWidth") + owidth = self.get_nodeattr("outWidth") + return int(np.lcm(iwidth, owidth)) + + def needs_lcm(self): + iwidth = self.get_nodeattr("inWidth") + owidth = self.get_nodeattr("outWidth") + maxwidth = max(iwidth, owidth) + minwidth = min(iwidth, owidth) + impl_style = self.get_nodeattr("impl_style") + return (impl_style == "hls") and (maxwidth % minwidth != 0) def get_folded_input_shape(self, ind=0): self.check_divisible_iowidths() @@ -202,6 +211,16 @@ def defines(self, var): "#define NumInWords %d " % numInWords, "#define numReps %d" % numReps, ] + if self.needs_lcm(): + lcmWidth = self.get_iowidth_lcm() + assert ( + numInWords % (lcmWidth / inWidth) == 0 + ), "Error in DWC LCM calculation" + numLCMToOut = numInWords // (lcmWidth / inWidth) + self.code_gen_dict["$DEFINES$"].append("#define LCMWidth %d" % lcmWidth) + self.code_gen_dict["$DEFINES$"].append( + "#define NumLCMToOut %d" % (numLCMToOut) + ) def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -226,6 +245,12 @@ def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream<ap_uint<{}>> in0 ("in0");'.format(self.get_instream_width()) ) + if self.needs_lcm(): + self.code_gen_dict["$STREAMDECLARATIONS$"].append( + 'hls::stream<ap_uint<{}>> intermediate ("intermediate");'.format( + self.get_iowidth_lcm() + ) + ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream<ap_uint<{}>> out ("out");'.format(self.get_outstream_width()) ) @@ -233,9 +258,19 @@ def strm_decl(self): def docompute(self): # TODO continue with fxns below, they are copy-pasted op = "StreamingDataWidthConverter_Batch" - self.code_gen_dict["$DOCOMPUTE$"] = [ - "%s<InWidth, OutWidth, NumInWords>(in0, out, numReps);" % (op) - ] + if self.needs_lcm(): + self.code_gen_dict["$DOCOMPUTE$"] = [ + 'hls::stream<ap_uint<{}>> intermediate ("intermediate");'.format( + self.get_iowidth_lcm() + ), + "%s<InWidth, LCMWidth, NumInWords>(in0, intermediate, numReps);" % (op), + "%s<LCMWidth, OutWidth, NumLCMToOut>(intermediate, out, numReps);" + % (op), + ] + else: + self.code_gen_dict["$DOCOMPUTE$"] = [ + "%s<InWidth, OutWidth, NumInWords>(in0, out, numReps);" % (op) + ] def
dataoutstrm(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -287,6 +322,8 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" ) + if self.needs_lcm(): + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS DATAFLOW") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py index efc1799235..632d1f813b 100644 --- a/src/finn/transformation/fpgadataflow/insert_dwc.py +++ b/src/finn/transformation/fpgadataflow/insert_dwc.py @@ -83,10 +83,13 @@ def apply(self, model): dwc_out_width = n1.get_instream_width() larger_width = max(dwc_in_width, dwc_out_width) smaller_width = min(dwc_in_width, dwc_out_width) - if larger_width % smaller_width == 0: - impl_style = "hls" - else: + both_8bit_aligned = (larger_width % 8 == 0) and ( + smaller_width % 8 == 0 + ) + if both_8bit_aligned: impl_style = "vivado" + else: + impl_style = "hls" # determine shape for dwc dwc_shape = n0.get_normal_output_shape() From 4e0d1ec70b77a25c8ec39beaa701630e82560568 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 31 Jan 2023 09:51:51 +0000 Subject: [PATCH 336/628] [Test] flesh out DWC testcases as separate configs with impl_style --- tests/fpgadataflow/test_fpgadataflow_dwc.py | 53 ++++++++++----------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py index 8fe1f35cb3..695a5f902c 100644 --- a/tests/fpgadataflow/test_fpgadataflow_dwc.py +++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py @@ -41,14 +41,10 @@ from finn.transformation.fpgadataflow.prepare_ip import PrepareIP -def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): +def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style): - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, Shape) - outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, Shape) - - max_w = max(OUTWidth, INWidth) - min_w = min(OUTWidth, INWidth) - impl_style = "hls" if max_w % min_w == 0 else "vivado" + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape) DWC_node = helper.make_node( "StreamingDataWidthConverter_Batch", @@ -56,9 +52,9 @@ def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): ["outp"], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - shape=Shape, - inWidth=INWidth, - outWidth=OUTWidth, + shape=shape, + inWidth=inWidth, + outWidth=outWidth, dataType=str(finn_dtype.name), impl_style=impl_style, ) @@ -80,41 +76,42 @@ def prepare_inputs(input_tensor, dt): return {"inp": input_tensor} -# shape -# @pytest.mark.parametrize("Shape", [[1, 4], [1, 2, 8]]) -@pytest.mark.parametrize("Shape", [[1, 24]]) -# inWidth -# @pytest.mark.parametrize("INWidth", [2, 4]) -@pytest.mark.parametrize("INWidth", [6]) -# outWidth -# @pytest.mark.parametrize("OUTWidth", [2, 4]) -@pytest.mark.parametrize("OUTWidth", [4]) -# finn_dtype -# @pytest.mark.parametrize("finn_dtype", [DataType["BIPOLAR"], DataType["INT2"]]) -@pytest.mark.parametrize("finn_dtype", [DataType["INT2"]]) +@pytest.mark.parametrize( + "config", + [ + ([1, 24], 6, 4, DataType["INT2"], "hls"), + ([1, 24], 4, 6, DataType["INT2"], "hls"), + ([1, 4], 2, 4, DataType["BIPOLAR"], "hls"), + ([1, 2, 8], 2, 4, DataType["BIPOLAR"], "hls"), + ([1, 4], 4, 2, 
DataType["INT2"], "hls"), + ([1, 2, 8], 4, 4, DataType["INT2"], "hls"), + ([1, 2, 8], 8, 16, DataType["INT2"], "vivado"), + ], +) @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_dwc_rtlsim(Shape, INWidth, OUTWidth, finn_dtype): +def test_fpgadataflow_dwc_rtlsim(config): + shape, inWidth, outWidth, finn_dtype, impl_style = config test_fpga_part = "xc7z020clg400-1" target_clk_ns = 10.0 # generate input data - x = gen_finn_dt_tensor(finn_dtype, Shape) + x = gen_finn_dt_tensor(finn_dtype, shape) input_dict = prepare_inputs(x, finn_dtype) - model = make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype) + model = make_single_dwc_modelwrapper( + shape, inWidth, outWidth, finn_dtype, impl_style + ) model = model.transform(InsertFIFO(create_shallow_fifos=True)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, 5)) model = model.transform(HLSSynthIP()) model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns)) model.set_metadata_prop("exec_mode", "rtlsim") - model.set_metadata_prop("rtlsim_trace", "dwc.vcd") - y = oxe.execute_onnx(model, input_dict)["outp"] assert ( y == x ).all(), """The output values are not the same as the input values anymore.""" - assert y.shape == tuple(Shape), """The output shape is incorrect.""" + assert y.shape == tuple(shape), """The output shape is incorrect.""" From c4256c3c1f83fc43d29cd2bd502cf386d5f8146c Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 13:54:10 +0000 Subject: [PATCH 337/628] [FMPadding] Increase counter bits for rtl component --- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_fmpadding.py | 8 ++------ 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index c47f9d52a2..ecfbe92b4b 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -249,8 +249,8 @@ def execute_node(self, context, graph): def get_template_values(self, ifm_dims, pads, chans, simd, idt): dimY, dimX = ifm_dims padT, padL, padB, padR = pads - y_counter_bits = int(math.ceil(math.log2(padT + dimY + padB))) - x_counter_bits = int(math.ceil(math.log2(padL + dimX + padR))) + y_counter_bits = int(math.ceil(math.log2(padT + dimY + padB + 1))) + x_counter_bits = int(math.ceil(math.log2(padL + dimX + padR + 1))) topname = self.get_verilog_top_module_name() stream_bits = idt.bitwidth() * simd stream_bits = int(roundup_to_integer_multiple(stream_bits, 8)) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index ef458f6288..090a0d1e65 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -53,9 +53,7 @@ target_clk_ns = 10 -def make_single_fmpadding_modelwrapper( - optype, idim, padding, num_ch, simd, idt -): +def make_single_fmpadding_modelwrapper(optype, idim, padding, num_ch, simd, idt): pad_h = padding[0] + padding[2] pad_w = padding[1] + padding[3] idim_h, idim_w = idim @@ -117,9 +115,7 @@ def make_single_fmpadding_modelwrapper( @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_fmpadding( - idim, pad, num_ch, simd, idt, mode, impl_style -): +def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style): if impl_style == "rtl" and mode == "cppsim": pytest.skip("rtl 
implstyle has no cppsim, skipping") if num_ch % simd != 0: From c1768f7b1274e5844a355dd0834b767dd5f1dd07 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 31 Jan 2023 14:22:29 +0000 Subject: [PATCH 338/628] [DWC] add disable_start_propagation on recommendation from Thomas --- .../fpgadataflow/streamingdatawidthconverter_batch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index 940b4f4ab0..a80d2bbefa 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -323,7 +323,9 @@ def pragmas(self): "#pragma HLS INTERFACE ap_ctrl_none port=return" ) if self.needs_lcm(): - self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS DATAFLOW") + self.code_gen_dict["$PRAGMAS$"].append( + "#pragma HLS DATAFLOW disable_start_propagation" + ) def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") From 5d65dd46449ff3a34b6f322bb011f2e748868787 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 15:04:51 +0000 Subject: [PATCH 339/628] [FMPadding] Cleanup code for rtl variant and enable all hls tests --- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 8 ++------ tests/fpgadataflow/test_fpgadataflow_fmpadding.py | 8 -------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index ecfbe92b4b..5650d21885 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -68,10 +68,6 @@ def get_nodeattr_types(self): "SIMD": ("i", False, 1), # FINN input datatype "inputDataType": ("s", True, ""), - # controls distribution of padded pixels - # in case of uneven padding -- see FMPadding fxn - # in hlslib - "PaddingStyle": ("i", False, 2, {2, 1}), # shape describing input vecs per execution "numInputVectors": ("i", False, 1), # Enable reprogrammable implementation to change FM dimensions, @@ -136,7 +132,7 @@ def make_shape_compatible_op(self, model): exp_ishape = self.get_normal_input_shape() oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) - assert ishape == exp_ishape, "Unexpect input shape for SameResize." + assert ishape == exp_ishape, "Unexpected input shape for FMPadding_rtl." 
return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): @@ -160,7 +156,7 @@ def get_input_datatype(self, ind=0): ret = DataType[self.get_nodeattr("inputDataType")] # the hlslib op always pads with zeros, so ensure that the DataType # is able to represent zeros - assert ret.allowed(0), "FMPadding_Batch DataType must support zero" + assert ret.allowed(0), "FMPadding_rtl DataType must support zero" return ret def get_output_datatype(self, ind=0): diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 090a0d1e65..8ab8a7aa4d 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -125,12 +125,6 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style): pad_h = pad[0] + pad[2] pad_w = pad[1] + pad[3] - if idim_h == idim_w and pad_h != pad_w and impl_style != "rtl": - pytest.skip( - """Only equal padding along the dimensions for square images - is supported for HLS, skipping""" - ) - # generate input data x = gen_finn_dt_tensor(idt, [1, idim_h, idim_w, num_ch]) input_dict = {"inp": x} @@ -150,8 +144,6 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style): model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) model = model.transform(PrepareRTLSim()) - node = model.get_nodes_by_op_type(optype)[0] - inst = getCustomOp(node) y_produced = oxe.execute_onnx(model, input_dict)["outp"] expected_oshape = (1, odim_h, odim_w, num_ch) From 2c01f8e0fc67bc2a27f56aaecc95ec4e624bd5d3 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Tue, 31 Jan 2023 17:45:14 +0000 Subject: [PATCH 340/628] [qonnx]: use new utility function to create onnx model --- src/finn/util/create.py | 4 ++-- tests/fpgadataflow/test_code_gen_trafo.py | 4 ++-- tests/fpgadataflow/test_compilation_trafo.py | 4 ++-- tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py | 4 ++-- .../fpgadataflow/test_convert_to_hls_channelwise_layer.py | 4 ++-- .../test_convert_to_hls_conv_fc_transition.py | 4 ++-- tests/fpgadataflow/test_convert_to_hls_conv_layer.py | 4 ++-- .../fpgadataflow/test_convert_to_hls_layers_synthetic.py | 4 ++-- tests/fpgadataflow/test_convert_to_hls_pool_batch.py | 6 +++--- tests/fpgadataflow/test_depthwise_convolution.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_addstreams.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_checksum.py | 4 ++-- .../fpgadataflow/test_fpgadataflow_convinputgenerator.py | 6 +++--- .../test_fpgadataflow_convinputgenerator1d.py | 6 +++--- .../test_fpgadataflow_convinputgenerator_rtl.py | 6 +++--- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 6 +++--- tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_dwc.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_fifo.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_fmpadding.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_globalaccpool.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_ipstitch.py | 6 +++--- tests/fpgadataflow/test_fpgadataflow_labelselect.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_mvau.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_res_estimate.py | 3 ++- tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_thresholding.py | 4 ++-- tests/fpgadataflow/test_fpgadataflow_vvau.py | 4 ++-- 
tests/fpgadataflow/test_set_folding.py | 3 ++- .../streamline/test_absorb_mul_into_topk.py | 3 ++- .../streamline/test_absorb_transp_into_flatten.py | 3 ++- .../streamline/test_collapse_repeated_op.py | 5 +++-- .../streamline/test_factor_out_mul_sign_magnitude.py | 3 ++- .../transformation/streamline/test_linear_past_eltwise.py | 5 +++-- tests/transformation/streamline/test_maxpool_nhwc.py | 4 ++-- tests/transformation/streamline/test_move_add_past_mul.py | 7 ++++--- .../streamline/test_move_chw_add_past_conv.py | 3 ++- .../streamline/test_move_flatten_past_affine.py | 4 ++-- .../streamline/test_move_flatten_past_topk.py | 4 ++-- .../streamline/test_move_identical_op_past_join_op.py | 4 ++-- .../streamline/test_move_maxpool_past_multithreshold.py | 3 ++- .../streamline/test_move_mul_past_dw_conv.py | 4 ++-- .../streamline/test_move_mul_past_maxpool.py | 4 ++-- .../streamline/test_move_scalar_past_conv.py | 5 +++-- .../streamline/test_move_scalar_past_matmul.py | 7 ++++--- .../streamline/test_move_transpose_past_scalar_mul.py | 3 ++- tests/transformation/streamline/test_round_thresholds.py | 3 ++- tests/transformation/streamline/test_scale_resize_nhwc.py | 8 ++++---- 49 files changed, 113 insertions(+), 99 deletions(-) diff --git a/src/finn/util/create.py b/src/finn/util/create.py index a8c2e67b38..642cabcf6d 100644 --- a/src/finn/util/create.py +++ b/src/finn/util/create.py @@ -30,7 +30,7 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor +from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model def hls_random_mlp_maker(layer_spec): @@ -84,7 +84,7 @@ def hls_mlp_maker(layer_spec): graph = helper.make_graph(nodes=[], name="mlp", inputs=[], outputs=[]) - model = helper.make_model(graph, producer_name="finn") + model = qonnx_make_model(graph, producer_name="finn") model = ModelWrapper(model) for lyr in layer_spec: diff --git a/tests/fpgadataflow/test_code_gen_trafo.py b/tests/fpgadataflow/test_code_gen_trafo.py index 49ee32c71e..f5edabbd4b 100644 --- a/tests/fpgadataflow/test_code_gen_trafo.py +++ b/tests/fpgadataflow/test_code_gen_trafo.py @@ -32,7 +32,7 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper -from qonnx.util.basic import gen_finn_dt_tensor, get_by_name +from qonnx.util.basic import gen_finn_dt_tensor, get_by_name, qonnx_make_model from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim @@ -70,7 +70,7 @@ def test_code_gen_trafo(): nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_compilation_trafo.py b/tests/fpgadataflow/test_compilation_trafo.py index 9bafb101ce..d04b68a56b 100644 --- a/tests/fpgadataflow/test_compilation_trafo.py +++ b/tests/fpgadataflow/test_compilation_trafo.py @@ -32,7 +32,7 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper -from qonnx.util.basic import gen_finn_dt_tensor, get_by_name +from qonnx.util.basic import gen_finn_dt_tensor, get_by_name, qonnx_make_model from finn.transformation.fpgadataflow.compile_cppsim import 
CompileCppSim from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim @@ -71,7 +71,7 @@ def test_compilation_trafo(): nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py index 7b3e206164..98a7c76ee4 100644 --- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py @@ -38,7 +38,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -121,7 +121,7 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_ helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape) ] - modelproto = helper.make_model( + modelproto = qonnx_make_model( helper.make_graph( name="conv_test", inputs=[top_in], diff --git a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py index 0f19b6d79a..089d1ae420 100644 --- a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py @@ -35,7 +35,7 @@ from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -57,7 +57,7 @@ def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape): outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, ishape) p0 = helper.make_tensor_value_info("p0", TensorProto.FLOAT, pshape) - model = helper.make_model( + model = qonnx_make_model( helper.make_graph( name="test", inputs=[inp], diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py index 0760ff9b37..3512c39cb3 100755 --- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py @@ -39,7 +39,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -149,7 +149,7 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): "Flatten", ["thres1_out"], ["flatten_out"], axis=1 ) - modelproto = helper.make_model( + modelproto = qonnx_make_model( helper.make_graph( name="test", inputs=[global_in], diff --git 
a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 8c9f110c31..bf15336b3c 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -38,7 +38,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -107,7 +107,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape) ] - modelproto = helper.make_model( + modelproto = qonnx_make_model( helper.make_graph( name="conv_test", inputs=[top_in], diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py index 79a48793e0..c837a46a7c 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py @@ -43,7 +43,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.insert_topk import InsertTopK -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -123,7 +123,7 @@ def make_model(ch, ifmdim): outputs=[outp], ) - model = helper.make_model(graph, producer_name="add-model") + model = qonnx_make_model(graph, producer_name="add-model") model = ModelWrapper(model) # set initializers for scalar add/mul nodes diff --git a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py index ef9bd7a13d..6d628c9e53 100644 --- a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py +++ b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py @@ -35,7 +35,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -78,7 +78,7 @@ def make_single_maxpool_modelwrapper( nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="mp-model") + model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -112,7 +112,7 @@ def make_single_quantavpool_modelwrapper(k, stride, ifm_ch, ifm_dim, ofm_dim, id nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="mp-model") + model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index 5228ade3d0..caa22e077f 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ 
b/tests/fpgadataflow/test_depthwise_convolution.py @@ -37,7 +37,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor +from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim @@ -123,7 +123,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): outputs=[global_out], value_info=value_info, ) - model = oh.make_model(graph, producer_name="lowered_dw_cnv-model") + model = qonnx_make_model(graph, producer_name="lowered_dw_cnv-model") model = ModelWrapper(model) # initialize model diff --git a/tests/fpgadataflow/test_fpgadataflow_addstreams.py b/tests/fpgadataflow/test_fpgadataflow_addstreams.py index 6d881f45b6..1ad2c26610 100644 --- a/tests/fpgadataflow/test_fpgadataflow_addstreams.py +++ b/tests/fpgadataflow/test_fpgadataflow_addstreams.py @@ -34,7 +34,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -68,7 +68,7 @@ def make_addstreams_modelwrapper(ch, pe, idt): outputs=[outp], ) - model = helper.make_model(graph, producer_name="addstreams-model") + model = qonnx_make_model(graph, producer_name="addstreams-model") model = ModelWrapper(model) model.set_tensor_datatype("inp1", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py index ceafda90e5..13fab9a47f 100644 --- a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py +++ b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py @@ -34,7 +34,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -73,7 +73,7 @@ def make_modelwrapper(C, pe, idt, odt, pdt, func, vecs): ) graph = helper.make_graph(nodes=[node], name="graph", inputs=[inp], outputs=[outp]) - model = helper.make_model(graph, producer_name="model") + model = qonnx_make_model(graph, producer_name="model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_checksum.py b/tests/fpgadataflow/test_fpgadataflow_checksum.py index 495fcd10b6..cd404f5a63 100644 --- a/tests/fpgadataflow/test_fpgadataflow_checksum.py +++ b/tests/fpgadataflow/test_fpgadataflow_checksum.py @@ -36,7 +36,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.core.rtlsim_exec import rtlsim_exec @@ -115,7 +115,7 @@ def 
create_two_fc_model(): value_info=[mid], ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py index a196ecbb61..3cfff9ac34 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py @@ -34,7 +34,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -73,7 +73,7 @@ def make_single_im2col_modelwrapper( nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="im2col-model") + model = qonnx_make_model(graph, producer_name="im2col-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -117,7 +117,7 @@ def make_single_slidingwindow_modelwrapper( outputs=[outp], ) - model = helper.make_model(graph, producer_name="slidingwindow-model") + model = qonnx_make_model(graph, producer_name="slidingwindow-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py index 0fc3ca82cf..f467f37618 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py @@ -35,7 +35,7 @@ from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -82,7 +82,7 @@ def make_single_im2col_modelwrapper( nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="im2col-model") + model = qonnx_make_model(graph, producer_name="im2col-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -133,7 +133,7 @@ def make_single_slidingwindow_modelwrapper( outputs=[outp], ) - model = helper.make_model(graph, producer_name="slidingwindow-model") + model = qonnx_make_model(graph, producer_name="slidingwindow-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 007360a5fd..58fc5ec04c 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -33,7 +33,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import 
finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.prepare_ip import PrepareIP @@ -72,7 +72,7 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilatio nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="im2col-model") + model = qonnx_make_model(graph, producer_name="im2col-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -124,7 +124,7 @@ def make_single_slidingwindow_modelwrapper( outputs=[outp], ) - model = helper.make_model(graph, producer_name="slidingwindow-model") + model = qonnx_make_model(graph, producer_name="slidingwindow-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index e8807fd24f..086057f3fa 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -45,7 +45,7 @@ LowerConvsToMatMul, _auto_pad_to_explicit_padding, ) -from qonnx.util.basic import gen_finn_dt_tensor, get_by_name +from qonnx.util.basic import gen_finn_dt_tensor, get_by_name, qonnx_make_model import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -396,7 +396,7 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilatio nodes=[im2col_node], name="im2col_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="im2col-model") + model = qonnx_make_model(graph, producer_name="im2col-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -449,7 +449,7 @@ def make_single_slidingwindow_modelwrapper( outputs=[outp], ) - model = helper.make_model(graph, producer_name="slidingwindow-model") + model = qonnx_make_model(graph, producer_name="slidingwindow-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py index 7ec254405d..441bbce50a 100644 --- a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py +++ b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py @@ -36,7 +36,7 @@ from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -76,7 +76,7 @@ def make_dupstreams_modelwrapper(ch, pe, idim, idt, n_dupl): nodes=[dupstrm_node], name="graph", inputs=[inp], outputs=out_vi ) - model = helper.make_model(graph, producer_name="addstreams-model") + model = qonnx_make_model(graph, producer_name="addstreams-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py index bcf2a1fe3d..104bfa011f 100644 --- a/tests/fpgadataflow/test_fpgadataflow_dwc.py +++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py @@ -32,7 +32,7 @@ from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.general import 
GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP @@ -62,7 +62,7 @@ def make_single_dwc_modelwrapper(Shape, INWidth, OUTWidth, finn_dtype): nodes=[DWC_node], name="dwc_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="dwc-model") + model = qonnx_make_model(graph, producer_name="dwc-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", finn_dtype) diff --git a/tests/fpgadataflow/test_fpgadataflow_fifo.py b/tests/fpgadataflow/test_fpgadataflow_fifo.py index b9c74185d9..efdb3bf6aa 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fifo.py +++ b/tests/fpgadataflow/test_fpgadataflow_fifo.py @@ -33,7 +33,7 @@ from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP @@ -66,7 +66,7 @@ def make_single_fifo_modelwrapper(Shape, Depth, fld_shape, finn_dtype): nodes=[FIFO_node], name="fifo_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fifo-model") + model = qonnx_make_model(graph, producer_name="fifo-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", finn_dtype) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index 8ab8a7aa4d..b95409fda8 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -36,7 +36,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -87,7 +87,7 @@ def make_single_fmpadding_modelwrapper(optype, idim, padding, num_ch, simd, idt) nodes=[FMPadding], name="fmpadding_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fmpadding-model") + model = qonnx_make_model(graph, producer_name="fmpadding-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py index a37e6e3271..a2c3d09a55 100644 --- a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py @@ -34,7 +34,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -65,7 +65,7 @@ def make_accpool_modelwrapper(ch, pe, idim, idt): nodes=[accpool_node], name="graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="thresholding-model") + model = 
qonnx_make_model(graph, producer_name="thresholding-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 325470a6d6..b220338e69 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -36,7 +36,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_data_layouts import InferDataLayouts -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model from finn.core.onnx_exec import execute_onnx from finn.transformation.fpgadataflow.create_dataflow_partition import ( @@ -100,7 +100,7 @@ def create_one_fc_model(mem_mode="const"): nodes=[fc0], name="fclayer_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) @@ -177,7 +177,7 @@ def create_two_fc_model(mem_mode="decoupled"): value_info=[mid], ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_labelselect.py b/tests/fpgadataflow/test_fpgadataflow_labelselect.py index a9b98ecaf8..553f263ba2 100644 --- a/tests/fpgadataflow/test_fpgadataflow_labelselect.py +++ b/tests/fpgadataflow/test_fpgadataflow_labelselect.py @@ -33,7 +33,7 @@ from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim @@ -67,7 +67,7 @@ def make_labelselect_modelwrapper(labels, pe, k, idt): outputs=[outp], ) - model = helper.make_model(graph, producer_name="thresholding-model") + model = qonnx_make_model(graph, producer_name="thresholding-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index a7e7eba7ee..f3efd6a686 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -36,7 +36,7 @@ from qonnx.custom_op.general.multithreshold import multithreshold from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor +from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -106,7 +106,7 @@ def make_single_fclayer_modelwrapper(W, pe, simd, wdt, idt, odt, T=None, tdt=Non nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git 
a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py index e3c79fa44f..b3cf7b4229 100644 --- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py +++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py @@ -32,6 +32,7 @@ from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.util.basic import qonnx_make_model from finn.analysis.fpgadataflow.res_estimation import ( res_estimation, @@ -87,7 +88,7 @@ def test_res_estimate(): nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py index a3968cf797..628721b429 100644 --- a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py @@ -35,7 +35,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -74,7 +74,7 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_ nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="mp-model") + model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py index 706679b680..96cd69c345 100644 --- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py @@ -37,7 +37,7 @@ from qonnx.custom_op.general.multithreshold import multithreshold from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer @@ -93,7 +93,7 @@ def make_single_thresholding_modelwrapper( outputs=[outp], ) - model = helper.make_model(graph, producer_name="thresholding-model") + model = qonnx_make_model(graph, producer_name="thresholding-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index 03ddb12863..abf8ba0b9e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -35,7 +35,7 @@ from qonnx.custom_op.general.multithreshold import multithreshold from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer 
import exp_cycles_per_layer @@ -132,7 +132,7 @@ def _make_single_vvau_modelwrapper( nodes=[VVAU_node], name="vvau_graph", inputs=[inp], outputs=[outp] ) - model = helper.make_model(graph, producer_name="vvau-model") + model = qonnx_make_model(graph, producer_name="vvau-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) diff --git a/tests/fpgadataflow/test_set_folding.py b/tests/fpgadataflow/test_set_folding.py index 8ea0e18f2c..5355dd7044 100644 --- a/tests/fpgadataflow/test_set_folding.py +++ b/tests/fpgadataflow/test_set_folding.py @@ -34,6 +34,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames +from qonnx.util.basic import qonnx_make_model from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.transformation.fpgadataflow.create_dataflow_partition import ( @@ -91,7 +92,7 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes): outputs=[tensors[-1]], ) - model = helper.make_model(graph, producer_name="fclayer-model") + model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) model.set_tensor_datatype("inp", adt) diff --git a/tests/transformation/streamline/test_absorb_mul_into_topk.py b/tests/transformation/streamline/test_absorb_mul_into_topk.py index a6dff788dc..89ef74e0b3 100644 --- a/tests/transformation/streamline/test_absorb_mul_into_topk.py +++ b/tests/transformation/streamline/test_absorb_mul_into_topk.py @@ -34,6 +34,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.insert_topk import InsertTopK +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.absorb import AbsorbScalarMulAddIntoTopK @@ -65,7 +66,7 @@ def test_absorb_mul_into_topk(mul_positive, scalar): value_info=[a0, b0, c0], ) - model = helper.make_model(mul_graph, producer_name="mul_model") + model = qonnx_make_model(mul_graph, producer_name="mul_model") model = ModelWrapper(model) # initialize values # for mul diff --git a/tests/transformation/streamline/test_absorb_transp_into_flatten.py b/tests/transformation/streamline/test_absorb_transp_into_flatten.py index 1358d468c0..44b0c1d7e0 100644 --- a/tests/transformation/streamline/test_absorb_transp_into_flatten.py +++ b/tests/transformation/streamline/test_absorb_transp_into_flatten.py @@ -8,6 +8,7 @@ from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.absorb import AbsorbTransposeIntoFlatten @@ -45,7 +46,7 @@ def test_absorb_transp_into_flatten(perm, shape, ishape, data_layout): outputs=[outp], ) - model = helper.make_model(graph, producer_name="absorb_transpose_model") + model = qonnx_make_model(graph, producer_name="absorb_transpose_model") model = ModelWrapper(model) if shape is not None: model.graph.value_info.append(shape0) diff --git a/tests/transformation/streamline/test_collapse_repeated_op.py b/tests/transformation/streamline/test_collapse_repeated_op.py index 268e0ffc5c..c1d3ee0088 100644 --- a/tests/transformation/streamline/test_collapse_repeated_op.py +++ b/tests/transformation/streamline/test_collapse_repeated_op.py @@ -33,6 +33,7 @@ 
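# The CollapseRepeatedMul/CollapseRepeatedAdd transformations exercised by the
# surrounding test_collapse_repeated_op.py hunks rest on simple algebra: two
# back-to-back scalar Mul (or Add) nodes fold into a single node whose parameter is
# the product (or sum) of the two. A minimal, illustrative numpy check of that
# identity (values are made up for illustration):
import numpy as np

x = np.random.rand(2).astype(np.float32)
a, b = np.float32(1.5), np.float32(-2.0)
assert np.allclose((x * a) * b, x * (a * b))  # two Muls collapse into one Mul
assert np.allclose((x + a) + b, x + (a + b))  # two Adds collapse into one Add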
from onnx import TensorProto from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as ox from finn.transformation.streamline import CollapseRepeatedAdd, CollapseRepeatedMul @@ -46,7 +47,7 @@ def test_collapse_repeated_op(): add_param_1 = oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [2]) mul_param_1 = oh.make_tensor_value_info("mul_param_1", TensorProto.FLOAT, [2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -96,7 +97,7 @@ def test_collapse_repeated_only_if_linear(test_args): value_info += [oh.make_tensor_value_info("p4", TensorProto.FLOAT, [1])] value_info += [oh.make_tensor_value_info("p5", TensorProto.FLOAT, [1])] - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py b/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py index 04ab9bf0b9..89596a1c0f 100644 --- a/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py +++ b/tests/transformation/streamline/test_factor_out_mul_sign_magnitude.py @@ -33,6 +33,7 @@ from onnx import TensorProto from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as ox from finn.transformation.streamline import FactorOutMulSignMagnitude @@ -43,7 +44,7 @@ def test_factor_out_mul_sign_magnitude(): top_in = oh.make_tensor_value_info("top_in", TensorProto.FLOAT, [1, 2]) mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [1, 2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [1, 2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_linear_past_eltwise.py b/tests/transformation/streamline/test_linear_past_eltwise.py index 12633d750b..4e5dcd6386 100644 --- a/tests/transformation/streamline/test_linear_past_eltwise.py +++ b/tests/transformation/streamline/test_linear_past_eltwise.py @@ -35,6 +35,7 @@ from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveLinearPastEltwiseAdd @@ -78,7 +79,7 @@ def make_model(shape): outputs=[outp], ) - model = helper.make_model(graph, producer_name="add-model") + model = qonnx_make_model(graph, producer_name="add-model") model = ModelWrapper(model) # set initializers for scalar add/mul nodes @@ -156,7 +157,7 @@ def test_linear_past_eltwise_add_multiple_forks(ch, ifmdim): helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape) ] - modelproto = helper.make_model( + modelproto = qonnx_make_model( helper.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_maxpool_nhwc.py b/tests/transformation/streamline/test_maxpool_nhwc.py index aa77b5cf1a..d61eedaaf5 100644 --- a/tests/transformation/streamline/test_maxpool_nhwc.py +++ b/tests/transformation/streamline/test_maxpool_nhwc.py @@ -7,7 +7,7 @@ 
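# The test hunks in this series all follow the same construction pattern: build a
# small ONNX graph with onnx.helper, then wrap it via qonnx_make_model (assumed here
# to behave like helper.make_model with QONNX-appropriate defaults) and ModelWrapper.
# A hedged, minimal sketch of that shared pattern; the op type, shapes and datatype
# below are placeholders rather than values from any particular test:
from onnx import TensorProto, helper
from qonnx.core.datatype import DataType
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.util.basic import qonnx_make_model

def make_single_node_modelwrapper():
    inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4])
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 4])
    node = helper.make_node("Relu", ["inp"], ["outp"])
    graph = helper.make_graph(nodes=[node], name="graph", inputs=[inp], outputs=[outp])
    model = ModelWrapper(qonnx_make_model(graph, producer_name="example-model"))
    # FINN/QONNX datatype annotations sit on top of the ONNX container types
    model.set_tensor_datatype("inp", DataType["INT4"])
    return model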
from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.maxpoolnhwc import compute_pool_output_dim from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MakeMaxPoolNHWC @@ -56,7 +56,7 @@ def create_maxpool(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt) value_info=[outp_mp], ) - model = oh.make_model(graph, producer_name="maxpool_model") + model = qonnx_make_model(graph, producer_name="maxpool_model") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) diff --git a/tests/transformation/streamline/test_move_add_past_mul.py b/tests/transformation/streamline/test_move_add_past_mul.py index 0fb4dd9f7a..ea9c2a954d 100644 --- a/tests/transformation/streamline/test_move_add_past_mul.py +++ b/tests/transformation/streamline/test_move_add_past_mul.py @@ -33,6 +33,7 @@ from onnx import TensorProto from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as ox from finn.transformation.streamline import MoveAddPastMul @@ -44,7 +45,7 @@ def test_move_add_past_mul_single(): add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [2]) mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -76,7 +77,7 @@ def test_move_add_past_mul_multi(): add_param_1 = oh.make_tensor_value_info("add_param_1", TensorProto.FLOAT, [2]) mul_param_1 = oh.make_tensor_value_info("mul_param_1", TensorProto.FLOAT, [2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -116,7 +117,7 @@ def test_move_add_past_mul_only_if_linear(): value_info += [oh.make_tensor_value_info("mul1_param", TensorProto.FLOAT, [1])] value_info += [oh.make_tensor_value_info("mul2_param", TensorProto.FLOAT, [1])] value_info += [oh.make_tensor_value_info("mul3_param", TensorProto.FLOAT, [1])] - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_move_chw_add_past_conv.py b/tests/transformation/streamline/test_move_chw_add_past_conv.py index 7eb7f9f1af..e1b324a798 100644 --- a/tests/transformation/streamline/test_move_chw_add_past_conv.py +++ b/tests/transformation/streamline/test_move_chw_add_past_conv.py @@ -33,6 +33,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveAddPastConv @@ -72,7 +73,7 @@ def test_move_chw_add_past_conv(idim, k, s, ich, och): add_node = helper.make_node("Add", ["inp", "a0"], ["add_out"]) conv_node = helper.make_node("Conv", ["add_out", "a1"], ["outp"], **conv_config) - model = helper.make_model( + model = qonnx_make_model( helper.make_graph( nodes=[add_node, conv_node], name="move-add-graph", diff --git 
a/tests/transformation/streamline/test_move_flatten_past_affine.py b/tests/transformation/streamline/test_move_flatten_past_affine.py index 8c3f71d1f3..22c5e19fac 100644 --- a/tests/transformation/streamline/test_move_flatten_past_affine.py +++ b/tests/transformation/streamline/test_move_flatten_past_affine.py @@ -36,7 +36,7 @@ from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveFlattenPastAffine @@ -74,7 +74,7 @@ def test_move_flatten_past_affine(data_layout, batch_size): value_info=[a0, a1, a2], ) - model = helper.make_model(graph, producer_name="move_reshape_model") + model = qonnx_make_model(graph, producer_name="move_reshape_model") model = ModelWrapper(model) # initialize values diff --git a/tests/transformation/streamline/test_move_flatten_past_topk.py b/tests/transformation/streamline/test_move_flatten_past_topk.py index d1478088e2..82336cd3e6 100644 --- a/tests/transformation/streamline/test_move_flatten_past_topk.py +++ b/tests/transformation/streamline/test_move_flatten_past_topk.py @@ -36,7 +36,7 @@ from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.insert_topk import InsertTopK -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveFlattenPastTopK @@ -67,7 +67,7 @@ def test_move_flatten_past_topk(data_layout, batch_size): outputs=[outp], ) - model = helper.make_model(graph, producer_name="move_flatten_model") + model = qonnx_make_model(graph, producer_name="move_flatten_model") model = ModelWrapper(model) model.set_tensor_datatype("inp", DataType["INT2"]) diff --git a/tests/transformation/streamline/test_move_identical_op_past_join_op.py b/tests/transformation/streamline/test_move_identical_op_past_join_op.py index 4986363ff4..7be9763162 100644 --- a/tests/transformation/streamline/test_move_identical_op_past_join_op.py +++ b/tests/transformation/streamline/test_move_identical_op_past_join_op.py @@ -30,7 +30,7 @@ from onnx import TensorProto from onnx import helper as oh from qonnx.core.modelwrapper import ModelWrapper -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd @@ -81,7 +81,7 @@ def create_model(perm): ], ) - onnx_model = oh.make_model(graph, producer_name="test_model") + onnx_model = qonnx_make_model(graph, producer_name="test_model") model = ModelWrapper(onnx_model) return model diff --git a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py index bf25eee9e6..6126acd9e3 100644 --- a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py +++ b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py @@ -32,6 +32,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes 
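# The streamline reorder tests around this point share a verify-by-execution recipe:
# run the model before and after the transformation and compare outputs. A rough
# sketch of that recipe under assumed names (check_reorder_preserves_output and its
# arguments are illustrative, not an existing helper in the repo):
import finn.core.onnx_exec as oxe
from qonnx.core.datatype import DataType
from qonnx.util.basic import gen_finn_dt_tensor

def check_reorder_preserves_output(model, transform, input_name, input_shape):
    x = gen_finn_dt_tensor(DataType["INT2"], input_shape)
    idict = {input_name: x}
    out_name = model.graph.output[0].name
    y_before = oxe.execute_onnx(model, idict)[out_name]
    model_t = model.transform(transform)
    y_after = oxe.execute_onnx(model_t, idict)[out_name]
    return (y_before == y_after).all()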
+from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveMaxPoolPastMultiThreshold @@ -99,7 +100,7 @@ def test_move_maxpool_past_multithreshold(): ) ] - modelproto = helper.make_model( + modelproto = qonnx_make_model( helper.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_move_mul_past_dw_conv.py b/tests/transformation/streamline/test_move_mul_past_dw_conv.py index 401631a728..72a6650ec4 100644 --- a/tests/transformation/streamline/test_move_mul_past_dw_conv.py +++ b/tests/transformation/streamline/test_move_mul_past_dw_conv.py @@ -33,7 +33,7 @@ from qonnx.custom_op.general.im2col import compute_conv_output_dim from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveMulPastDWConv @@ -94,7 +94,7 @@ def test_move_mul_past_dw_conv(ifm_dim, ifm_ch, k, stride, pad_amt, dw): value_info=[mul, W], ) - model = helper.make_model(graph, producer_name="mulpastconv-model") + model = qonnx_make_model(graph, producer_name="mulpastconv-model") model = ModelWrapper(model) inp_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, ifm_dim, ifm_dim]) mul_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, 1, 1]) diff --git a/tests/transformation/streamline/test_move_mul_past_maxpool.py b/tests/transformation/streamline/test_move_mul_past_maxpool.py index fcc1b65132..3bae2905a0 100755 --- a/tests/transformation/streamline/test_move_mul_past_maxpool.py +++ b/tests/transformation/streamline/test_move_mul_past_maxpool.py @@ -34,7 +34,7 @@ from qonnx.custom_op.general.maxpoolnhwc import compute_pool_output_dim from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveMulPastMaxPool @@ -92,7 +92,7 @@ def test_move_mul_past_maxpool(ifm_dim, ifm_ch, k, stride, pad, cw, negative): value_info=[mul], ) - model = helper.make_model(graph, producer_name="mulpastmaxpool-model") + model = qonnx_make_model(graph, producer_name="mulpastmaxpool-model") model = ModelWrapper(model) inp_values = gen_finn_dt_tensor(DataType["INT2"], [1, ifm_ch, ifm_dim, ifm_dim]) mul_values = np.random.random_sample(mul_shape).astype(np.float32) diff --git a/tests/transformation/streamline/test_move_scalar_past_conv.py b/tests/transformation/streamline/test_move_scalar_past_conv.py index 59b8b8f8b2..bb99fd1d8f 100644 --- a/tests/transformation/streamline/test_move_scalar_past_conv.py +++ b/tests/transformation/streamline/test_move_scalar_past_conv.py @@ -32,6 +32,7 @@ from onnx import TensorProto from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as ox from finn.transformation.streamline import MoveAddPastConv, MoveScalarMulPastConv @@ -79,7 +80,7 @@ def test_move_scalar_past_conv(test_args, padding): value_info += [oh.make_tensor_value_info("p2", TensorProto.FLOAT, conv_param_shape)] value_info += [oh.make_tensor_value_info("p3", TensorProto.FLOAT, 
conv_param_shape)] - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -158,7 +159,7 @@ def test_move_scalar_past_conv_only_if_linear(test_args): value_info += [oh.make_tensor_value_info("p4", TensorProto.FLOAT, conv_param_shape)] value_info += [oh.make_tensor_value_info("p5", TensorProto.FLOAT, conv_param_shape)] - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_move_scalar_past_matmul.py b/tests/transformation/streamline/test_move_scalar_past_matmul.py index 6fdaaadfae..6c788294bc 100644 --- a/tests/transformation/streamline/test_move_scalar_past_matmul.py +++ b/tests/transformation/streamline/test_move_scalar_past_matmul.py @@ -33,6 +33,7 @@ from onnx import TensorProto from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as ox from finn.transformation.streamline import ( @@ -47,7 +48,7 @@ def test_move_scalar_mul_past_matmul(): mul_param = oh.make_tensor_value_info("mul_param", TensorProto.FLOAT, [1, 1]) matmul_param = oh.make_tensor_value_info("matmul_param", TensorProto.FLOAT, [2, 2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [1, 2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -79,7 +80,7 @@ def test_move_scalar_add_past_matmul(): add_param = oh.make_tensor_value_info("add_param", TensorProto.FLOAT, [1, 1]) matmul_param = oh.make_tensor_value_info("matmul_param", TensorProto.FLOAT, [2, 2]) top_out = oh.make_tensor_value_info("top_out", TensorProto.FLOAT, [1, 2]) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], @@ -122,7 +123,7 @@ def test_move_scalar_past_matmul_only_if_linear(test_args): p2 = oh.make_tensor_value_info("p2", TensorProto.FLOAT, matmul_shape) p3 = oh.make_tensor_value_info("p3", TensorProto.FLOAT, matmul_shape) p4 = oh.make_tensor_value_info("p4", TensorProto.FLOAT, matmul_shape) - modelproto = oh.make_model( + modelproto = qonnx_make_model( oh.make_graph( name="test", inputs=[top_in], diff --git a/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py b/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py index 9662ba8a90..6bf72961ac 100644 --- a/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py +++ b/tests/transformation/streamline/test_move_transpose_past_scalar_mul.py @@ -36,6 +36,7 @@ from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MoveTransposePastScalarMul @@ -71,7 +72,7 @@ def test_move_transpose_past_scalar_mul(perm, scalar, data_layout): value_info=[a0], ) - model = helper.make_model(graph, producer_name="mv_transpose_model") + model = qonnx_make_model(graph, producer_name="mv_transpose_model") model = ModelWrapper(model) # initialize values diff --git a/tests/transformation/streamline/test_round_thresholds.py b/tests/transformation/streamline/test_round_thresholds.py index 1ec5f02e87..85c60b37d5 100644 --- a/tests/transformation/streamline/test_round_thresholds.py +++ 
b/tests/transformation/streamline/test_round_thresholds.py @@ -32,6 +32,7 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.basic import qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline import RoundAndClipThresholds @@ -46,7 +47,7 @@ def test_round_thresholds(): "MultiThreshold", ["v", "thresholds"], ["out"], domain="qonnx.custom_op.general" ) graph_def = helper.make_graph([node_def], "test_model", [v, thresholds], [out]) - model_def = helper.make_model(graph_def) + model_def = qonnx_make_model(graph_def) model = ModelWrapper(model_def) threshold_val = np.asarray([[-1.1], [0.7], [2.3], [5.1]], dtype=np.float32) model.set_initializer("thresholds", threshold_val) diff --git a/tests/transformation/streamline/test_scale_resize_nhwc.py b/tests/transformation/streamline/test_scale_resize_nhwc.py index f10930f4e7..5e107448f8 100644 --- a/tests/transformation/streamline/test_scale_resize_nhwc.py +++ b/tests/transformation/streamline/test_scale_resize_nhwc.py @@ -9,7 +9,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model import finn.core.onnx_exec as oxe from finn.transformation.streamline.reorder import MakeScaleResizeNHWC @@ -58,7 +58,7 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): value_info=[outp_up, param, roi], ) - model = oh.make_model(graph, producer_name="resize_model1") + model = qonnx_make_model(graph, producer_name="resize_model1") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) @@ -113,7 +113,7 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): value_info=[outp_tr, param, roi], ) - model = oh.make_model(graph, producer_name="resize_model2") + model = qonnx_make_model(graph, producer_name="resize_model2") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) @@ -180,7 +180,7 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): value_info=[outp_up, outp_tr, param, roi], ) - model = oh.make_model(graph, producer_name="resize_model3") + model = qonnx_make_model(graph, producer_name="resize_model3") model = ModelWrapper(model) model.set_tensor_datatype("inp", idt) model.set_tensor_datatype("outp", idt) From 450ac0a0a313a65e1313e646349957be238ac62f Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Tue, 31 Jan 2023 17:45:44 +0000 Subject: [PATCH 341/628] [req]: update to qonnx 1.13.0 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 348b1afab9..83aad07d72 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,11 +4,11 @@ dataclasses-json==0.5.7 docrep==0.2.7 gspread==3.6.0 numpy==1.22.0 -onnx==1.11.0 +onnx==1.13.0 onnxoptimizer onnxruntime==1.11.1 pre-commit==2.9.2 -protobuf==3.20.2 +protobuf==3.20.3 psutil==5.9.4 pyscaffold==3.2.1 scipy==1.5.2 From d08c8c0cb275b506c225eedc2429dbfc54bb7a3d Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Tue, 31 Jan 2023 17:46:56 +0000 Subject: [PATCH 342/628] [qonnx]: update commit hash qonnx --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 
16960c71e3..e370149f3f 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="f14d7dc92a6baeffa2bef811e902abb121a6f696" +QONNX_COMMIT="e8ce71cedac960ab340f18a81910d9173997cec5" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From c13f2c1985ab5d10d35ffddb63cee49b2e648abb Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 17:57:15 +0000 Subject: [PATCH 343/628] pre-commit cleanup --- src/finn/util/create.py | 6 +++++- tests/fpgadataflow/test_depthwise_convolution.py | 6 +++++- tests/fpgadataflow/test_fpgadataflow_mvau.py | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/finn/util/create.py b/src/finn/util/create.py index 642cabcf6d..ed3e1a843e 100644 --- a/src/finn/util/create.py +++ b/src/finn/util/create.py @@ -30,7 +30,11 @@ from onnx import TensorProto, helper from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model +from qonnx.util.basic import ( + calculate_signed_dot_prod_range, + gen_finn_dt_tensor, + qonnx_make_model, +) def hls_random_mlp_maker(layer_spec): diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index caa22e077f..8ab22bcfdc 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ b/tests/fpgadataflow/test_depthwise_convolution.py @@ -37,7 +37,11 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames from qonnx.transformation.infer_shapes import InferShapes -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model +from qonnx.util.basic import ( + calculate_signed_dot_prod_range, + gen_finn_dt_tensor, + qonnx_make_model, +) import finn.core.onnx_exec as oxe from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim diff --git a/tests/fpgadataflow/test_fpgadataflow_mvau.py b/tests/fpgadataflow/test_fpgadataflow_mvau.py index f3efd6a686..b80ef76a19 100644 --- a/tests/fpgadataflow/test_fpgadataflow_mvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_mvau.py @@ -36,7 +36,11 @@ from qonnx.custom_op.general.multithreshold import multithreshold from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.general import GiveUniqueNodeNames -from qonnx.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor, qonnx_make_model +from qonnx.util.basic import ( + calculate_signed_dot_prod_range, + gen_finn_dt_tensor, + qonnx_make_model, +) import finn.core.onnx_exec as oxe from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer From 5826a9e2ca3df3ec4b4ba6aca73241fac1e0438a Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Tue, 31 Jan 2023 18:13:18 +0000 Subject: [PATCH 344/628] [notebooks]: use new utility function to create onnx model --- notebooks/advanced/2_custom_op.ipynb | 3 ++- notebooks/basics/0_how_to_work_with_onnx.ipynb | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/notebooks/advanced/2_custom_op.ipynb b/notebooks/advanced/2_custom_op.ipynb index c27f8bdca7..051a406708 100644 --- a/notebooks/advanced/2_custom_op.ipynb +++ 
b/notebooks/advanced/2_custom_op.ipynb @@ -178,6 +178,7 @@ "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", "from onnx import TensorProto\n", + "from qonnx.util.basic import qonnx_make_model\n", "\n", "def make_graph(ishape, exp, op_type = \"MyPythonPowerOp\"):\n", " inp = helper.make_tensor_value_info(\n", @@ -204,7 +205,7 @@ " graph = helper.make_graph(\n", " nodes=[custom_node], name=\"custom_graph\", inputs=[inp], outputs=[outp]\n", " )\n", - " model = helper.make_model(graph, producer_name=\"custom-model\")\n", + " model = qonnx_make_model(graph, producer_name=\"custom-model\")\n", " return ModelWrapper(model)" ] }, diff --git a/notebooks/basics/0_how_to_work_with_onnx.ipynb b/notebooks/basics/0_how_to_work_with_onnx.ipynb index 514efd1693..b6a5a04815 100644 --- a/notebooks/basics/0_how_to_work_with_onnx.ipynb +++ b/notebooks/basics/0_how_to_work_with_onnx.ipynb @@ -36,6 +36,7 @@ "outputs": [], "source": [ "import onnx\n", + "from qonnx.util.basic import qonnx_make_model\n", "\n", "Add1_node = onnx.helper.make_node(\n", " 'Add',\n", @@ -158,7 +159,7 @@ "metadata": {}, "outputs": [], "source": [ - "onnx_model = onnx.helper.make_model(graph, producer_name=\"simple-model\")\n", + "onnx_model = qonnx_make_model(graph, producer_name=\"simple-model\")\n", "onnx.save(onnx_model, '/tmp/simple_model.onnx')" ] }, @@ -550,7 +551,7 @@ "metadata": {}, "outputs": [], "source": [ - "onnx_model1 = onnx.helper.make_model(graph, producer_name=\"simple-model1\")\n", + "onnx_model1 = qonnx_make_model(graph, producer_name=\"simple-model1\")\n", "onnx.save(onnx_model1, '/tmp/simple_model1.onnx')" ] }, From ddde137e207d3faf4ed77fd4a512cbcb81e2c795 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 31 Jan 2023 22:05:30 +0000 Subject: [PATCH 345/628] [Test] Fix check for padding layers in conv test --- tests/fpgadataflow/test_convert_to_hls_conv_layer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 8c9f110c31..5c5363ca2d 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -175,8 +175,11 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod assert np.isclose(exp_cycles, cycles_rtlsim, atol=11) assert exp_cycles != 0 - if pad == 1: - padding_node = new_model.get_nodes_by_op_type("FMPadding_Batch")[0] + if pad: + if use_rtl_swg: + padding_node = new_model.get_nodes_by_op_type("FMPadding_rtl")[0] + else: + padding_node = new_model.get_nodes_by_op_type("FMPadding_Batch")[0] padding_inst = getCustomOp(padding_node) assert padding_inst.get_nodeattr("SIMD") == in_chn From 1c2a1c122844cbd20dec759414034b7be8eaa794 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 1 Feb 2023 11:21:02 +0100 Subject: [PATCH 346/628] [Stitch] add warning if first FIFO in graph has impl_style=vivado --- .../transformation/fpgadataflow/create_stitched_ip.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 52e4e88b40..8e2c69bad4 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -310,6 +310,14 @@ def apply(self, model): behavior. 
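# The warning added in this hunk keys off the impl_style node attribute of the first
# StreamingFIFO in the graph, read through its custom-op wrapper. A small hedged
# sketch of that attribute query (first_fifo_is_vivado_style is an illustrative
# helper name, not a function in the codebase):
from qonnx.custom_op.registry import getCustomOp

def first_fifo_is_vivado_style(model):
    first_node = model.graph.node[0]
    if first_node.op_type != "StreamingFIFO":
        return False
    return getCustomOp(first_node).get_nodeattr("impl_style") == "vivado"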
It is strongly recommended to insert FIFOs prior to calling CreateStitchedIP.""" ) + if model.graph.node[0].op_type == "StreamingFIFO": + firstfifo = getCustomOp(model.graph.node[0]) + if firstfifo.get_nodeattr("impl_style") == "vivado": + warnings.warn( + """First FIFO has impl_style=vivado, which may cause + simulation glitches (e.g. dropping the first input sample + after reset).""" + ) for node in model.graph.node: # ensure that all nodes are fpgadataflow, and that IPs are generated assert is_fpgadataflow_node( From aad29ebf3a153a8c95a5af0376088d00a69b5ee0 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 1 Feb 2023 10:21:19 +0000 Subject: [PATCH 347/628] [Tests] Ensure that i/o FIFOs are created for dynamic swg tests --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index e8807fd24f..1c7a284ae0 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -287,7 +287,7 @@ def test_fpgadataflow_conv_dynamic(cfg): getCustomOp(comp_node).set_nodeattr("SIMD", 4) getCustomOp(comp_node).set_nodeattr("PE", 4) model = model.transform(InsertDWC()) - model = model.transform(InsertFIFO()) + model = model.transform(InsertFIFO(create_shallow_fifos=True)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(GiveReadableTensorNames()) model = model.transform(PrepareIP("xc7z020clg400-1", 5)) From 38e9d5afd096004e9b3b1494d02641cb76d60b7a Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 1 Feb 2023 11:21:23 +0100 Subject: [PATCH 348/628] [FIFO] use impl_style=hls for first&last FIFOs to avoid glitch --- .../fpgadataflow/insert_fifo.py | 20 ++++++------------- .../fpgadataflow/set_fifo_depths.py | 7 ++++++- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index 50da9cdf16..bfeee95e9b 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -209,13 +209,9 @@ def apply(self, model): graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) - if ( - self.max_qsrl_depth is None - or fifo_depth <= self.max_qsrl_depth - ): - impl_style = "rtl" - else: - impl_style = "vivado" + # only use rtl-style FIFOs to avoid simulation bug + # (top-level IOs should not have impl_style=vivado) + impl_style = "rtl" fifo_node = oh.make_node( "StreamingFIFO", @@ -271,13 +267,9 @@ def apply(self, model): graph.value_info.append(fifo_input_tensor) model.set_tensor_datatype(fifo_input_tensor.name, dtype) - if ( - self.max_qsrl_depth is None - or fifo_depth <= self.max_qsrl_depth - ): - impl_style = "rtl" - else: - impl_style = "vivado" + # only use rtl-style FIFOs to avoid simulation bug + # (top-level IOs should not have impl_style=vivado) + impl_style = "rtl" fifo_node = oh.make_node( "StreamingFIFO", diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 2619557edf..35e7b9e6c9 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -412,8 +412,13 @@ def apply(self, model): node_inst = getCustomOp(node) 
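# The added lines that follow implement the exception this commit introduces: a FIFO
# whose stream connects directly to a top-level graph input or output keeps the rtl
# implementation style even when its depth exceeds max_qsrl_depth, since vivado-style
# FIFOs at the design boundary triggered the simulation issue called out in the
# comments added just below. The membership test is a plain name check against the
# graph IO lists, e.g.
#   node.input[0] in [x.name for x in model.graph.input]
#   node.output[0] in [x.name for x in model.graph.output]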
node_inst.set_nodeattr("depth", depth) node_inst.set_nodeattr("depth_monitor", 0) + # exception for top-level IO FIFOs which cause a bug in simulation + # (top-level IOs should not have impl_style=vivado) + toplevel_in = node.input[0] in [x.name for x in model.graph.input] + toplevel_out = node.output[0] in [x.name for x in model.graph.output] + toplevel_style_exception = toplevel_in or toplevel_out # Set FIFO implementation/ram styles - if depth > self.max_qsrl_depth: + if (depth > self.max_qsrl_depth) and (not toplevel_style_exception): node_inst.set_nodeattr("impl_style", "vivado") node_inst.set_nodeattr("ram_style", self.vivado_ram_style) else: From d19255d81892dea491b990e17aa5237bd1b82087 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 1 Feb 2023 11:32:17 +0100 Subject: [PATCH 349/628] [Build] calculate stable_throughput metric as part of step_measure_rtlsim_performance --- src/finn/builder/build_dataflow_steps.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index b0f7b6ec6c..fb4d60c1eb 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -667,7 +667,6 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi ) rtlsim_bs = int(cfg.rtlsim_batch_size) if force_python_rtlsim: - # run with single input to get latency orig_rtlsim_trace_depth = get_rtlsim_trace_depth() assert rtlsim_bs > 0, "rtlsim batch size must be >0" if cfg.verify_save_rtlsim_waveforms: @@ -680,9 +679,11 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi rtlsim_model.set_metadata_prop( "extra_verilator_args", str(["-CFLAGS", "-O3"]) ) + # run with single input to get latency + rtlsim_latency_dict = throughput_test_rtlsim(rtlsim_model, 1) + # run with batch to get stable-state throughput rtlsim_perf_dict = throughput_test_rtlsim(rtlsim_model, rtlsim_bs) - rtlsim_latency = rtlsim_perf_dict["cycles"] - rtlsim_perf_dict["latency_cycles"] = rtlsim_latency + rtlsim_perf_dict["latency_cycles"] = rtlsim_latency_dict["cycles"] else: rtlsim_perf_dict = verilator_fifosim(model, rtlsim_bs) # keep keys consistent between the Python and C++-styles @@ -696,6 +697,19 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi for (key, val) in rtlsim_perf_dict.items(): if "max_count" in key: del rtlsim_perf_dict[key] + # estimate stable-state throughput based on latency+throughput + if rtlsim_bs == 1: + rtlsim_perf_dict["stable_throughput[images/s]"] = rtlsim_perf_dict[ + "throughput[images/s]" + ] + else: + total_cycles = rtlsim_perf_dict["cycles"] + latency_cycles = rtlsim_perf_dict["latency_cycles"] + stablestate_cycles = total_cycles - latency_cycles + clk_ns = float(model.get_metadata_prop("clk_ns")) + fclk_mhz = 1 / (clk_ns * 0.001) + runtime_s = (stablestate_cycles * clk_ns) * (10**-9) + rtlsim_perf_dict["stable_throughput[images/s]"] = rtlsim_bs / runtime_s with open(report_dir + "/rtlsim_performance.json", "w") as f: json.dump(rtlsim_perf_dict, f, indent=2) From a1519ef889d2c7df6b25567d9a647a2ae7f426f7 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 1 Feb 2023 11:32:48 +0100 Subject: [PATCH 350/628] [Test] add cnv testcase as part for FIFO sizing test --- tests/fpgadataflow/test_fifosizing.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 
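# The stable_throughput[images/s] figure that the updated fifosizing test asserts
# against is derived in step_measure_rtlsim_performance (previous hunk) from the
# batched rtlsim run minus the single-input latency. A small worked sketch of that
# arithmetic with made-up numbers (clk_ns, cycle counts and batch size are
# illustrative only):
rtlsim_bs = 100                    # images in the batched rtlsim run
total_cycles = 120_000             # cycles for the whole batch
latency_cycles = 20_000            # cycles for a single input (pipeline fill)
clk_ns = 10.0                      # target clock period in ns
stablestate_cycles = total_cycles - latency_cycles
runtime_s = stablestate_cycles * clk_ns * 1e-9
stable_throughput = rtlsim_bs / runtime_s  # = 100 / 0.001 = 100000 images/s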
f4f2b8dbff..9399fbe394 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -55,7 +55,7 @@ def fetch_test_model(topology, wbits=2, abits=2): @pytest.mark.parametrize( "method", ["largefifo_rtlsim_python", "largefifo_rtlsim_cpp", "characterize"] ) -@pytest.mark.parametrize("topology", ["tfc"]) +@pytest.mark.parametrize("topology", ["tfc", "cnv"]) def test_fifosizing_linear(method, topology): force_python_rtlsim = "python" in method method_key = "largefifo_rtlsim" if "largefifo_rtlsim" in method else "characterize" @@ -68,7 +68,7 @@ def test_fifosizing_linear(method, topology): force_python_rtlsim=force_python_rtlsim, synth_clk_period_ns=10.0, board="Pynq-Z1", - rtlsim_batch_size=100, + rtlsim_batch_size=100 if topology == "tfc" else 2, shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, @@ -83,7 +83,7 @@ def test_fifosizing_linear(method, topology): with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: sim_data = json.load(f) assert ( - float(sim_data["throughput[images/s]"]) + float(sim_data["stable_throughput[images/s]"]) / float(est_data["estimated_throughput_fps"]) > 0.9 ) From 554e6062969a7c73fdc7fea0c102343164f37565 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Wed, 1 Feb 2023 14:39:58 +0000 Subject: [PATCH 351/628] [deps]: updated commit hash QONNX --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index e370149f3f..7078b284a9 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="e8ce71cedac960ab340f18a81910d9173997cec5" +QONNX_COMMIT="ce321742d98f23909a890ed680a9c99640d7aaab" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From 423c32aaf42bd1bc050675ff5e7096892cd30ab1 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 1 Feb 2023 15:41:54 +0100 Subject: [PATCH 352/628] [DWC] always use hls mode during insertion for better compat --- src/finn/transformation/fpgadataflow/insert_dwc.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py index 632d1f813b..cff8b60267 100644 --- a/src/finn/transformation/fpgadataflow/insert_dwc.py +++ b/src/finn/transformation/fpgadataflow/insert_dwc.py @@ -81,15 +81,11 @@ def apply(self, model): dwc_in_width = n0.get_outstream_width() # determine dwc outwidth dwc_out_width = n1.get_instream_width() - larger_width = max(dwc_in_width, dwc_out_width) - smaller_width = min(dwc_in_width, dwc_out_width) - both_8bit_aligned = (larger_width % 8 == 0) and ( - smaller_width % 8 == 0 - ) - if both_8bit_aligned: - impl_style = "vivado" - else: - impl_style = "hls" + # use hls mode by default since it supports more configs + # vivado mode can be manually enabled by user, but does not + # support e.g. 
node-by-node rtlsim neded for + # characterization-based FIFO sizing + impl_style = "hls" # determine shape for dwc dwc_shape = n0.get_normal_output_shape() From 510ee6ba6a76f4327e0387307b2e9fdc1d8924b9 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 1 Feb 2023 14:51:35 +0000 Subject: [PATCH 353/628] [Docs] Update links for getting started --- docs/finn/getting_started.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 40425c119f..69d29e5707 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -20,7 +20,7 @@ How do I use FINN? ================== We strongly recommend that you first watch one of the pre-recorded `FINN tutorial `_ -videos, then follow the Jupyter notebook tutorials for `training and deploying an MLP for network intrusion detection `_ . +videos, then follow the Jupyter notebook tutorials for `training and deploying an MLP for network intrusion detection `_ . You may also want to check out the other :ref:`tutorials`, and the `FINN examples repository `_ . Our aim in FINN is *not* to accelerate common off-the-shelf neural networks, but instead provide you with a set of tools @@ -28,19 +28,19 @@ to train *customized* networks and create highly-efficient FPGA implementations In general, the approach for using the FINN framework is as follows: 1. Train your own quantized neural network (QNN) in `Brevitas `_. We have some `guidelines `_ on quantization-aware training (QAT). -2. Export to FINN-ONNX by following `this tutorial `_ . -3. Use FINN's ``build_dataflow`` system on the exported model by following this `tutorial `_ +2. Export to FINN-ONNX by following `this tutorial `_ . +3. Use FINN's ``build_dataflow`` system on the exported model by following this `tutorial `_ 4. Adjust your QNN topology, quantization settings and ``build_dataflow`` configuration to get the desired results. Please note that the framework is still under development, and how well this works will depend on how similar your custom network is to the examples we provide. If there are substantial differences, you will most likely have to write your own Python scripts that call the appropriate FINN compiler functions that process your design correctly, or adding new functions (including -Vivado HLS layers) +Vitis HLS layers) as required. -The `advanced FINN tutorials `_ can be useful here. +The `advanced FINN tutorials `_ can be useful here. For custom networks, we recommend making a copy of the `BNN-PYNQ end-to-end -Jupyter notebook tutorials `_ as a starting point, visualizing the model at intermediate +Jupyter notebook tutorials `_ as a starting point, visualizing the model at intermediate steps and adding calls to new transformations as needed. Once you have a working flow, you can implement a command line entry for this by using the "advanced mode" described in the :ref:`command_line` section. @@ -50,7 +50,8 @@ Running FINN in Docker FINN runs inside a Docker container, it comes with a script to easily build and launch the container. If you are not familiar with Docker, there are many excellent `online resources `_ to get started. You may want to review the :ref:`General FINN Docker tips` and :ref:`Environment variables` as well. If you want to use prebuilt images, read :ref:`Using a prebuilt image`. 
-The ``run-docker.sh`` script that can be launched in the following modes: + +The above mentioned script to build and launch the FINN docker container is called `run-docker.sh `_ . It can be launched in the following modes: Launch interactive shell ************************ @@ -214,7 +215,7 @@ We also recommend running the FINN compiler on a system with sufficiently strong hardware: * **RAM.** Depending on your target FPGA platform, your system must have sufficient RAM to be - able to run Vivado/Vitis synthesis for that part. See `this page `_ + able to run Vivado/Vitis synthesis for that part. See `this page `_ for more information. For targeting Zynq and Zynq UltraScale+ parts, at least 8 GB is recommended. Larger parts may require up to 16 GB. For targeting Alveo parts with Vitis, at least 64 GB RAM is recommended. From 431d74c7fe2a2a96047180f1ccad777ffcee2f33 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 1 Feb 2023 18:13:21 +0000 Subject: [PATCH 354/628] [Builder] Move extraction of rtlsim depth --- src/finn/builder/build_dataflow_steps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index b0f7b6ec6c..2ee898bc7d 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -666,9 +666,9 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi + "in FINN C++ verilator driver, falling back to Python" ) rtlsim_bs = int(cfg.rtlsim_batch_size) + orig_rtlsim_trace_depth = get_rtlsim_trace_depth() if force_python_rtlsim: # run with single input to get latency - orig_rtlsim_trace_depth = get_rtlsim_trace_depth() assert rtlsim_bs > 0, "rtlsim batch size must be >0" if cfg.verify_save_rtlsim_waveforms: # set depth to 3 for layer-by-layer visibility From b9d5fbe61e4a388311bd39e2d40f9f42cb9de699 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 2 Feb 2023 11:30:41 +0000 Subject: [PATCH 355/628] [Docs] Remove finn-base dependency for docs build --- setup.cfg | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index a1d0fef6cb..f38fe11ad2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,9 +72,8 @@ exclude = # Add here additional requirements for extra features, to install with: # `pip install FINN[PDF]` like: # PDF = ReportLab; RXP -# finn-base is needed to build the full set of docs +# qonnx is needed to build the full set of docs docs = - finn-base==0.0.3 docutils==0.17.1 dataclasses-json==0.5.7 gspread==3.6.0 From 007cb4320fdaaddd76e0b340b5818e9af88396ed Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 2 Feb 2023 11:48:43 +0000 Subject: [PATCH 356/628] [Docs] Update python version in rtd yaml --- .readthedocs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 3601fcdccf..478957be11 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -35,7 +35,7 @@ sphinx: configuration: docs/finn/conf.py python: - version: 3.7 + version: 3.8 install: - method: pip path: . 
From 8a2a9b558f8c4a0705c0ce300bf69b605861d7ae Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 3 Feb 2023 18:44:01 +0100 Subject: [PATCH 357/628] Fix top module setting in CreateStitchedIP --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 8e2c69bad4..d1cb3c4af9 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -412,7 +412,7 @@ def apply(self, model): wrapper_filename = "%s/hdl/%s_wrapper.v" % (bd_base, block_name) tcl.append("add_files -norecurse %s" % wrapper_filename) model.set_metadata_prop("wrapper_filename", wrapper_filename) - tcl.append("set_property top finn_design_wrapper [current_fileset]") + tcl.append("set_property top %s_wrapper [current_fileset]" % block_name) # synthesize to DCP and export stub, DCP and constraints if self.vitis: tcl.append( From 146cd2b80dacfcb41d6c9d01f62c96c9f9b5c719 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Tue, 7 Feb 2023 10:45:36 +0300 Subject: [PATCH 358/628] [Lint] bump isort version to 5.12.0 --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5a7f70f8f6..126a4ac4b2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: args: ['--fix=no'] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort From c3d4f1520924cb223ca9f9b370dc7a1cf6e7b57d Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 7 Feb 2023 15:36:40 +0000 Subject: [PATCH 359/628] [Notebooks] General updates of Jupyter notebooks --- notebooks/advanced/0_custom_analysis_pass.ipynb | 2 +- .../advanced/1_custom_transformation_pass.ipynb | 2 +- notebooks/advanced/2_custom_op.ipynb | 2 +- notebooks/basics/0_how_to_work_with_onnx.ipynb | 2 +- notebooks/basics/1_brevitas_network_import.ipynb | 2 +- .../bnn-pynq/cnv_end2end_example.ipynb | 2 +- .../bnn-pynq/tfc_end2end_example.ipynb | 2 +- .../bnn-pynq/tfc_end2end_verification.ipynb | 14 +++++++++++--- .../cybersecurity/1-train-mlp-with-brevitas.ipynb | 2 +- .../2-import-into-finn-and-verify.ipynb | 2 +- .../3-build-accelerator-with-finn.ipynb | 2 +- 11 files changed, 21 insertions(+), 13 deletions(-) diff --git a/notebooks/advanced/0_custom_analysis_pass.ipynb b/notebooks/advanced/0_custom_analysis_pass.ipynb index a4ad32ed7f..f8444520c3 100644 --- a/notebooks/advanced/0_custom_analysis_pass.ipynb +++ b/notebooks/advanced/0_custom_analysis_pass.ipynb @@ -137,7 +137,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/advanced/1_custom_transformation_pass.ipynb b/notebooks/advanced/1_custom_transformation_pass.ipynb index e40a534af5..391e852a71 100644 --- a/notebooks/advanced/1_custom_transformation_pass.ipynb +++ b/notebooks/advanced/1_custom_transformation_pass.ipynb @@ -233,7 +233,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/advanced/2_custom_op.ipynb b/notebooks/advanced/2_custom_op.ipynb index 051a406708..1ef60fd11c 100644 --- a/notebooks/advanced/2_custom_op.ipynb +++ b/notebooks/advanced/2_custom_op.ipynb @@ -658,7 +658,7 @@ 
], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/basics/0_how_to_work_with_onnx.ipynb b/notebooks/basics/0_how_to_work_with_onnx.ipynb index b6a5a04815..ed36aa1095 100644 --- a/notebooks/basics/0_how_to_work_with_onnx.ipynb +++ b/notebooks/basics/0_how_to_work_with_onnx.ipynb @@ -599,7 +599,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/basics/1_brevitas_network_import.ipynb b/notebooks/basics/1_brevitas_network_import.ipynb index 5fb29754dc..a884e90d75 100644 --- a/notebooks/basics/1_brevitas_network_import.ipynb +++ b/notebooks/basics/1_brevitas_network_import.ipynb @@ -297,7 +297,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 28155d6f3e..c46ae25322 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -643,7 +643,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index c4fc92b97c..851b8998d7 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -1069,7 +1069,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb index 813127197e..c925dab026 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb @@ -61,7 +61,7 @@ "fc = get_test_model_trained(\"TFC\", 1, 1)\n", "raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n", "input_tensor = onnx.load_tensor_from_string(raw_i)\n", - "input_brevitas = torch.from_numpy(nph.to_array(input_tensor)).float()\n", + "input_brevitas = torch.from_numpy(nph.to_array(input_tensor).copy()).float()\n", "output_golden = fc.forward(input_brevitas).detach().numpy()\n", "output_golden" ] @@ -383,7 +383,15 @@ "\n", "child_model = ModelWrapper(build_dir + \"/tfc_w1_a1_dataflow_child.onnx\")\n", "child_model = child_model.transform(InsertDWC())\n", - "child_model = child_model.transform(InsertFIFO())\n", + "\n", + "# set all impl_styles of the DWCs to hls to enable emulation\n", + "dwc_nodes = child_model.get_nodes_by_op_type(\"StreamingDataWidthConverter_Batch\")\n", + "for dwc in dwc_nodes:\n", + " dwc_inst = getCustomOp(dwc)\n", + " dwc_inst.set_nodeattr(\"impl_style\", \"hls\")\n", + " \n", + "child_model = child_model.transform(InsertFIFO(create_shallow_fifos=True))\n", + "child_model.save(build_dir + \"/test.onnx\");\n", "child_model = child_model.transform(GiveUniqueNodeNames())\n", "child_model = child_model.transform(PrepareIP(test_fpga_part, target_clk_ns))\n", "child_model = 
child_model.transform(HLSSynthIP())\n", @@ -431,7 +439,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 5625a6f1c2..3d77586258 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -741,7 +741,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb index 370312c77e..e4848a1f40 100644 --- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -381,7 +381,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index 33adb68dc8..a18cafd604 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -624,7 +624,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, From c1233860b952a44f71fdecaa4d343362f15440db Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 7 Feb 2023 18:16:21 +0000 Subject: [PATCH 360/628] [Notebooks] Change board execution section in tfc notebook --- .../bnn-pynq/tfc_end2end_example.ipynb | 191 +++++------------- 1 file changed, 54 insertions(+), 137 deletions(-) diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index 851b8998d7..e6fbc7f13a 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -798,23 +798,21 @@ "source": [ "## 4. PYNQ deployment \n", "\n", - "* [Deployment and Remote Execution](#deploy)\n", + "* [Deployment](#deploy)\n", "* [Validation on PYNQ Board](#validation)\n", "* [Throughput Test on PYNQ Board](#throughput)\n", "\n", "\n", - "We are almost done preparing our hardware design. We'll now put it in a form suitable for use as a PYNQ overlay, synthesize and deploy it." + "The bitfile and generated driver will be copied together with some necessary files for execution into a deployment folder which then can be used to run the network on the PYNQ board." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Deployment and Remote Execution \n", + "### Deployment \n", "\n", - "We'll now use the `DeployToPYNQ` transformation to create a deployment folder with the bitfile and driver file(s), and copy that to the PYNQ board. 
You can change the default IP address, username, password and target folder for the PYNQ below.\n", - "\n", - "**Make sure you've [set up the SSH keys for your PYNQ board](https://finn-dev.readthedocs.io/en/latest/getting_started.html#pynq-board-first-time-setup) before executing this step.**" + "We'll now create a deployment folder with the bitfile and driver file(s), we zip it and afterwards it can be copied to the PYNQ board for execution and validation." ] }, { @@ -823,74 +821,33 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", + "from shutil import copy\n", + "from distutils.dir_util import copy_tree\n", "\n", - "# set up the following values according to your own environment\n", - "# FINN will use ssh to deploy and run the generated accelerator\n", - "ip = \"192.168.2.99\"\n", - "username = os.getenv(\"PYNQ_USERNAME\", \"xilinx\")\n", - "password = os.getenv(\"PYNQ_PASSWORD\", \"xilinx\")\n", - "port = os.getenv(\"PYNQ_PORT\", 22)\n", - "target_dir = os.getenv(\"PYNQ_TARGET_DIR\", \"/home/xilinx/finn_tfc_end2end_example\")\n", - "# set up ssh options to only allow publickey authentication\n", - "options = \"-o PreferredAuthentications=publickey -o PasswordAuthentication=no\"\n", + "# create directory for deployment files\n", + "deployment_dir = make_build_dir(prefix=\"pynq_deployment_\")\n", + "model.set_metadata_prop(\"pynq_deployment_dir\", deployment_dir)\n", "\n", - "# test access to PYNQ board\n", - "! ssh {options} {username}@{ip} -p {port} cat /var/run/motd.dynamic" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ\n", + "# get and copy necessary files\n", + "# .bit and .hwh file\n", + "bitfile = model.get_metadata_prop(\"bitfile\")\n", + "hwh_file = model.get_metadata_prop(\"hw_handoff\")\n", + "deploy_files = [bitfile, hwh_file]\n", "\n", - "model = model.transform(DeployToPYNQ(ip, port, username, password, target_dir))\n", - "model.save(build_dir + \"/tfc_w1_a1_pynq_deploy.onnx\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's verify that the remote access credentials is saved in the model metadata, and that the deployment folder has been successfully copied to the board:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.model.metadata_props" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "target_dir_pynq = target_dir + \"/\" + model.get_metadata_prop(\"pynq_deployment_dir\").split(\"/\")[-1]\n", - "target_dir_pynq" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! ssh {options} {username}@{ip} -p {port} 'ls -l {target_dir_pynq}'" + "for dfile in deploy_files:\n", + " if dfile is not None:\n", + " copy(dfile, deployment_dir)\n", + "\n", + "# driver.py and python libraries\n", + "pynq_driver_dir = model.get_metadata_prop(\"pynq_driver_dir\")\n", + "copy_tree(pynq_driver_dir, deployment_dir)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We only have two more steps to be able to remotely execute the deployed bitfile with some test data from the MNIST dataset. Let's load up some test data that comes bundled with FINN." + "Next to these files, we will also need an example numpy array to test the network on the PYNQ board. 
You may recall that one \"reshape\" node was left out of the StreamingDataflowPartition. We'll do that manually with a numpy function call when passing in the input, but everything else in the network ended up inside the StreamingDataflowPartition so that's all we need to do. The example numpy array can then be saved as .npy file. " ] }, { @@ -918,14 +875,17 @@ "iname = model.graph.input[0].name\n", "oname = parent_model.graph.output[0].name\n", "ishape = model.get_tensor_shape(iname)\n", - "print(\"Expected network input shape is \" + str(ishape))" + "print(\"Expected network input shape is \" + str(ishape))\n", + "np.save(deployment_dir + \"/input.npy\", x.reshape(ishape))" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "Finally, we can call `execute_onnx` on the graph, which will internally call remote execution with the bitfile, grab the results and return a numpy array. You may recall that one \"reshape\" node was left out of the StreamingDataflowPartition. We'll do that manually with a numpy function call when passing in the input, but everything else in the network ended up inside the StreamingDataflowPartition so that's all we need to do." + "! ls {deployment_dir}" ] }, { @@ -934,27 +894,34 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "from finn.core.onnx_exec import execute_onnx\n", - "\n", - "input_dict = {iname: x.reshape(ishape)}\n", - "ret = execute_onnx(model, input_dict)" + "from shutil import make_archive\n", + "make_archive('deploy-on-pynq-tfc', 'zip', deployment_dir)" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can now download the created zipfile (**File -> Open**, mark the checkbox next to the `deploy-on-pynq-tfc.zip` and select Download from the toolbar), then copy it to your PYNQ board (for instance via `scp` or `rsync`). Then, run the following commands **on the PYNQ board** to extract the archive and run the execution:" + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "ret[oname]" + "```shell\n", + "unzip deploy-on-pynq-tfc.zip -d finn-tfc-demo\n", + "cd finn-tfc-demo\n", + "sudo python3.6 -m pip install bitstring\n", + "sudo python3.6 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We see that the network correctly predicts this as a digit 2." + "The output will be saved on the PYNQ board as `output.npy` and can be copied to the host and opened with `np.load()`." ] }, { @@ -963,45 +930,25 @@ "source": [ "### Validating the Accuracy on a PYNQ Board \n", "\n", - "All the command line prompts here are meant to be executed with `sudo` on the PYNQ board, so we'll use a workaround (`echo password | sudo -S command`) to get that working from this notebook running on the host computer.\n", - "\n", "**Ensure that your PYNQ board has a working internet connecting for the next steps, since there is some downloading involved.**\n", "\n", "To validate the accuracy, we first need to install the [`dataset-loading`](https://github.com/fbcotter/dataset_loading) Python package to the PYNQ board. 
This will give us a convenient way of downloading and accessing the MNIST dataset.\n", "\n", "\n", - "Command to execute on PYNQ:\n", + "Command to execute on PYNQ board:\n", "\n", "```sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading```" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! ssh {options} -t {username}@{ip} -p {port} 'echo {password} | sudo -S pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading'" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can now use the `validate.py` script that was generated together with the driver to measure top-1 accuracy on the MNIST dataset.\n", "\n", - "Command to execute on PYNQ:\n", + "Command to execute on PYNQ board:\n", "\n", - "`python3.6 validate.py --dataset mnist --batchsize 1000`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! ssh {options} -t {username}@{ip} -p {port} 'cd {target_dir_pynq}; echo {password} | sudo -S python3.6 validate.py --dataset mnist --batchsize 1000'" + "`sudo python3.6 validate.py --dataset mnist --batchsize 1000`" ] }, { @@ -1016,54 +963,24 @@ "metadata": {}, "source": [ "### Throughput Test on PYNQ Board \n", - "In addition to the functional verification, FINN also offers the possibility to measure the network performance directly on the PYNQ board. This can be done using the core function `throughput_test`. In the next section we import the function and execute it.\n", - "First we extract the `remote_exec_model` again and pass it to the function. The function returns the metrics of the network as dictionary. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from finn.core.throughput_test import throughput_test_remote\n", - "\n", - "model = ModelWrapper(build_dir + \"/tfc_w1_a1_pynq_deploy.onnx\")\n", - "res = throughput_test_remote(model, 10000)\n", - "print(\"Network metrics:\")\n", - "for key in res:\n", - " print(str(key) + \": \" + str(res[key]))" + "In addition to the functional verification, FINN also offers the possibility to measure the network performance directly on the PYNQ board. This can be done setting the `exec_mode` to `throughput_test`. \n", + "Command to execute on PYNQ board:" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Together with the values for folding we can evaluate the performance of our accelerator. Each layer has a total folding factor of 64 and because the network is fully pipelined, it follows: `II = 64`. II is the initiation interval and indicates how many cycles are needed for one input to be processed. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "II = 64\n", - "# frequency in MHz\n", - "f_MHz = 100\n", - "# expected throughput in MFPS\n", - "expected_throughput = f_MHz / II\n", - "# measured throughput (FPS) from throughput test, converted to MFPS\n", - "measured_throughput = res[\"throughput[images/s]\"] * 0.000001\n", - "# peformance\n", - "print(\"We reach approximately \" + str(round((measured_throughput / expected_throughput)*100)) + \"% of the ideal performance.\")" + "```shell\n", + "sudo python3.6 driver.py --exec_mode=throughput_test --batchsize=1000 --bitfile=resizer.bit\n", + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The measured values were recorded with a batch size of 10000 and at a frequency of 100 MHz. We will be improving the efficiency of the generated accelerator examples in the coming FINN releases." + "The network metrics from the throughput test are saved in a file called `nw_metrics.txt` on the PYNQ board. Which can be investigated after running the command above." ] } ], From 427c224394d57fbb14bf77ded2c07fe16ebb7c70 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 7 Feb 2023 19:36:39 +0000 Subject: [PATCH 361/628] [Notebooks] Change board execution section in cnv notebook --- .../bnn-pynq/cnv_end2end_example.ipynb | 131 +++++++----------- 1 file changed, 51 insertions(+), 80 deletions(-) diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index c46ae25322..32f1c13030 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -462,11 +462,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 5. Deployment and Remote Execution\n", + "## 5. Deployment and Execution\n", "\n", - "Now that we're done with the hardware generation, we can copy the necessary files onto our PYNQ board.\n", - "\n", - "**Make sure you've [set up the SSH keys for your PYNQ board](https://finn-dev.readthedocs.io/en/latest/getting_started.html#pynq-board-first-time-setup) before executing this step.**" + "The bitfile and generated driver files(s) will be copied into a deployment folder which then can be used to run the network on the PYNQ board." 
] }, { @@ -475,33 +473,33 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", + "from shutil import copy\n", + "from distutils.dir_util import copy_tree\n", + "\n", + "# create directory for deployment files\n", + "deployment_dir = make_build_dir(prefix=\"pynq_deployment_\")\n", + "model.set_metadata_prop(\"pynq_deployment_dir\", deployment_dir)\n", "\n", - "# set up the following values according to your own environment\n", - "# FINN will use ssh to deploy and run the generated accelerator\n", - "ip = \"192.168.2.99\"\n", - "username = os.getenv(\"PYNQ_USERNAME\", \"xilinx\")\n", - "password = os.getenv(\"PYNQ_PASSWORD\", \"xilinx\")\n", - "port = os.getenv(\"PYNQ_PORT\", 22)\n", - "target_dir = os.getenv(\"PYNQ_TARGET_DIR\", \"/home/xilinx/finn_cnv_end2end_example\")\n", - "# set up ssh options to only allow publickey authentication\n", - "options = \"-o PreferredAuthentications=publickey -o PasswordAuthentication=no\"\n", + "# get and copy necessary files\n", + "# .bit and .hwh file\n", + "bitfile = model.get_metadata_prop(\"bitfile\")\n", + "hwh_file = model.get_metadata_prop(\"hw_handoff\")\n", + "deploy_files = [bitfile, hwh_file]\n", "\n", - "# test access to PYNQ board\n", - "! ssh {options} {username}@{ip} -p {port} cat /var/run/motd.dynamic" + "for dfile in deploy_files:\n", + " if dfile is not None:\n", + " copy(dfile, deployment_dir)\n", + "\n", + "# driver.py and python libraries\n", + "pynq_driver_dir = model.get_metadata_prop(\"pynq_driver_dir\")\n", + "copy_tree(pynq_driver_dir, deployment_dir)" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ\n", - "\n", - "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_synth.onnx\")\n", - "model = model.transform(DeployToPYNQ(ip, port, username, password, target_dir))\n", - "model.save(build_dir + \"/end2end_cnv_w1a1_pynq_deploy.onnx\")" + "Next to these files, we will also need an example numpy array to test the network on the PYNQ board. (*and before you ask, that's supposed to be a cat (CIFAR-10 class number 3)*) Recall that we partitioned our original network into a parent graph that contained the non-synthesizable nodes and a child graph that contained the bulk of the network, which we turned into a bitfile. The only operator left outside the FPGA partition was a `Transpose` to convert NCHW images into NHWC ones. Thus, we can skip the execution in the parent as long as we ensure our image has the expected data layout. The example numpy array can then be saved as .npy file." ] }, { @@ -510,8 +508,14 @@ "metadata": {}, "outputs": [], "source": [ - "target_dir_pynq = target_dir + \"/\" + model.get_metadata_prop(\"pynq_deployment_dir\").split(\"/\")[-1]\n", - "target_dir_pynq" + "import pkg_resources as pk\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "fn = pk.resource_filename(\"finn.qnn-data\", \"cifar10/cifar10-test-data-class3.npz\")\n", + "x = np.load(fn)[\"arr_0\"]\n", + "x = x.reshape(3, 32,32).transpose(1, 2, 0)\n", + "plt.imshow(x)" ] }, { @@ -520,14 +524,19 @@ "metadata": {}, "outputs": [], "source": [ - "! 
ssh {options} {username}@{ip} -p {port} 'ls -l {target_dir_pynq}'" + "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_pynq_deploy.onnx\")\n", + "iname = model.graph.input[0].name\n", + "ishape = model.get_tensor_shape(iname)\n", + "np.save(deployment_dir + \"/input.npy\", x.reshape(ishape))" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "We only have two more steps to be able to remotely execute the deployed bitfile with some test data from the CIFAR-10 dataset. Let's load up some test data that comes bundled with FINN -- *and before you ask, that's supposed to be a cat (CIFAR-10 class number 3)*." + "! ls {deployment_dir}" ] }, { @@ -536,54 +545,34 @@ "metadata": {}, "outputs": [], "source": [ - "import pkg_resources as pk\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "fn = pk.resource_filename(\"finn.qnn-data\", \"cifar10/cifar10-test-data-class3.npz\")\n", - "x = np.load(fn)[\"arr_0\"]\n", - "x = x.reshape(3, 32,32).transpose(1, 2, 0)\n", - "plt.imshow(x)" + "from shutil import make_archive\n", + "make_archive('deploy-on-pynq-cnv', 'zip', deployment_dir)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Recall that we partitioned our original network into a parent graph that contained the non-synthesizable nodes and a child graph that contained the bulk of the network, which we turned into a bitfile. The only operator left outside the FPGA partition was a `Transpose` to convert NCHW images into NHWC ones. Thus, we can skip the execution in the parent as long as we ensure our image has the expected data layout, which we have done above." + "You can now download the created zipfile (File -> Open, mark the checkbox next to the deploy-on-pynq-tfc.zip and select Download from the toolbar), then copy it to your PYNQ board (for instance via scp or rsync). Then, run the following commands on the PYNQ board to extract the archive and run the execution:" ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from finn.core.onnx_exec import execute_onnx\n", - "\n", - "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_pynq_deploy.onnx\")\n", - "iname = model.graph.input[0].name\n", - "oname = model.graph.output[0].name\n", - "ishape = model.get_tensor_shape(iname)\n", - "input_dict = {iname: x.astype(np.float32).reshape(ishape)}\n", - "ret = execute_onnx(model, input_dict, True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "ret[oname]" + "```shell\n", + "unzip deploy-on-pynq-cnv.zip -d finn-cnv-demo\n", + "cd finn-cnv-demo\n", + "sudo python3.6 -m pip install bitstring\n", + "sudo python3.6 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We see that the network correctly predicts this as a class 3 (\"cat\"). " + "The output will be saved on the PYNQ board as `output.npy` and can be copied to the host and opened with `np.load()`." 
] }, { @@ -592,7 +581,7 @@ "source": [ "### Validating the Accuracy on a PYNQ Board \n", "\n", - "All the command line prompts here are meant to be executed with `sudo` on the PYNQ board, so we'll use a workaround (`echo password | sudo -S command`) to get that working from this notebook running on the host computer.\n", + "All the command line prompts here are meant to be executed with `sudo` on the PYNQ board.\n", "\n", "**Ensure that your PYNQ board has a working internet connecting for the next steps, since some there is some downloading involved.**\n", "\n", @@ -601,16 +590,7 @@ "\n", "Command to execute on PYNQ:\n", "\n", - "```pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! ssh {options} -t {username}@{ip} -p {port} 'echo {password} | sudo -S pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading'" + "```sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading```" ] }, { @@ -624,15 +604,6 @@ "`python3.6 validate.py --dataset cifar10 --batchsize 1000`" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "! ssh {options} -t {username}@{ip} -p {port} 'cd {target_dir_pynq}; echo {password} | sudo -S python3.6 validate.py --dataset cifar10 --batchsize 1000'" - ] - }, { "cell_type": "markdown", "metadata": {}, From c24cd1850ad1f7dfc1b09bb2ab10c2c92acc40cd Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 8 Feb 2023 09:03:30 +0000 Subject: [PATCH 362/628] [Notebooks] Update text in advanced and basics nbs --- notebooks/advanced/2_custom_op.ipynb | 8 ++++---- notebooks/basics/0_how_to_work_with_onnx.ipynb | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/notebooks/advanced/2_custom_op.ipynb b/notebooks/advanced/2_custom_op.ipynb index 1ef60fd11c..636da64dd5 100644 --- a/notebooks/advanced/2_custom_op.ipynb +++ b/notebooks/advanced/2_custom_op.ipynb @@ -8,14 +8,14 @@ "\n", "Suppose that you want to introduce a new (custom) operation type into the FINN compiler. Custom operations in FINN are useful for a variety of things ranging from code generation to functional verification. This is achieved by creating a new Python module for your custom operation that fulfills certain interface specifications.\n", "\n", - "One thing to point out before we start is that **these custom operations are generic** and not really tied to e.g. Vivado HLS or few-bit quantization. As you will see in this notebook, it's possible to provide arbitrary Python/C/C++/... execution and code generation paths for custom nodes.\n", + "One thing to point out before we start is that **these custom operations are generic** and not really tied to e.g. Vitis HLS or few-bit quantization. As you will see in this notebook, it's possible to provide arbitrary Python/C/C++/... execution and code generation paths for custom nodes.\n", "\n", "## The CustomOp base class\n", "\n", "Subclasses of `CustomOp` provide a way of providing custom functionality for ONNX nodes in FINN.\n", "This is the base class for every custom op node used in the framework, so you must create subclasses of `CustomOp` to provide execution, code generation and other functionalities in FINN.\n", "\n", - "Let's start by looking at the `CustomOp` base class itself, which lives in the `finn-base` repository. 
You can view it [here](https://github.com/Xilinx/finn-base/blob/dev/src/finn/custom_op/base.py). Note that the `finn` Docker container already has `finn-base` set up as a dependency.\n", + "Let's start by looking at the `CustomOp` base class itself, which lives in the `qonnx` repository. You can view it [here](https://github.com/fastmachinelearning/qonnx/blob/main/src/qonnx/custom_op/base.py). Note that the `finn` Docker container already has `qonnx` set up as a dependency.\n", "\n", "Some points of importance:\n", "\n", @@ -23,7 +23,7 @@ "\n", "2. `CustomOp` subclasses need to implement the methods below (those not starting with underscore).\n", "\n", - "3. To be discoverable in the custom op register, `CustomOp` subclasses must set the `domain` field to the name of the Python module they appear in. For instance, to use the custom `Im2Col` op type from [here](https://github.com/Xilinx/finn-base/blob/dev/src/finn/custom_op/general/im2col.py), the ONNX node must use `domain=qonnx.custom_op.general` since its module is located at `finn/custom_op/general/im2col.py`." + "3. To be discoverable in the custom op register, `CustomOp` subclasses must set the `domain` field to the name of the Python module they appear in. For instance, to use the custom `Im2Col` op type from [here](https://github.com/fastmachinelearning/qonnx/blob/main/src/qonnx/custom_op/general/im2col.py), the ONNX node must use `domain=qonnx.custom_op.general` since its module is located at `qonnx/custom_op/general/im2col.py`." ] }, { @@ -130,7 +130,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To make sure our custom op is available, it needs to be registered. The best practice for this is to create a submodule under `finn.custom_op` which includes a `custom_op` dictionary that maps strings (op names) to classes (op implementations). Since we're in a Jupyter notebook we'll just hijack it at runtime like this:" + "To make sure our custom op is available, it needs to be registered. The best practice for this is to create a submodule under `qonnx.custom_op` which includes a `custom_op` dictionary that maps strings (op names) to classes (op implementations). Since we're in a Jupyter notebook we'll just hijack it at runtime like this:" ] }, { diff --git a/notebooks/basics/0_how_to_work_with_onnx.ipynb b/notebooks/basics/0_how_to_work_with_onnx.ipynb index ed36aa1095..35a83ea97b 100644 --- a/notebooks/basics/0_how_to_work_with_onnx.ipynb +++ b/notebooks/basics/0_how_to_work_with_onnx.ipynb @@ -24,7 +24,7 @@ "source": [ "### How to create a simple ONNX model\n", "\n", - "To explain how to create an ONNX model a simple example with mathematical operations is used. All nodes are from the [standard operations library of ONNX](https://github.com/onnx/onnx/blob/master/docs/Operators.md).\n", + "To explain how to create an ONNX model a simple example with mathematical operations is used. All nodes are from the [standard operations library of ONNX](https://github.com/onnx/onnx/blob/main/docs/Operators.md).\n", "\n", "First ONNX is imported, then the helper function can be used to make a node." ] @@ -305,7 +305,7 @@ "source": [ "### How to manipulate an ONNX model\n", "\n", - "In the model there are two successive adder nodes. An adder node in ONNX can only add two inputs, but there is also the [**sum**](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Sum) node, which can process more than two inputs. So it would be a reasonable change of the graph to combine the two successive adder nodes to one sum node." 
+ "In the model there are two successive adder nodes. An adder node in ONNX can only add two inputs, but there is also the [**sum**](https://github.com/onnx/onnx/blob/main/docs/Operators.md#Sum) node, which can process more than two inputs. So it would be a reasonable change of the graph to combine the two successive adder nodes to one sum node." ] }, { From 0fdcc10e865d77b39408a357a41c5c9dce9a93ab Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 8 Feb 2023 12:47:12 +0000 Subject: [PATCH 363/628] [Notebooks] Update end2end notebooks --- fetch-repos.sh | 2 +- .../bnn-pynq/cnv_end2end_example.ipynb | 16 ++++++---- .../bnn-pynq/tfc_end2end_example.ipynb | 30 +++++++++++-------- .../bnn-pynq/tfc_end2end_verification.ipynb | 2 +- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 7078b284a9..5b060f5bc8 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="ce321742d98f23909a890ed680a9c99640d7aaab" +QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 32f1c13030..8ea6a35009 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -46,7 +46,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into 5 sections represented by a different color, each of it includes several flow steps. The flow starts in top left corner with Brevitas export (green section), followed by the preparation of the network (blue section) for the Vivado HLS synthesis and Vivado IPI stitching (orange section), and finally building a PYNQ overlay bitfile and testing it on a PYNQ board (yellow section).\n", + "The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into 5 sections represented by a different color, each of it includes several flow steps. The flow starts in top left corner with Brevitas export (green section), followed by the preparation of the network (blue section) for the Vitis HLS synthesis and Vivado IPI stitching (orange section), and finally building a PYNQ overlay bitfile and testing it on a PYNQ board (yellow section).\n", "There is an additional section for functional verification (red section) on the left side of the diagram, which we will not cover in this notebook. For details please take a look in the verification notebook which you can find [here](tfc_end2end_verification.ipynb)\n", "\n", "\n", @@ -199,7 +199,7 @@ "\n", "![](cnv-mp-fc.png)\n", "\n", - "Note how the convolution layer looks very similar to the fully connected one in terms of the matrix-vector-threshold unit (MVTU), but now the MVTU is preceded by a sliding window unit that produces the matrix from the input image. 
All of these building blocks, including the `MaxPool` layer you see in this figure, exist as templated Vivado HLS C++ functions in [finn-hlslib](https://github.com/Xilinx/finn-hlslib).\n", + "Note how the convolution layer looks very similar to the fully connected one in terms of the matrix-vector-threshold unit (MVTU), but now the MVTU is preceded by a sliding window unit that produces the matrix from the input image. All of these building blocks, including the `MaxPool` layer you see in this figure, exist as templated Vitis HLS C++ functions in [finn-hlslib](https://github.com/Xilinx/finn-hlslib).\n", "\n", "\n", "To target this kind of hardware architecture with our network we'll apply a convolution lowering transformation, in addition to streamlining. You may recall the *streamlining transformation* that we applied to the TFC-w1a1 network, which is a series of mathematical simplifications that allow us to get rid of floating point scaling operations by implementing few-bit activations as thresholding operations. \n", @@ -563,8 +563,8 @@ "```shell\n", "unzip deploy-on-pynq-cnv.zip -d finn-cnv-demo\n", "cd finn-cnv-demo\n", - "sudo python3.6 -m pip install bitstring\n", - "sudo python3.6 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", + "sudo python3 -m pip install bitstring\n", + "sudo python3 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", "```" ] }, @@ -590,7 +590,9 @@ "\n", "Command to execute on PYNQ:\n", "\n", - "```sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading```" + "```shell\n", + "sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading\n", + "```" ] }, { @@ -601,7 +603,9 @@ "\n", "Command to execute on PYNQ:\n", "\n", - "`python3.6 validate.py --dataset cifar10 --batchsize 1000`" + "```shell\n", + "sudo python3 validate.py --dataset cifar10 --batchsize 1000\n", + "```" ] }, { diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index e6fbc7f13a..7e9980cf2a 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -33,7 +33,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into 5 sections represented by a different color, each of it includes several flow steps. The flow starts in top left corner with Brevitas export (green section), followed by the preparation of the network (blue section) for the Vivado HLS synthesis and Vivado IPI stitching (orange section), and finally building a PYNQ overlay bitfile and testing it on a PYNQ board (yellow section).\n", + "The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into 5 sections represented by a different color, each of it includes several flow steps. 
The flow starts in top left corner with Brevitas export (green section), followed by the preparation of the network (blue section) for the Vitis HLS synthesis and Vivado IPI stitching (orange section), and finally building a PYNQ overlay bitfile and testing it on a PYNQ board (yellow section).\n", "There is an additional section for functional verification (red section) on the right side of the diagram, which we will not cover in this notebook. For details please take a look in the verification notebook which you can find [here](tfc_end2end_verification.ipynb)\n", "\n", "\n", @@ -161,7 +161,7 @@ "\n", "![](finn-hw-arch.png)\n", "\n", - "In practice, the compute arrays are instantiated by function calls to optimized Vivado HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library. As these function calls can only handle certain patterns/cases, we need to transform the network into an appropriate form so that we can replace network layers with these function calls, which is the goal of the network preparation process." + "In practice, the compute arrays are instantiated by function calls to optimized Vitis HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library. As these function calls can only handle certain patterns/cases, we need to transform the network into an appropriate form so that we can replace network layers with these function calls, which is the goal of the network preparation process." ] }, { @@ -248,7 +248,7 @@ "\n", "In FINN, we can bake some of these pre/postprocessing operatings into the graph, and in some cases these can be highly beneficial for performance by allowing our accelerator to directly consume raw data instead of going through CPU preprocessing. \n", "\n", - "We'll demonstrate this for our small image classification network as follows. Brevitas preprocesses BNN-PYNQ network inputs with `torchvision.transforms.ToTensor()` [prior to training](https://github.com/Xilinx/brevitas/blob/master/src/brevitas_examples/bnn_pynq/trainer.py#L104), which converts 8-bit RGB values into floats between 0 and 1 by dividing the input by 255. We can achieve the same effect in FINN by exporting a single-node ONNX graph for division by 255 (which already exists as `finn.util.pytorch.ToTensor` and merging this with our original model. Finally, we're going to mark our input tensor as 8-bit to let FINN know which level of precision to use." + "We'll demonstrate this for our small image classification network as follows. Brevitas preprocesses BNN-PYNQ network inputs with `torchvision.transforms.ToTensor()` [prior to training](https://github.com/Xilinx/brevitas/blob/master/src/brevitas_examples/bnn_pynq/trainer.py#L86), which converts 8-bit RGB values into floats between 0 and 1 by dividing the input by 255. We can achieve the same effect in FINN by exporting a single-node ONNX graph for division by 255 (which already exists as `finn.util.pytorch.ToTensor` and merging this with our original model. Finally, we're going to mark our input tensor as 8-bit to let FINN know which level of precision to use." ] }, { @@ -343,7 +343,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As can be seen, several transformations are involved in the streamlining transformation. There are move and collapse transformations. In the last step the operations are transformed into multithresholds. The involved transformations can be viewed in detail [here](https://github.com/Xilinx/finn/tree/master/src/finn/transformation/streamline). 
After each transformation, three of the tidy-up transformations (`GiveUniqueNodeNames`, `GiveReadableTensorNames` and `InferDataTypes`) are applied to the model.\n", + "As can be seen, several transformations are involved in the streamlining transformation. There are move and collapse transformations. In the last step the operations are transformed into multithresholds. The involved transformations can be viewed in detail [here](https://github.com/Xilinx/finn/tree/main/src/finn/transformation/streamline). After each transformation, three of the tidy-up transformations (`GiveUniqueNodeNames`, `GiveReadableTensorNames` and `InferDataTypes`) are applied to the model.\n", "\n", "After streamlining the network looks as follows:" ] @@ -525,7 +525,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can use the higher-level [HLSCustomOp](https://github.com/Xilinx/finn/blob/main/src/finn/custom_op/fpgadataflow/__init__.py) wrappers for this node. These wrappers provide easy access to specific properties of these nodes, such as the folding factors (PE and SIMD). Let's have a look at which node attributes are defined by the CustomOp wrapper, and adjust the SIMD and PE attributes." + "We can use the higher-level [HLSCustomOp](https://github.com/Xilinx/finn/blob/main/src/finn/custom_op/fpgadataflow/hlscustomop.py) wrappers for this node. These wrappers provide easy access to specific properties of these nodes, such as the folding factors (PE and SIMD). Let's have a look at which node attributes are defined by the CustomOp wrapper, and adjust the SIMD and PE attributes." ] }, { @@ -547,7 +547,7 @@ "metadata": {}, "source": [ "We can see that the PE and SIMD are listed as node attributes, as well as the depths of the FIFOs that will be inserted between consecutive layers, and all can be adjusted using `set_nodeattr` subject to certain constraints. There are also a lot of additional attributes that can be set for this node type.\n", - "**In this notebook we are setting the folding factors and FIFO depths manually, but in a future version we will support determining the folding factors given an FPGA resource budget according to the analytical model from the [FINN-R paper](https://arxiv.org/pdf/1809.04570).**" + "**In this notebook we are setting the folding factors and FIFO depths manually but it is possible to use FINN transformations for this ([SetFolding](https://github.com/Xilinx/finn/blob/main/src/finn/transformation/fpgadataflow/set_folding.py) and [InsertAndSetFIFODepths](https://github.com/Xilinx/finn/blob/main/src/finn/transformation/fpgadataflow/set_fifo_depths.py)).**" ] }, { @@ -609,7 +609,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This completes the network preparation and the network can be passed on to the next block *Vivado HLS and IPI*, which is described below." + "This completes the network preparation and the network can be passed on to the next block *Vitis HLS and IPI*, which is described below." 
] }, { @@ -871,6 +871,8 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", + "\n", "model = ModelWrapper(build_dir + \"/tfc_w1_a1_pynq_deploy.onnx\")\n", "iname = model.graph.input[0].name\n", "oname = parent_model.graph.output[0].name\n", @@ -912,8 +914,8 @@ "```shell\n", "unzip deploy-on-pynq-tfc.zip -d finn-tfc-demo\n", "cd finn-tfc-demo\n", - "sudo python3.6 -m pip install bitstring\n", - "sudo python3.6 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", + "sudo python3 -m pip install bitstring\n", + "sudo python3 driver.py --exec_mode=execute --batchsize=1 --bitfile=resizer.bit --inputfile=input.npy\n", "```" ] }, @@ -937,7 +939,9 @@ "\n", "Command to execute on PYNQ board:\n", "\n", - "```sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading```" + "```shell\n", + "sudo pip3 install git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading\n", + "```" ] }, { @@ -948,7 +952,9 @@ "\n", "Command to execute on PYNQ board:\n", "\n", - "`sudo python3.6 validate.py --dataset mnist --batchsize 1000`" + "```shell\n", + "sudo python3 validate.py --dataset mnist --batchsize 1000\n", + "```" ] }, { @@ -972,7 +978,7 @@ "metadata": {}, "source": [ "```shell\n", - "sudo python3.6 driver.py --exec_mode=throughput_test --batchsize=1000 --bitfile=resizer.bit\n", + "sudo python3 driver.py --exec_mode=throughput_test --batchsize=1000 --bitfile=resizer.bit\n", "```" ] }, diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb index c925dab026..6c3b796509 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb @@ -72,7 +72,7 @@ "source": [ "## Simulation using Python \n", "\n", - "If an ONNX model consists of [standard ONNX](https://github.com/onnx/onnx/blob/master/docs/Operators.md) nodes and/or FINN custom operations that do not belong to the fpgadataflow (`backend` $\\neq$ `fpgadataflow`) this model can be checked for functionality using Python.\n", + "If an ONNX model consists of [standard ONNX](https://github.com/onnx/onnx/blob/main/docs/Operators.md) nodes and/or FINN custom operations that do not belong to the fpgadataflow (`backend` $\\neq$ `fpgadataflow`) this model can be checked for functionality using Python.\n", "\n", "To simulate a standard ONNX node [onnxruntime](https://github.com/microsoft/onnxruntime) is used. onnxruntime is an open source tool developed by Microsoft to run standard ONNX nodes. For the FINN custom op nodes execution, functions are defined. The following is an example of the execution function of a XNOR popcount node.\n" ] From 72d555ee1dbd99fc623233da85431a823b36c945 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 08:35:52 +0000 Subject: [PATCH 364/628] [Tutorial] Update README in tutorial --- tutorials/fpga_flow/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorials/fpga_flow/README.md b/tutorials/fpga_flow/README.md index 63ca6ac832..2aaad0423b 100644 --- a/tutorials/fpga_flow/README.md +++ b/tutorials/fpga_flow/README.md @@ -4,7 +4,7 @@ This example demonstrates how to bring a FINN compiled model into the Vivado FPG If you are new to the command-line flow, more information can be found [here](https://finn.readthedocs.io/en/latest/command_line.html). -This demo was created using Vivado 2020.1. 
+This demo was created using Vivado 2022.1. ## Compiling the Model in FINN @@ -26,7 +26,7 @@ Prior to running, insure the following prerequisites have been met: - Install FINN and prerequisites. The [Getting Started](https://finn.readthedocs.io/en/latest/getting_started.html#quickstart) section of the FINN documentation might be helpful for this. - Ensure you have the `FINN_XILINX_PATH` and `FINN_XILINX_VERSION` env variables set appropriately for your install. For example: > export FINN_XILINX_PATH=/opt/Xilinx -> export FINN_XILINX_VERSION=2020.1 +> export FINN_XILINX_VERSION=2022.1 - Set the env variable for your `finn` install top directory (where you cloned the FINN compiler repo): > export FINN_ROOT=/home/foo/finn @@ -112,7 +112,7 @@ testbench generators. There are any number of ways to bring the stitched IP into larger design. -FINN already packages the stitched IP block design as a standalone IP-XACT component, which you can find under `${FINN_ROOT}/tutorials/fpga_flow/output_tfc_w0a1_fpga/stitched_ip/ip`. You can add this to the list of IP repos and use it in your own Vivado designs. A good reference for this is [UG1119](https://www.xilinx.com/support/documentation/sw_manuals/xilinx2020_1/ug1119-vivado-creating-packaging-ip-tutorial.pdf) +FINN already packages the stitched IP block design as a standalone IP-XACT component, which you can find under `${FINN_ROOT}/tutorials/fpga_flow/output_tfc_w0a1_fpga/stitched_ip/ip`. You can add this to the list of IP repos and use it in your own Vivado designs. A good reference for this is [UG1119](https://www.xilinx.com/content/dam/xilinx/support/documents/sw_manuals/xilinx2022_1/ug1119-vivado-creating-packaging-ip-tutorial.pdf) Keep in mind that all of the User IP Repo's included in the Stitched IP project (from `$FINN_HOST_BUILD_DIR` which is normally located under `/tmp/finn_dev_`) need to also be brought in as IP Repo's to any project using the stitched IP. It would be prudent to copy those IP repos to an appropriate archive location. You should also set the `FINN_ROOT` environment variable to point to the compiler installation directory, as some of the build scripts will From 6f4f664464e23d79f9eb350758ff6899bf44cf80 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 09:02:02 +0000 Subject: [PATCH 365/628] [Docs] Update end2end flow description --- docs/finn/brevitas_export.rst | 2 +- docs/finn/end_to_end_flow.rst | 2 +- docs/finn/hw_build.rst | 11 +++-------- docs/finn/nw_prep.rst | 2 +- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/docs/finn/brevitas_export.rst b/docs/finn/brevitas_export.rst index 304aa30854..950b601f98 100644 --- a/docs/finn/brevitas_export.rst +++ b/docs/finn/brevitas_export.rst @@ -16,6 +16,6 @@ Two of the Brevitas-exported ONNX variants can be ingested by FINN: To work with either type of ONNX model, it is loaded into a :ref:`modelwrapper` provided by FINN. -At this stage we can already use the functional verification flow to simulate the model using Python, this is marked in the graphic with the dotted arrow. For more details please have look at :ref:`verification`. +At this stage we can already use the functional verification flow to simulate the model using Python. For more details please have look at :ref:`verification`. The model can now be further processed in FINN, the next flow step is :ref:`nw_prep`. 
diff --git a/docs/finn/end_to_end_flow.rst b/docs/finn/end_to_end_flow.rst index bc5c523071..0a022067c3 100644 --- a/docs/finn/end_to_end_flow.rst +++ b/docs/finn/end_to_end_flow.rst @@ -9,7 +9,7 @@ As you can see in the picture, FINN has a high modularity and has the property t :scale: 50% :align: center -The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into five sections, each of it includes several flow steps. The flow starts in top left corner with Brevitas export (green section), followed by the preparation of the network (blue section) for the Vivado HLS and Vivado IPI (orange section). There is also a section for testing and verification in software (red section) and the hardware generation and deployment on the PYNQ board (yellow section). +The white fields show the state of the network representation in the respective step. The colored fields represent the transformations that are applied to the network to achieve a certain result. The diagram is divided into five sections, each of it includes several flow steps. The flow starts in top left corner with Brevitas export, followed by the preparation of the network for the Vitis HLS and Vivado IPI. There is also a section for testing and verification in software (in the cloud on the right) and the hardware generation and deployment on the PYNQ board. This example flow is covered in the `end2end_example `_ Jupyter notebooks. For a more detailed overview about the different flow sections, please have a look at the corresponding pages: diff --git a/docs/finn/hw_build.rst b/docs/finn/hw_build.rst index 2a64b87943..a5c486935d 100644 --- a/docs/finn/hw_build.rst +++ b/docs/finn/hw_build.rst @@ -9,14 +9,14 @@ Hardware Build and Deployment :align: center A model where all layers have been converted to HLS layers can be processed by -FINN to build a bitfile and driver targeting a Zynq system or to generate a Vivado IP Integrator (IPI) +FINN to build a bitfile and driver targeting a Zynq or Alveo system or to generate a Vivado IP Integrator (IPI) design with AXI stream (FIFO) in-out interfaces, which can be integrated onto any Xilinx FPGA as part of a larger system. Hardware Build ============== -Internally, the hardware build for Zynq devices consists of the following steps: +Internally, the hardware build consists of the following steps: 1. Driver generation 2. DMA and DWC node insertion @@ -89,9 +89,4 @@ Deployment Deployment and Remote Execution ------------------------------- -The bitfile and the driver file(s) are copied to the PYNQ board and can be executed there using the *onnx_exec* function with the right *exec_mode* settings. For details please have a look at transformation :py:mod:`finn.transformation.fpgadataflow.make_deployment.DeployToPYNQ` and the execution function :py:mod:`finn.core.onnx_exec`. - -Throughput Test ---------------- - -FINN also offers the possibility to measure the network performance directly on the PYNQ board. This can be done by using :py:mod:`finn.core.throughput_test`. When running this function the metrics of the network are returned as dictionary. +The bitfile and the driver file(s) are copied to the PYNQ board and can be executed there. For more information see the description in the `end2end_example `_ Jupyter notebooks. 
diff --git a/docs/finn/nw_prep.rst b/docs/finn/nw_prep.rst index 566eda5bac..6fea992cf7 100644 --- a/docs/finn/nw_prep.rst +++ b/docs/finn/nw_prep.rst @@ -10,7 +10,7 @@ Network Preparation The main principle of FINN are analysis and transformation passes. If you like to have more information about these please have a look at section :ref:`analysis_pass` and :ref:`transformation_pass` or at chapter :ref:`tutorials` about the provided Jupyter notebooks. -This page is about the network preparation, the flow step that comes after the :ref:`brevitas_export`. Its main idea is to optimize the network and convert the nodes to custom nodes that correspond to `finn-hlslib `_ functions. In this way we get a network that we can bring to hardware with the help of Vivado. For that we have to apply several transformations on the ONNX model, which this flow step receives wrapped in the :ref:`modelwrapper`. +This page is about the network preparation, the flow step that comes after the :ref:`brevitas_export`. Its main idea is to optimize the network and convert the nodes to custom nodes that correspond to `finn-hlslib `_ functions. In this way we get a network that we can bring to hardware with the help of Vitis and Vivado. For that we have to apply several transformations on the ONNX model, which this flow step receives wrapped in the :ref:`modelwrapper`. Various transformations are involved in the network preparation. The following is a short overview of these. From 4dea37219ec188238d3008a95655c7a07ed19304 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 09:19:31 +0000 Subject: [PATCH 366/628] [Docs] Fix automatically generated code for FINN builder --- docs/finn/source_code/finn.builder.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/finn/source_code/finn.builder.rst b/docs/finn/source_code/finn.builder.rst index 2433cab83d..caadf3f91f 100644 --- a/docs/finn/source_code/finn.builder.rst +++ b/docs/finn/source_code/finn.builder.rst @@ -9,9 +9,9 @@ finn.builder.build\_dataflow ---------------------------- .. automodule:: finn.builder.build_dataflow - :members: - :undoc-members: - :show-inheritance: + :members: + :undoc-members: + :show-inheritance: finn.builder.build\_dataflow\_config ------------------------------------ @@ -26,6 +26,6 @@ finn.builder.build\_dataflow\_steps ------------------------------------ .. 
automodule:: finn.builder.build_dataflow_steps - :members: - :undoc-members: - :show-inheritance: + :members: + :undoc-members: + :show-inheritance: From 3b9e555ecc98a4788c610692fd314bc456c09461 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 09:38:08 +0000 Subject: [PATCH 367/628] [Docs] Add pyverilator dependency for automatically build docs --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index f38fe11ad2..fdc28ae718 100644 --- a/setup.cfg +++ b/setup.cfg @@ -83,6 +83,7 @@ docs = torchvision torch qonnx@git+https://github.com/fastmachinelearning/qonnx@main#egg=qonnx + pyverilator@https://github.com/maltanar/pyverilator@main#egg=pyverilator # Add here test requirements (semicolon/line-separated) testing = From d992e14e72832b5787cd0af3f8d2357934d71f9d Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 09:40:06 +0000 Subject: [PATCH 368/628] [Docs] Fix typo in setup.cfg --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index fdc28ae718..810de7bc95 100644 --- a/setup.cfg +++ b/setup.cfg @@ -83,7 +83,7 @@ docs = torchvision torch qonnx@git+https://github.com/fastmachinelearning/qonnx@main#egg=qonnx - pyverilator@https://github.com/maltanar/pyverilator@main#egg=pyverilator + pyverilator@git+https://github.com/maltanar/pyverilator@main#egg=pyverilator # Add here test requirements (semicolon/line-separated) testing = From d60f1f756de76f77408c37613649dd6d83fa0f69 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 09:42:06 +0000 Subject: [PATCH 369/628] [Docs] Change pyverilator branch to master in setup.cfg --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 810de7bc95..930752a731 100644 --- a/setup.cfg +++ b/setup.cfg @@ -83,7 +83,7 @@ docs = torchvision torch qonnx@git+https://github.com/fastmachinelearning/qonnx@main#egg=qonnx - pyverilator@git+https://github.com/maltanar/pyverilator@main#egg=pyverilator + pyverilator@git+https://github.com/maltanar/pyverilator@master#egg=pyverilator # Add here test requirements (semicolon/line-separated) testing = From b308f9690b52ab7b0afbbd944b8abc7779492d78 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 10:03:33 +0000 Subject: [PATCH 370/628] [Docs] Add additional packages to docs setup --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 930752a731..1893aa4231 100644 --- a/setup.cfg +++ b/setup.cfg @@ -77,6 +77,7 @@ docs = docutils==0.17.1 dataclasses-json==0.5.7 gspread==3.6.0 + IPython pytest netron vcdvcd @@ -84,6 +85,7 @@ docs = torch qonnx@git+https://github.com/fastmachinelearning/qonnx@main#egg=qonnx pyverilator@git+https://github.com/maltanar/pyverilator@master#egg=pyverilator + brevitas@git+https://github.com/Xilinx/brevitas@master#egg=brevitas_examples # Add here test requirements (semicolon/line-separated) testing = From ec96d948921f20fc0dfadeeed516d6ab480a966d Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 10:12:03 +0000 Subject: [PATCH 371/628] [Docs] Update code links in command line section --- docs/finn/command_line.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/finn/command_line.rst b/docs/finn/command_line.rst index 12e01db554..8c37479a28 100644 --- a/docs/finn/command_line.rst +++ b/docs/finn/command_line.rst @@ -105,7 +105,7 @@ The following outputs will be generated regardless of which particular outputs a The other output products are 
controlled by the `generate_outputs` field in the build configuration), and are detailed below. -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.ESTIMATE_REPORTS` produces a variety of reports to estimate resource usage and performance *without* running any synthesis. This can be useful for setting up the parallelization and other hardware configuration: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.ESTIMATE_REPORTS` produces a variety of reports to estimate resource usage and performance *without* running any synthesis. This can be useful for setting up the parallelization and other hardware configuration: * ``report/estimate_layer_cycles.json`` -- cycles per layer estimation from analytical model * ``report/estimate_layer_resources.json`` -- resources per layer estimation from analytical model @@ -113,31 +113,31 @@ build configuration), and are detailed below. * ``report/estimate_network_performance.json`` -- whole-network performance estimation from analytical model * ``report/op_and_param_counts.json`` -- per-layer and total number of operations and parameters (independent of parallelization) -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.STITCHED_IP`: produces a stitched Vivado IP block design that can be integrated with other FPGA designs in Vivado IPI: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.STITCHED_IP`: produces a stitched Vivado IP block design that can be integrated with other FPGA designs in Vivado IPI: * ``stitched_ip/finn_vivado_stitch_proj.xpr`` -- Vivado project (including Vivado IP Integrator block design) to generate the stitched IP * ``stitched_ip/ip`` -- exported Vivado IP for the stitched design -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.RTLSIM_PERFORMANCE`: measure latency and performance for the stitched IP in RTL simulation, using PyVerilator +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.RTLSIM_PERFORMANCE`: measure latency and performance for the stitched IP in RTL simulation, using PyVerilator * ``report/rtlsim_performance.json`` -- accelerator throughput and latency from RTL simulation -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.OOC_SYNTH` runs out-of-context synthesis for the stitched IP. This is useful for getting post-synthesis resource counts and achievable clock frequency without having to produce a full bitfile with DMA engines: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.OOC_SYNTH` runs out-of-context synthesis for the stitched IP. 
This is useful for getting post-synthesis resource counts and achievable clock frequency without having to produce a full bitfile with DMA engines: * ``report/ooc_synth_and_timing.json`` -- resources and achievable clock frequency from out-of-context synthesis -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.BITFILE` will run Vivado and/or Vitis to insert the FINN accelerator inside a shell, with DMA engines instantiated to move data to/from main memory: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.BITFILE` will run Vivado and/or Vitis to insert the FINN accelerator inside a shell, with DMA engines instantiated to move data to/from main memory: * ``bitfile/finn-accel.(bit|xclbin)`` -- generated bitfile depending on platform * ``report/post_synth_resources.xml`` -- FPGA resource utilization after synthesis * ``report/post_route_timing.rpt`` -- post-route timing report -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.PYNQ_DRIVER` will generate a PYNQ Python driver that can be used to interface the generated accelerator: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.PYNQ_DRIVER` will generate a PYNQ Python driver that can be used to interface the generated accelerator: * ``driver/driver.py`` -- Python driver that can be used on PYNQ on Zynq or Alveo platforms to launch the accelerator -* :py:mod:`finn.builder.build_dataflow.DataflowOutputType.DEPLOYMENT_PACKAGE`: +* :py:mod:`finn.builder.build_dataflow_config.DataflowOutputType.DEPLOYMENT_PACKAGE`: * ``deploy/`` -- deployment package folder with a bitfile and driver, ready to be copied to target hardware platform @@ -153,7 +153,7 @@ and compare it against the expected output that you provide. This is achieved by setting up the following members of the build configuration: -* Set ``verify_steps`` to be a list of :py:mod:`finn.builder.build_dataflow.VerificationStepType` +* Set ``verify_steps`` to be a list of :py:mod:`finn.builder.build_dataflow_config.VerificationStepType` where each element in the list indicates the output of a particular step that will be verified. See the documentation of the ``VerificationStepType`` for more information. From c114d03935afb52dacdf030ce46da40f8f71656c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Thu, 9 Feb 2023 10:26:18 +0000 Subject: [PATCH 372/628] [hotfix] mark build dataflow test as xfail Changes were made to a dataflow step previously that causes the test of the dataflow step to fail. Further investigation into the options is needed, until then the test is expected to fail. An option may need to be added to src/finn/qnn-data/build_dataflow/dataflow_build_config.json in order for the test to pass again but more investigation is needed to determine if that is the best option. 
The commit in question is: 59b19dd3699426a549b25f3926716278742ad72b Signed-off-by: Fionn O'Donohoe --- tests/util/test_build_dataflow.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index 39f0b0dc89..915e54f4e4 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -41,6 +41,7 @@ @pytest.mark.slow @pytest.mark.vivado @pytest.mark.end2end +@pytest.mark.xfail def test_end2end_build_dataflow_directory(): test_dir = make_build_dir("test_build_dataflow_directory_") target_dir = test_dir + "/build_dataflow" From 8e9c4c2a637664fbcfa09fc91715bc31d172d582 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 10:41:16 +0000 Subject: [PATCH 373/628] [Docs] Update internals section --- docs/finn/internals.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index 0b33affc76..add70d649c 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -7,7 +7,7 @@ Internals Intermediate Representation: QONNX and FINN-ONNX ================================================ -FINN uses `ONNX `_ as an intermediate representation (IR) for neural networks. As such, almost every component inside FINN uses ONNX and its `Python API `_, so you may want to familiarize yourself with how ONNX represents DNNs. Specifically, the `ONNX protobuf description `_ (or its `human-readable documentation `_ and the `operator schemas `_ are useful as reference documents. We also provide a Jupyter notebook that can help to get familiar with ONNX by showing how to work with a simple ONNX model in FINN, see chapter :ref:`tutorials` for details. +FINN uses `ONNX `_ as an intermediate representation (IR) for neural networks. As such, almost every component inside FINN uses ONNX and its `Python API `_, so you may want to familiarize yourself with how ONNX represents DNNs. Specifically, the `ONNX protobuf description `_ (or its `human-readable documentation `_ and the `operator schemas `_ are useful as reference documents. We also provide a Jupyter notebook that can help to get familiar with ONNX by showing how to work with a simple ONNX model in FINN, see chapter :ref:`tutorials` for details. .. note:: FINN supports two specialized variants of ONNX called QONNX and FINN-ONNX, and not all ONNX graphs are supported by FINN (and vice versa). @@ -137,14 +137,14 @@ ModelWrapper contains more useful functions, if you are interested please have a Analysis Pass ============= -An analysis pass traverses the graph structure and produces information about certain properties. It gets the model in the ModelWrapper as input and returns a dictionary of the properties the analysis extracts. If you are interested in how to write an analysis pass for FINN, please take a look at the Jupyter notebook about how to write an analysis pass, see chapter :ref:`tutorials` for details. For more information about existing analysis passes in FINN, see module :py:mod:`finn.analysis`. +An analysis pass traverses the graph structure and produces information about certain properties. It gets the model in the ModelWrapper as input and returns a dictionary of the properties the analysis extracts. If you are interested in how to write an analysis pass for FINN, please take a look at the Jupyter notebook about how to write an analysis pass, see chapter :ref:`tutorials` for details. 
For more information about existing analysis passes in FINN, see module :py:mod:`finn.analysis` . .. _transformation_pass: Transformation Pass =================== -A transformation passes changes (transforms) the given model, it gets the model in the ModelWrapper as input and returns the changed model (ModelWrapper) to the FINN flow. Additional the flag *model_was_changed* which indicates if a transformation has to be performed more than once, is returned. If you are interested in how to write a transformation pass for FINN, please take a look at the Jupyter notebook about how to write a transformation pass, see chapter :ref:`tutorials` for details. For more information about existing transformation passes in FINN, see module :py:mod:`finn.transformation`. +A transformation passes changes (transforms) the given model, it gets the model in the ModelWrapper as input and returns the changed model (ModelWrapper) to the FINN flow. Additional the flag *model_was_changed* which indicates if a transformation has to be performed more than once, is returned. If you are interested in how to write a transformation pass for FINN, please take a look at the Jupyter notebook about how to write a transformation pass, see chapter :ref:`tutorials` for details. For more information about existing transformation passes in FINN, see module :py:mod:`finn.transformation` . .. _mem_mode: @@ -167,7 +167,7 @@ The following picture shows the idea behind the "const" and "decoupled" mode. Const mode ---------- -In *const* mode the weights are "baked in" into the Matrix-Vector-Activate-Unit (MVAU), which means they are part of the HLS code. During the IP block generation the weight values are integrated as *params.h* file in the HLS code and synthesized together with it. For the *const* mode IP block generation the `Matrix_Vector_Activate_Batch function `_ from the finn-hls library is used, which implements a standard MVAU. The resulting IP block has an input and an output stream, as shown in the above picture on the left. FIFOs in the form of verilog components are connected to these. +In *const* mode the weights are "baked in" into the Matrix-Vector-Activate-Unit (MVAU), which means they are part of the HLS code. During the IP block generation the weight values are integrated as *params.h* file in the HLS code and synthesized together with it. For the *const* mode IP block generation the `Matrix_Vector_Activate_Batch function `_ from the finn-hls library is used, which implements a standard MVAU. The resulting IP block has an input and an output stream, as shown in the above picture on the left. FIFOs in the form of verilog components are connected to these. Advantages: @@ -185,7 +185,7 @@ Disadvantages: Decoupled mode -------------- -In *decoupled* mode a different variant of the MVAU with three ports is used. Besides the input and output streams, which are fed into the circuit via Verilog FIFOs, there is another input, which is used to stream the weights. For this the `streaming MVAU `_ from the finn-hls library is used. To make the streaming possible a Verilog weight streamer component accesses the weight memory and sends the values via another FIFO to the MVAU. This component can be found in the `finn-rtllib `_ under the name *memstream.v*. For the IP block generation this component, the IP block resulting from the synthesis of the HLS code of the streaming MVAU and a FIFO for the weight stream are combined in a verilog wrapper. 
The weight values are saved in .dat files and stored in the weight memory from which the weight streamer reads. The resulting verilog component, which is named after the name of the node and has the suffix "_memstream.v", exposes only two ports to the outside, the data input and output. It therefore behaves externally in the same way as the MVAU in *const* mode. +In *decoupled* mode a different variant of the MVAU with three ports is used. Besides the input and output streams, which are fed into the circuit via Verilog FIFOs, there is another input, which is used to stream the weights. For this the `streaming MVAU `_ from the finn-hls library is used. To make the streaming possible a Verilog weight streamer component accesses the weight memory and sends the values via another FIFO to the MVAU. This component can be found in the `finn-rtllib `_ under the name *memstream.v*. For the IP block generation this component, the IP block resulting from the synthesis of the HLS code of the streaming MVAU and a FIFO for the weight stream are combined in a verilog wrapper. The weight values are saved in .dat files and stored in the weight memory from which the weight streamer reads. The resulting verilog component, which is named after the name of the node and has the suffix "_memstream.v", exposes only two ports to the outside, the data input and output. It therefore behaves externally in the same way as the MVAU in *const* mode. Advantages: From 6581fc13f2ab5d0cd2609c1c911b563d827a5f01 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 11:02:31 +0000 Subject: [PATCH 374/628] [Docs] Update tutorials and developers section --- docs/finn/developers.rst | 4 ++-- docs/finn/tutorials.rst | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/finn/developers.rst b/docs/finn/developers.rst index b152dfef66..f9252f764c 100644 --- a/docs/finn/developers.rst +++ b/docs/finn/developers.rst @@ -12,7 +12,7 @@ Prerequisites Before starting to do development on FINN it's a good idea to start with understanding the basics as a user. Going through all of the -:ref:`tutorials` is strongly recommended if you haven' already done so. +:ref:`tutorials` is strongly recommended if you haven't already done so. Additionally, please review the documentation available on :ref:`internals`. Repository structure @@ -153,7 +153,7 @@ from the FINN root directory as follows: :: - python setup.py test --addopts "-k test_brevitas_debug --pdb" + pytest -k test_brevitas_debug --pdb If you want to run tests in parallel (e.g. to take advantage of a multi-core CPU) diff --git a/docs/finn/tutorials.rst b/docs/finn/tutorials.rst index 110f77c5b1..7ac54501cf 100644 --- a/docs/finn/tutorials.rst +++ b/docs/finn/tutorials.rst @@ -46,3 +46,8 @@ The notebooks in this folder are more developer oriented. They should help you t * 2_custom_op * Explains the basics of FINN custom ops and how to define a new one. + +FINN Example FPGA Flow Using MNIST Numerals +============================================ + +Next to the Jupyter notebooks above there is a tutorial about the command-line build_dataflow `here `_ which shows how to bring a FINN compiled model into the Vivado FPGA design environment. 
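The developer and tutorial documentation touched by the patches above all point at the same `finn.builder` flow that `command_line.rst` documents. As a rough illustration only (not part of any patch in this series; the ONNX filename, board name and config values are placeholders, and the exact `DataflowBuildConfig` fields may vary between FINN versions), a Python build script driving the `DataflowOutputType` and `VerificationStepType` enums referenced above might look like this:

    from finn.builder.build_dataflow import build_dataflow_cfg
    from finn.builder.build_dataflow_config import (
        DataflowBuildConfig,
        DataflowOutputType,
        VerificationStepType,
    )

    cfg = DataflowBuildConfig(
        output_dir="output_build",        # reports end up under output_dir/report/
        synth_clk_period_ns=10.0,         # target clock period for synthesis
        board="Pynq-Z1",                  # placeholder target platform
        verify_steps=[VerificationStepType.STREAMLINED_PYTHON],
        generate_outputs=[
            DataflowOutputType.ESTIMATE_REPORTS,  # analytical resource/performance estimates
            DataflowOutputType.STITCHED_IP,       # stitched Vivado IP block design
        ],
    )
    build_dataflow_cfg("model.onnx", cfg)  # placeholder model exported from Brevitas

With a config like this, the generated artifacts correspond to the file listing in `command_line.rst`, e.g. `report/estimate_network_performance.json` for the estimate reports.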
From 5e8c3a7edf830139d0ca7cc0eb87fef4db027ccb Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Feb 2023 11:11:22 +0000 Subject: [PATCH 375/628] [Docs] Update getting started section --- docs/finn/getting_started.rst | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 69d29e5707..9b3111b70e 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -141,10 +141,7 @@ If you are having trouble building the Docker image or need offline access, you Supported FPGA Hardware ======================= -**Shell-integrated accelerator + driver:** For quick deployment, we target boards supported by `PYNQ `_ . For these platforms, we can build a full bitfile including DMAs to move data into and out of the FINN-generated accelerator, as well as a Python driver to launch the accelerator. We support the Pynq-Z1, Pynq-Z2, Ultra96, ZCU102 and ZCU104 boards. - -.. warning:: - In previous FINN versions (v0.4b - v0.7) we had support for `Xilinx Alveo boards `_ using PYNQ and Vitis 2020.1, see instructions below for Alveo setup that works with older versions. Please note that with the new release with Vitis 2022.1, we do only have experimental support to automatically deployment for Alveo cards. +**Shell-integrated accelerator + driver:** For quick deployment, we target boards supported by `PYNQ `_ . For these platforms, we can build a full bitfile including DMAs to move data into and out of the FINN-generated accelerator, as well as a Python driver to launch the accelerator. We support the Pynq-Z1, Pynq-Z2, Ultra96, ZCU102 and ZCU104 boards, as well as Alveo cards. **Vivado IPI support for any Xilinx FPGA:** FINN generates a Vivado IP Integrator (IPI) design from the neural network with AXI stream (FIFO) in-out interfaces, which can be integrated onto any Xilinx FPGA as part of a larger system. It's up to you to take the FINN-generated accelerator (what we call "stitched IP" in the tutorials), wire it up to your FPGA design and send/receive neural network data to/from the accelerator. @@ -182,12 +179,12 @@ On the target side: On the host side: -1. Install Vitis 2020.1 and set up the ``VITIS_PATH`` environment variable to point to your installation. +1. Install Vitis 2022.1 and set up the ``VITIS_PATH`` environment variable to point to your installation. 2. Install Xilinx XRT. Ensure that the ``XRT_DEB_VERSION`` environment variable reflects which version of XRT you have installed. 3. Install the Vitis platform files for Alveo and set up the ``PLATFORM_REPO_PATHS`` environment variable to point to your installation. *This must be the same path as the target's platform files (target step 2)* 4. Set up the ``ALVEO_*`` environment variables accordingly for your target, see description of environment variables above. 5. `Set up public key authentication `_. Copy your private key to the ``finn/ssh_keys`` folder on the host to get password-less deployment and remote execution. -6. Done! You can try the ``test_end2end_vitis`` tests in the FINN Docker to verify your setup, although this will take some time. +6. Done! 
Vivado/Vitis license ********************* From 17af0c3573d8658a23676358434da03caf0fc74c Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 10 Feb 2023 10:23:07 +0000 Subject: [PATCH 376/628] [Docs] Update readme and authors list --- AUTHORS.rst | 4 +++- README.md | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/AUTHORS.rst b/AUTHORS.rst index d011ce3d7a..861b81924b 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -9,7 +9,7 @@ Contributors * Hendrik Borras (@HenniOVP) * Lucian Petrica (@quetric) * Tobias Alonso (@Tobi-Alonso) -* Felix Paul Jentzsch (@felixpj) +* Felix Paul Jentzsch (@fpjentzsch) * Mirza Mrahorovic (@mmrahorovic) * Suranga Mahesh (@surangamh) * Peter Lehnhardt (@pete-lennart) @@ -26,3 +26,5 @@ Contributors * Aziz Bahri (@azizb-xlnx) * Fionn O'Donohoe (@fionnodonohoe-xlnx) * Matthias Gehre (@mgehre-amd) +* Hugo Le Blevec (@hleblevec) +* Patrick Geel (@patrickgeel) diff --git a/README.md b/README.md index 1b8efc8f19..2e1faf8f0c 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ Please see the [Getting Started](https://finn.readthedocs.io/en/latest/getting_s ## Documentation -You can view the documentation on [readthedocs](https://finn.readthedocs.io) or build them locally using `python setup.py doc` from inside the Docker container. Additionally, there is a series of [Jupyter notebook tutorials](https://github.com/Xilinx/finn/tree/master/notebooks), which we recommend running from inside Docker for a better experience. +You can view the documentation on [readthedocs](https://finn.readthedocs.io) or build them locally using `python setup.py doc` from inside the Docker container. Additionally, there is a series of [Jupyter notebook tutorials](https://github.com/Xilinx/finn/tree/main/notebooks), which we recommend running from inside Docker for a better experience. ## Community @@ -67,4 +67,4 @@ The current implementation of the framework is based on the following publicatio ## Old version We previously released an early-stage prototype of a toolflow that took in Caffe-HWGQ binarized network descriptions and produced dataflow architectures. You can find it in the [v0.1](https://github.com/Xilinx/finn/tree/v0.1) branch in this repository. -Please be aware that this version is deprecated and unsupported, and the master branch does not share history with that branch so it should be treated as a separate repository for all purposes. +Please be aware that this version is deprecated and unsupported, and the main branch does not share history with that branch so it should be treated as a separate repository for all purposes. 
From 9847528a8430fb6bf00826845de74fbe4a1a596d Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Feb 2023 11:52:15 +0000 Subject: [PATCH 377/628] [Notebooks/Tests] Fix typo in nb and fix build_dataflow test --- notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb | 2 +- notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb | 2 +- src/finn/qnn-data/build_dataflow/dataflow_build_config.json | 1 + tests/util/test_build_dataflow.py | 1 - 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 8ea6a35009..388accad3a 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -524,7 +524,7 @@ "metadata": {}, "outputs": [], "source": [ - "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_pynq_deploy.onnx\")\n", + "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_synth.onnx\")\n", "iname = model.graph.input[0].name\n", "ishape = model.get_tensor_shape(iname)\n", "np.save(deployment_dir + \"/input.npy\", x.reshape(ishape))" diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index 7e9980cf2a..eec17b2fa7 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -873,7 +873,7 @@ "source": [ "import numpy as np\n", "\n", - "model = ModelWrapper(build_dir + \"/tfc_w1_a1_pynq_deploy.onnx\")\n", + "model = ModelWrapper(build_dir + \"/tfc_w1_a1_post_synthesis.onnx\")\n", "iname = model.graph.input[0].name\n", "oname = parent_model.graph.output[0].name\n", "ishape = model.get_tensor_shape(iname)\n", diff --git a/src/finn/qnn-data/build_dataflow/dataflow_build_config.json b/src/finn/qnn-data/build_dataflow/dataflow_build_config.json index 27ec38f6a4..a053c1a22f 100644 --- a/src/finn/qnn-data/build_dataflow/dataflow_build_config.json +++ b/src/finn/qnn-data/build_dataflow/dataflow_build_config.json @@ -7,6 +7,7 @@ "standalone_thresholds": true, "shell_flow_type": "vivado_zynq", "verify_save_rtlsim_waveforms": true, + "force_python_rtlsim": true, "verify_steps": [ "initial_python", "streamlined_python", diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index 915e54f4e4..39f0b0dc89 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -41,7 +41,6 @@ @pytest.mark.slow @pytest.mark.vivado @pytest.mark.end2end -@pytest.mark.xfail def test_end2end_build_dataflow_directory(): test_dir = make_build_dir("test_build_dataflow_directory_") target_dir = test_dir + "/build_dataflow" From f752ba21e0badeb81372dc74c8ee52eb581ca1fb Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Feb 2023 15:19:11 +0000 Subject: [PATCH 378/628] [Deps] Update finn-experimental version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 5b060f5bc8..dd62cad9eb 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" -FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" +FINN_EXP_COMMIT="72fac31ab732130cba5cf05555544ee5a5ecb773" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" From a66a2405c013a748f03b970974e87056d774a966 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Feb 2023 15:25:39 +0000 Subject: [PATCH 379/628] [Deps] Update finn experimental version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index dd62cad9eb..4f3b821f76 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" -FINN_EXP_COMMIT="72fac31ab732130cba5cf05555544ee5a5ecb773" +FINN_EXP_COMMIT="53049bf5025dbc0a11dc19355325b1a02c3947c0" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" From 54e612fcf6ea7e1dcd621531a4bb3b6e1aa42098 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Feb 2023 15:53:43 +0000 Subject: [PATCH 380/628] [Deps] Update finn-experimental version (domain name update) --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 4f3b821f76..89a18896af 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" -FINN_EXP_COMMIT="53049bf5025dbc0a11dc19355325b1a02c3947c0" +FINN_EXP_COMMIT="448bd6f5ee1dbf9e2f9cda014e2f875bc6d49ae0" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" From b703e74b0dd0339e850c0229ac5781bb9b882e3c Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Feb 2023 18:47:22 +0000 Subject: [PATCH 381/628] Update finn-experimental commit --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 89a18896af..e6aa50940c 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" -FINN_EXP_COMMIT="448bd6f5ee1dbf9e2f9cda014e2f875bc6d49ae0" +FINN_EXP_COMMIT="8e6cccda16a5adeaac8451f9236e2a24766e0a27" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" From 93b96f814303ef75146c2f8112d651dbd1b35c6b Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 14 Feb 2023 11:36:30 +0000 Subject: [PATCH 382/628] [Deps] Update finn-hlslib commit --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 5b060f5bc8..dd23c33e1b 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="d27f6b6c5d8f1bb208db395659389603f63ad4be" +HLSLIB_COMMIT="4ddfa00b07275a3f1de1c13409e6acb489115fe2" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From a31a13978ed0b4d176001d7d21c35d79893e32d6 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 15 Feb 2023 16:20:23 +0000 Subject: [PATCH 383/628] [Deps] Update qonnx commit --- fetch-repos.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index b7b616e166..9e06e196e2 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="13d777a2aa0dc449dc3de7aa369c1e155d6ce2c2 " +QONNX_COMMIT="383d511db8d540ff9efadf2d620db7caa44bf876" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" @@ -38,7 +38,7 @@ AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" -QONNX_URL="https://github.com/i-colbert/qonnx.git" +QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" BREVITAS_URL="https://github.com/Xilinx/brevitas.git" PYVERILATOR_URL="https://github.com/maltanar/pyverilator.git" From c62dc7638ab74bc38c95341de9a4d99a620f2b82 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 16 Feb 2023 15:40:09 +0000 Subject: [PATCH 384/628] [Docs] Add new transform to docs --- .../source_code/finn.transformation.fpgadataflow.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/finn/source_code/finn.transformation.fpgadataflow.rst b/docs/finn/source_code/finn.transformation.fpgadataflow.rst index 9f8ec07930..f7137ae347 100644 --- a/docs/finn/source_code/finn.transformation.fpgadataflow.rst +++ b/docs/finn/source_code/finn.transformation.fpgadataflow.rst @@ -173,6 +173,15 @@ finn.transformation.fpgadataflow.minimize\_accumulator\_width :show-inheritance: +finn.transformation.fpgadataflow.minimize\_weight\_bit\_width +-------------------------------------------------------------- + +.. 
automodule:: finn.transformation.fpgadataflow.minimize_weight_bit_width + :members: + :undoc-members: + :show-inheritance: + + finn.transformation.fpgadataflow.prepare\_cppsim ------------------------------------------------------- From dbf592687cef670ab49c96d16ddaf3e2fa2f09d2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 20 Feb 2023 15:24:11 +0000 Subject: [PATCH 385/628] [Notebooks] Update Brevitas import nbs --- ...a_brevitas_network_import_via_FINN-ONNX.ipynb | 2 +- .../1b_brevitas_network_import_via_QONNX.ipynb | 16 +++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb index 2d751b43b6..429effca83 100644 --- a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb +++ b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb @@ -6,7 +6,7 @@ "source": [ "# Importing Brevitas networks into FINN with the FINN-ONNX interchange format\n", "\n", - "**Note: This notebook is very similar to the 1b notebook, in that it shows the same concepts for the FINN-ONNX ingestion as 1b does for QONNX.**\n", + "**Note: This notebook is very similar to the 1b notebook, in that it shows the same concepts for the FINN-ONNX ingestion as 1b does for QONNX. Section 1 is identical in both notebooks.**\n", "\n", "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", "\n", diff --git a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb index 2d8447ad3a..fba824dca2 100644 --- a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb +++ b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb @@ -6,7 +6,7 @@ "source": [ "# Importing Brevitas networks into FINN with the QONNX interchange format\n", "\n", - "**Note: This notebook is very similar to the 1a notebook, in that it shows the same concepts for the QONNX ingestion as 1a does for FINN-ONNX.**\n", + "**Note: This notebook is very similar to the 1a notebook, in that it shows the same concepts for the QONNX ingestion as 1a does for FINN-ONNX. Section 1 is identical in both notebooks.**\n", "\n", "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. 
The steps will be as follows:\n", "\n", @@ -217,7 +217,7 @@ "from qonnx.core.modelwrapper import ModelWrapper\n", "import qonnx.core.onnx_exec as oxe\n", "model = ModelWrapper(export_onnx_path_cleaned)\n", - "input_dict = {\"0\": nph.to_array(input_tensor)}\n", + "input_dict = {\"global_in\": nph.to_array(input_tensor)}\n", "output_dict = oxe.execute_onnx(model, input_dict)\n", "produced_qonnx = output_dict[list(output_dict.keys())[0]]\n", "\n", @@ -230,7 +230,7 @@ "metadata": {}, "outputs": [], "source": [ - "np.isclose(produced, produced_finn).all()" + "np.isclose(produced, produced_qonnx).all()" ] }, { @@ -258,9 +258,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "showInNetron(export_onnx_path_converted)" @@ -280,7 +278,7 @@ "outputs": [], "source": [ "model = ModelWrapper(export_onnx_path_cleaned)\n", - "input_dict = {\"0\": nph.to_array(input_tensor)}\n", + "input_dict = {\"global_in\": nph.to_array(input_tensor)}\n", "output_dict = oxe.execute_onnx(model, input_dict)\n", "produced_finn = output_dict[list(output_dict.keys())[0]]\n", "\n", @@ -293,7 +291,7 @@ "metadata": {}, "outputs": [], "source": [ - "np.isclose(produced, produced_finn).all()" + "np.isclose(produced_qonnx, produced_finn).all()" ] }, { @@ -306,7 +304,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, From 15a3552654a88bd0b3b59c68aaf2e3eacd67326d Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 21 Feb 2023 16:07:41 +0000 Subject: [PATCH 386/628] [Deps] Update brevitas commit to v0.8.0 --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index dd23c33e1b..f13037733e 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -29,7 +29,7 @@ QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" -BREVITAS_COMMIT="a5b71d6de1389d3e7db898fef72e014842670f03" +BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="4ddfa00b07275a3f1de1c13409e6acb489115fe2" From 1b1ee9b3a3c129447d5f3615b989ecbcc7ffd01d Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 21 Feb 2023 16:13:00 +0000 Subject: [PATCH 387/628] [Tests] Update export fct in transformation tests --- .../streamline/test_sign_to_thres.py | 5 +++-- .../streamline/test_streamline_cnv.py | 5 +++-- .../streamline/test_streamline_fc.py | 5 +++-- .../test_batchnorm_to_affine_bnn_pynq.py | 7 ++++--- .../transformation/test_infer_data_layouts_cnv.py | 5 +++-- tests/transformation/test_infer_datatypes_lfc.py | 5 +++-- tests/transformation/test_qonnx_to_finn.py | 14 ++++++-------- 7 files changed, 25 insertions(+), 21 deletions(-) diff --git a/tests/transformation/streamline/test_sign_to_thres.py b/tests/transformation/streamline/test_sign_to_thres.py index 839680bd7a..72e400346d 100644 --- a/tests/transformation/streamline/test_sign_to_thres.py +++ b/tests/transformation/streamline/test_sign_to_thres.py @@ -28,10 +28,11 @@ import pytest -import brevitas.onnx as bo import onnx import onnx.numpy_helper as nph import os +import torch +from brevitas.export import export_finn_onnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -47,7 +48,7 @@ 
@pytest.mark.streamline def test_sign_to_thres(): lfc = get_test_model_trained("LFC", 1, 1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) + export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py index 6a82925012..b7d6a825bb 100644 --- a/tests/transformation/streamline/test_streamline_cnv.py +++ b/tests/transformation/streamline/test_streamline_cnv.py @@ -30,8 +30,9 @@ import pytest -import brevitas.onnx as bo import numpy as np +import torch +from brevitas.export import export_finn_onnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import ( @@ -63,7 +64,7 @@ def test_streamline_cnv(size, wbits, abits): nname = "%s_%dW%dA" % (size, wbits, abits) finn_onnx = export_onnx_path + "/%s.onnx" % nname fc = get_test_model_trained(size, wbits, abits) - bo.export_finn_onnx(fc, (1, 3, 32, 32), finn_onnx) + export_finn_onnx(fc, torch.randn(1, 3, 32, 32), finn_onnx) model = ModelWrapper(finn_onnx) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/streamline/test_streamline_fc.py b/tests/transformation/streamline/test_streamline_fc.py index 9000821435..6131c3b03e 100644 --- a/tests/transformation/streamline/test_streamline_fc.py +++ b/tests/transformation/streamline/test_streamline_fc.py @@ -28,10 +28,11 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph +import torch +from brevitas.export import export_finn_onnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -66,7 +67,7 @@ def test_streamline_fc(size, wbits, abits): nname = "%s_%dW%dA" % (size, wbits, abits) finn_onnx = export_onnx_path + "/%s.onnx" % nname fc = get_test_model_trained(size, wbits, abits) - bo.export_finn_onnx(fc, (1, 1, 28, 28), finn_onnx) + export_finn_onnx(fc, torch.randn(1, 1, 28, 28), finn_onnx) model = ModelWrapper(finn_onnx) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py index fd4e37807c..60e81ffe81 100644 --- a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py +++ b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py @@ -30,11 +30,12 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph import os +import torch +from brevitas.export import export_finn_onnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.batchnorm_to_affine import BatchNormToAffine @@ -50,7 +51,7 @@ @pytest.mark.transform def test_batchnorm_to_affine_cnv_w1a1(): lfc = get_test_model_trained("CNV", 1, 1) - bo.export_finn_onnx(lfc, (1, 3, 32, 32), export_onnx_path) + export_finn_onnx(lfc, torch.randn(1, 3, 32, 32), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -75,7 +76,7 @@ def test_batchnorm_to_affine_cnv_w1a1(): @pytest.mark.transform def test_batchnorm_to_affine_lfc_w1a1(): lfc = get_test_model_trained("LFC", 1, 
1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) + export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py index 952ce306a4..71822a2903 100644 --- a/tests/transformation/test_infer_data_layouts_cnv.py +++ b/tests/transformation/test_infer_data_layouts_cnv.py @@ -28,9 +28,10 @@ import pytest -import brevitas.onnx as bo import os import qonnx.core.data_layout as DataLayout +import torch +from brevitas.export import export_finn_onnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants @@ -51,7 +52,7 @@ @pytest.mark.transform def test_infer_data_layouts_cnv(): cnv = get_test_model_trained("CNV", 1, 1) - bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path_cnv) + export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) model = ModelWrapper(export_onnx_path_cnv) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/test_infer_datatypes_lfc.py b/tests/transformation/test_infer_datatypes_lfc.py index 9798005349..173532cb76 100644 --- a/tests/transformation/test_infer_datatypes_lfc.py +++ b/tests/transformation/test_infer_datatypes_lfc.py @@ -28,8 +28,9 @@ import pytest -import brevitas.onnx as bo import os +import torch +from brevitas.export import export_finn_onnx from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -45,7 +46,7 @@ @pytest.mark.transform def test_infer_datatypes_lfc(): lfc = get_test_model_trained("LFC", 1, 1) - bo.export_finn_onnx(lfc, (1, 1, 28, 28), export_onnx_path) + export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 7e438b4b8b..e5f1eefe12 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -31,12 +31,11 @@ import pytest -import brevitas.export.onnx.generic as b_onnx -import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph import torch +from brevitas.export import export_finn_onnx, export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -117,8 +116,10 @@ def test_QONNX_to_FINN(model_name, wbits, abits): torch_input_tensor = torch.from_numpy(input_tensor).float() brev_output = brev_model.forward(torch_input_tensor).detach().numpy() - # Get "clean" FINN model and it's output - _ = bo.export_finn_onnx(brev_model, in_shape, finn_base_path.format("raw")) + # Get "clean" FINN model and its output + _ = export_finn_onnx( + brev_model, torch.randn(in_shape), finn_base_path.format("raw") + ) model = ModelWrapper(finn_base_path.format("raw")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferShapes()) @@ -137,10 +138,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): ).all(), "The output of the Brevitas model and the FINN model should match." 
# Get the equivalent QONNX model - b_onnx.function.DOMAIN_STRING = "qonnx.custom_op.general" - _ = b_onnx.manager.BrevitasONNXManager.export( - brev_model, in_shape, qonnx_base_path.format("raw") - ) + _ = export_qonnx(brev_model, torch.randn(in_shape), qonnx_base_path.format("raw")) cleanup(qonnx_base_path.format("raw"), out_file=qonnx_base_path.format("clean")) # Compare output From ae210a2d53b563a56447c387873bf29afa627bcd Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 21 Feb 2023 16:27:21 +0000 Subject: [PATCH 388/628] [Tests] Update export fct in conversion to hls layer tests --- tests/fpgadataflow/test_convert_to_hls_layers_cnv.py | 5 +++-- tests/fpgadataflow/test_convert_to_hls_layers_fc.py | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 9997f28438..73721b6cc5 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -30,9 +30,10 @@ import pytest -import brevitas.onnx as bo import numpy as np import os +import torch +from brevitas.export import export_finn_onnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount @@ -61,7 +62,7 @@ @pytest.mark.parametrize("fused_activation", [True, False]) def test_convert_to_hls_layers_cnv_w1a1(fused_activation): cnv = get_test_model_trained("CNV", 1, 1) - bo.export_finn_onnx(cnv, (1, 3, 32, 32), export_onnx_path_cnv) + export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) model = ModelWrapper(export_onnx_path_cnv) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py index fd4e3679d7..5a45638ba1 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py @@ -28,12 +28,12 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph import os import torch +from brevitas.export import export_finn_onnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -59,7 +59,7 @@ @pytest.mark.vivado def test_convert_to_hls_layers_tfc_w1a1(): tfc = get_test_model_trained("TFC", 1, 1) - bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path) + export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -130,7 +130,7 @@ def test_convert_to_hls_layers_tfc_w1a1(): @pytest.mark.vivado def test_convert_to_hls_layers_tfc_w1a2(): tfc = get_test_model_trained("TFC", 1, 2) - bo.export_finn_onnx(tfc, (1, 1, 28, 28), export_onnx_path) + export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) From c0410fda5ab99c60e89a4aa5d4924f0fc2d50a58 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 21 Feb 2023 17:04:05 +0000 Subject: [PATCH 389/628] [Notebooks] Update export fct in Jupyter nbs --- .../basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb | 4 ++-- .../basics/1b_brevitas_network_import_via_QONNX.ipynb | 4 ++-- 
.../end2end_example/bnn-pynq/cnv_end2end_example.ipynb | 7 ++++--- .../end2end_example/bnn-pynq/tfc_end2end_example.ipynb | 7 ++++--- .../cybersecurity/1-train-mlp-with-brevitas.ipynb | 4 ++-- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb index 429effca83..756faf149d 100644 --- a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb +++ b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb @@ -139,10 +139,10 @@ "metadata": {}, "outputs": [], "source": [ - "import brevitas.onnx as bo\n", + "from brevitas.export import export_finn_onnx\n", "export_onnx_path = \"/tmp/LFCW1A1_finn-onnx.onnx\"\n", "input_shape = (1, 1, 28, 28)\n", - "bo.export_finn_onnx(lfc, input_shape, export_onnx_path);" + "export_finn_onnx(lfc, torch.randn(input_shape), export_onnx_path);" ] }, { diff --git a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb index fba824dca2..58fa3fc7e1 100644 --- a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb +++ b/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb @@ -142,10 +142,10 @@ "metadata": {}, "outputs": [], "source": [ - "from brevitas.export.onnx.generic.manager import BrevitasONNXManager\n", + "from brevitas.export import export_qonnx\n", "export_onnx_path = \"/tmp/LFCW1A1_qonnx.onnx\"\n", "input_shape = (1, 1, 28, 28)\n", - "BrevitasONNXManager.export(lfc, input_shape, export_onnx_path);" + "export_qonnx(lfc, torch.randn(input_shape), export_onnx_path);" ] }, { diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 388accad3a..0018bb27ca 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -81,16 +81,17 @@ "metadata": {}, "outputs": [], "source": [ + "import torch\n", "import onnx\n", "from finn.util.test import get_test_model_trained\n", - "import brevitas.onnx as bo\n", + "from brevitas.export import export_finn_onnx\n", "from qonnx.core.modelwrapper import ModelWrapper\n", "from qonnx.transformation.infer_shapes import InferShapes\n", "from qonnx.transformation.fold_constants import FoldConstants\n", "from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, RemoveStaticGraphInputs\n", "\n", "cnv = get_test_model_trained(\"CNV\", 1, 1)\n", - "bo.export_finn_onnx(cnv, (1, 3, 32, 32), build_dir + \"/end2end_cnv_w1a1_export.onnx\")\n", + "export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), build_dir + \"/end2end_cnv_w1a1_export.onnx\")\n", "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_export.onnx\")\n", "model = model.transform(InferShapes())\n", "model = model.transform(FoldConstants())\n", @@ -148,7 +149,7 @@ "# preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", "totensor_pyt = ToTensor()\n", "chkpt_preproc_name = build_dir+\"/end2end_cnv_w1a1_preproc.onnx\"\n", - "bo.export_finn_onnx(totensor_pyt, ishape, chkpt_preproc_name)\n", + "export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", "\n", "# join preprocessing and core model\n", "pre_model = ModelWrapper(chkpt_preproc_name)\n", diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index eec17b2fa7..f99944e31f 100644 --- 
a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -81,12 +81,13 @@ "metadata": {}, "outputs": [], "source": [ + "import torch\n", "import onnx\n", "from finn.util.test import get_test_model_trained\n", - "import brevitas.onnx as bo\n", + "from brevitas.export import export_finn_onnx\n", "\n", "tfc = get_test_model_trained(\"TFC\", 1, 1)\n", - "bo.export_finn_onnx(tfc, (1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\"); # semicolon added to suppress log" + "export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\"); # semicolon added to suppress log" ] }, { @@ -267,7 +268,7 @@ "# preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", "totensor_pyt = ToTensor()\n", "chkpt_preproc_name = build_dir+\"/tfc_w1_a1_preproc.onnx\"\n", - "bo.export_finn_onnx(totensor_pyt, ishape, chkpt_preproc_name)\n", + "export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", "\n", "# join preprocessing and core model\n", "pre_model = ModelWrapper(chkpt_preproc_name)\n", diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 3d77586258..9bb9e6761e 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -677,7 +677,7 @@ "metadata": {}, "outputs": [], "source": [ - "import brevitas.onnx as bo\n", + "from brevitas.export import export_finn_onnx\n", "from brevitas.quant_tensor import QuantTensor\n", "\n", "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n", @@ -696,7 +696,7 @@ "model_for_export.cpu()\n", "\n", "# Export to ONNX\n", - "bo.export_finn_onnx(\n", + "export_finn_onnx(\n", " model_for_export, export_path=ready_model_filename, input_t=input_qt\n", ")\n", "\n", From f60e4abe943dac75a03fb1824c4fb02e700cfb26 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 09:14:54 -0800 Subject: [PATCH 390/628] Inline lambda function for data type bound --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 6 +++--- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index c440b3675c..6aa26af453 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -353,12 +353,12 @@ def lut_estimation(self): acc_datatype = self.get_accumulator_datatype() # if accDataType is not set, then it will default to INT32, which would # be a large overestimate in most (if not all) cases. In this scenario, - # we would use the minimum accumulator as determined by the data types. 
+ # we would use the minimum accumulator as determined by the data types + # bound, derived in https://arxiv.org/abs/2301.13376 alpha = math.log(MW, 2) + W + A - 1 - int(idt.signed()) - phi = lambda x_: math.log(1 + pow(2, -x_), 2) acc_bits = min( acc_datatype.bitwidth(), - np.ceil(alpha + phi(alpha) + 1) + np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1) ) acc_luts = acc_bits # thresholds and threshold comparators diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 377a62f79f..796225a712 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -1199,12 +1199,12 @@ def lut_estimation(self): k_h, k_w = self.get_nodeattr("Kernel") # if accDataType is not set, then it will default to INT32, which would # be a large overestimate in most (if not all) cases. In this scenario, - # we would use the minimum accumulator as determined by the data types. + # we would use the minimum accumulator as determined by the data types + # bound, derived in https://arxiv.org/abs/2301.13376 alpha = math.log(k_h * k_w, 2) + W + A - 1 - int(idt.signed()) - phi = lambda x_: math.log(1 + pow(2, -x_), 2) acc_bits = min( acc_datatype.bitwidth(), - np.ceil(alpha + phi(alpha) + 1) + np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1) ) acc_luts = acc_bits # thresholds and threshold comparators From 88b00a9fff71f859c187540bfb2384db0392df3e Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 09:52:58 -0800 Subject: [PATCH 391/628] Adding minimize_bit_width to the build config --- src/finn/builder/build_dataflow_config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index a38cb6e572..53576e50e7 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -233,6 +233,12 @@ class DataflowBuildConfig: #: flexibility, and makes it possible to have runtime-writable thresholds. standalone_thresholds: Optional[bool] = False + #: (Optional) Whether optimizations that minimize the bit width of the + #: weights and accumulator will be applied. Because this optimization relies + #: on the the values of the weights, it will only be applied if runtime- + #: writeable weights is not enabled. + minimize_bit_width: Optional[bool] = True + #: Target board, only needed for generating full bitfiles where the FINN #: design is integrated into a shell. #: e.g. 
"Pynq-Z1" or "U250" From 9c74af528dc1cdd0a238e1ae4caa730e06e4c479 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 10:44:20 -0800 Subject: [PATCH 392/628] Creating and adding build step to default finn flows --- src/finn/builder/build_dataflow_config.py | 2 ++ src/finn/builder/build_dataflow_steps.py | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 53576e50e7..4c3e4ff899 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -119,6 +119,7 @@ class VerificationStepType(str, Enum): "step_create_dataflow_partition", "step_target_fps_parallelization", "step_apply_folding_config", + "step_minimize_bit_width", "step_generate_estimate_reports", "step_hls_codegen", "step_hls_ipgen", @@ -140,6 +141,7 @@ class VerificationStepType(str, Enum): "step_create_dataflow_partition", "step_target_fps_parallelization", "step_apply_folding_config", + "step_minimize_bit_width", "step_generate_estimate_reports", ] diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 2ee898bc7d..16ac90296f 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -118,6 +118,8 @@ from finn.util.pyverilator import verilator_fifosim from finn.util.test import execute_parent +from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth +from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth def verify_step( model: ModelWrapper, @@ -477,6 +479,14 @@ def step_generate_estimate_reports(model: ModelWrapper, cfg: DataflowBuildConfig return model +def step_minimize_bit_width(model: ModelWrapper, cfg: DataflowBuildConfig): + """Tighten the weight and accumulator bit widths for each layer.""" + if cfg.minimize_bit_width: + model = model.transform(MinimizeWeightBitWidth()) + model = model.transform(MinimizeAccumulatorWidth()) + return model + + def step_hls_codegen(model: ModelWrapper, cfg: DataflowBuildConfig): "Generate Vivado HLS code to prepare HLSCustomOp nodes for IP generation." 
From 49055cf229f1b959ba38916d5c25cd3a7036d35e Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 11:13:36 -0800 Subject: [PATCH 393/628] Remove MinimizeAccumulatorWidth from convert_to_hls layers --- .../fpgadataflow/convert_to_hls_layers.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 7b8a1bf6b8..3029e09d48 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -40,10 +40,6 @@ from qonnx.util.basic import get_by_name from qonnx.util.onnx import nchw_to_nhwc -from finn.transformation.fpgadataflow.minimize_accumulator_width import ( - MinimizeAccumulatorWidth, -) - class InferConvInpGen(Transformation): """Convert Im2Col layers to ConvolutionInputGenerator layers.""" @@ -761,7 +757,6 @@ def apply(self, model): graph.node.remove(n) graph_modified = True if graph_modified: - model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) return (model, graph_modified) @@ -904,7 +899,6 @@ def apply(self, model): graph.node.remove(n) graph_modified = True if graph_modified: - model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) return (model, graph_modified) @@ -1057,7 +1051,6 @@ def apply(self, model): graph.node.remove(n) graph_modified = True if graph_modified: - model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) return (model, graph_modified) @@ -1135,7 +1128,7 @@ def apply(self, model): PE=pe, numSteps=thl_thres_shape[1], inputDataType=idt.name, - weightDataType=idt.name, # will be set by MinimizeAccumulatorWidth + weightDataType=idt.name, # can be tightened by MinimizeAccumulatorWidth outputDataType=odt.name, numInputVectors=list(thl_in_shape[:-1]), ActVal=actval, @@ -1148,7 +1141,6 @@ def apply(self, model): graph_modified = True if graph_modified: - model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) return (model, graph_modified) From c553deac9356c42a8eccb6ac810fafa3afda01e4 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 15:51:04 -0800 Subject: [PATCH 394/628] Update minimize_accumulator_width to account for runtime-writeable weights --- .../fpgadataflow/matrixvectoractivation.py | 118 +++++++++--------- .../fpgadataflow/vectorvectoractivation.py | 117 ++++++++--------- 2 files changed, 121 insertions(+), 114 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 6aa26af453..468e660117 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -589,69 +589,73 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): return ret def minimize_accumulator_width(self, model): - weights = model.get_initializer(self.onnx_node.input[1]) - # since in the calculation the values of the weight matrix are used, - # for the bipolar case they need to be converted to bipolar - if self.get_nodeattr("binaryXnorMode"): - weights = 2 * weights - 1 - if len(self.onnx_node.input) > 2: - thresholds = model.get_initializer(self.onnx_node.input[2]) 
- else: - thresholds = None - idt = self.get_input_datatype() - # calculate minimum and maximum values of accumulator - (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) - if thresholds is not None: - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) - # set threshold datatype (and accumulator datatype implicitly) - min_threshold = thresholds.min() - max_threshold = thresholds.max() - # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): - warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) - thresholds = np.clip(thresholds, clip_lower, clip_upper) - model.set_initializer(self.onnx_node.input[2], thresholds) + """Minimize the accumulator bit width according to the weight values, + input data types, and size of dot product""" + if not self.get_nodeattr("runtime_writeable_weights"): + weights = model.get_initializer(self.onnx_node.input[1]) + # since in the calculation the values of the weight matrix are used, + # for the bipolar case they need to be converted to bipolar + if self.get_nodeattr("binaryXnorMode"): + weights = 2 * weights - 1 + if len(self.onnx_node.input) > 2: + thresholds = model.get_initializer(self.onnx_node.input[2]) + else: + thresholds = None + idt = self.get_input_datatype() + # calculate minimum and maximum values of accumulator according to the + # weight values using the bounds derived in https://arxiv.org/abs/2301.13376 + (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) + if thresholds is not None: threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + # set threshold datatype (and accumulator datatype implicitly) min_threshold = thresholds.min() max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) + # clip threshold values + clip_upper = None + clip_lower = None + if max_threshold > acc_max + 1: + clip_upper = acc_max + 1 + if min_threshold < acc_min: + clip_lower = acc_min + if (clip_lower is not None) or (clip_upper is not None): + warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) + thresholds = np.clip(thresholds, clip_lower, clip_upper) + model.set_initializer(self.onnx_node.input[2], thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + min_threshold = thresholds.min() + max_threshold = thresholds.max() + # get range required by threshold values + tdt_min = min(acc_min, min_threshold) + tdt_max = max(acc_max, max_threshold) + if tdt_min < 0: + if abs(tdt_min) > tdt_max: + tdt = DataType.get_smallest_possible(tdt_min) + else: + tdt = DataType.get_smallest_possible(-tdt_max - 1) else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) + tdt = DataType.get_smallest_possible(tdt_max) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), + ) + self.set_nodeattr("accDataType", tdt.name) else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( - threshold_tensor - ).all(), "Thresholds in %s can't be expressed with type %s" % ( - self.onnx_node.name, - str(tdt), - ) - self.set_nodeattr("accDataType", 
tdt.name) - else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = DataType.get_smallest_possible(acc_min) + if acc_min < 0: + if abs(acc_min) > acc_max: + adt = DataType.get_smallest_possible(acc_min) + else: + adt = DataType.get_smallest_possible(-acc_max - 1) else: - adt = DataType.get_smallest_possible(-acc_max - 1) - else: - adt = DataType.get_smallest_possible(acc_max) - # ensure a datatype divisible by 8-bits in case this is the last node - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - self.set_nodeattr("accDataType", adt.name) - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + adt = DataType.get_smallest_possible(acc_max) + # ensure a datatype divisible by 8-bits in case this is the last node + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + self.set_nodeattr("accDataType", adt.name) + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 796225a712..f38abcc763 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -104,69 +104,72 @@ def get_nodeattr_types(self): return my_attrs def minimize_accumulator_width(self, model): - weights = model.get_initializer(self.onnx_node.input[1]) - k_h, k_w = self.get_nodeattr("Kernel") - fm = self.get_nodeattr("Channels") - # put weights into the shape expected by calculate_matvec_accumulator_range - weights = weights.reshape(fm, k_h * k_w).transpose() - if len(self.onnx_node.input) > 2: - thresholds = model.get_initializer(self.onnx_node.input[2]) - else: - thresholds = None - idt = self.get_input_datatype() - # calculate minimum and maximum values of accumulator - (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) - if thresholds is not None: - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) - # set threshold datatype (and accumulator datatype implicitly) - min_threshold = thresholds.min() - max_threshold = thresholds.max() - # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): - warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) - thresholds = np.clip(thresholds, clip_lower, clip_upper) - model.set_initializer(self.onnx_node.input[2], thresholds) + """Minimize the accumulator bit width according to the weight values, + input data types, and size of dot product""" + if not self.get_nodeattr("runtime_writeable_weights"): + weights = model.get_initializer(self.onnx_node.input[1]) + k_h, k_w = self.get_nodeattr("Kernel") + fm = self.get_nodeattr("Channels") + # put weights into the shape expected by calculate_matvec_accumulator_range + weights = weights.reshape(fm, k_h * k_w).transpose() + if len(self.onnx_node.input) > 2: + thresholds = model.get_initializer(self.onnx_node.input[2]) + else: + thresholds = None + idt = self.get_input_datatype() + # calculate minimum and maximum values of accumulator + 
(acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) + if thresholds is not None: threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + # set threshold datatype (and accumulator datatype implicitly) min_threshold = thresholds.min() max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) + # clip threshold values + clip_upper = None + clip_lower = None + if max_threshold > acc_max + 1: + clip_upper = acc_max + 1 + if min_threshold < acc_min: + clip_lower = acc_min + if (clip_lower is not None) or (clip_upper is not None): + warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) + thresholds = np.clip(thresholds, clip_lower, clip_upper) + model.set_initializer(self.onnx_node.input[2], thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + min_threshold = thresholds.min() + max_threshold = thresholds.max() + # get range required by threshold values + tdt_min = min(acc_min, min_threshold) + tdt_max = max(acc_max, max_threshold) + if tdt_min < 0: + if abs(tdt_min) > tdt_max: + tdt = DataType.get_smallest_possible(tdt_min) + else: + tdt = DataType.get_smallest_possible(-tdt_max - 1) else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) + tdt = DataType.get_smallest_possible(tdt_max) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), + ) + self.set_nodeattr("accDataType", tdt.name) else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( - threshold_tensor - ).all(), "Thresholds in %s can't be expressed with type %s" % ( - self.onnx_node.name, - str(tdt), - ) - self.set_nodeattr("accDataType", tdt.name) - else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = DataType.get_smallest_possible(acc_min) + if acc_min < 0: + if abs(acc_min) > acc_max: + adt = DataType.get_smallest_possible(acc_min) + else: + adt = DataType.get_smallest_possible(-acc_max - 1) else: - adt = DataType.get_smallest_possible(-acc_max - 1) - else: - adt = DataType.get_smallest_possible(acc_max) - # ensure a datatype divisible by 8-bits in case this is the last node - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - self.set_nodeattr("accDataType", adt.name) - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + adt = DataType.get_smallest_possible(acc_max) + # ensure a datatype divisible by 8-bits in case this is the last node + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + self.set_nodeattr("accDataType", adt.name) + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 7c92c0f4f5f5f51b429fc3a0a9e1d3cc3d75a8ee Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 15:52:11 -0800 Subject: [PATCH 395/628] Updating comment on accumulator range calculation --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index f38abcc763..232053b0fa 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -117,7 +117,8 @@ def minimize_accumulator_width(self, model): else: thresholds = None idt = self.get_input_datatype() - # calculate minimum and maximum values of accumulator + # calculate minimum and maximum values of accumulator according to the + # weight values using the bounds derived in https://arxiv.org/abs/2301.13376 (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) if thresholds is not None: threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) From 205528b689894396fb0f709b1850ce74abf5c48a Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 21 Feb 2023 15:59:45 -0800 Subject: [PATCH 396/628] Adding unit test --- tests/end2end/test_end2end_bnn_pynq.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 858363d6d3..a627606f45 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -89,6 +89,8 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) +from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth +from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth from finn.util.basic import get_finn_root from finn.util.gdrive import upload_to_end2end_dashboard from finn.util.pytorch import ToTensor @@ -511,11 +513,23 @@ def test_fold(self, topology, wbits, abits, QONNX_export): model = folding_fxn(model) model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold")) + def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): + prev_chkpt_name = get_checkpoint_name( + topology, wbits, abits, QONNX_export, "fold" + ) + model = load_test_checkpoint_or_skip(prev_chkpt_name) + model = model.transform(MinimizeAccumulatorWidth()) + model = model.transform(MinimizeWeightBitWidth()) + curr_chkpt_name = get_checkpoint_name( + topology, wbits, abits, QONNX_export, "minimize_bit_width" + ) + model.save(curr_chkpt_name) + @pytest.mark.slow @pytest.mark.vivado def test_cppsim(self, topology, wbits, abits, QONNX_export): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fold" + topology, wbits, abits, QONNX_export, "minimize_bit_width" ) model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(PrepareCppSim()) From e7490079d1b5a4b1939ea8cfe0844497ce132f5e Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 10:32:20 +0000 Subject: [PATCH 397/628] pre-commit cleanup --- src/finn/builder/build_dataflow_steps.py | 8 ++++++-- .../custom_op/fpgadataflow/matrixvectoractivation.py | 10 +++++++--- .../custom_op/fpgadataflow/vectorvectoractivation.py | 10 +++++++--- .../fpgadataflow/convert_to_hls_layers.py | 3 ++- .../fpgadataflow/minimize_weight_bit_width.py | 2 +- tests/end2end/test_end2end_bnn_pynq.py | 8 ++++++-- 6 files changed, 29 insertions(+), 12 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 16ac90296f..60290bbb98 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -89,6 +89,12 @@ from finn.transformation.fpgadataflow.insert_fifo import 
InsertFIFO from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild +from finn.transformation.fpgadataflow.minimize_accumulator_width import ( + MinimizeAccumulatorWidth, +) +from finn.transformation.fpgadataflow.minimize_weight_bit_width import ( + MinimizeWeightBitWidth, +) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -118,8 +124,6 @@ from finn.util.pyverilator import verilator_fifosim from finn.util.test import execute_parent -from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth -from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth def verify_step( model: ModelWrapper, diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 468e660117..01d8c3b42b 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -358,7 +358,7 @@ def lut_estimation(self): alpha = math.log(MW, 2) + W + A - 1 - int(idt.signed()) acc_bits = min( acc_datatype.bitwidth(), - np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1) + np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1), ) acc_luts = acc_bits # thresholds and threshold comparators @@ -618,10 +618,14 @@ def minimize_accumulator_width(self, model): if min_threshold < acc_min: clip_lower = acc_min if (clip_lower is not None) or (clip_upper is not None): - warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) + warnings.warn( + "Clipping some thresholds in %s" % self.onnx_node.name + ) thresholds = np.clip(thresholds, clip_lower, clip_upper) model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor( + thresholds + ) min_threshold = thresholds.min() max_threshold = thresholds.max() # get range required by threshold values diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 232053b0fa..fdf6a51c4c 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -133,10 +133,14 @@ def minimize_accumulator_width(self, model): if min_threshold < acc_min: clip_lower = acc_min if (clip_lower is not None) or (clip_upper is not None): - warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) + warnings.warn( + "Clipping some thresholds in %s" % self.onnx_node.name + ) thresholds = np.clip(thresholds, clip_lower, clip_upper) model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor( + thresholds + ) min_threshold = thresholds.min() max_threshold = thresholds.max() # get range required by threshold values @@ -1208,7 +1212,7 @@ def lut_estimation(self): alpha = math.log(k_h * k_w, 2) + W + A - 1 - int(idt.signed()) acc_bits = min( acc_datatype.bitwidth(), - np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1) + np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1), ) acc_luts = acc_bits # thresholds and threshold comparators 
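# Two small sketches related to the accumulator-width changes in the patches above;
# all numeric values are illustrative only and not taken from the diffs.
import math
import numpy as np
from qonnx.core.datatype import DataType
from qonnx.util.basic import roundup_to_integer_multiple

# (1) The closed-form accumulator bound used by lut_estimation
#     (https://arxiv.org/abs/2301.13376): ceil(alpha + log2(1 + 2^-alpha) + 1)
#     with alpha = log2(N) + W + A - 1 - signed(input).
def acc_bit_bound(num_inputs, weight_bits, act_bits, signed_input):
    alpha = math.log(num_inputs, 2) + weight_bits + act_bits - 1 - int(signed_input)
    return np.ceil(alpha + math.log(1 + pow(2, -alpha), 2) + 1)

# e.g. a 128-element dot product, 4-bit weights, 4-bit unsigned activations:
print(acc_bit_bound(128, 4, 4, False))  # -> 16.0; lut_estimation then takes min(accDataType width, bound)

# (2) How minimize_accumulator_width maps an accumulator range to a type when
#     there are no thresholds: smallest signed type, padded to a byte multiple.
acc_min, acc_max = -10000, 8000  # e.g. from calculate_matvec_accumulator_range(weights, idt)
if acc_min < 0:
    if abs(acc_min) > acc_max:
        adt = DataType.get_smallest_possible(acc_min)
    else:
        adt = DataType.get_smallest_possible(-acc_max - 1)
else:
    adt = DataType.get_smallest_possible(acc_max)
bw = roundup_to_integer_multiple(adt.bitwidth(), 8)
print(adt.name, "->", adt.name.replace(str(adt.bitwidth()), str(bw)))  # INT15 -> INT16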
diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index 3029e09d48..eaafebebf5 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -1128,7 +1128,8 @@ def apply(self, model): PE=pe, numSteps=thl_thres_shape[1], inputDataType=idt.name, - weightDataType=idt.name, # can be tightened by MinimizeAccumulatorWidth + # weightDataType can be tightened by MinimizeAccumulatorWidth + weightDataType=idt.name, outputDataType=odt.name, numInputVectors=list(thl_in_shape[:-1]), ActVal=actval, diff --git a/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py index 147f8281a7..32871cc44a 100644 --- a/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py +++ b/src/finn/transformation/fpgadataflow/minimize_weight_bit_width.py @@ -46,4 +46,4 @@ def apply(self, model): inst = getCustomOp(node) if hasattr(inst, "minimize_weight_bit_width"): inst.minimize_weight_bit_width(model) - return (model, False) \ No newline at end of file + return (model, False) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index a627606f45..13635f88b0 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -78,6 +78,12 @@ from finn.transformation.fpgadataflow.insert_dwc import InsertDWC from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver +from finn.transformation.fpgadataflow.minimize_accumulator_width import ( + MinimizeAccumulatorWidth, +) +from finn.transformation.fpgadataflow.minimize_weight_bit_width import ( + MinimizeWeightBitWidth, +) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode @@ -89,8 +95,6 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth -from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth from finn.util.basic import get_finn_root from finn.util.gdrive import upload_to_end2end_dashboard from finn.util.pytorch import ToTensor From 8e84036e468d3abd0f08d8c84d353c16062c1a0e Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 10:47:19 +0000 Subject: [PATCH 398/628] [Tests] Update export fct in end2end tests --- tests/end2end/test_end2end_bnn_pynq.py | 9 ++++----- tests/end2end/test_end2end_cybsec_mlp.py | 11 +++++------ tests/end2end/test_end2end_mobilenet_v1.py | 6 +++--- tests/fpgadataflow/test_fifosizing.py | 5 +++-- tests/fpgadataflow/test_split_large_fifos.py | 5 +++-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 858363d6d3..ccae0849fe 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -28,7 +28,6 @@ import pytest -import brevitas.onnx as bo import numpy as np # as of Feb'20 there is a bug that segfaults ONNX shape inference if we @@ -38,7 +37,7 @@ import subprocess import torch import warnings -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import 
export_finn_onnx, export_qonnx from collections import OrderedDict from dataset_loading import cifar, mnist from datetime import datetime @@ -323,13 +322,13 @@ def test_export(self, topology, wbits, abits, QONNX_export): (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "export") if QONNX_export: - BrevitasONNXManager.export(model, ishape, chkpt_name) + export_qonnx(model, torch.randn(ishape), chkpt_name) qonnx_cleanup(chkpt_name, out_file=chkpt_name) model = ModelWrapper(chkpt_name) model = model.transform(ConvertQONNXtoFINN()) model.save(chkpt_name) else: - bo.export_finn_onnx(model, ishape, chkpt_name) + export_finn_onnx(model, torch.randn(ishape), chkpt_name) nname = "%s_w%da%d" % (topology, wbits, abits) update_dashboard_data(topology, wbits, abits, "network", nname) dtstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S") @@ -369,7 +368,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): chkpt_preproc_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "preproc" ) - bo.export_finn_onnx(totensor_pyt, ishape, chkpt_preproc_name) + export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name) assert os.path.isfile(chkpt_preproc_name) # join preprocessing and core model pre_model = ModelWrapper(chkpt_preproc_name) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 290afc3084..86942415b9 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -30,7 +30,6 @@ import pytest -import brevitas.onnx as bo import json import numpy as np import os @@ -40,7 +39,7 @@ import torch.nn as nn import wget from brevitas.core.quant import QuantType -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantIdentity, QuantLinear, QuantReLU from brevitas.quant_tensor import QuantTensor from qonnx.core.datatype import DataType @@ -133,10 +132,10 @@ def test_end2end_cybsec_mlp_export(QONNX_export): ) if QONNX_export: - # With the BrevitasONNXManager we need to manually set + # With the onnx export from Brevitas we need to manually set # the FINN DataType at the input - BrevitasONNXManager.export( - model_for_export, input_shape, export_path=export_onnx_path + export_qonnx( + model_for_export, torch.randn(input_shape), export_path=export_onnx_path ) model = ModelWrapper(export_onnx_path) model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"]) @@ -146,7 +145,7 @@ def test_end2end_cybsec_mlp_export(QONNX_export): model = model.transform(ConvertQONNXtoFINN()) model.save(export_onnx_path) else: - bo.export_finn_onnx( + export_finn_onnx( model_for_export, export_path=export_onnx_path, input_t=input_qt ) assert os.path.isfile(export_onnx_path) diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py index 2f4df956ac..3a3c0fe237 100644 --- a/tests/end2end/test_end2end_mobilenet_v1.py +++ b/tests/end2end/test_end2end_mobilenet_v1.py @@ -27,11 +27,11 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
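# Minimal sketch of the Brevitas export API migration applied throughout these test
# updates: the new entry points from brevitas.export take an example input tensor
# instead of an input shape tuple. The layer and file names below are placeholders,
# not taken from the diffs.
import torch
from brevitas.export import export_finn_onnx, export_qonnx
from brevitas.nn import QuantReLU

model = QuantReLU(bit_width=4)
ishape = (1, 8)
# previously: bo.export_finn_onnx(model, ishape, path) / BrevitasONNXManager.export(model, ishape, path)
export_finn_onnx(model, torch.randn(ishape), "relu_finn_export.onnx")
export_qonnx(model, torch.randn(ishape), "relu_qonnx_export.onnx")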
import pytest -import brevitas.onnx as bo import numpy as np import os import time import torch +from brevitas.export import export_finn_onnx from PIL import Image from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -95,7 +95,7 @@ def test_end2end_mobilenet_export(): std = 0.226 ch = 3 preproc = NormalizePreProc(mean, std, ch) - bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx) + export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) # set input finn datatype to UINT8 preproc_model.set_tensor_datatype( @@ -111,7 +111,7 @@ def test_end2end_mobilenet_export(): # export mobilenet finn_onnx = build_dir + "/end2end_mobilenet_export.onnx" mobilenet = get_test_model_trained("mobilenet", 4, 4) - bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx) + export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) # calculate golden output with pytorch/brevitas and save as .npy # get single image as input and prepare image diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 9399fbe394..922232c2c2 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ b/tests/fpgadataflow/test_fifosizing.py @@ -31,7 +31,8 @@ import json import shutil -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +import torch +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -45,7 +46,7 @@ def fetch_test_model(topology, wbits=2, abits=2): tmp_output_dir = make_build_dir("build_fifosizing_%s_" % topology) (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) chkpt_name = tmp_output_dir + "/model.onnx" - BrevitasONNXManager.export(model, ishape, chkpt_name) + export_qonnx(model, torch.randn(ishape), chkpt_name) return tmp_output_dir diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py index 85b4a2bfa8..0437d006cf 100644 --- a/tests/fpgadataflow/test_split_large_fifos.py +++ b/tests/fpgadataflow/test_split_large_fifos.py @@ -31,7 +31,8 @@ import json import shutil -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +import torch +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -46,7 +47,7 @@ def fetch_test_model(topology, wbits=2, abits=2): tmp_output_dir = make_build_dir("build_fifosizing_%s_" % topology) (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) chkpt_name = tmp_output_dir + "/model.onnx" - BrevitasONNXManager.export(model, ishape, chkpt_name) + export_qonnx(model, torch.randn(ishape), chkpt_name) return tmp_output_dir From 285b9933410a0d2ef09315b69a33d3da5b11b893 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 10:54:57 +0000 Subject: [PATCH 399/628] [Tests] Update export fct in brevitas export tests --- .../brevitas/test_brevitas_avg_pool_export.py | 9 ++- tests/brevitas/test_brevitas_cnv.py | 7 +-- tests/brevitas/test_brevitas_debug.py | 6 +- tests/brevitas/test_brevitas_fc.py | 7 +-- tests/brevitas/test_brevitas_mobilenet.py | 7 +-- ...revitas_non_scaled_quanthardtanh_export.py | 7 +-- tests/brevitas/test_brevitas_qconv2d.py | 7 +-- tests/brevitas/test_brevitas_qlinear.py | 7 +-- .../brevitas/test_brevitas_relu_act_export.py | 55 +++++++------------ .../test_brevitas_scaled_qhardtanh_export.py | 7 +-- 
.../test_brevitas_validate_mobilenet.py | 5 +- 11 files changed, 52 insertions(+), 72 deletions(-) diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py index 669601ecb6..9c35910366 100644 --- a/tests/brevitas/test_brevitas_avg_pool_export.py +++ b/tests/brevitas/test_brevitas_avg_pool_export.py @@ -30,8 +30,7 @@ import numpy as np import os import torch -from brevitas.export import FINNManager -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantAvgPool2d from brevitas.quant_tensor import QuantTensor from qonnx.core.datatype import DataType @@ -97,14 +96,14 @@ def test_brevitas_avg_pool_export( # export if QONNX_export: - BrevitasONNXManager.export( + export_qonnx( quant_avgpool, export_path=export_onnx_path, input_t=input_quant_tensor, ) model = ModelWrapper(export_onnx_path) - # Statically set the additional inputs generated by the BrevitasONNXManager + # Statically set the additional inputs generated by the Brevitas ONNX export model.graph.input.remove(model.graph.input[3]) model.graph.input.remove(model.graph.input[2]) model.graph.input.remove(model.graph.input[1]) @@ -118,7 +117,7 @@ def test_brevitas_avg_pool_export( model = model.transform(ConvertQONNXtoFINN()) model.save(export_onnx_path) else: - FINNManager.export( + export_finn_onnx( quant_avgpool, export_path=export_onnx_path, input_t=input_quant_tensor ) model = ModelWrapper(export_onnx_path) diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py index 62aab2e3c2..1a96815105 100644 --- a/tests/brevitas/test_brevitas_cnv.py +++ b/tests/brevitas/test_brevitas_cnv.py @@ -30,11 +30,10 @@ import pytest -import brevitas.onnx as bo import numpy as np import os import torch -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import GiveUniqueNodeNames, RemoveStaticGraphInputs @@ -58,13 +57,13 @@ def test_brevitas_cnv_export_exec(wbits, abits, QONNX_export): cnv = get_test_model_trained("CNV", wbits, abits) ishape = (1, 3, 32, 32) if QONNX_export: - BrevitasONNXManager.export(cnv, ishape, export_onnx_path) + export_qonnx(cnv, torch.randn(ishape), export_onnx_path) qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(ConvertQONNXtoFINN()) model.save(export_onnx_path) else: - bo.export_finn_onnx(cnv, ishape, export_onnx_path) + export_finn_onnx(cnv, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferShapes()) diff --git a/tests/brevitas/test_brevitas_debug.py b/tests/brevitas/test_brevitas_debug.py index 181d610fff..547c026e21 100644 --- a/tests/brevitas/test_brevitas_debug.py +++ b/tests/brevitas/test_brevitas_debug.py @@ -34,7 +34,7 @@ import onnx.numpy_helper as nph import os import torch -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -58,7 +58,7 @@ def test_brevitas_debug(QONNX_export, QONNX_FINN_conversion): ishape = (1, 1, 28, 28) 
if QONNX_export: dbg_hook = bo.enable_debug(fc, proxy_level=True) - BrevitasONNXManager.export(fc, ishape, finn_onnx) + export_qonnx(fc, torch.randn(ishape), finn_onnx) # DebugMarkers have the brevitas.onnx domain, so that needs adjusting model = ModelWrapper(finn_onnx) dbg_nodes = model.get_nodes_by_op_type("DebugMarker") @@ -72,7 +72,7 @@ def test_brevitas_debug(QONNX_export, QONNX_FINN_conversion): model.save(finn_onnx) else: dbg_hook = bo.enable_debug(fc) - bo.export_finn_onnx(fc, ishape, finn_onnx) + export_finn_onnx(fc, torch.randn(ishape), finn_onnx) model = ModelWrapper(finn_onnx) # DebugMarkers have the brevitas.onnx domain, so that needs adjusting # ToDo: We should probably have transformation pass, which does this diff --git a/tests/brevitas/test_brevitas_fc.py b/tests/brevitas/test_brevitas_fc.py index 211fdb629b..3aaa96f9a5 100644 --- a/tests/brevitas/test_brevitas_fc.py +++ b/tests/brevitas/test_brevitas_fc.py @@ -28,12 +28,11 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx import onnx.numpy_helper as nph import torch -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -68,13 +67,13 @@ def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits, QONNX_export): fc = get_test_model_trained(size, wbits, abits) ishape = (1, 1, 28, 28) if QONNX_export: - BrevitasONNXManager.export(fc, ishape, finn_onnx) + export_qonnx(fc, torch.randn(ishape), finn_onnx) qonnx_cleanup(finn_onnx, out_file=finn_onnx) model = ModelWrapper(finn_onnx) model = model.transform(ConvertQONNXtoFINN()) model.save(finn_onnx) else: - bo.export_finn_onnx(fc, ishape, finn_onnx) + export_finn_onnx(fc, torch.randn(ishape), finn_onnx) model = ModelWrapper(finn_onnx) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index b1475b6f4e..c840524172 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -28,9 +28,9 @@ import pytest -import brevitas.onnx as bo import numpy as np import torch +from brevitas.export import export_finn_onnx from PIL import Image from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -54,7 +54,6 @@ @pytest.mark.brevitas_export -@pytest.mark.xfail def test_brevitas_mobilenet(): # get single image as input and prepare image img = Image.open(get_finn_root() + "/tests/brevitas/king_charles.jpg") @@ -76,7 +75,7 @@ def test_brevitas_mobilenet(): std = 0.226 ch = 3 preproc = NormalizePreProc(mean, std, ch) - bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx) + export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) # set input finn datatype to UINT8 preproc_model.set_tensor_datatype( @@ -89,7 +88,7 @@ def test_brevitas_mobilenet(): finn_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_exported.onnx" mobilenet = get_test_model_trained("mobilenet", 4, 4) - bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx) + export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) # do forward pass in PyTorch/Brevitas input_tensor = preproc.forward(img_torch) diff --git a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py 
b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py index 5d70acb102..ad6a7e53de 100644 --- a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py +++ b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py @@ -28,7 +28,6 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx # noqa import os @@ -36,7 +35,7 @@ from brevitas.core.quant import QuantType from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantHardTanh from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -78,13 +77,13 @@ def get_quant_type(bit_width): ) if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(b_act, ishape, m_path) + export_qonnx(b_act, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_act, ishape, export_onnx_path) + export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( diff --git a/tests/brevitas/test_brevitas_qconv2d.py b/tests/brevitas/test_brevitas_qconv2d.py index 214c55e5fd..faeb3ff48e 100644 --- a/tests/brevitas/test_brevitas_qconv2d.py +++ b/tests/brevitas/test_brevitas_qconv2d.py @@ -28,7 +28,6 @@ import pytest -import brevitas.onnx as bo import numpy as np import os import torch @@ -36,7 +35,7 @@ from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType from brevitas.core.stats import StatsOp -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantConv2d from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -96,13 +95,13 @@ def test_brevitas_QConv2d(dw, bias, in_channels, QONNX_export): b_conv.eval() if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(b_conv, ishape, m_path) + export_qonnx(b_conv, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_conv, ishape, export_onnx_path) + export_finn_onnx(b_conv, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=-1.0, high=1.0, size=ishape).astype(np.float32) diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py index bcd75a5455..1ad52fb5df 100644 --- a/tests/brevitas/test_brevitas_qlinear.py +++ b/tests/brevitas/test_brevitas_qlinear.py @@ -28,12 +28,11 @@ import pytest -import brevitas.onnx as bo import numpy as np import os import torch from brevitas.core.quant import QuantType -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantLinear from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -75,13 +74,13 @@ def test_brevitas_qlinear( b_linear.eval() if QONNX_export: m_path = export_onnx_path - 
BrevitasONNXManager.export(b_linear, i_shape, m_path) + export_qonnx(b_linear, torch.randn(i_shape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_linear, i_shape, export_onnx_path) + export_finn_onnx(b_linear, torch.randn(i_shape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) inp_tensor = gen_finn_dt_tensor(i_dtype, i_shape) diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index 3dc46ec31e..1900763bdd 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -28,7 +28,6 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx # noqa import os @@ -36,7 +35,7 @@ from brevitas.core.quant import QuantType from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantReLU from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -51,18 +50,16 @@ @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) -@pytest.mark.parametrize("max_val", [1.0, 1.5, 1 - 2 ** (-7)]) @pytest.mark.parametrize( "scaling_impl_type", [ScalingImplType.CONST, ScalingImplType.PARAMETER] ) @pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_act_export_relu(abits, max_val, scaling_impl_type, QONNX_export): - min_val = -1.0 +def test_brevitas_act_export_relu(abits, scaling_impl_type, QONNX_export): ishape = (1, 15) b_act = QuantReLU( bit_width=abits, - max_val=max_val, + max_val=6.0, scaling_impl_type=scaling_impl_type, restrict_scaling_type=RestrictValueType.LOG_FP, quant_type=QuantType.INT, @@ -79,18 +76,16 @@ def test_brevitas_act_export_relu(abits, max_val, scaling_impl_type, QONNX_expor b_act.load_state_dict(checkpoint) if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(b_act, ishape, m_path) + export_qonnx(b_act, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_act, ishape, export_onnx_path) + export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( - np.float32 - ) + inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] @@ -98,7 +93,7 @@ def test_brevitas_act_export_relu(abits, max_val, scaling_impl_type, QONNX_expor b_act.eval() expected = b_act.forward(inp_tensor).detach().numpy() if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits, max_val, scaling_impl_type) + print(abits, scaling_impl_type) print("scale: ", b_act.quant_act_scale().type(torch.FloatTensor).detach()) if abits < 5: print( @@ -115,27 +110,25 @@ def test_brevitas_act_export_relu(abits, max_val, scaling_impl_type, QONNX_expor @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) -@pytest.mark.parametrize("max_val", [1.0, 1.5, 
1 - 2 ** (-7)]) -@pytest.mark.parametrize("scaling_per_channel", [True, False]) +@pytest.mark.parametrize("scaling_per_output_channel", [True, False]) @pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_relu_imagenet( - abits, max_val, scaling_per_channel, QONNX_export + abits, scaling_per_output_channel, QONNX_export ): out_channels = 32 ishape = (1, out_channels, 1, 1) - min_val = -1.0 b_act = QuantReLU( bit_width=abits, quant_type=QuantType.INT, scaling_impl_type=ScalingImplType.PARAMETER, - scaling_per_channel=scaling_per_channel, + scaling_per_output_channel=scaling_per_output_channel, restrict_scaling_type=RestrictValueType.LOG_FP, scaling_min_val=2e-16, max_val=6.0, return_quant_tensor=False, per_channel_broadcastable_shape=(1, out_channels, 1, 1), ) - if scaling_per_channel is True: + if scaling_per_output_channel is True: rand_tensor = (2) * torch.rand((1, out_channels, 1, 1)) else: rand_tensor = torch.tensor(1.2398) @@ -148,18 +141,16 @@ def test_brevitas_act_export_relu_imagenet( b_act.load_state_dict(checkpoint) if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(b_act, ishape, m_path) + export_qonnx(b_act, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_act, ishape, export_onnx_path) + export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( - np.float32 - ) + inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] @@ -167,7 +158,7 @@ def test_brevitas_act_export_relu_imagenet( b_act.eval() expected = b_act.forward(inp_tensor).detach().numpy() if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits, max_val) + print(abits) print("scale: ", b_act.quant_act_scale().type(torch.FloatTensor).detach()) if abits < 5: print( @@ -190,7 +181,7 @@ def __init__(self, abits): bit_width=abits, quant_type=QuantType.INT, scaling_impl_type=ScalingImplType.PARAMETER, - scaling_per_channel=True, + scaling_per_output_channel=True, restrict_scaling_type=RestrictValueType.LOG_FP, scaling_min_val=2e-16, max_val=6.0, @@ -208,15 +199,13 @@ def forward(self, x): @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) -@pytest.mark.parametrize("max_val", [1.0, 1.5, 1 - 2 ** (-7)]) -@pytest.mark.parametrize("scaling_per_channel", [True]) +@pytest.mark.parametrize("scaling_per_output_channel", [True]) @pytest.mark.parametrize("QONNX_export", [True]) def test_brevitas_act_export_relu_forking( - abits, max_val, scaling_per_channel, QONNX_export + abits, scaling_per_output_channel, QONNX_export ): out_channels = 32 ishape = (1, out_channels, 1, 1) - min_val = -1.0 model_pyt = PyTorchTestModel(abits) rand_tensor = (2) * torch.rand((1, out_channels, 1, 1)) @@ -229,7 +218,7 @@ def test_brevitas_act_export_relu_forking( if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(model_pyt, ishape, m_path) + export_qonnx(model_pyt, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) @@ -237,9 +226,7 @@ def test_brevitas_act_export_relu_forking( model = 
ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( - np.float32 - ) + inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] @@ -247,7 +234,7 @@ def test_brevitas_act_export_relu_forking( model_pyt.eval() expected = model_pyt.forward(inp_tensor).detach().numpy() if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits, max_val) + print(abits) print("scale: ", model_pyt.quant_act_scale().type(torch.FloatTensor).detach()) if abits < 5: print( diff --git a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py index 403d406105..d35cc8d2dd 100644 --- a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py +++ b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py @@ -28,7 +28,6 @@ import pytest -import brevitas.onnx as bo import numpy as np import onnx # noqa import os @@ -36,7 +35,7 @@ from brevitas.core.quant import QuantType from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType -from brevitas.export.onnx.generic.manager import BrevitasONNXManager +from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantHardTanh from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -91,13 +90,13 @@ def get_quant_type(bit_width): b_act.load_state_dict(checkpoint) if QONNX_export: m_path = export_onnx_path - BrevitasONNXManager.export(b_act, ishape, m_path) + export_qonnx(b_act, torch.randn(ishape), m_path) qonnx_cleanup(m_path, out_file=m_path) model = ModelWrapper(m_path) model = model.transform(ConvertQONNXtoFINN()) model.save(m_path) else: - bo.export_finn_onnx(b_act, ishape, export_onnx_path) + export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( diff --git a/tests/brevitas/test_brevitas_validate_mobilenet.py b/tests/brevitas/test_brevitas_validate_mobilenet.py index 55915838e8..20e8ddad50 100644 --- a/tests/brevitas/test_brevitas_validate_mobilenet.py +++ b/tests/brevitas/test_brevitas_validate_mobilenet.py @@ -35,6 +35,7 @@ import torch import torchvision.datasets as datasets import torchvision.transforms as transforms +from brevitas.export import export_finn_onnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import ( @@ -113,7 +114,7 @@ def test_brevitas_compare_exported_mobilenet(): # export preprocessing preproc_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_preproc.onnx" preproc = NormalizePreProc(mean, std, ch) - bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx) + export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) preproc_model = preproc_model.transform(InferShapes()) preproc_model = preproc_model.transform(GiveUniqueNodeNames()) @@ -124,7 +125,7 @@ def test_brevitas_compare_exported_mobilenet(): mobilenet = get_test_model_trained("mobilenet", 4, 4) if debug_mode: dbg_hook = bo.enable_debug(mobilenet) - bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx) + 
export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) model = ModelWrapper(finn_onnx) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) From bfb966841221a427dae09f3252cdb5de55382335 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 11:45:52 +0000 Subject: [PATCH 400/628] [Deps] Update finn-experimental --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 6068d9fc4c..53d199d4d4 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -28,7 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" -FINN_EXP_COMMIT="8e6cccda16a5adeaac8451f9236e2a24766e0a27" +FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" From 65822357a7dba4f917c852d5f08bdebc7dd22e9d Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 12:12:28 +0000 Subject: [PATCH 401/628] [Deps] Update to qonnx v0.2.0 --- fetch-repos.sh | 2 +- src/finn/custom_op/fpgadataflow/addstreams_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/channelwise_op_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/checksum.py | 4 ++-- src/finn/custom_op/fpgadataflow/concat.py | 4 ++-- src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py | 4 ++-- .../custom_op/fpgadataflow/convolutioninputgenerator1d.py | 4 ++-- .../custom_op/fpgadataflow/convolutioninputgenerator_rtl.py | 4 ++-- src/finn/custom_op/fpgadataflow/downsampler.py | 4 ++-- src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/eltwise.py | 4 ++-- src/finn/custom_op/fpgadataflow/fmpadding_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/fmpadding_rtl.py | 4 ++-- src/finn/custom_op/fpgadataflow/globalaccpool_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/hlscustomop.py | 4 ++-- src/finn/custom_op/fpgadataflow/iodma.py | 4 ++-- src/finn/custom_op/fpgadataflow/labelselect_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/lookup.py | 4 ++-- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 4 ++-- src/finn/custom_op/fpgadataflow/streamingfifo.py | 4 ++-- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 4 ++-- src/finn/custom_op/fpgadataflow/tlastmarker.py | 4 ++-- src/finn/custom_op/fpgadataflow/upsampler.py | 4 ++-- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 4 ++-- 24 files changed, 47 insertions(+), 47 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index f13037733e..9738ea153f 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="dd35a8ff49d7225a07ffceeebe25a6361df48349" +QONNX_COMMIT="d9ac34c638ccbdcd3b3f5cd236fe76d611b08f6a" FINN_EXP_COMMIT="9cbd2787b5160e2b44e0e8164a0df1457dbd5366" BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index cd0af6b3ab..af106d9c06 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -38,8 +38,8 @@ class AddStreams_Batch(HLSCustomOp): """Class that corresponds to finn-hlslib AddStreams_Batch function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = super().get_nodeattr_types() diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 46adca680d..cde66f1ae2 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -85,8 +85,8 @@ class ChannelwiseOp_Batch(HLSCustomOp): including Add, Mul and multi-thresholding. """ - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index c927c07df2..99646274fa 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -38,8 +38,8 @@ class CheckSum(HLSCustomOp): """Class that corresponds to custom_hls checksum function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/concat.py b/src/finn/custom_op/fpgadataflow/concat.py index 4437bcd198..8b655b570d 100644 --- a/src/finn/custom_op/fpgadataflow/concat.py +++ b/src/finn/custom_op/fpgadataflow/concat.py @@ -39,8 +39,8 @@ class StreamingConcat(HLSCustomOp): """Streaming concatenation node with dynamically generated HLS. Only supports concatenating along the last axis.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 1566445999..6cc9208bb8 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -54,8 +54,8 @@ class ConvolutionInputGenerator(HLSCustomOp): attributes (e.g. 
depthwise or not, whether k % stride is 0) a different variant will be picked for the actual HLS implementation.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index f1c84662cc..6e792ca585 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -59,8 +59,8 @@ class ConvolutionInputGenerator1D(HLSCustomOp): attributes (e.g. depthwise or not, whether dilation is 0) a different variant will be picked for the actual HLS implementation.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 1afd23d3a1..30861f0135 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -60,8 +60,8 @@ class ConvolutionInputGenerator_rtl(HLSCustomOp): (sliding window) function variants. Generates an RTL ConvolutionInputGenerator implementation based on (System-)Verilog templates, defined in finn-rtllib/swg.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index b7efaff440..255606ee7f 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -39,8 +39,8 @@ class DownSampler(HLSCustomOp): """Corresponds to finn-hlslib ConvolutionInputGenerator_*_kernel1 function. 
Basically performs a down sampling of the image removing rows and columns.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py index 93cde15ca7..312f5e7e4a 100644 --- a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py @@ -38,8 +38,8 @@ class DuplicateStreams_Batch(HLSCustomOp): """Class that corresponds to finn-hlslib function of the same name.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index 68ed6546c7..c96f12f06b 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -38,8 +38,8 @@ class StreamingEltwise(HLSCustomOp): """Class that corresponds to finn-hlslib StreamingEltwise function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index dfc55d283f..bdb5775c3e 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -39,8 +39,8 @@ class FMPadding_Batch(HLSCustomOp): """Corresponds to finn-hlslib FMPadding_Batch function. Pads input image by given amount.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index 5650d21885..9c27503224 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -49,8 +49,8 @@ class FMPadding_rtl(HLSCustomOp): Supports adjusting the padding amount and spatial feature sizes at runtime.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index e7fa5bc004..220856922c 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -38,8 +38,8 @@ class GlobalAccPool_Batch(HLSCustomOp): """Class that corresponds to finn-hlslib AccPool_Batch function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index d1326607aa..d5d0c9ea6e 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -59,8 +59,8 @@ class HLSCustomOp(CustomOp): custom node should have. 
Some as abstract methods, these have to be filled when writing a new fpgadataflow custom op node.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) self.code_gen_dict = {} diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 65683079fc..8a756b630d 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -75,8 +75,8 @@ class IODMA(HLSCustomOp): """Class that corresponds to finn-hlslib DMA function(s).""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index 03f89bd7ec..492cd01073 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -39,8 +39,8 @@ class LabelSelect_Batch(HLSCustomOp): """Class that corresponds to finn-hlslib LabelSelect_Batch function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) odt_name = self.get_nodeattr("outputDataType") if odt_name == "": # If not provided compute min size diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index fd3e2b5b1c..ed560ac962 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -44,8 +44,8 @@ class Lookup(HLSCustomOp): "Streaming elementwise HLS lookup, mapping indices to values." 
- def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 72128fda4c..27c44e3e65 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -60,8 +60,8 @@ class MatrixVectorActivation(HLSCustomOp): """Class that corresponds to finn-hls Matrix_Vector_Activate(_Stream)_Batch function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index 522305327f..34b1940fa1 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -41,8 +41,8 @@ class StreamingFIFO(HLSCustomOp): - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) self.strm_fifo_wrapper = templates.strm_fifo_wrapper def get_nodeattr_types(self): diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index d9745acf63..ce8c31ee9a 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -57,8 +57,8 @@ class Thresholding_Batch(HLSCustomOp): """Class that corresponds to finn-hls Thresholding_Batch function.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): diff --git a/src/finn/custom_op/fpgadataflow/tlastmarker.py b/src/finn/custom_op/fpgadataflow/tlastmarker.py index 1bd32442a1..895a2eedab 100644 --- a/src/finn/custom_op/fpgadataflow/tlastmarker.py +++ b/src/finn/custom_op/fpgadataflow/tlastmarker.py @@ -37,8 +37,8 @@ class TLastMarker(HLSCustomOp): (needed by the FINN PYNQ shell) or at the beginning to remove the end-of-burst from DMA read.""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index a018fd35aa..b653b9386e 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -41,8 +41,8 @@ class UpsampleNearestNeighbour_Batch(HLSCustomOp): The layer expects square feature maps for the in and output. 
""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index d5e29ca22a..531dc75a5f 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -50,8 +50,8 @@ class VectorVectorActivation(HLSCustomOp): """Class that corresponds to finn-hlslib Vector_Vector_Activate_Batch function""" - def __init__(self, onnx_node): - super().__init__(onnx_node) + def __init__(self, onnx_node, **kwargs): + super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): my_attrs = { From 86ec96dce961cb0d15d2e24c6922a390271c4e57 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 15:39:04 +0000 Subject: [PATCH 402/628] [Tests] Update res estimate test to match updated estimate fcts --- tests/fpgadataflow/test_fpgadataflow_res_estimate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py index b3cf7b4229..2ff7dd8b32 100644 --- a/tests/fpgadataflow/test_fpgadataflow_res_estimate.py +++ b/tests/fpgadataflow/test_fpgadataflow_res_estimate.py @@ -101,7 +101,7 @@ def test_res_estimate(): "MatrixVectorActivation_0": { "BRAM_18K": 0, "BRAM_efficiency": 1, - "LUT": 357, + "LUT": 317, "DSP": 0, "URAM_efficiency": 1, "URAM": 0, @@ -119,7 +119,7 @@ def test_res_estimate(): { "BRAM_18K": 0, "BRAM_efficiency": 1, - "LUT": 352, + "LUT": 313, "DSP": 1, "URAM": 0, "URAM_efficiency": 1, @@ -127,7 +127,7 @@ def test_res_estimate(): { "BRAM_18K": 0, "BRAM_efficiency": 1, - "LUT": 357, + "LUT": 317, "DSP": 0, "URAM": 0, "URAM_efficiency": 1, From 9249b668dd38cc8c59c10f6ccc022b79617a6390 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 22 Feb 2023 15:41:01 +0000 Subject: [PATCH 403/628] remove unused dependency Signed-off-by: Fionn O'Donohoe --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 83aad07d72..6703c83d97 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ bitstring==3.1.7 clize==4.1.1 dataclasses-json==0.5.7 -docrep==0.2.7 gspread==3.6.0 numpy==1.22.0 onnx==1.13.0 From fd12de646969c58ceff2652fa72406d73b04a26a Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 22 Feb 2023 16:51:13 +0000 Subject: [PATCH 404/628] [Builder] Update lookup for builder steps --- src/finn/builder/build_dataflow_steps.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index 857a1f6122..ba5a23f411 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -851,6 +851,7 @@ def step_deployment_package(model: ModelWrapper, cfg: DataflowBuildConfig): "step_create_dataflow_partition": step_create_dataflow_partition, "step_target_fps_parallelization": step_target_fps_parallelization, "step_apply_folding_config": step_apply_folding_config, + "step_minimize_bit_width": step_minimize_bit_width, "step_generate_estimate_reports": step_generate_estimate_reports, "step_hls_codegen": step_hls_codegen, "step_hls_ipgen": step_hls_ipgen, From 74ab5a3030f06163b576edc1c67ffef2b52e9073 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 23 Feb 2023 15:00:49 +0000 Subject: 
[PATCH 405/628] [Tests] Fix to validate top1 in bnn_pynq test --- tests/end2end/test_end2end_bnn_pynq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index ccae0849fe..831c1a2f73 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -268,7 +268,7 @@ def measure_top1_accuracy(model_chkpt, dataset, parent_chkpt=None): raise Exception("Unrecognized dataset") # move from dataset_loader layout to ONNX layout: NHWC -> NCHW testx = testx.transpose(0, 3, 1, 2) - model = ModelWrapper(model_chkpt) + model = load_test_checkpoint_or_skip(model_chkpt) iname = model.graph.input[0].name oname = model.graph.output[0].name if parent_chkpt is None: From 3ebd6ee1a393769104dfa72d07543352a6bc4d6c Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 24 Feb 2023 15:50:54 +0000 Subject: [PATCH 406/628] Update finn-hlslib commit --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 86a2176c75..1e01a058ff 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -32,7 +32,7 @@ FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" -HLSLIB_COMMIT="4ddfa00b07275a3f1de1c13409e6acb489115fe2" +HLSLIB_COMMIT="c17aa478ae574971d115afa9fa4d9c215857d1ac" OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" From 0c239a5f9086601171d550819831a42bcbb74d97 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 27 Feb 2023 12:02:55 +0000 Subject: [PATCH 407/628] [Tests] Re-enable decoupled tests for VVAU --- tests/fpgadataflow/test_fpgadataflow_vvau.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index bcbf4fb721..5ffbf81354 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -187,7 +187,7 @@ def prepare_inputs(input_tensor): # Number of input and output channels @pytest.mark.parametrize("channels", [6]) # memory mode -@pytest.mark.parametrize("mem_mode", ["const"]) +@pytest.mark.parametrize("mem_mode", ["const", "decoupled"]) # execution mode @pytest.mark.parametrize("exec_mode", ["cppsim", "rtlsim"]) @pytest.mark.fpgadataflow From 746d44d4d87649c44d52492751d6963c85a875b3 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 27 Feb 2023 16:17:09 +0000 Subject: [PATCH 408/628] [CustomOp] Add assertion when pe or simd setting not valid --- .../fpgadataflow/vectorvectoractivation.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 69275cfc5e..5d996e10d8 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -253,9 +253,6 @@ def get_output_datatype(self, ind=0): def get_instream_width(self, ind=0): i_bits = self.get_input_datatype().bitwidth() simd = self.get_nodeattr("SIMD") - # if simd > 1: - # pe = self.get_nodeattr("Channels") - # else: pe = self.get_nodeattr("PE") in_width = i_bits * simd * pe return in_width @@ -270,11 +267,13 @@ def 
get_folded_input_shape(self, ind=0): dim_h, dim_w = self.get_nodeattr("Dim") ch = self.get_nodeattr("Channels") simd = self.get_nodeattr("SIMD") - # if simd > 1: - # pe = self.get_nodeattr("Channels") - # else: pe = self.get_nodeattr("PE") - sf = k_h * k_w // simd + kernel_2 = k_h * k_w + assert ( + kernel_2 % simd == 0 + ), "Requirement kernel (k_h * k_w) divisable by SIMD is violated." + sf = kernel_2 // simd + assert ch % pe == 0, "Requirement Channels divisable by PE is violated." nf = ch // pe if ind == 0: From 9ccaed38b46d3e220d237a3394c78b62166d8e1e Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 27 Feb 2023 16:18:50 +0000 Subject: [PATCH 409/628] [Tests] Extend parameters for vvau testing --- tests/fpgadataflow/test_fpgadataflow_vvau.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index 5ffbf81354..be1ada59a1 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -180,12 +180,12 @@ def prepare_inputs(input_tensor): @pytest.mark.parametrize("simd", [1, 9]) # Input image shape @pytest.mark.parametrize("dim_h", [10]) -@pytest.mark.parametrize("dim_w", [10]) +@pytest.mark.parametrize("dim_w", [10, 1]) # Kernel shape @pytest.mark.parametrize("k_h", [3]) -@pytest.mark.parametrize("k_w", [3]) +@pytest.mark.parametrize("k_w", [3, 1]) # Number of input and output channels -@pytest.mark.parametrize("channels", [6]) +@pytest.mark.parametrize("channels", [3, 6]) # memory mode @pytest.mark.parametrize("mem_mode", ["const", "decoupled"]) # execution mode @@ -196,15 +196,15 @@ def prepare_inputs(input_tensor): def test_fpgadataflow_vvau( idt, wdt, act, pe, simd, dim_h, dim_w, k_h, k_w, channels, mem_mode, exec_mode ): - if pe == "channels": - pe = channels - if dim_w == 1 and k_w != 1: pytest.skip("1D image requires 1D kernel, skipping.") if channels % pe != 0: pytest.skip("Requirement Channels divisable by PE is violated.") + if (k_h * k_w) % simd != 0: + pytest.skip("Requirement kernel (k_h * k_w) divisable by SIMD is violated.") + # Generate weights in expected shape for ONNX and HLS node W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w)) # shape: [channels, 1, k, k] W_onnx = _infer_sparse_weight_tensor( From b8386edf55e631eb4416a83254c92c8f9c2b3174 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 15:07:31 -0800 Subject: [PATCH 410/628] Update minimize_accumulator_width for MVAU If not runtime-writeable weights, then we can still minimize the accumulator bit width according to the data types. 
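The patch below (and its VVAU counterpart in the next commit) distinguishes two cases: with runtime-writeable weights the accumulator range has to cover the worst case that the weight and input data types allow, while with fixed weights it can be tightened using the actual weight values. A rough standalone illustration of the worst-case branch, using plain Python math instead of the qonnx DataType helpers and assuming a signed accumulator; the 3x3 / INT8 / INT4 numbers are made up for the example:

import math

def worst_case_accumulator_range(num_inputs, idt_range, wdt_range):
    # extreme value of a single input*weight product, scaled by the
    # dot-product length (every element pinned to its worst case)
    prods = [i * w for i in idt_range for w in wdt_range]
    return num_inputs * min(prods), num_inputs * max(prods)

def smallest_signed_bitwidth(acc_min, acc_max):
    # bits of a two's-complement type covering [acc_min, acc_max]
    mag = max(abs(acc_min), acc_max + 1)
    return math.ceil(math.log2(mag)) + 1

# hypothetical example: 3x3 kernel (9 inputs), INT8 inputs, INT4 weights
acc_min, acc_max = worst_case_accumulator_range(9, (-128, 127), (-8, 7))
print(acc_min, acc_max, smallest_signed_bitwidth(acc_min, acc_max))
# -> -9144 9216 15

For the same numbers, the closed-form bound from https://arxiv.org/abs/2301.13376, which the test added later in this series uses to cross-check the transform, gives alpha = log2(9) + 8 + 4 - 2 ≈ 13.17 and ceil(alpha + log2(1 + 2**-alpha) + 1) = 15, matching the exhaustive calculation above.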
--- .../fpgadataflow/matrixvectoractivation.py | 142 ++++++++++-------- 1 file changed, 79 insertions(+), 63 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 40f625093b..75aa587433 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -591,75 +591,91 @@ def get_hls_compatible_weight_tensor(self, orig_weight_matrix): def minimize_accumulator_width(self, model): """Minimize the accumulator bit width according to the weight values, input data types, and size of dot product""" - if not self.get_nodeattr("runtime_writeable_weights"): - weights = model.get_initializer(self.onnx_node.input[1]) - # since in the calculation the values of the weight matrix are used, - # for the bipolar case they need to be converted to bipolar - if self.get_nodeattr("binaryXnorMode"): - weights = 2 * weights - 1 - if len(self.onnx_node.input) > 2: - thresholds = model.get_initializer(self.onnx_node.input[2]) - else: - thresholds = None - idt = self.get_input_datatype() - # calculate minimum and maximum values of accumulator according to the - # weight values using the bounds derived in https://arxiv.org/abs/2301.13376 + weights = model.get_initializer(self.onnx_node.input[1]) + # since in the calculation the values of the weight matrix are used, + # for the bipolar case they need to be converted to bipolar + if self.get_nodeattr("binaryXnorMode"): + weights = 2 * weights - 1 + if len(self.onnx_node.input) > 2: + thresholds = model.get_initializer(self.onnx_node.input[2]) + else: + thresholds = None + idt = self.get_input_datatype() + # if runtime-writeable weights, then the values of the weights can + # change and we need to use the worst-case values from the datatypes + if self.get_nodeattr("runtime_writeable_weights"): + wdt = self.get_weight_datatype() + lower_worst = wdt.min() * np.ones_like(weights) + lower_range = calculate_matvec_accumulator_range(lower_worst, idt) + upper_worst = wdt.min() * np.ones_like(weights) + upper_range = calculate_matvec_accumulator_range(upper_worst, idt) + acc_min = min(min(lower_range), min(upper_range)) + acc_max = max(max(upper_range), max(upper_range)) + thresholds = None # range of thresholds are also runtime-writeable + # if not runtime-writeable weights, then we can calculate the min + # and max values of the accumulation range using knowledge of the + # weights and input data types since they are fixed + else: (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) - if thresholds is not None: - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) - # set threshold datatype (and accumulator datatype implicitly) + # if the thresholds can be used to determine range, then adjust the range + # according to the known values of the thresholds + if thresholds is not None: + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + # set threshold datatype (and accumulator datatype implicitly) + min_threshold = thresholds.min() + max_threshold = thresholds.max() + # clip threshold values + clip_upper = None + clip_lower = None + if max_threshold > acc_max + 1: + clip_upper = acc_max + 1 + if min_threshold < acc_min: + clip_lower = acc_min + if (clip_lower is not None) or (clip_upper is not None): + warnings.warn( + "Clipping some thresholds in %s" % self.onnx_node.name + ) + thresholds = np.clip(thresholds, clip_lower, clip_upper) + 
model.set_initializer(self.onnx_node.input[2], thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor( + thresholds + ) min_threshold = thresholds.min() max_threshold = thresholds.max() - # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): - warnings.warn( - "Clipping some thresholds in %s" % self.onnx_node.name - ) - thresholds = np.clip(thresholds, clip_lower, clip_upper) - model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor( - thresholds - ) - min_threshold = thresholds.min() - max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) - else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) + # get range required by threshold values + tdt_min = min(acc_min, min_threshold) + tdt_max = max(acc_max, max_threshold) + if tdt_min < 0: + if abs(tdt_min) > tdt_max: + tdt = DataType.get_smallest_possible(tdt_min) else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( - threshold_tensor - ).all(), "Thresholds in %s can't be expressed with type %s" % ( - self.onnx_node.name, - str(tdt), - ) - self.set_nodeattr("accDataType", tdt.name) + tdt = DataType.get_smallest_possible(-tdt_max - 1) else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = DataType.get_smallest_possible(acc_min) - else: - adt = DataType.get_smallest_possible(-acc_max - 1) + tdt = DataType.get_smallest_possible(tdt_max) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), + ) + adt = tdt # Set activation datatype to the threshold datatype + else: + if acc_min < 0: + if abs(acc_min) > acc_max: + adt = DataType.get_smallest_possible(acc_min) else: - adt = DataType.get_smallest_possible(acc_max) - # ensure a datatype divisible by 8-bits in case this is the last node - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - self.set_nodeattr("accDataType", adt.name) - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + adt = DataType.get_smallest_possible(-acc_max - 1) + else: + adt = DataType.get_smallest_possible(acc_max) + # if this is the last node in the graph, then ensure the datatype is + # divisibly by 8 bits + if model.find_direct_successors(self.onnx_node) is None: + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) + self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 7afc09862707074982f3e18da6e019c3614a9442 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 15:09:37 -0800 Subject: [PATCH 411/628] Update minimize_accumulator_width for VVAU --- .../fpgadataflow/vectorvectoractivation.py | 146 ++++++++++-------- 1 file changed, 83 insertions(+), 63 deletions(-) diff --git 
a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 10ee30f89a..a580674836 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -106,75 +106,95 @@ def get_nodeattr_types(self): def minimize_accumulator_width(self, model): """Minimize the accumulator bit width according to the weight values, input data types, and size of dot product""" - if not self.get_nodeattr("runtime_writeable_weights"): - weights = model.get_initializer(self.onnx_node.input[1]) - k_h, k_w = self.get_nodeattr("Kernel") - fm = self.get_nodeattr("Channels") - # put weights into the shape expected by calculate_matvec_accumulator_range - weights = weights.reshape(fm, k_h * k_w).transpose() - if len(self.onnx_node.input) > 2: - thresholds = model.get_initializer(self.onnx_node.input[2]) - else: - thresholds = None - idt = self.get_input_datatype() - # calculate minimum and maximum values of accumulator according to the - # weight values using the bounds derived in https://arxiv.org/abs/2301.13376 + weights = model.get_initializer(self.onnx_node.input[1]) + k_h, k_w = self.get_nodeattr("Kernel") + fm = self.get_nodeattr("Channels") + # put weights into the shape expected by calculate_matvec_accumulator_range + weights = weights.reshape(fm, k_h * k_w).transpose() + # since in the calculation the values of the weight matrix are used, + # for the bipolar case they need to be converted to bipolar + if self.get_nodeattr("binaryXnorMode"): + weights = 2 * weights - 1 + if len(self.onnx_node.input) > 2: + thresholds = model.get_initializer(self.onnx_node.input[2]) + else: + thresholds = None + idt = self.get_input_datatype() + # if runtime-writeable weights, then the values of the weights can + # change and we need to use the worst-case values from the datatypes + if self.get_nodeattr("runtime_writeable_weights"): + wdt = self.get_weight_datatype() + lower_worst = wdt.min() * np.ones_like(weights) + lower_range = calculate_matvec_accumulator_range(lower_worst, idt) + upper_worst = wdt.min() * np.ones_like(weights) + upper_range = calculate_matvec_accumulator_range(upper_worst, idt) + acc_min = min(min(lower_range), min(upper_range)) + acc_max = max(max(upper_range), max(upper_range)) + thresholds = None # range of thresholds are also runtime-writeable + # if not runtime-writeable weights, then we can calculate the min + # and max values of the accumulation range using knowledge of the + # weights and input data types since they are fixed + else: (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) - if thresholds is not None: - threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) - # set threshold datatype (and accumulator datatype implicitly) + # if the thresholds can be used to determine range, then adjust the range + # according to the known values of the thresholds + if thresholds is not None: + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) + # set threshold datatype (and accumulator datatype implicitly) + min_threshold = thresholds.min() + max_threshold = thresholds.max() + # clip threshold values + clip_upper = None + clip_lower = None + if max_threshold > acc_max + 1: + clip_upper = acc_max + 1 + if min_threshold < acc_min: + clip_lower = acc_min + if (clip_lower is not None) or (clip_upper is not None): + warnings.warn( + "Clipping some thresholds in %s" % self.onnx_node.name + ) + thresholds = 
np.clip(thresholds, clip_lower, clip_upper) + model.set_initializer(self.onnx_node.input[2], thresholds) + threshold_tensor = self.get_hls_compatible_threshold_tensor( + thresholds + ) min_threshold = thresholds.min() max_threshold = thresholds.max() - # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): - warnings.warn( - "Clipping some thresholds in %s" % self.onnx_node.name - ) - thresholds = np.clip(thresholds, clip_lower, clip_upper) - model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor( - thresholds - ) - min_threshold = thresholds.min() - max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) - else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) + # get range required by threshold values + tdt_min = min(acc_min, min_threshold) + tdt_max = max(acc_max, max_threshold) + if tdt_min < 0: + if abs(tdt_min) > tdt_max: + tdt = DataType.get_smallest_possible(tdt_min) else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( - threshold_tensor - ).all(), "Thresholds in %s can't be expressed with type %s" % ( - self.onnx_node.name, - str(tdt), - ) - self.set_nodeattr("accDataType", tdt.name) + tdt = DataType.get_smallest_possible(-tdt_max - 1) else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = DataType.get_smallest_possible(acc_min) - else: - adt = DataType.get_smallest_possible(-acc_max - 1) + tdt = DataType.get_smallest_possible(tdt_max) + assert np.vectorize(tdt.allowed)( + threshold_tensor + ).all(), "Thresholds in %s can't be expressed with type %s" % ( + self.onnx_node.name, + str(tdt), + ) + adt = tdt # Set activation datatype to the threshold datatype + else: + if acc_min < 0: + if abs(acc_min) > acc_max: + adt = DataType.get_smallest_possible(acc_min) else: - adt = DataType.get_smallest_possible(acc_max) - # ensure a datatype divisible by 8-bits in case this is the last node - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - self.set_nodeattr("accDataType", adt.name) - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + adt = DataType.get_smallest_possible(-acc_max - 1) + else: + adt = DataType.get_smallest_possible(acc_max) + # if this is the last node in the graph, then ensure the datatype is + # divisibly by 8 bits + if model.find_direct_successors(self.onnx_node) is None: + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) + self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 1185b60fd5d40cdef41d70a15b2f6ed7a1f9052b Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 16:35:23 -0800 Subject: [PATCH 412/628] Creating test for MinimizeWeightBitWidth --- .../streamline/test_minimize_bit_width.py | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 
100644 tests/transformation/streamline/test_minimize_bit_width.py diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py new file mode 100644 index 0000000000..51dbe9cc7f --- /dev/null +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -0,0 +1,148 @@ +# Copyright (C) 2023, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of FINN nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import pytest +from typing import Optional +from onnx import TensorProto, helper +from qonnx.core.datatype import DataType, IntType +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.custom_op.registry import getCustomOp +from qonnx.util.basic import gen_finn_dt_tensor + +from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation +from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation +from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth + + +def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = None): + """Creates a toy finn-onnx model for unit testing. 
The VVAU-MVAU pair is based + on the first pair of MobileNetV1""" + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 32, 32, 288]) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 32, 32, 64]) + layer1 = helper.make_node( + "VectorVectorActivation", + ["inp", "params0", "thresh"] if tdt is not None else ["inp", "params0"], + ["hid"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + PE=1, + Channels=32, + Dim=(32, 32), + Kernel=(3,3), + inputDataType=idt.name, + outputDataType=idt.name, + weightDataType=wdt.name, + noActivation=tdt.min() if tdt is not None else 0, + ActVal=0 if tdt is not None else 1, + ) + layer2 = helper.make_node( + "MatrixVectorActivation", + ["hid", "params1", "thresh"] if tdt is not None else ["hid", "params1"], + ["outp"], + domain="finn.custom_op.fpgadataflow", + backend="fpgadataflow", + MW=32, # matrix_width (num_inputs) + MH=64, # matrix_height (num_outputs) + SIMD=1, + PE=1, + inputDataType=idt.name, + outputDataType=idt.name, + weightDataType=wdt.name, + noActivation=tdt.min() if tdt is not None else 0, + ActVal=0 if tdt is not None else 1, + binaryXnorMode=0 + ) + graph = helper.make_graph( + nodes=[layer1, layer2], name="fclayer_graph", inputs=[inp], outputs=[outp] + ) + + model = helper.make_model(graph, producer_name="fclayer-model") + model = ModelWrapper(model) + + model.set_tensor_datatype("inp", idt) + model.set_tensor_datatype("outp", idt) + model.set_tensor_datatype("hid", idt) + model.set_tensor_datatype("params0", wdt) + model.set_tensor_datatype("params1", wdt) + model.set_initializer("params0", + gen_finn_dt_tensor(wdt, (32, 1, 3, 3)) + ) + model.set_initializer("params1", + gen_finn_dt_tensor(wdt, (32, 64)) + ) + if tdt is not None: + model.set_tensor_datatype("thresh", tdt) + # model.set_initializer("thresh", thresholds) + return model + + +weight_data_types = [ + DataType['INT8'], + DataType['UINT8'], + DataType['INT7'], + DataType['UINT7'], + DataType['INT3'], + DataType['UINT3'], + DataType["BIPOLAR"], + DataType["TERNARY"], +] + +@pytest.mark.parametrize("wdt", weight_data_types) +@pytest.mark.parametrize("rww", [True, False]) +def test_minimize_weight_bit_width(wdt: DataType, rww: bool): + """Testing MinimizeWeightBitWidth for VVAU and MVAU. 
+ + :param wdt: (DataType) The data type that we are testing for the weights + :param rww: (bool) Whether or not to use runtime-writeable weights""" + + # Create a w8a8 model + def_wdt = DataType['UINT8'] + model = make_unit_test_model(def_wdt, DataType['INT8']) + + # Create new weights for the model based on wdt + params0 = gen_finn_dt_tensor(wdt, (32, 1, 3, 3)) + params1 = gen_finn_dt_tensor(wdt, (32, 64)) + model.set_initializer("params0", params0) + model.set_initializer("params1", params1) + + # If runtime-writeable weights, specify as a node attribute + for node in model.graph.node: + inst = getCustomOp(node) + if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): + inst.set_nodeattr("runtime_writeable_weights", int(rww)) + + # Apply the optimization + model = model.transform(MinimizeWeightBitWidth()) + + # Iterate through each node to make sure it functioned properly + for node in model.graph.node: + inst = getCustomOp(node) + if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): + cur_wdt = DataType[inst.get_nodeattr("weightDataType")] + exp_wdt = def_wdt if rww else wdt + assert cur_wdt.bitwidth() == exp_wdt.bitwidth(), "Mismatched data types" From 68a6f3e166b11a4c541676c344f26fd7e0bfc72b Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 16:39:32 -0800 Subject: [PATCH 413/628] Removing bipolar weights for now --- tests/transformation/streamline/test_minimize_bit_width.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 51dbe9cc7f..e25bf68d0d 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -108,7 +108,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = DataType['UINT7'], DataType['INT3'], DataType['UINT3'], - DataType["BIPOLAR"], + # DataType["BIPOLAR"], # TODO - investigate bipolar weights DataType["TERNARY"], ] From 5fc807348074027306fa2189b4e0bb3f6ebd9397 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 16:51:55 -0800 Subject: [PATCH 414/628] Adding test for MinimizeAccumulatorWidth --- .../streamline/test_minimize_bit_width.py | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index e25bf68d0d..658481cc6d 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -37,6 +37,7 @@ from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth +from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = None): @@ -112,6 +113,17 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = DataType["TERNARY"], ] + +input_data_types = [ + DataType['INT8'], + DataType['UINT8'], + DataType['INT3'], + DataType['UINT3'], + DataType["BIPOLAR"], + DataType["TERNARY"], +] + + @pytest.mark.parametrize("wdt", weight_data_types) @pytest.mark.parametrize("rww", [True, False]) def test_minimize_weight_bit_width(wdt: 
DataType, rww: bool): @@ -146,3 +158,45 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): cur_wdt = DataType[inst.get_nodeattr("weightDataType")] exp_wdt = def_wdt if rww else wdt assert cur_wdt.bitwidth() == exp_wdt.bitwidth(), "Mismatched data types" + + +@pytest.mark.parametrize("wdt", weight_data_types) +@pytest.mark.parametrize("adt", input_data_types) +@pytest.mark.parametrize("rww", [True, False]) +def test_minimize_weight_bit_width(wdt: DataType, idt:DataType, rww: bool): + """Testing MinimizeAccumulatorWidth for VVAU and MVAU. + + :param wdt: (DataType) The data type that we are testing for the weights + :param idt: (DataType) The data type that we are testing for the activations + :param rww: (bool) Whether or not to use runtime-writeable weights""" + + # Create uniform-precision model + # TODO: add thresholds (tdt) to unit tests + model = make_unit_test_model(wdt, idt) + def_adt = DataType["INT32"] + + # If runtime-writeable weights, specify as a node attribute + for node in model.graph.node: + inst = getCustomOp(node) + if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): + inst.set_nodeattr("runtime_writeable_weights", int(rww)) + cur_adt = DataType[inst.get_nodeattr("accDataType")] + assert cur_adt.bitwidth() == def_adt.bitwidth(), "Default data type is incorrect" + + # Apply the optimization + model = model.transform(MinimizeAccumulatorWidth()) + + # Iterate through each node to make sure it functioned properly + for node in model.graph.node: + inst = getCustomOp(node) + if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): + cur_adt = DataType[inst.get_nodeattr("accDataType")] + cur_odt = DataType[inst.get_nodeattr("accDataType")] + # TODO - figure out how to calculate expected accDataType + # exp_wdt = def_wdt if rww else wdt + # assert cur_adt.bitwidth() == exp_adt.bitwidth(), "Mismatched data types" + if model.find_direct_successors(inst.onnx_node) is None: + assert (cur_adt.bitwidth() % 8) == 0, "bit width of last node needs to be divisible by 8" + assert cur_adt.bitwidth() == cur_odt.bitwidth(), "outputDataType and accDataType should be equal" + else: + assert cur_adt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" \ No newline at end of file From 880f0f4f587c0fdf8e96890b730e09b2928871f4 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 16:55:11 -0800 Subject: [PATCH 415/628] Fixing test_minimize_accumulator_width() --- .../transformation/streamline/test_minimize_bit_width.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 658481cc6d..4995f45eba 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -161,9 +161,9 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): @pytest.mark.parametrize("wdt", weight_data_types) -@pytest.mark.parametrize("adt", input_data_types) +@pytest.mark.parametrize("idt", input_data_types) @pytest.mark.parametrize("rww", [True, False]) -def test_minimize_weight_bit_width(wdt: DataType, idt:DataType, rww: bool): +def test_minimize_accumulator_width(wdt: DataType, idt:DataType, rww: bool): """Testing MinimizeAccumulatorWidth for VVAU and MVAU. 
:param wdt: (DataType) The data type that we are testing for the weights @@ -191,7 +191,7 @@ def test_minimize_weight_bit_width(wdt: DataType, idt:DataType, rww: bool): inst = getCustomOp(node) if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): cur_adt = DataType[inst.get_nodeattr("accDataType")] - cur_odt = DataType[inst.get_nodeattr("accDataType")] + cur_odt = DataType[inst.get_nodeattr("outputDataType")] # TODO - figure out how to calculate expected accDataType # exp_wdt = def_wdt if rww else wdt # assert cur_adt.bitwidth() == exp_adt.bitwidth(), "Mismatched data types" @@ -199,4 +199,4 @@ def test_minimize_weight_bit_width(wdt: DataType, idt:DataType, rww: bool): assert (cur_adt.bitwidth() % 8) == 0, "bit width of last node needs to be divisible by 8" assert cur_adt.bitwidth() == cur_odt.bitwidth(), "outputDataType and accDataType should be equal" else: - assert cur_adt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" \ No newline at end of file + assert cur_odt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" \ No newline at end of file From fc403fb8bc8673f7d0330ffad2c0d6123c38de74 Mon Sep 17 00:00:00 2001 From: icolbert Date: Mon, 27 Feb 2023 16:58:22 -0800 Subject: [PATCH 416/628] Fixing bug --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 2 +- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 75aa587433..5f0fb2ede1 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -607,7 +607,7 @@ def minimize_accumulator_width(self, model): wdt = self.get_weight_datatype() lower_worst = wdt.min() * np.ones_like(weights) lower_range = calculate_matvec_accumulator_range(lower_worst, idt) - upper_worst = wdt.min() * np.ones_like(weights) + upper_worst = wdt.max() * np.ones_like(weights) upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index a580674836..15cfdcfd37 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -126,7 +126,7 @@ def minimize_accumulator_width(self, model): wdt = self.get_weight_datatype() lower_worst = wdt.min() * np.ones_like(weights) lower_range = calculate_matvec_accumulator_range(lower_worst, idt) - upper_worst = wdt.min() * np.ones_like(weights) + upper_worst = wdt.max() * np.ones_like(weights) upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) From df856b45ff8d708d48f78b4c4bbc25a14b1e48e2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 28 Feb 2023 17:00:54 +0000 Subject: [PATCH 417/628] [Tests] Update ReLU export test --- .../brevitas/test_brevitas_relu_act_export.py | 180 ++---------------- 1 file changed, 13 insertions(+), 167 deletions(-) diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index 1900763bdd..6bff4ae800 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ 
b/tests/brevitas/test_brevitas_relu_act_export.py @@ -33,14 +33,12 @@ import os import torch from brevitas.core.quant import QuantType -from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantReLU from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.cleanup import cleanup as qonnx_cleanup -from torch import nn import finn.core.onnx_exec as oxe from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN @@ -50,95 +48,30 @@ @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) +@pytest.mark.parametrize("ishape", [(1, 15), (1, 32, 1, 1)]) @pytest.mark.parametrize( - "scaling_impl_type", [ScalingImplType.CONST, ScalingImplType.PARAMETER] + "scaling_impl_type", [ScalingImplType.CONST] # , ScalingImplType.PARAMETER] ) +@pytest.mark.parametrize("scaling_per_output_channel", [True, False]) +@pytest.mark.parametrize("per_channel_broadcastable_shape", [None, (1, 32, 1, 1)]) @pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_act_export_relu(abits, scaling_impl_type, QONNX_export): - ishape = (1, 15) +def test_brevitas_act_export_relu( + abits, + ishape, + scaling_impl_type, + scaling_per_output_channel, + per_channel_broadcastable_shape, + QONNX_export, +): b_act = QuantReLU( bit_width=abits, max_val=6.0, scaling_impl_type=scaling_impl_type, - restrict_scaling_type=RestrictValueType.LOG_FP, - quant_type=QuantType.INT, - ) - if scaling_impl_type == ScalingImplType.PARAMETER: - checkpoint = { - "act_quant_proxy.fused_activation_quant_proxy.tensor_quant.\ -scaling_impl.learned_value": torch.tensor( - 0.49 - ).type( - torch.FloatTensor - ) - } - b_act.load_state_dict(checkpoint) - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_act, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) - model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) - idict = {model.graph.input[0].name: inp_tensor} - odict = oxe.execute_onnx(model, idict, True) - produced = odict[model.graph.output[0].name] - inp_tensor = torch.from_numpy(inp_tensor).float() - b_act.eval() - expected = b_act.forward(inp_tensor).detach().numpy() - if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits, scaling_impl_type) - print("scale: ", b_act.quant_act_scale().type(torch.FloatTensor).detach()) - if abits < 5: - print( - "thres:", - ", ".join(["{:8.4f}".format(x) for x in b_act.export_thres[0]]), - ) - print("input:", ", ".join(["{:8.4f}".format(x) for x in inp_tensor[0]])) - print("prod :", ", ".join(["{:8.4f}".format(x) for x in produced[0]])) - print("expec:", ", ".join(["{:8.4f}".format(x) for x in expected[0]])) - - assert np.isclose(produced, expected, atol=1e-3).all() - os.remove(export_onnx_path) - - -@pytest.mark.brevitas_export -@pytest.mark.parametrize("abits", [2, 4, 8]) -@pytest.mark.parametrize("scaling_per_output_channel", [True, False]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_act_export_relu_imagenet( - abits, scaling_per_output_channel, QONNX_export -): - out_channels = 32 - ishape = (1, out_channels, 1, 
1) - b_act = QuantReLU( - bit_width=abits, quant_type=QuantType.INT, - scaling_impl_type=ScalingImplType.PARAMETER, scaling_per_output_channel=scaling_per_output_channel, - restrict_scaling_type=RestrictValueType.LOG_FP, - scaling_min_val=2e-16, - max_val=6.0, - return_quant_tensor=False, - per_channel_broadcastable_shape=(1, out_channels, 1, 1), + per_channel_broadcastable_shape=per_channel_broadcastable_shape, ) - if scaling_per_output_channel is True: - rand_tensor = (2) * torch.rand((1, out_channels, 1, 1)) - else: - rand_tensor = torch.tensor(1.2398) - checkpoint = { - "act_quant_proxy.fused_activation_quant_proxy.tensor_quant.\ -scaling_impl.learned_value": rand_tensor.type( - torch.FloatTensor - ) - } - b_act.load_state_dict(checkpoint) if QONNX_export: m_path = export_onnx_path export_qonnx(b_act, torch.randn(ishape), m_path) @@ -157,93 +90,6 @@ def test_brevitas_act_export_relu_imagenet( inp_tensor = torch.from_numpy(inp_tensor).float() b_act.eval() expected = b_act.forward(inp_tensor).detach().numpy() - if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits) - print("scale: ", b_act.quant_act_scale().type(torch.FloatTensor).detach()) - if abits < 5: - print( - "thres:", - ", ".join(["{:8.4f}".format(x) for x in b_act.export_thres[0]]), - ) - print("input:", ", ".join(["{:8.4f}".format(x) for x in inp_tensor[0]])) - print("prod :", ", ".join(["{:8.4f}".format(x) for x in produced[0]])) - print("expec:", ", ".join(["{:8.4f}".format(x) for x in expected[0]])) - - assert np.isclose(produced, expected, atol=1e-3).all() - os.remove(export_onnx_path) - - -class PyTorchTestModel(nn.Module): - def __init__(self, abits): - super(PyTorchTestModel, self).__init__() - out_channels = 32 - self.b_act = QuantReLU( - bit_width=abits, - quant_type=QuantType.INT, - scaling_impl_type=ScalingImplType.PARAMETER, - scaling_per_output_channel=True, - restrict_scaling_type=RestrictValueType.LOG_FP, - scaling_min_val=2e-16, - max_val=6.0, - return_quant_tensor=False, - per_channel_broadcastable_shape=(1, out_channels, 1, 1), - ) - - def forward(self, x): - act_out = self.b_act(x) - y0 = act_out * 2.0 - y1 = act_out * -1.0 - y = y0 + y1 - return y - - -@pytest.mark.brevitas_export -@pytest.mark.parametrize("abits", [2, 4, 8]) -@pytest.mark.parametrize("scaling_per_output_channel", [True]) -@pytest.mark.parametrize("QONNX_export", [True]) -def test_brevitas_act_export_relu_forking( - abits, scaling_per_output_channel, QONNX_export -): - out_channels = 32 - ishape = (1, out_channels, 1, 1) - model_pyt = PyTorchTestModel(abits) - - rand_tensor = (2) * torch.rand((1, out_channels, 1, 1)) - - checkpoint = { - "b_act.act_quant_proxy.fused_activation_quant_proxy." 
- "tensor_quant.scaling_impl.learned_value": rand_tensor.type(torch.FloatTensor) - } - model_pyt.load_state_dict(checkpoint) - - if QONNX_export: - m_path = export_onnx_path - export_qonnx(model_pyt, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - - model = ModelWrapper(export_onnx_path) - model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) - idict = {model.graph.input[0].name: inp_tensor} - odict = oxe.execute_onnx(model, idict, True) - produced = odict[model.graph.output[0].name] - inp_tensor = torch.from_numpy(inp_tensor).float() - model_pyt.eval() - expected = model_pyt.forward(inp_tensor).detach().numpy() - if not np.isclose(produced, expected, atol=1e-3).all(): - print(abits) - print("scale: ", model_pyt.quant_act_scale().type(torch.FloatTensor).detach()) - if abits < 5: - print( - "thres:", - ", ".join(["{:8.4f}".format(x) for x in model_pyt.export_thres[0]]), - ) - print("input:", ", ".join(["{:8.4f}".format(x) for x in inp_tensor[0]])) - print("prod :", ", ".join(["{:8.4f}".format(x) for x in produced[0]])) - print("expec:", ", ".join(["{:8.4f}".format(x) for x in expected[0]])) assert np.isclose(produced, expected, atol=1e-3).all() os.remove(export_onnx_path) From 72798e10c1091d7139d8c725fb5e92199f447223 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 15:48:52 -0800 Subject: [PATCH 418/628] Removing manual override of thresholds --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 1 - src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 1 - 2 files changed, 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 5f0fb2ede1..39fd16d456 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -611,7 +611,6 @@ def minimize_accumulator_width(self, model): upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) - thresholds = None # range of thresholds are also runtime-writeable # if not runtime-writeable weights, then we can calculate the min # and max values of the accumulation range using knowledge of the # weights and input data types since they are fixed diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 15cfdcfd37..8fac0942e9 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -130,7 +130,6 @@ def minimize_accumulator_width(self, model): upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) - thresholds = None # range of thresholds are also runtime-writeable # if not runtime-writeable weights, then we can calculate the min # and max values of the accumulation range using knowledge of the # weights and input data types since they are fixed From fcfeb026c1408f0c3203b181023b4ad150c3f171 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 16:19:51 -0800 Subject: [PATCH 419/628] Updating checks in minimize_accumulator_width --- .../streamline/test_minimize_bit_width.py | 74 
+++++++++++++++++-- 1 file changed, 68 insertions(+), 6 deletions(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 4995f45eba..221be75da7 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -27,12 +27,16 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import pytest -from typing import Optional +import numpy as np +from typing import Optional, Union from onnx import TensorProto, helper from qonnx.core.datatype import DataType, IntType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp -from qonnx.util.basic import gen_finn_dt_tensor +from qonnx.util.basic import ( + gen_finn_dt_tensor, + roundup_to_integer_multiple +) from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation @@ -109,7 +113,9 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = DataType['UINT7'], DataType['INT3'], DataType['UINT3'], - # DataType["BIPOLAR"], # TODO - investigate bipolar weights + # TODO - current MinimizeWeightBitWidth sets {-1,1} to INT2, need to check + # for 0 in weights to minimize weight bit width to bipolar + # DataType["BIPOLAR"], DataType["TERNARY"], ] @@ -160,6 +166,57 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): assert cur_wdt.bitwidth() == exp_wdt.bitwidth(), "Mismatched data types" +def calculate_accumulator_bit_width( + inst: Union[MatrixVectorActivation, VectorVectorActivation], + model: ModelWrapper + ) -> Union[DataType, IntType]: + """Calculate the accumulator bit width use the closed-form expressions + derived in `Quantized Neural Networks for Low-Precision Accumulation + with Guaranteed Overflow Avoidance` (2023) by I.Colbert, A. Pappalardo, + J. Petri-Koenig + + :param inst: (HLSCustomOp) The instance of the MVAU or VVAU + :param model: (ModelWrapper) The instance of the whole model + """ + def phi(x: float) -> float: + return np.log2(1 + pow(2, -x)) + + weights = model.get_initializer(inst.onnx_node.input[1]) + # since in the calculation the values of the weight matrix are used, + # for the bipolar case they need to be converted to bipolar + if inst.get_nodeattr("binaryXnorMode"): + weights = 2 * weights - 1 + # modify the weights based on if the node is a VVAU or MVAU + if isinstance(inst, MatrixVectorActivation): + K = inst.get_nodeattr("MW") # matrix_width = num_inputs + elif isinstance(inst, VectorVectorActivation): + k_h, k_w = inst.get_nodeattr("Kernel") + K = k_h * k_w # size of kernels = num_inputs + fm = inst.get_nodeattr("Channels") + # put weights into the shape expected by calculate_matvec_accumulator_range + weights = weights.reshape(fm, k_h * k_w).transpose() + else: + raise Exception("Considering only MVAU and VVAU currently") + # collect attributes used to determine the accumulator bit width bound + wdt = inst.get_weight_datatype() + idt = inst.get_input_datatype() + rww = inst.get_nodeattr("runtime_writeable_weights") + # if runtime-writeable weights, then use the lower bound on the accumulator bit + # width as determined by the input and weight data types and size of dot product + if rww: + alpha = np.log2(K) + idt.bitwidth() + wdt.bitwidth() - 1. - float(idt.signed()) + P = np.ceil(alpha + phi(alpha) + 1.) 
+ # if not runtime-writable weights, then use the tighter bound on the accumulator + # bit width as determined by the weight values themselves + else: + beta = np.log2(abs(weights).sum(axis=0).max()) + idt.bitwidth() - float(idt.signed()) + P = np.ceil(beta + phi(beta) + 1.) + # if the node is the last in the graph, then round up to the nearest 8 bits + if model.find_direct_successors(inst.onnx_node) is None: + P = roundup_to_integer_multiple(P, 8) + return DataType[f"INT{int(P)}"] + + @pytest.mark.parametrize("wdt", weight_data_types) @pytest.mark.parametrize("idt", input_data_types) @pytest.mark.parametrize("rww", [True, False]) @@ -169,6 +226,8 @@ def test_minimize_accumulator_width(wdt: DataType, idt:DataType, rww: bool): :param wdt: (DataType) The data type that we are testing for the weights :param idt: (DataType) The data type that we are testing for the activations :param rww: (bool) Whether or not to use runtime-writeable weights""" + if not wdt.signed(): + pytest.skip("Closed-form accumulator calculation is designed to consider only signed weights") # Create uniform-precision model # TODO: add thresholds (tdt) to unit tests @@ -192,9 +251,12 @@ def test_minimize_accumulator_width(wdt: DataType, idt:DataType, rww: bool): if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): cur_adt = DataType[inst.get_nodeattr("accDataType")] cur_odt = DataType[inst.get_nodeattr("outputDataType")] - # TODO - figure out how to calculate expected accDataType - # exp_wdt = def_wdt if rww else wdt - # assert cur_adt.bitwidth() == exp_adt.bitwidth(), "Mismatched data types" + # Calculating expected accumulator bit width using a closed-form expression + # that is a slight over-approximation of the lower bound. The accumulator + # bit width minimization logic in the MVAU and VVAU is exact and should be + # less than or equal to this calculation + exp_adt = calculate_accumulator_bit_width(inst, model) + assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" if model.find_direct_successors(inst.onnx_node) is None: assert (cur_adt.bitwidth() % 8) == 0, "bit width of last node needs to be divisible by 8" assert cur_adt.bitwidth() == cur_odt.bitwidth(), "outputDataType and accDataType should be equal" From 9a99b31141f372417914ed47b73d1ede1b287a02 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 18:06:52 -0800 Subject: [PATCH 420/628] Adding threshold data types for accumulator width unit test --- .../streamline/test_minimize_bit_width.py | 37 +++++++++++++++---- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 221be75da7..7cb866c6e8 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -51,7 +51,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 32, 32, 64]) layer1 = helper.make_node( "VectorVectorActivation", - ["inp", "params0", "thresh"] if tdt is not None else ["inp", "params0"], + ["inp", "params0", "thresh0"] if tdt is not None else ["inp", "params0"], ["hid"], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", @@ -67,7 +67,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = ) layer2 = helper.make_node( "MatrixVectorActivation", - ["hid", "params1", "thresh"] if tdt is not 
None else ["hid", "params1"], + ["hid", "params1", "thresh1"] if tdt is not None else ["hid", "params1"], ["outp"], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", @@ -100,9 +100,21 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = model.set_initializer("params1", gen_finn_dt_tensor(wdt, (32, 64)) ) + # if the threshold data type is specified, then we need to generate + # some dummy threshold values if tdt is not None: - model.set_tensor_datatype("thresh", tdt) - # model.set_initializer("thresh", thresholds) + model.set_tensor_datatype("thresh0", tdt) + model.set_tensor_datatype("thresh1", tdt) + # Create threshold tensors + n_steps: int = idt.get_num_possible_values() - 1 + thresholds: Optional[np.ndarray] = np.random.randint(tdt.min(), tdt.max() - 1, \ + (32, n_steps)).astype(np.float32) # generate thresholds for the activations + thresholds = np.sort(thresholds, axis=1) # provide non-decreasing thresholds + model.set_initializer("thresh0", thresholds) + thresholds: Optional[np.ndarray] = np.random.randint(tdt.min(), tdt.max() - 1, \ + (64, n_steps)).astype(np.float32) # generate thresholds for the activations + thresholds = np.sort(thresholds, axis=1) # provide non-decreasing thresholds + model.set_initializer("thresh1", thresholds) return model @@ -170,7 +182,7 @@ def calculate_accumulator_bit_width( inst: Union[MatrixVectorActivation, VectorVectorActivation], model: ModelWrapper ) -> Union[DataType, IntType]: - """Calculate the accumulator bit width use the closed-form expressions + """Calculate the accumulator bit width using the closed-form expressions derived in `Quantized Neural Networks for Low-Precision Accumulation with Guaranteed Overflow Avoidance` (2023) by I.Colbert, A. Pappalardo, J. Petri-Koenig @@ -217,21 +229,30 @@ def phi(x: float) -> float: return DataType[f"INT{int(P)}"] +thresh_data_types = [ + None, + DataType['INT32'], + DataType['INT24'], + DataType['INT16'], +] + + @pytest.mark.parametrize("wdt", weight_data_types) @pytest.mark.parametrize("idt", input_data_types) +@pytest.mark.parametrize("tdt", thresh_data_types) @pytest.mark.parametrize("rww", [True, False]) -def test_minimize_accumulator_width(wdt: DataType, idt:DataType, rww: bool): +def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, rww: bool): """Testing MinimizeAccumulatorWidth for VVAU and MVAU. :param wdt: (DataType) The data type that we are testing for the weights :param idt: (DataType) The data type that we are testing for the activations + :param tdt: (DataType) The data type that we are testing for the thresholds :param rww: (bool) Whether or not to use runtime-writeable weights""" if not wdt.signed(): pytest.skip("Closed-form accumulator calculation is designed to consider only signed weights") # Create uniform-precision model - # TODO: add thresholds (tdt) to unit tests - model = make_unit_test_model(wdt, idt) + model = make_unit_test_model(wdt, idt, tdt) def_adt = DataType["INT32"] # If runtime-writeable weights, specify as a node attribute From 168ccbc7b2d1a76406a48097f56a5420fedd40f9 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 18:10:16 -0800 Subject: [PATCH 421/628] Fixing bug Switching noActivation and ActVal, which we incorrectly set when thresholds were specified. 
--- .../transformation/streamline/test_minimize_bit_width.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 7cb866c6e8..73beaf5b0c 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -62,8 +62,8 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = inputDataType=idt.name, outputDataType=idt.name, weightDataType=wdt.name, - noActivation=tdt.min() if tdt is not None else 0, - ActVal=0 if tdt is not None else 1, + ActVal=tdt.min() if tdt is not None else 0, + noActivation=0 if tdt is not None else 1, ) layer2 = helper.make_node( "MatrixVectorActivation", @@ -78,8 +78,8 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = inputDataType=idt.name, outputDataType=idt.name, weightDataType=wdt.name, - noActivation=tdt.min() if tdt is not None else 0, - ActVal=0 if tdt is not None else 1, + ActVal=tdt.min() if tdt is not None else 0, + noActivation=0 if tdt is not None else 1, binaryXnorMode=0 ) graph = helper.make_graph( From c25ac04f1a63ee1d0bef6a9dff9bf1cc0cd7e0d2 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 18:16:28 -0800 Subject: [PATCH 422/628] Handling weight data type test cases --- .../streamline/test_minimize_bit_width.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 73beaf5b0c..1b280de015 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -30,7 +30,7 @@ import numpy as np from typing import Optional, Union from onnx import TensorProto, helper -from qonnx.core.datatype import DataType, IntType +from qonnx.core.datatype import DataType, IntType, BipolarType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.util.basic import ( @@ -125,9 +125,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = DataType['UINT7'], DataType['INT3'], DataType['UINT3'], - # TODO - current MinimizeWeightBitWidth sets {-1,1} to INT2, need to check - # for 0 in weights to minimize weight bit width to bipolar - # DataType["BIPOLAR"], + DataType["BIPOLAR"], DataType["TERNARY"], ] @@ -149,6 +147,10 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): :param wdt: (DataType) The data type that we are testing for the weights :param rww: (bool) Whether or not to use runtime-writeable weights""" + if isinstance(wdt, BipolarType): + # current MinimizeWeightBitWidth sets {-1,1} to INT2, need to check + # for 0 in weights to minimize weight bit width to bipolar + pytest.skip("Not well-supported for this optimization") # Create a w8a8 model def_wdt = DataType['UINT8'] @@ -248,7 +250,7 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, :param idt: (DataType) The data type that we are testing for the activations :param tdt: (DataType) The data type that we are testing for the thresholds :param rww: (bool) Whether or not to use runtime-writeable weights""" - if not wdt.signed(): + if (not wdt.signed()) or isinstance(wdt, BipolarType): pytest.skip("Closed-form accumulator calculation is designed to consider only signed weights") # Create 
uniform-precision model From 29fa600cf0992948aaaa4ad3b24f1bd874b7ca0e Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 28 Feb 2023 18:30:51 -0800 Subject: [PATCH 423/628] pre-commit cleanup --- .../fpgadataflow/matrixvectoractivation.py | 10 +- .../fpgadataflow/vectorvectoractivation.py | 10 +- .../streamline/test_minimize_bit_width.py | 144 ++++++++++-------- 3 files changed, 89 insertions(+), 75 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 39fd16d456..f5585db483 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -631,14 +631,10 @@ def minimize_accumulator_width(self, model): if min_threshold < acc_min: clip_lower = acc_min if (clip_lower is not None) or (clip_upper is not None): - warnings.warn( - "Clipping some thresholds in %s" % self.onnx_node.name - ) + warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) thresholds = np.clip(thresholds, clip_lower, clip_upper) model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor( - thresholds - ) + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) min_threshold = thresholds.min() max_threshold = thresholds.max() # get range required by threshold values @@ -657,7 +653,7 @@ def minimize_accumulator_width(self, model): self.onnx_node.name, str(tdt), ) - adt = tdt # Set activation datatype to the threshold datatype + adt = tdt # Set activation datatype to the threshold datatype else: if acc_min < 0: if abs(acc_min) > acc_max: diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 8fac0942e9..a9c59ebe31 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -150,14 +150,10 @@ def minimize_accumulator_width(self, model): if min_threshold < acc_min: clip_lower = acc_min if (clip_lower is not None) or (clip_upper is not None): - warnings.warn( - "Clipping some thresholds in %s" % self.onnx_node.name - ) + warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) thresholds = np.clip(thresholds, clip_lower, clip_upper) model.set_initializer(self.onnx_node.input[2], thresholds) - threshold_tensor = self.get_hls_compatible_threshold_tensor( - thresholds - ) + threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) min_threshold = thresholds.min() max_threshold = thresholds.max() # get range required by threshold values @@ -176,7 +172,7 @@ def minimize_accumulator_width(self, model): self.onnx_node.name, str(tdt), ) - adt = tdt # Set activation datatype to the threshold datatype + adt = tdt # Set activation datatype to the threshold datatype else: if acc_min < 0: if abs(acc_min) > acc_max: diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/transformation/streamline/test_minimize_bit_width.py index 1b280de015..866b64445b 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/transformation/streamline/test_minimize_bit_width.py @@ -27,21 +27,23 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import pytest + import numpy as np -from typing import Optional, Union from onnx import TensorProto, helper -from qonnx.core.datatype import DataType, IntType, BipolarType +from qonnx.core.datatype import BipolarType, DataType, IntType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp -from qonnx.util.basic import ( - gen_finn_dt_tensor, - roundup_to_integer_multiple -) +from qonnx.util.basic import gen_finn_dt_tensor, roundup_to_integer_multiple +from typing import Optional, Union -from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation -from finn.transformation.fpgadataflow.minimize_weight_bit_width import MinimizeWeightBitWidth -from finn.transformation.fpgadataflow.minimize_accumulator_width import MinimizeAccumulatorWidth +from finn.custom_op.fpgadataflow.vectorvectoractivation import VectorVectorActivation +from finn.transformation.fpgadataflow.minimize_accumulator_width import ( + MinimizeAccumulatorWidth, +) +from finn.transformation.fpgadataflow.minimize_weight_bit_width import ( + MinimizeWeightBitWidth, +) def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = None): @@ -58,7 +60,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = PE=1, Channels=32, Dim=(32, 32), - Kernel=(3,3), + Kernel=(3, 3), inputDataType=idt.name, outputDataType=idt.name, weightDataType=wdt.name, @@ -71,8 +73,8 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = ["outp"], domain="finn.custom_op.fpgadataflow", backend="fpgadataflow", - MW=32, # matrix_width (num_inputs) - MH=64, # matrix_height (num_outputs) + MW=32, # matrix_width (num_inputs) + MH=64, # matrix_height (num_outputs) SIMD=1, PE=1, inputDataType=idt.name, @@ -80,7 +82,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = weightDataType=wdt.name, ActVal=tdt.min() if tdt is not None else 0, noActivation=0 if tdt is not None else 1, - binaryXnorMode=0 + binaryXnorMode=0, ) graph = helper.make_graph( nodes=[layer1, layer2], name="fclayer_graph", inputs=[inp], outputs=[outp] @@ -94,12 +96,8 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = model.set_tensor_datatype("hid", idt) model.set_tensor_datatype("params0", wdt) model.set_tensor_datatype("params1", wdt) - model.set_initializer("params0", - gen_finn_dt_tensor(wdt, (32, 1, 3, 3)) - ) - model.set_initializer("params1", - gen_finn_dt_tensor(wdt, (32, 64)) - ) + model.set_initializer("params0", gen_finn_dt_tensor(wdt, (32, 1, 3, 3))) + model.set_initializer("params1", gen_finn_dt_tensor(wdt, (32, 64))) # if the threshold data type is specified, then we need to generate # some dummy threshold values if tdt is not None: @@ -107,34 +105,40 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = model.set_tensor_datatype("thresh1", tdt) # Create threshold tensors n_steps: int = idt.get_num_possible_values() - 1 - thresholds: Optional[np.ndarray] = np.random.randint(tdt.min(), tdt.max() - 1, \ - (32, n_steps)).astype(np.float32) # generate thresholds for the activations - thresholds = np.sort(thresholds, axis=1) # provide non-decreasing thresholds + thresholds: Optional[np.ndarray] = np.random.randint( + tdt.min(), tdt.max() - 1, (32, n_steps) + ).astype( + np.float32 + ) # generate thresholds for the activations + thresholds = np.sort(thresholds, axis=1) # 
provide non-decreasing thresholds model.set_initializer("thresh0", thresholds) - thresholds: Optional[np.ndarray] = np.random.randint(tdt.min(), tdt.max() - 1, \ - (64, n_steps)).astype(np.float32) # generate thresholds for the activations - thresholds = np.sort(thresholds, axis=1) # provide non-decreasing thresholds + thresholds: Optional[np.ndarray] = np.random.randint( + tdt.min(), tdt.max() - 1, (64, n_steps) + ).astype( + np.float32 + ) # generate thresholds for the activations + thresholds = np.sort(thresholds, axis=1) # provide non-decreasing thresholds model.set_initializer("thresh1", thresholds) return model weight_data_types = [ - DataType['INT8'], - DataType['UINT8'], - DataType['INT7'], - DataType['UINT7'], - DataType['INT3'], - DataType['UINT3'], + DataType["INT8"], + DataType["UINT8"], + DataType["INT7"], + DataType["UINT7"], + DataType["INT3"], + DataType["UINT3"], DataType["BIPOLAR"], DataType["TERNARY"], ] input_data_types = [ - DataType['INT8'], - DataType['UINT8'], - DataType['INT3'], - DataType['UINT3'], + DataType["INT8"], + DataType["UINT8"], + DataType["INT3"], + DataType["UINT3"], DataType["BIPOLAR"], DataType["TERNARY"], ] @@ -144,7 +148,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = @pytest.mark.parametrize("rww", [True, False]) def test_minimize_weight_bit_width(wdt: DataType, rww: bool): """Testing MinimizeWeightBitWidth for VVAU and MVAU. - + :param wdt: (DataType) The data type that we are testing for the weights :param rww: (bool) Whether or not to use runtime-writeable weights""" if isinstance(wdt, BipolarType): @@ -153,9 +157,9 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): pytest.skip("Not well-supported for this optimization") # Create a w8a8 model - def_wdt = DataType['UINT8'] - model = make_unit_test_model(def_wdt, DataType['INT8']) - + def_wdt = DataType["UINT8"] + model = make_unit_test_model(def_wdt, DataType["INT8"]) + # Create new weights for the model based on wdt params0 = gen_finn_dt_tensor(wdt, (32, 1, 3, 3)) params1 = gen_finn_dt_tensor(wdt, (32, 64)) @@ -171,7 +175,7 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): # Apply the optimization model = model.transform(MinimizeWeightBitWidth()) - # Iterate through each node to make sure it functioned properly + # Iterate through each node to make sure it functioned properly for node in model.graph.node: inst = getCustomOp(node) if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): @@ -181,9 +185,8 @@ def test_minimize_weight_bit_width(wdt: DataType, rww: bool): def calculate_accumulator_bit_width( - inst: Union[MatrixVectorActivation, VectorVectorActivation], - model: ModelWrapper - ) -> Union[DataType, IntType]: + inst: Union[MatrixVectorActivation, VectorVectorActivation], model: ModelWrapper +) -> Union[DataType, IntType]: """Calculate the accumulator bit width using the closed-form expressions derived in `Quantized Neural Networks for Low-Precision Accumulation with Guaranteed Overflow Avoidance` (2023) by I.Colbert, A. 
Pappalardo, @@ -192,6 +195,7 @@ def calculate_accumulator_bit_width( :param inst: (HLSCustomOp) The instance of the MVAU or VVAU :param model: (ModelWrapper) The instance of the whole model """ + def phi(x: float) -> float: return np.log2(1 + pow(2, -x)) @@ -202,10 +206,10 @@ def phi(x: float) -> float: weights = 2 * weights - 1 # modify the weights based on if the node is a VVAU or MVAU if isinstance(inst, MatrixVectorActivation): - K = inst.get_nodeattr("MW") # matrix_width = num_inputs + K = inst.get_nodeattr("MW") # matrix_width = num_inputs elif isinstance(inst, VectorVectorActivation): k_h, k_w = inst.get_nodeattr("Kernel") - K = k_h * k_w # size of kernels = num_inputs + K = k_h * k_w # size of kernels = num_inputs fm = inst.get_nodeattr("Channels") # put weights into the shape expected by calculate_matvec_accumulator_range weights = weights.reshape(fm, k_h * k_w).transpose() @@ -218,13 +222,17 @@ def phi(x: float) -> float: # if runtime-writeable weights, then use the lower bound on the accumulator bit # width as determined by the input and weight data types and size of dot product if rww: - alpha = np.log2(K) + idt.bitwidth() + wdt.bitwidth() - 1. - float(idt.signed()) - P = np.ceil(alpha + phi(alpha) + 1.) + alpha = np.log2(K) + idt.bitwidth() + wdt.bitwidth() - 1.0 - float(idt.signed()) + P = np.ceil(alpha + phi(alpha) + 1.0) # if not runtime-writable weights, then use the tighter bound on the accumulator # bit width as determined by the weight values themselves else: - beta = np.log2(abs(weights).sum(axis=0).max()) + idt.bitwidth() - float(idt.signed()) - P = np.ceil(beta + phi(beta) + 1.) + beta = ( + np.log2(abs(weights).sum(axis=0).max()) + + idt.bitwidth() + - float(idt.signed()) + ) + P = np.ceil(beta + phi(beta) + 1.0) # if the node is the last in the graph, then round up to the nearest 8 bits if model.find_direct_successors(inst.onnx_node) is None: P = roundup_to_integer_multiple(P, 8) @@ -233,9 +241,9 @@ def phi(x: float) -> float: thresh_data_types = [ None, - DataType['INT32'], - DataType['INT24'], - DataType['INT16'], + DataType["INT32"], + DataType["INT24"], + DataType["INT16"], ] @@ -243,15 +251,19 @@ def phi(x: float) -> float: @pytest.mark.parametrize("idt", input_data_types) @pytest.mark.parametrize("tdt", thresh_data_types) @pytest.mark.parametrize("rww", [True, False]) -def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, rww: bool): +def test_minimize_accumulator_width( + wdt: DataType, idt: DataType, tdt: DataType, rww: bool +): """Testing MinimizeAccumulatorWidth for VVAU and MVAU. 
- + :param wdt: (DataType) The data type that we are testing for the weights :param idt: (DataType) The data type that we are testing for the activations :param tdt: (DataType) The data type that we are testing for the thresholds :param rww: (bool) Whether or not to use runtime-writeable weights""" if (not wdt.signed()) or isinstance(wdt, BipolarType): - pytest.skip("Closed-form accumulator calculation is designed to consider only signed weights") + pytest.skip( + "Closed-form accumulator calculation is designed to consider signed weights" + ) # Create uniform-precision model model = make_unit_test_model(wdt, idt, tdt) @@ -263,12 +275,14 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): inst.set_nodeattr("runtime_writeable_weights", int(rww)) cur_adt = DataType[inst.get_nodeattr("accDataType")] - assert cur_adt.bitwidth() == def_adt.bitwidth(), "Default data type is incorrect" + assert ( + cur_adt.bitwidth() == def_adt.bitwidth() + ), "Default data type is incorrect" # Apply the optimization model = model.transform(MinimizeAccumulatorWidth()) - # Iterate through each node to make sure it functioned properly + # Iterate through each node to make sure it functioned properly for node in model.graph.node: inst = getCustomOp(node) if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): @@ -279,9 +293,17 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, # bit width minimization logic in the MVAU and VVAU is exact and should be # less than or equal to this calculation exp_adt = calculate_accumulator_bit_width(inst, model) - assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" + assert ( + cur_adt.bitwidth() <= exp_adt.bitwidth() + ), "Mismatched accumulation data types" if model.find_direct_successors(inst.onnx_node) is None: - assert (cur_adt.bitwidth() % 8) == 0, "bit width of last node needs to be divisible by 8" - assert cur_adt.bitwidth() == cur_odt.bitwidth(), "outputDataType and accDataType should be equal" + assert ( + cur_adt.bitwidth() % 8 + ) == 0, "bit width of last node needs to be divisible by 8" + assert ( + cur_adt.bitwidth() == cur_odt.bitwidth() + ), "outputDataType and accDataType should be equal" else: - assert cur_odt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" \ No newline at end of file + assert ( + cur_odt.bitwidth() == idt.bitwidth() + ), "outputDataType should not be changed" From 3a2d5e3fd2561adce3f16b143d0bf5c4450cd523 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 1 Mar 2023 09:51:36 +0000 Subject: [PATCH 424/628] [Tests] Update brevitas export test for relu --- .../brevitas/test_brevitas_relu_act_export.py | 53 ++++++++++++++----- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index 6bff4ae800..a4657d7924 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -32,7 +32,6 @@ import onnx # noqa import os import torch -from brevitas.core.quant import QuantType from brevitas.core.scaling import ScalingImplType from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantReLU @@ -49,28 +48,56 @@ @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) @pytest.mark.parametrize("ishape", [(1, 15), (1, 32, 1, 1)]) -@pytest.mark.parametrize( - 
"scaling_impl_type", [ScalingImplType.CONST] # , ScalingImplType.PARAMETER] -) -@pytest.mark.parametrize("scaling_per_output_channel", [True, False]) -@pytest.mark.parametrize("per_channel_broadcastable_shape", [None, (1, 32, 1, 1)]) @pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_relu( abits, ishape, - scaling_impl_type, - scaling_per_output_channel, - per_channel_broadcastable_shape, QONNX_export, ): + b_act = QuantReLU( + bit_width=abits, + ) + if QONNX_export: + m_path = export_onnx_path + export_qonnx(b_act, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) + model.save(m_path) + else: + export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) + model = ModelWrapper(export_onnx_path) + model = model.transform(InferShapes()) + inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) + idict = {model.graph.input[0].name: inp_tensor} + odict = oxe.execute_onnx(model, idict, True) + produced = odict[model.graph.output[0].name] + inp_tensor = torch.from_numpy(inp_tensor).float() + b_act.eval() + expected = b_act.forward(inp_tensor).detach().numpy() + + assert np.isclose(produced, expected, atol=1e-3).all() + os.remove(export_onnx_path) + + +@pytest.mark.brevitas_export +@pytest.mark.parametrize("abits", [2, 4, 8]) +@pytest.mark.parametrize("ishape", [(1, 15, 4, 4), (1, 32, 1, 1)]) +@pytest.mark.parametrize("QONNX_export", [False, True]) +def test_brevitas_act_export_relu_channel( + abits, + ishape, + QONNX_export, +): + + ch = ishape[1] b_act = QuantReLU( bit_width=abits, max_val=6.0, - scaling_impl_type=scaling_impl_type, - quant_type=QuantType.INT, - scaling_per_output_channel=scaling_per_output_channel, - per_channel_broadcastable_shape=per_channel_broadcastable_shape, + scaling_impl_type=ScalingImplType.CONST, + scaling_per_output_channel=True, + per_channel_broadcastable_shape=(1, ch, 1, 1), ) if QONNX_export: m_path = export_onnx_path From 6b409baf091c1f3e131734a93dc66beafc444486 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 2 Mar 2023 10:44:58 +0100 Subject: [PATCH 425/628] [Thres] remove workaround for vivado_hls bug for T[0][0]=0 case --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 7 ------- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 7 ------- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 7 ------- 3 files changed, 21 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 40f625093b..d6285a6f69 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -709,13 +709,6 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): # ensure all thresholds are integer assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all() ret = orig_thres_matrix - # workaround for vivado_hls threshold bug - if ret[0][0] == 0 and n_thres_steps == 1: - ret = np.copy(ret) - ret[0][0] = 1 - warnings.warn( - "Setting 0-valued first threshold to 1 to avoid vivado_hls bug" - ) # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (mh, 1)) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index ce8c31ee9a..292f70941a 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ 
b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -319,13 +319,6 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): np.mod(orig_thres_matrix, 1), 0 ).all(), "Need int threshold tensor" ret = orig_thres_matrix - # workaround for vivado_hls threshold bug - if ret[0][0] == 0 and n_thres_steps == 1: - ret = np.copy(ret) - ret[0][0] = 1 - warnings.warn( - "Setting 0-valued first threshold to 1 to avoid vivado_hls bug" - ) # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (mh, 1)) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 5d996e10d8..a2dd3c75dc 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -418,13 +418,6 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): # ensure all thresholds are integer assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all() ret = orig_thres_matrix - # workaround for vivado_hls threshold bug - if ret[0][0] == 0 and n_thres_steps == 1: - ret = np.copy(ret) - ret[0][0] = 1 - warnings.warn( - "Setting 0-valued first threshold to 1 to avoid vivado_hls bug" - ) # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (ch, 1)) From b8319a719cea0c6ccf073220c7839688d5d2f557 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 3 Mar 2023 13:58:07 +0000 Subject: [PATCH 426/628] [Tests] Delete vivado_hls bug test from thresholding testing --- tests/fpgadataflow/test_fpgadataflow_thresholding.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py index 96cd69c345..445afdf458 100644 --- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py @@ -132,10 +132,6 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode): odt = act n_steps = act.get_num_possible_values() - 1 T = np.random.randint(idt.min(), idt.max() + 1, (ich, n_steps)).astype(np.float32) - # make the vivado_hls threshold bug appear (incorrect rtlsim result when first - # threshold of first channel is zero, while using BIPOLAR output) - if act == DataType["BIPOLAR"]: - T[0][0] = 0 # provide non-decreasing thresholds T = np.sort(T, axis=1) From 7077e40af03cffdce4f8bcbc9d39d1628ec48f9a Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 3 Mar 2023 14:29:21 +0000 Subject: [PATCH 427/628] [Driver] Update loading of rt weights in driver base --- src/finn/qnn-data/templates/driver/driver_base.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/finn/qnn-data/templates/driver/driver_base.py b/src/finn/qnn-data/templates/driver/driver_base.py index 2096760580..5f6f00da13 100644 --- a/src/finn/qnn-data/templates/driver/driver_base.py +++ b/src/finn/qnn-data/templates/driver/driver_base.py @@ -189,14 +189,10 @@ def load_runtime_weights(self, flush_accel=True, verify=True): layer_ind = int(w_filename.split("_")[1]) rt_weight_dict[(sdp_ind, layer_ind)] = layer_w for sdp_ind, layer_ind in rt_weight_dict.keys(): - cand_if_name = "StreamingDataflowPartition_%d/s_axilite_%d" % ( - sdp_ind, - layer_ind, - ) + cand_if_name = "StreamingDataflowPartition_%d" % sdp_ind if cand_if_name in self.ip_dict.keys(): layer_mmio = getattr( - getattr(self, "StreamingDataflowPartition_%d" % sdp_ind), - "s_axilite_%d" % layer_ind, + 
self, "StreamingDataflowPartition_%d" % sdp_ind ).mmio layer_w = rt_weight_dict[(sdp_ind, layer_ind)] layer_mmio.write_mm(0, layer_w.tobytes()) From 2da2445fda6c63bd19ccedfae6d5614eca7ad241 Mon Sep 17 00:00:00 2001 From: Rachit Garg Date: Sat, 4 Mar 2023 19:55:04 +0100 Subject: [PATCH 428/628] Fixed Summary Typo Changed from "Summmmary" to "Summary" --- notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 388accad3a..f08bcf8488 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -240,7 +240,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We won't go into too much detail about what happens in each transformation and why they are called in the particular order they are (feel free to visualize the intermediate steps using Netron yourself if you are curious) but here is a brief summmmary:\n", + "We won't go into too much detail about what happens in each transformation and why they are called in the particular order they are (feel free to visualize the intermediate steps using Netron yourself if you are curious) but here is a brief summary:\n", "\n", "* `Streamline` moves floating point scaling and addition operations closer to the input of the nearest thresholding activation and absorbs them into thresholds\n", "* `LowerConvsToMatMul` converts ONNX `Conv` nodes into sequences of `Im2Col, MatMul` nodes as discussed above. `Im2Col` is a custom FINN ONNX high-level node type that implements the sliding window operator.\n", From 6f78fa76308d8fc657d257eabe8b5b159290368a Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 6 Mar 2023 10:26:20 +0000 Subject: [PATCH 429/628] [Docs] Modify docstring in set folding transform --- src/finn/transformation/fpgadataflow/set_folding.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index 4e79a3faa5..0a466afe13 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -71,12 +71,13 @@ class SetFolding(Transformation): When folding depthwise convolutions ("VVAU"/VectorVectorActivation) or spatial reduction ops (Pool_Batch): - * the producer of the node is expected to be a ConvolutionInputGenerator - with depthwise=1, whose SIMD value will be set equal to the PE value of - its consumer node - * the VVAU also supports SIMD ("input window") parallelism next to - PE ("channels"), but current ConvInpGen limitations require PE to be fully - unfolded before SIMD is increased + + * the producer of the node is expected to be a ConvolutionInputGenerator + with depthwise=1, whose SIMD value will be set equal to the PE value of + its consumer node + * the VVAU also supports SIMD ("input window") parallelism next to + PE ("channels"), but current ConvInpGen limitations require PE to be fully + unfolded before SIMD is increased """ def __init__( From e0f68c537d7f33c2767b8b9f37e524cbd29c2722 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 6 Mar 2023 10:34:49 +0000 Subject: [PATCH 430/628] [Docs] Update internals.rst --- docs/finn/internals.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index 
848d22afec..c0d1f65aab 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -213,10 +213,14 @@ FINN implements convolution operations by pairing a ConvolutionInputGenerator (o This RTL version is an alternative to the original `HLS implementation `_ and aims to improve on it in the following ways: * Support a wider range of hyperparameters without the fragmentation into 16+ separate HLS functions + * Support additional degrees of parallelism (i.e., across the output window or multiple input samples) that are difficult to implement in HLS + * Support additional features, such as dynamic feature map sizing + * Improve resource efficiency + The component is implemented by generating (System-)Verilog code for each individual instance, realized via the template + replacement dictionary mechanism found in other FINN components. Despite the HDL implementation, the component is managed by its own HLSCustomOp (!) named "ConvolutionInputGenerator_rtl". Naturally, HLS simulation & synthesis are not supported. @@ -277,7 +281,7 @@ The "default" style also supports a dynamic mode, which provides an interface to Folding ------- -The RTL SWG is supported by the basic automatic folding algorithm in FINN (:py:mod:`SetFolding()`). Consider the following implications: +The RTL SWG is supported by the basic automatic folding algorithm in FINN (:py:mod:`finn.transformation.fpgadataflow.set_folding.SetFolding`). Consider the following implications: **MVAU:** Although it is recommended to unfold SIMD first, SIMD and PE can be set independently. Full (and balanced) parallelism is achieved by using the SWG in parallel window mode and setting MVAU SIMD and PE to their maximum values (SIMD = MW = C_in * K, PE = MH = C_out). From 99bc34573339eba0bb5950fe92c91636cc8819ab Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 6 Mar 2023 10:44:58 +0000 Subject: [PATCH 431/628] [Docs] Update API links in internals section --- docs/finn/internals.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index c0d1f65aab..d0c4cd2065 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -224,7 +224,7 @@ This RTL version is an alternative to the original `HLS implementation Date: Tue, 7 Mar 2023 15:52:43 +0000 Subject: [PATCH 432/628] [Tests] Move minimize bit width test and add Jenkins marker --- .../streamline => fpgadataflow}/test_minimize_bit_width.py | 2 ++ 1 file changed, 2 insertions(+) rename tests/{transformation/streamline => fpgadataflow}/test_minimize_bit_width.py (99%) diff --git a/tests/transformation/streamline/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py similarity index 99% rename from tests/transformation/streamline/test_minimize_bit_width.py rename to tests/fpgadataflow/test_minimize_bit_width.py index 866b64445b..7f6778fbf3 100644 --- a/tests/transformation/streamline/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -146,6 +146,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = @pytest.mark.parametrize("wdt", weight_data_types) @pytest.mark.parametrize("rww", [True, False]) +@pytest.mark.fpgadataflow def test_minimize_weight_bit_width(wdt: DataType, rww: bool): """Testing MinimizeWeightBitWidth for VVAU and MVAU. 
@@ -251,6 +252,7 @@ def phi(x: float) -> float: @pytest.mark.parametrize("idt", input_data_types) @pytest.mark.parametrize("tdt", thresh_data_types) @pytest.mark.parametrize("rww", [True, False]) +@pytest.mark.fpgadataflow def test_minimize_accumulator_width( wdt: DataType, idt: DataType, tdt: DataType, rww: bool ): From 21b9c45a3cdf40a0cd15453e3a551116019dcc01 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 7 Mar 2023 08:05:53 -0800 Subject: [PATCH 433/628] Cleaning up weight data types for pytest options --- tests/fpgadataflow/test_minimize_bit_width.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index 7f6778fbf3..dc4a076a18 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -129,7 +129,7 @@ def make_unit_test_model(wdt: DataType, idt: DataType, tdt: Optional[DataType] = DataType["UINT7"], DataType["INT3"], DataType["UINT3"], - DataType["BIPOLAR"], + # DataType["BIPOLAR"], # TODO - add support for bipolar weights DataType["TERNARY"], ] @@ -247,6 +247,15 @@ def phi(x: float) -> float: DataType["INT16"], ] +# Removing unsigned data types fro weights +weight_data_types = [ + DataType["INT8"], + DataType["INT7"], + DataType["INT3"], + # DataType["BIPOLAR"], # TODO - add support for bipolar weights + DataType["TERNARY"], +] + @pytest.mark.parametrize("wdt", weight_data_types) @pytest.mark.parametrize("idt", input_data_types) From 4907f627546c77a7ba296931e6c0dc468c9be81d Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 9 Mar 2023 08:48:04 +0000 Subject: [PATCH 434/628] [Util] Fix interpretation of dtype to check for signed integer --- src/finn/util/data_packing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py index 3602b1bdd5..a41fe882e5 100644 --- a/src/finn/util/data_packing.py +++ b/src/finn/util/data_packing.py @@ -220,7 +220,7 @@ def unpack_innermost_dim_from_hex_string( if conv_dtype == DataType["BIPOLAR"]: ar_list = [2 * x - 1 for x in ar_list] # interpret values as signed values - elif dtype.signed(): + elif conv_dtype.signed() and conv_dtype.is_integer(): mask = 2 ** (conv_dtype.bitwidth() - 1) ar_list = [-(x & mask) + (x & ~mask) for x in ar_list] From 1a2eaaac2fdbf7ee6b24b7458577dbc692659b63 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 13 Mar 2023 10:05:38 +0000 Subject: [PATCH 435/628] [Tests] Remove minimize acc width for vvau tests --- tests/fpgadataflow/test_fpgadataflow_vvau.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index be1ada59a1..95501078d6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -43,9 +43,6 @@ from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP -from finn.transformation.fpgadataflow.minimize_accumulator_width import ( - MinimizeAccumulatorWidth, -) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.prepare_ip import PrepareIP from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim @@ -156,8 +153,6 @@ def _make_single_vvau_modelwrapper( model.set_tensor_datatype("thresh", 
tdt) model.set_initializer("thresh", T) - # Minimize accumulator width to obtain realistic HLS reports - model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) From 6e31105f59b0cba618b1dc452035b98c5f19802b Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 14 Mar 2023 10:06:57 +0000 Subject: [PATCH 436/628] [Tests] Update resource estimates in cybsec test --- tests/end2end/test_end2end_cybsec_mlp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 86942415b9..d2a4d0287f 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -222,7 +222,7 @@ def test_end2end_cybsec_mlp_build(QONNX_export): assert est_cycles_dict["MatrixVectorActivation_1"] == 64 with open(est_res_report, "r") as f: est_res_dict = json.load(f) - assert est_res_dict["total"]["LUT"] == 11360.0 + assert est_res_dict["total"]["LUT"] == 7904.0 assert est_res_dict["total"]["BRAM_18K"] == 36.0 shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build", QONNX_export)) From 40eb5c7d22189065589fb0e592c1cbee6e46f1e7 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 14 Mar 2023 15:12:55 +0100 Subject: [PATCH 437/628] [SWG] Move common modules to static source file --- finn-rtllib/swg/swg_common.sv | 254 ++++++++++++++++ finn-rtllib/swg/swg_template_default.sv | 160 ++--------- .../swg/swg_template_default_dynamic.sv | 30 +- finn-rtllib/swg/swg_template_parallel.sv | 270 ++++-------------- .../convolutioninputgenerator_rtl.py | 29 +- 5 files changed, 345 insertions(+), 398 deletions(-) create mode 100644 finn-rtllib/swg/swg_common.sv diff --git a/finn-rtllib/swg/swg_common.sv b/finn-rtllib/swg/swg_common.sv new file mode 100644 index 0000000000..8dfb8f51a2 --- /dev/null +++ b/finn-rtllib/swg/swg_common.sv @@ -0,0 +1,254 @@ +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). 
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +// loop controller used for both, "default" and "parallel", implementation styles +module swg_controller #( + int unsigned LOOP_H_ITERATIONS, + int unsigned LOOP_W_ITERATIONS, + int unsigned LOOP_KH_ITERATIONS, + int unsigned LOOP_KW_ITERATIONS, + int unsigned LOOP_SIMD_ITERATIONS, + + int unsigned INCR_BITWIDTH, + + bit IS_DEPTHWISE, + + int HEAD_INCR_SIMD, + int HEAD_INCR_KW, + int HEAD_INCR_KH, + int HEAD_INCR_W, + int HEAD_INCR_H, + int TAIL_INCR_W, + int TAIL_INCR_H, + int TAIL_INCR_LAST, + + parameter INNERMOST_STATE +)( + input logic clk, + input logic rst_n, + + input logic advance, + output logic [INCR_BITWIDTH-1:0] addr_incr, + output logic [INCR_BITWIDTH-1:0] tail_incr +); + + // state and counters + typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H + } state_e; + state_e State = INNERMOST_STATE; + state_e state_next; + + logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; + logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; + logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; + logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; + logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; + + // combinational logic for addr_incr generation + always_comb begin : blkHead + unique case (State) + STATE_START : addr_incr = 0; + STATE_LOOP_SIMD : addr_incr = HEAD_INCR_SIMD; + STATE_LOOP_KW : addr_incr = HEAD_INCR_KW; + STATE_LOOP_KH : addr_incr = HEAD_INCR_KH; + STATE_LOOP_W : addr_incr = HEAD_INCR_W; + STATE_LOOP_H : addr_incr = HEAD_INCR_H; + endcase + end + + // combinational logic for tail_incr generation + uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; + assign tail_incr = + tail_incr_inner_condition? 1 : + Counter_loop_w >= 0? TAIL_INCR_W : + Counter_loop_h >= 0? TAIL_INCR_H : + /* else */ TAIL_INCR_LAST; + + // combinational next state logic + always_comb begin : blkState + state_next = State; + if(State != INNERMOST_STATE) state_next = INNERMOST_STATE; + else begin + if(Counter_loop_simd < 0) begin + state_next = + (Counter_loop_kw >= 0)? STATE_LOOP_KW : + (Counter_loop_kh >= 0)? STATE_LOOP_KH : + (Counter_loop_w >= 0)? STATE_LOOP_W : + (Counter_loop_h >= 0)? 
STATE_LOOP_H : + /* else */ STATE_START; + end + end + end : blkState + + // sequential logic + always_ff @ (posedge clk) begin + if(!rst_n) begin + State <= INNERMOST_STATE; + Counter_loop_h <= LOOP_H_ITERATIONS; + Counter_loop_w <= LOOP_W_ITERATIONS; + Counter_loop_kh <= LOOP_KH_ITERATIONS; + Counter_loop_kw <= LOOP_KW_ITERATIONS; + Counter_loop_simd <= LOOP_SIMD_ITERATIONS; + end + else if(advance) begin + State <= state_next; + if (State == INNERMOST_STATE) begin + if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; + else begin + Counter_loop_simd <= LOOP_SIMD_ITERATIONS; + if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; + else begin + Counter_loop_kw <= LOOP_KW_ITERATIONS; + if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; + else begin + Counter_loop_kh <= LOOP_KH_ITERATIONS; + if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; + else begin + Counter_loop_w <= LOOP_W_ITERATIONS; + if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; + else Counter_loop_h <= LOOP_H_ITERATIONS; + end + end + end + end + end + end + end + +endmodule : swg_controller + +// buffer used in "default" implementation style +module swg_cyclic_buffer_addressable #( + int unsigned WIDTH, + int unsigned DEPTH, + parameter RAM_STYLE = "auto" +)( + input logic clk, + + input logic write_enable, + input logic [$clog2(DEPTH)-1:0] write_addr, + input logic [WIDTH-1:0] data_in, + + input logic read_enable, + input logic [$clog2(DEPTH)-1:0] read_addr, // absolute (!) read address of cyclic buffer + output logic [WIDTH-1:0] data_out +); + + (*ram_style=RAM_STYLE*) logic [WIDTH-1:0] Ram[DEPTH]; + logic [WIDTH-1:0] Out = 'x; + always_ff @(posedge clk) begin + if (read_enable) Out <= Ram[read_addr]; + if (write_enable) Ram[write_addr] <= data_in; + end + assign data_out = Out; + +endmodule : swg_cyclic_buffer_addressable + +// buffer used in "parallel" implementation style +module swg_reg_buffer +#( + int unsigned WIDTH = 1, + int unsigned DEPTH = 1 +) +( + input logic CLK, + input logic shift_enable, + input logic [WIDTH-1:0] shift_in, + output logic [WIDTH-1:0] shift_out, + output logic [WIDTH*DEPTH-1:0] data_out +); + +reg [WIDTH-1:0] data [DEPTH-1:0]; + +assign shift_out = data[DEPTH-1]; + +for (genvar e=0; e0; i=i-1) + data[i] <= data[i-1]; + data[0] <= shift_in; + end +end +endmodule : swg_reg_buffer + +// buffer used in "parallel" implementation style +module swg_ram_buffer +#( + int unsigned WIDTH, + int unsigned DEPTH, + parameter RAM_STYLE = "auto" +) +( + input logic CLK, + input logic RST, + input logic shift_enable, + input logic [WIDTH-1:0] shift_in, + output logic [WIDTH-1:0] shift_out +); + +reg [WIDTH-1:0] out_reg; +assign shift_out = out_reg; + +integer addr_w, addr_r; + +(*ram_style=RAM_STYLE*) reg [WIDTH-1:0] ram [DEPTH-1:0]; + +always @(posedge CLK) begin + if (RST == 1'b0) begin + addr_w <= 0; + addr_r <= 1; + end else begin + if (shift_enable) begin + ram[addr_w] <= shift_in; + out_reg <= ram[addr_r]; + + if (addr_w == DEPTH-1) + addr_w <= 0; + else + addr_w <= addr_w + 1; + + if (addr_r == DEPTH-1) + addr_r <= 0; + else + addr_r <= addr_r + 1; + end + end +end +endmodule : swg_ram_buffer diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 06e65e9111..4970762172 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -28,141 +28,6 @@ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH 
DAMAGE. *****************************************************************************/ -module $TOP_MODULE_NAME$_controller #( - int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, - int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, - int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, - int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, - int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, - - int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, - - bit IS_DEPTHWISE = $IS_DEPTHWISE$ -)( - input logic clk, - input logic rst_n, - - input logic advance, - output logic [INCR_BITWIDTH-1:0] addr_incr, - output logic [INCR_BITWIDTH-1:0] tail_incr -); - - // state and counters - typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H - } state_e; - state_e State = $INNERMOST_STATE$; - state_e state_next; - - logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; - logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; - logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; - logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; - logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; - - // combinational logic for addr_incr generation - always_comb begin : blkHead - unique case (State) - 0 : addr_incr = 0; - 1 : addr_incr = $HEAD_INCR_SIMD$; - 2 : addr_incr = $HEAD_INCR_KW$; - 3 : addr_incr = $HEAD_INCR_KH$; - 4 : addr_incr = $HEAD_INCR_W$; - 5 : addr_incr = $HEAD_INCR_H$; - endcase - end - - // combinational logic for tail_incr generation - uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; - assign tail_incr = - tail_incr_inner_condition? 1 : - Counter_loop_w >= 0? $TAIL_INCR_W$ : - Counter_loop_h >= 0? $TAIL_INCR_H$ : - /* else */ $TAIL_INCR_LAST$; - - // combinational next state logic - always_comb begin : blkState - state_next = State; - if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; - else begin - if(Counter_loop_simd < 0) begin - state_next = - (Counter_loop_kw >= 0)? STATE_LOOP_KW : - (Counter_loop_kh >= 0)? STATE_LOOP_KH : - (Counter_loop_w >= 0)? STATE_LOOP_W : - (Counter_loop_h >= 0)? 
STATE_LOOP_H : - /* else */ STATE_START; - end - end - end : blkState - - // sequential logic - always_ff @ (posedge clk) begin - if(!rst_n) begin - State <= $INNERMOST_STATE$; - Counter_loop_h <= LOOP_H_ITERATIONS; - Counter_loop_w <= LOOP_W_ITERATIONS; - Counter_loop_kh <= LOOP_KH_ITERATIONS; - Counter_loop_kw <= LOOP_KW_ITERATIONS; - Counter_loop_simd <= LOOP_SIMD_ITERATIONS; - end - else if(advance) begin - State <= state_next; - if (State == $INNERMOST_STATE$) begin - if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; - else begin - Counter_loop_simd <= LOOP_SIMD_ITERATIONS; - if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; - else begin - Counter_loop_kw <= LOOP_KW_ITERATIONS; - if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; - else begin - Counter_loop_kh <= LOOP_KH_ITERATIONS; - if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; - else begin - Counter_loop_w <= LOOP_W_ITERATIONS; - if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; - else Counter_loop_h <= LOOP_H_ITERATIONS; - end - end - end - end - end - end - end - -endmodule : $TOP_MODULE_NAME$_controller - -module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( - int unsigned WIDTH, - int unsigned DEPTH -)( - input logic clk, - - input logic write_enable, - input logic [$clog2(DEPTH)-1:0] write_addr, - input logic [WIDTH-1:0] data_in, - - input logic read_enable, - input logic [$clog2(DEPTH)-1:0] read_addr, // absolute (!) read address of cyclic buffer - output logic [WIDTH-1:0] data_out -); - - $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; - logic [WIDTH-1:0] Out = 'x; - always_ff @(posedge clk) begin - if (read_enable) Out <= Ram[read_addr]; - if (write_enable) Ram[write_addr] <= data_in; - end - assign data_out = Out; - -endmodule : $TOP_MODULE_NAME$_cyclic_buffer_addressable - module $TOP_MODULE_NAME$_impl #( int BIT_WIDTH, int SIMD, @@ -197,9 +62,10 @@ module $TOP_MODULE_NAME$_impl #( uwire window_buffer_read_enable; uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr; uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; - $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + swg_cyclic_buffer_addressable #( .WIDTH(BUF_IN_WIDTH), - .DEPTH(BUF_ELEM_TOTAL) + .DEPTH(BUF_ELEM_TOTAL), + .RAM_STYLE($RAM_STYLE$) ) window_buffer_inst ( .clk(ap_clk), @@ -216,7 +82,25 @@ module $TOP_MODULE_NAME$_impl #( uwire advance_controller; uwire signed [INCR_BITWIDTH-1:0] addr_incr; uwire [INCR_BITWIDTH-1:0] tail_incr; - $TOP_MODULE_NAME$_controller controller_inst ( + swg_controller #( + .LOOP_H_ITERATIONS($LOOP_H_ITERATIONS$), + .LOOP_W_ITERATIONS($LOOP_W_ITERATIONS$), + .LOOP_KH_ITERATIONS($LOOP_KH_ITERATIONS$), + .LOOP_KW_ITERATIONS($LOOP_KW_ITERATIONS$), + .LOOP_SIMD_ITERATIONS($LOOP_SIMD_ITERATIONS$), + .HEAD_INCR_SIMD($HEAD_INCR_SIMD$), + .HEAD_INCR_KW($HEAD_INCR_KW$), + .HEAD_INCR_KH($HEAD_INCR_KH$), + .HEAD_INCR_W($HEAD_INCR_W$), + .HEAD_INCR_H($HEAD_INCR_H$), + .TAIL_INCR_W($TAIL_INCR_W$), + .TAIL_INCR_H($TAIL_INCR_H$), + .TAIL_INCR_LAST($TAIL_INCR_LAST$), + .INCR_BITWIDTH($INCR_BITWIDTH$), + .IS_DEPTHWISE($IS_DEPTHWISE$), + .INNERMOST_STATE($INNERMOST_STATE$) + ) + controller_inst ( .clk(ap_clk), .rst_n(ap_rst_n), .advance(advance_controller), diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv index eb53978b58..412f8689ba 100644 --- a/finn-rtllib/swg/swg_template_default_dynamic.sv +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -152,31 +152,6 @@ module $TOP_MODULE_NAME$_controller #( endmodule 
: $TOP_MODULE_NAME$_controller -module $TOP_MODULE_NAME$_cyclic_buffer_addressable #( - int unsigned WIDTH, - int unsigned DEPTH -)( - input logic clk, - - input logic write_enable, - input logic [$clog2(DEPTH)-1:0] write_addr, - input logic [WIDTH-1:0] data_in, - - input logic read_enable, - input logic [$clog2(DEPTH)-1:0] read_addr, // absolute (!) read address of cyclic buffer - output logic [WIDTH-1:0] data_out -); - - $RAM_STYLE$ logic [WIDTH-1:0] Ram[DEPTH]; - logic [WIDTH-1:0] Out = 'x; - always_ff @(posedge clk) begin - if (read_enable) Out <= Ram[read_addr]; - if (write_enable) Ram[write_addr] <= data_in; - end - assign data_out = Out; - -endmodule : $TOP_MODULE_NAME$_cyclic_buffer_addressable - module $TOP_MODULE_NAME$_impl #( int BIT_WIDTH, int SIMD, @@ -242,9 +217,10 @@ module $TOP_MODULE_NAME$_impl #( uwire window_buffer_read_enable; uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_write_addr; uwire [$clog2(BUF_ELEM_TOTAL)-1:0] window_buffer_read_addr; - $TOP_MODULE_NAME$_cyclic_buffer_addressable #( + swg_cyclic_buffer_addressable #( .WIDTH(BUF_IN_WIDTH), - .DEPTH(BUF_ELEM_TOTAL) + .DEPTH(BUF_ELEM_TOTAL), + .RAM_STYLE($RAM_STYLE$) ) window_buffer_inst ( .clk(ap_clk), diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv index 767f9c6f85..9fe0f2c5ab 100644 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -28,217 +28,22 @@ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -module $TOP_MODULE_NAME$_controller #( - int unsigned LOOP_H_ITERATIONS = $LOOP_H_ITERATIONS$, - int unsigned LOOP_W_ITERATIONS = $LOOP_W_ITERATIONS$, - int unsigned LOOP_KH_ITERATIONS = $LOOP_KH_ITERATIONS$, - int unsigned LOOP_KW_ITERATIONS = $LOOP_KW_ITERATIONS$, - int unsigned LOOP_SIMD_ITERATIONS = $LOOP_SIMD_ITERATIONS$, - - int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$, - - bit IS_DEPTHWISE = $IS_DEPTHWISE$ -)( - input logic clk, - input logic rst_n, - - input logic advance, - output logic [INCR_BITWIDTH-1:0] addr_incr, - output logic [INCR_BITWIDTH-1:0] tail_incr -); - - // state and counters - typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H - } state_e; - state_e State = $INNERMOST_STATE$; - state_e state_next; - - logic signed [$clog2(LOOP_H_ITERATIONS +2)+1-1:0] Counter_loop_h = LOOP_H_ITERATIONS; - logic signed [$clog2(LOOP_W_ITERATIONS +2)+1-1:0] Counter_loop_w = LOOP_W_ITERATIONS; - logic signed [$clog2(LOOP_KH_ITERATIONS +2)+1-1:0] Counter_loop_kh = LOOP_KH_ITERATIONS; - logic signed [$clog2(LOOP_KW_ITERATIONS +2)+1-1:0] Counter_loop_kw = LOOP_KW_ITERATIONS; - logic signed [$clog2(LOOP_SIMD_ITERATIONS+2)+1-1:0] Counter_loop_simd = LOOP_SIMD_ITERATIONS; - - // combinational logic for addr_incr generation - always_comb begin : blkHead - unique case (State) - 0 : addr_incr = 0; - 1 : addr_incr = $HEAD_INCR_SIMD$; - 2 : addr_incr = $HEAD_INCR_KW$; - 3 : addr_incr = $HEAD_INCR_KH$; - 4 : addr_incr = $HEAD_INCR_W$; - 5 : addr_incr = $HEAD_INCR_H$; - endcase - end - - // combinational logic for tail_incr generation - uwire tail_incr_inner_condition = IS_DEPTHWISE? (Counter_loop_kh >= 0) : 0; - assign tail_incr = - tail_incr_inner_condition? 1 : - Counter_loop_w >= 0? $TAIL_INCR_W$ : - Counter_loop_h >= 0? 
$TAIL_INCR_H$ : - /* else */ $TAIL_INCR_LAST$; - - // combinational next state logic - always_comb begin : blkState - state_next = State; - if(State != $INNERMOST_STATE$) state_next = $INNERMOST_STATE$; - else begin - if(Counter_loop_simd < 0) begin - state_next = - (Counter_loop_kw >= 0)? STATE_LOOP_KW : - (Counter_loop_kh >= 0)? STATE_LOOP_KH : - (Counter_loop_w >= 0)? STATE_LOOP_W : - (Counter_loop_h >= 0)? STATE_LOOP_H : - /* else */ STATE_START; - end - end - end : blkState - - // sequential logic - always_ff @ (posedge clk) begin - if(!rst_n) begin - State <= $INNERMOST_STATE$; - Counter_loop_h <= LOOP_H_ITERATIONS; - Counter_loop_w <= LOOP_W_ITERATIONS; - Counter_loop_kh <= LOOP_KH_ITERATIONS; - Counter_loop_kw <= LOOP_KW_ITERATIONS; - Counter_loop_simd <= LOOP_SIMD_ITERATIONS; - end - else if(advance) begin - State <= state_next; - if (State == $INNERMOST_STATE$) begin - if(Counter_loop_simd >= 0) Counter_loop_simd <= Counter_loop_simd-1; - else begin - Counter_loop_simd <= LOOP_SIMD_ITERATIONS; - if(Counter_loop_kw >= 0) Counter_loop_kw <= Counter_loop_kw-1; - else begin - Counter_loop_kw <= LOOP_KW_ITERATIONS; - if(Counter_loop_kh >= 0) Counter_loop_kh <= Counter_loop_kh-1; - else begin - Counter_loop_kh <= LOOP_KH_ITERATIONS; - if(Counter_loop_w >= 0) Counter_loop_w <= Counter_loop_w-1; - else begin - Counter_loop_w <= LOOP_W_ITERATIONS; - if(Counter_loop_h >= 0) Counter_loop_h <= Counter_loop_h-1; - else Counter_loop_h <= LOOP_H_ITERATIONS; - end - end - end - end - end - end - end - -endmodule : $TOP_MODULE_NAME$_controller - -module $TOP_MODULE_NAME$_reg_buffer -#( - parameter WIDTH = 1, - parameter DEPTH = 1 -) -( - CLK, - shift_enable, - shift_in, - shift_out, - data_out -); - -input CLK, shift_enable; -input [WIDTH-1:0] shift_in; -output [WIDTH-1:0] shift_out; -output [WIDTH*DEPTH-1:0] data_out; - -reg [WIDTH-1:0] data [DEPTH-1:0]; - -assign shift_out = data[DEPTH-1]; - -for (genvar e=0; e0; i=i-1) - data[i] <= data[i-1]; - data[0] <= shift_in; - end -end -endmodule : $TOP_MODULE_NAME$_reg_buffer - -module $TOP_MODULE_NAME$_ram_buffer -#( - parameter WIDTH = 1, - parameter DEPTH = 1 -) -( - CLK, - RST, - shift_enable, - shift_in, - shift_out -); - -input CLK, RST, shift_enable; -input [WIDTH-1:0] shift_in; -output [WIDTH-1:0] shift_out; - -reg [WIDTH-1:0] out_reg; -assign shift_out = out_reg; - -integer addr_w, addr_r; - -$RAM_STYLE$ reg [WIDTH-1:0] ram [DEPTH-1:0]; - -always @(posedge CLK) begin - if (RST == 1'b0) begin - addr_w <= 0; - addr_r <= 1; - end else begin - if (shift_enable) begin - ram[addr_w] <= shift_in; - out_reg <= ram[addr_r]; - - if (addr_w == DEPTH-1) - addr_w <= 0; - else - addr_w <= addr_w + 1; - - if (addr_r == DEPTH-1) - addr_r <= 0; - else - addr_r <= addr_r + 1; - end - end -end -endmodule : $TOP_MODULE_NAME$_ram_buffer module $TOP_MODULE_NAME$_wb #( - parameter IN_WIDTH = 1, // bit-width*C*MMV_in - parameter OUT_ELEM_WIDTH = 1, // bit-width*C - parameter OUT_WIDTH = 1, // bit-width*C*MMV_out - parameter BUFFER_ELEM_TOTAL = 1 + int unsigned IN_WIDTH = 1, // bit-width*C*MMV_in + int unsigned OUT_ELEM_WIDTH = 1, // bit-width*C + int unsigned OUT_WIDTH = 1, // bit-width*C*MMV_out + int unsigned BUFFER_ELEM_TOTAL = 1 ) ( - CLK, - RST, - data_in, - shift_enable, - data_out + input logic CLK, + input logic RST, + input logic shift_enable, + input logic [IN_WIDTH-1:0] data_in, + output logic [OUT_WIDTH-1:0] data_out ); -input CLK, RST; -input [IN_WIDTH-1:0] data_in; -input shift_enable; -output [OUT_WIDTH-1:0] data_out; - $GENERATE_REG_FIFOS$ 
$GENERATE_BRAM_FIFOS$ @@ -252,15 +57,15 @@ $GENERATE_OUTPUT_MAPPING$ endmodule : $TOP_MODULE_NAME$_wb module $TOP_MODULE_NAME$_impl #( - int BIT_WIDTH, - int SIMD, - int MMV_IN, - int MMV_OUT, - int LAST_READ_ELEM = $LAST_READ_ELEM$, - int FIRST_WRITE_ELEM = $FIRST_WRITE_ELEM$, - int LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, - int BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, - int INCR_BITWIDTH = $INCR_BITWIDTH$ + int unsigned BIT_WIDTH, + int unsigned SIMD, + int unsigned MMV_IN, + int unsigned MMV_OUT, + int unsigned LAST_READ_ELEM = $LAST_READ_ELEM$, + int unsigned FIRST_WRITE_ELEM = $FIRST_WRITE_ELEM$, + int unsigned LAST_WRITE_ELEM = $LAST_WRITE_ELEM$, + int unsigned BUF_ELEM_TOTAL = $BUF_ELEM_TOTAL$, + int unsigned INCR_BITWIDTH = $INCR_BITWIDTH$ )( input logic ap_clk, input logic ap_rst_n, @@ -302,7 +107,25 @@ module $TOP_MODULE_NAME$_impl #( uwire advance_controller; uwire signed [INCR_BITWIDTH-1:0] addr_incr; uwire [INCR_BITWIDTH-1:0] tail_incr; - $TOP_MODULE_NAME$_controller controller_inst ( + swg_controller #( + .LOOP_H_ITERATIONS($LOOP_H_ITERATIONS$), + .LOOP_W_ITERATIONS($LOOP_W_ITERATIONS$), + .LOOP_KH_ITERATIONS($LOOP_KH_ITERATIONS$), + .LOOP_KW_ITERATIONS($LOOP_KW_ITERATIONS$), + .LOOP_SIMD_ITERATIONS($LOOP_SIMD_ITERATIONS$), + .HEAD_INCR_SIMD($HEAD_INCR_SIMD$), + .HEAD_INCR_KW($HEAD_INCR_KW$), + .HEAD_INCR_KH($HEAD_INCR_KH$), + .HEAD_INCR_W($HEAD_INCR_W$), + .HEAD_INCR_H($HEAD_INCR_H$), + .TAIL_INCR_W($TAIL_INCR_W$), + .TAIL_INCR_H($TAIL_INCR_H$), + .TAIL_INCR_LAST($TAIL_INCR_LAST$), + .INCR_BITWIDTH($INCR_BITWIDTH$), + .IS_DEPTHWISE($IS_DEPTHWISE$), + .INNERMOST_STATE($INNERMOST_STATE$) + ) + controller_inst ( .clk(ap_clk), .rst_n(ap_rst_n), .advance(advance_controller), @@ -318,16 +141,16 @@ module $TOP_MODULE_NAME$_impl #( // control registers/signals logic Writing_done = 0; logic Write_done = 0; + uwire write_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !Writing_done;; uwire write_ok = write_cmd && (out_V_V_TREADY || Write_done); uwire write_blocked = write_cmd && !out_V_V_TREADY && !Write_done; - uwire write_cmd = !($signed(Current_elem) > Newest_buffered_elem) && !Writing_done;; uwire reading_done = Newest_buffered_elem == LAST_READ_ELEM; uwire read_cmd = !reading_done && ( // if there is still an input element left to read Writing_done || ( // if writing is done (e.g. for skipped rows at FM end due to stride) - $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(First_elem_next_window) && - $signed(((Newest_buffered_elem - (BUF_ELEM_TOTAL - 1)))) < $signed(Current_elem) + $signed(((Newest_buffered_elem - ($signed(BUF_ELEM_TOTAL) - 1)))) < $signed(First_elem_next_window) && + $signed(((Newest_buffered_elem - ($signed(BUF_ELEM_TOTAL) - 1)))) < $signed(Current_elem) ) // (over-)write to buffer if oldest buffered element will no longer be needed ); uwire read_ok = read_cmd && in0_V_V_TVALID && !write_blocked; @@ -347,10 +170,15 @@ module $TOP_MODULE_NAME$_impl #( // write done logic always_ff @(posedge ap_clk) begin - if (advance) begin - Write_done <= 1'b0; //reset flag - end else if (write_ok) //successful W in this cycle, but R still outstanding - Write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! + if(!ap_rst_n) begin + Write_done <= 1'b0; + end + else begin + if (advance) begin + Write_done <= 1'b0; //reset flag + end else if (write_ok) //successful W in this cycle, but R still outstanding + Write_done <= 1'b1; //write can happen even if read is blocked, but only for the current cycle! 
+ end end // main process for advancing counters diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 7ed3de3c19..4a8ddfee90 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -29,6 +29,7 @@ import math import numpy as np import os +import shutil from qonnx.core.datatype import DataType from qonnx.custom_op.general import im2col from qonnx.custom_op.general.im2col import compute_conv_output_dim @@ -616,13 +617,13 @@ def prepare_codegen_default(self): # skip innermost SIMD loop completely if loop_kw_iterations == 1: # skip innermost KW loop completely - code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KH"] + code_gen_dict["$INNERMOST_STATE$"] = [str(3)] # STATE_LOOP_KH loop_kh_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KW"] + code_gen_dict["$INNERMOST_STATE$"] = [str(2)] # STATE_LOOP_KW loop_kw_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_SIMD"] + code_gen_dict["$INNERMOST_STATE$"] = [str(1)] # STATE_LOOP_SIMD loop_simd_iterations -= 1 # -1 because state is initial state cntr_bitwidth = math.ceil( @@ -735,10 +736,10 @@ def prepare_codegen_parallel(self): loop_simd_iterations = 1 if loop_w_iterations == 1: - code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_H"] + code_gen_dict["$INNERMOST_STATE$"] = [str(5)] # STATE_LOOP_H loop_h_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_W"] + code_gen_dict["$INNERMOST_STATE$"] = [str(4)] # STATE_LOOP_W loop_w_iterations -= 1 # -1 because state is initial state # set head and tail address increment values @@ -846,7 +847,7 @@ def prepare_codegen_parallel(self): wire [IN_WIDTH-1:0] reg_fifo_{id}_in; wire [IN_WIDTH-1:0] reg_fifo_{id}_out; wire [IN_WIDTH*{len}-1:0] reg_fifo_{id}; - {name}_reg_buffer + swg_reg_buffer #( .WIDTH(IN_WIDTH), .DEPTH({len}) @@ -871,10 +872,11 @@ def prepare_codegen_parallel(self): """ wire [IN_WIDTH-1:0] bram_fifo_{id}_in; wire [IN_WIDTH-1:0] bram_fifo_{id}_out; - {name}_ram_buffer + swg_ram_buffer #( .WIDTH(IN_WIDTH), - .DEPTH({len}) + .DEPTH({len}), + .RAM_STYLE("{ram_style}") ) ram_buffer_inst_{id} ( @@ -887,6 +889,7 @@ def prepare_codegen_parallel(self): name=self.get_verilog_top_module_name(), id=i, len=bram_fifo_depth, + ram_style=self.get_nodeattr("ram_style") ) ) @@ -1012,10 +1015,7 @@ def generate_hdl(self): self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] ram_style = self.get_nodeattr("ram_style") - if ram_style == "auto": - code_gen_dict["$RAM_STYLE$"] = [""] - else: - code_gen_dict["$RAM_STYLE$"] = ['(* ram_style = "{}" *)'.format(ram_style)] + code_gen_dict["$RAM_STYLE$"] = ["\"{}\"".format(ram_style)] # apply code generation to templates code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") @@ -1062,6 +1062,9 @@ def generate_hdl(self): ) as f: f.write(template_axilite) + # Copy static source file for common core components + shutil.copy2(os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir) + # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain self.set_nodeattr("ipgen_path", code_gen_dir) @@ -1081,6 +1084,7 @@ def prepare_rtlsim(self): verilog_files = [ 
self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", + "swg_common.sv" ] if self.get_nodeattr("dynamic_mode"): verilog_files.append(self.get_nodeattr("gen_top_module") + "_axilite.v") @@ -1104,6 +1108,7 @@ def code_generation_ipi(self): sourcefiles = [ self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", + "swg_common.sv" ] if self.get_nodeattr("dynamic_mode"): From f94e1cbdca5cfdf2f299d3d072b85fc36406d4df Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 15 Mar 2023 11:43:30 +0000 Subject: [PATCH 438/628] [CustomOp] pre-commit on rtl swg --- .../convolutioninputgenerator_rtl.py | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 4a8ddfee90..5fe578e99c 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -617,13 +617,13 @@ def prepare_codegen_default(self): # skip innermost SIMD loop completely if loop_kw_iterations == 1: # skip innermost KW loop completely - code_gen_dict["$INNERMOST_STATE$"] = [str(3)] # STATE_LOOP_KH + code_gen_dict["$INNERMOST_STATE$"] = [str(3)] # STATE_LOOP_KH loop_kh_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(2)] # STATE_LOOP_KW + code_gen_dict["$INNERMOST_STATE$"] = [str(2)] # STATE_LOOP_KW loop_kw_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(1)] # STATE_LOOP_SIMD + code_gen_dict["$INNERMOST_STATE$"] = [str(1)] # STATE_LOOP_SIMD loop_simd_iterations -= 1 # -1 because state is initial state cntr_bitwidth = math.ceil( @@ -736,10 +736,10 @@ def prepare_codegen_parallel(self): loop_simd_iterations = 1 if loop_w_iterations == 1: - code_gen_dict["$INNERMOST_STATE$"] = [str(5)] # STATE_LOOP_H + code_gen_dict["$INNERMOST_STATE$"] = [str(5)] # STATE_LOOP_H loop_h_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(4)] # STATE_LOOP_W + code_gen_dict["$INNERMOST_STATE$"] = [str(4)] # STATE_LOOP_W loop_w_iterations -= 1 # -1 because state is initial state # set head and tail address increment values @@ -860,7 +860,6 @@ def prepare_codegen_parallel(self): .shift_out(reg_fifo_{id}_out), .data_out(reg_fifo_{id}) );""".format( - name=self.get_verilog_top_module_name(), id=i, len=len(reg_fifo), ) @@ -886,10 +885,9 @@ def prepare_codegen_parallel(self): .shift_in(bram_fifo_{id}_in), .shift_out(bram_fifo_{id}_out) );""".format( - name=self.get_verilog_top_module_name(), id=i, len=bram_fifo_depth, - ram_style=self.get_nodeattr("ram_style") + ram_style=self.get_nodeattr("ram_style"), ) ) @@ -1015,7 +1013,7 @@ def generate_hdl(self): self.set_nodeattr("gen_top_module", self.get_verilog_top_module_name()) code_gen_dict["$BIT_WIDTH$"] = [str(self.get_input_datatype().bitwidth())] ram_style = self.get_nodeattr("ram_style") - code_gen_dict["$RAM_STYLE$"] = ["\"{}\"".format(ram_style)] + code_gen_dict["$RAM_STYLE$"] = ['"{}"'.format(ram_style)] # apply code generation to templates code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") @@ -1063,7 +1061,9 @@ def generate_hdl(self): f.write(template_axilite) # Copy static source file for common core components - shutil.copy2(os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir) + shutil.copy2( + 
os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir + ) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain @@ -1084,7 +1084,7 @@ def prepare_rtlsim(self): verilog_files = [ self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", - "swg_common.sv" + "swg_common.sv", ] if self.get_nodeattr("dynamic_mode"): verilog_files.append(self.get_nodeattr("gen_top_module") + "_axilite.v") @@ -1108,7 +1108,7 @@ def code_generation_ipi(self): sourcefiles = [ self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", - "swg_common.sv" + "swg_common.sv", ] if self.get_nodeattr("dynamic_mode"): From 71ea7485fa9cc00f84cc5a9f9afe41776f109e35 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 15 Mar 2023 16:59:44 +0000 Subject: [PATCH 439/628] [Builder] infer data types after bit width minimization --- src/finn/builder/build_dataflow_steps.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index ba5a23f411..e43a29d632 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -488,6 +488,8 @@ def step_minimize_bit_width(model: ModelWrapper, cfg: DataflowBuildConfig): if cfg.minimize_bit_width: model = model.transform(MinimizeWeightBitWidth()) model = model.transform(MinimizeAccumulatorWidth()) + # make sure the changed datatypes are propagated through the network + model = model.transform(InferDataTypes()) return model From 24bd19072c4631c842d4142b695cf4819abecc6e Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 23 Mar 2023 15:30:23 +0100 Subject: [PATCH 440/628] [SWG] Incorporate minor comments on Verilog code --- finn-rtllib/swg/swg_common.sv | 55 ++++++++++--------- finn-rtllib/swg/swg_template_parallel.sv | 8 +-- .../convolutioninputgenerator_rtl.py | 6 +- 3 files changed, 35 insertions(+), 34 deletions(-) diff --git a/finn-rtllib/swg/swg_common.sv b/finn-rtllib/swg/swg_common.sv index 8dfb8f51a2..ff6778973c 100644 --- a/finn-rtllib/swg/swg_common.sv +++ b/finn-rtllib/swg/swg_common.sv @@ -185,25 +185,25 @@ module swg_reg_buffer int unsigned DEPTH = 1 ) ( - input logic CLK, + input logic clk, input logic shift_enable, input logic [WIDTH-1:0] shift_in, output logic [WIDTH-1:0] shift_out, output logic [WIDTH*DEPTH-1:0] data_out ); -reg [WIDTH-1:0] data [DEPTH-1:0]; +logic [WIDTH-1:0] Data [DEPTH-1:0]; -assign shift_out = data[DEPTH-1]; +assign shift_out = Data[DEPTH-1]; -for (genvar e=0; e0; i=i-1) - data[i] <= data[i-1]; - data[0] <= shift_in; + for (int i=DEPTH-1; i>0; i--) + Data[i] <= Data[i-1]; + Data[0] <= shift_in; end end endmodule : swg_reg_buffer @@ -216,38 +216,39 @@ module swg_ram_buffer parameter RAM_STYLE = "auto" ) ( - input logic CLK, - input logic RST, + input logic clk, + input logic rst_n, input logic shift_enable, input logic [WIDTH-1:0] shift_in, output logic [WIDTH-1:0] shift_out ); -reg [WIDTH-1:0] out_reg; -assign shift_out = out_reg; +logic [WIDTH-1:0] Out_reg; +assign shift_out = Out_reg; -integer addr_w, addr_r; +logic [$clog2(DEPTH)-1:0] Addr_w = 0; +logic [$clog2(DEPTH)-1:0] Addr_r = 0; -(*ram_style=RAM_STYLE*) reg [WIDTH-1:0] ram [DEPTH-1:0]; +(*ram_style=RAM_STYLE*) logic [WIDTH-1:0] Ram [DEPTH-1:0]; -always @(posedge CLK) begin - if (RST == 1'b0) begin - addr_w <= 0; - addr_r <= 1; +always_ff @(posedge clk) begin + if (rst_n == 1'b0) begin + Addr_w <= 0; + Addr_r <= 1; end 
else begin if (shift_enable) begin - ram[addr_w] <= shift_in; - out_reg <= ram[addr_r]; + Ram[Addr_w] <= shift_in; + Out_reg <= Ram[Addr_r]; - if (addr_w == DEPTH-1) - addr_w <= 0; + if (Addr_w == DEPTH-1) + Addr_w <= 0; else - addr_w <= addr_w + 1; + Addr_w <= Addr_w + 1; - if (addr_r == DEPTH-1) - addr_r <= 0; + if (Addr_r == DEPTH-1) + Addr_r <= 0; else - addr_r <= addr_r + 1; + Addr_r <= Addr_r + 1; end end end diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv index 9fe0f2c5ab..b55a51e400 100644 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -37,8 +37,8 @@ module $TOP_MODULE_NAME$_wb int unsigned BUFFER_ELEM_TOTAL = 1 ) ( - input logic CLK, - input logic RST, + input logic clk, + input logic rst_n, input logic shift_enable, input logic [IN_WIDTH-1:0] data_in, output logic [OUT_WIDTH-1:0] data_out @@ -96,8 +96,8 @@ module $TOP_MODULE_NAME$_impl #( ) window_buffer_inst ( - .CLK(ap_clk), - .RST(ap_rst_n), + .clk(ap_clk), + .rst_n(ap_rst_n), .data_in(window_buffer_in), .shift_enable(window_buffer_shift_enable), .data_out(window_buffer_out) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 5fe578e99c..77a435640c 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -854,7 +854,7 @@ def prepare_codegen_parallel(self): ) reg_buffer_inst_{id} ( - .CLK(CLK), + .clk(clk), .shift_enable(shift_enable), .shift_in(reg_fifo_{id}_in), .shift_out(reg_fifo_{id}_out), @@ -879,8 +879,8 @@ def prepare_codegen_parallel(self): ) ram_buffer_inst_{id} ( - .CLK(CLK), - .RST(RST), + .clk(clk), + .rst_n(rst_n), .shift_enable(shift_enable), .shift_in(bram_fifo_{id}_in), .shift_out(bram_fifo_{id}_out) From ad4678a4d460814444a6368a206b7ff5559876d0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 3 Apr 2023 15:35:47 +0100 Subject: [PATCH 441/628] [jenkins] add node label to Jenkinsfile Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index e3e5b5f7f9..2e195d105e 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,4 +1,4 @@ -node { +node('finn-build') { def app stage('Clone repository') { /* Let's make sure we have the repository cloned to our workspace */ From a14bf7ea9e41c1dc5b21bf18bcb8e04105803b41 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 15:31:09 +0100 Subject: [PATCH 442/628] [jenkins] introduce basic declaritive pipeline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 67 +++++++++++++------------------------- 1 file changed, 22 insertions(+), 45 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2e195d105e..fee116da3a 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,46 +1,23 @@ -node('finn-build') { - def app - stage('Clone repository') { - /* Let's make sure we have the repository cloned to our workspace */ - checkout scm +pipeline { + agent { node { label 'finn-build' } } + environment { + FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" + FINN_XILINX_VERSION="2022.1" + FINN_DOCKER_TAG="xilinx/finn:jenkins" + FINN_HOST_BUILD_DIR="/scratch/users/finn_ci" + PLATFORM_REPO_PATHS="/opt/xilinx/platforms" + } + stages { + 
stage('Quicktest') { + steps { + sh 'echo "Hello FINN"' + sh 'hostname' + sh 'whoami' + sh 'pwd' + sh 'docker login' + sh 'printenv | sort' + sh 'run-docker.sh quicktest' + } } - withEnv([ - "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64", - "FINN_XILINX_VERSION=2022.1", - "FINN_DOCKER_TAG=xilinx/finn:jenkins", - "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", - "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" - ]){ - parallel firstBranch: { - stage('Brevitas export') { - dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") - } - } - }, secondBranch: { - stage('Streamlining transformations') { - dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mstreamline") - } - } - }, thirdBranch: { - stage('Util functions') { - dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mutil") - } - } - }, fourthBranch: { - stage('General transformations') { - dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mtransform") - } - } - }, fifthBranch: { - stage('Fpgadataflow transformations and simulations') { - dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") - } - } - } - } -} + } +} \ No newline at end of file From 60033063c3ca57542120e8b49b1f2baf374ebfe3 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:08:35 +0100 Subject: [PATCH 443/628] [jenkins] move into the test dir before running quicktest Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index fee116da3a..dfe8b42f58 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -10,13 +10,15 @@ pipeline { stages { stage('Quicktest') { steps { - sh 'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh 'run-docker.sh quicktest' + dir("finn") { + sh 'echo "Hello FINN"' + sh 'hostname' + sh 'whoami' + sh 'pwd' + sh 'docker login' + sh 'printenv | sort' + sh './run-docker.sh quicktest' + } } } } From da54e373e3be095804533648e39273366b42aef8 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:09:44 +0100 Subject: [PATCH 444/628] [jenkins] keep 30 builds in build history Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index dfe8b42f58..db0bf15815 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,4 +1,7 @@ pipeline { + options { + buildDiscarder(logRotator(numToKeepStr: '30', artifactNumToKeepStr: '30')) + } agent { node { label 'finn-build' } } environment { FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" From da57f9bcdbfac83e8b2f9545a74582ccdb2c2d4c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:19:49 +0100 Subject: [PATCH 445/628] Revert "[jenkins] move into the test dir before running quicktest" This reverts commit 60033063c3ca57542120e8b49b1f2baf374ebfe3. 
--- docker/jenkins/Jenkinsfile | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index db0bf15815..1497c5f843 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -13,15 +13,13 @@ pipeline { stages { stage('Quicktest') { steps { - dir("finn") { - sh 'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh './run-docker.sh quicktest' - } + sh 'echo "Hello FINN"' + sh 'hostname' + sh 'whoami' + sh 'pwd' + sh 'docker login' + sh 'printenv | sort' + sh 'run-docker.sh quicktest' } } } From 0d3d69228b5d816a156e30acc11b9e1b48d220a0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:22:24 +0100 Subject: [PATCH 446/628] [jenkins] the './' was necessary to run the test, not moving into a new directory Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1497c5f843..2107524169 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -19,7 +19,7 @@ pipeline { sh 'pwd' sh 'docker login' sh 'printenv | sort' - sh 'run-docker.sh quicktest' + sh './run-docker.sh quicktest' } } } From c6ee5a6f0b29fde7acf426cd54bdf0d3cd03a596 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 4 Apr 2023 17:30:56 +0100 Subject: [PATCH 447/628] [Deps] Update qonnx commit version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 1e01a058ff..e039ca9144 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="d9ac34c638ccbdcd3b3f5cd236fe76d611b08f6a" +QONNX_COMMIT="20a34289cf2297d2b2bbbe75d6ac152ece86e3b4" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From 90cc515938ed18eba01ffd15d33dc9b24a2b6efe Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 4 Apr 2023 17:33:21 +0100 Subject: [PATCH 448/628] [QONNX conversion] Update infer quant avg pool 2d --- .../qonnx/infer_quant_avg_pool_2d.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py index 5a3f176f1f..bd3ff15645 100644 --- a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py +++ b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py @@ -46,7 +46,7 @@ def _get_signed_from_upstream(model, trunc_node): # Check if the input of this node already has a FINN datatype signed = None inp_dt = model.get_tensor_datatype(node.input[0]) - if inp_dt is not None and inp_dt is not DataType["FLOAT32"]: + if inp_dt is not None and inp_dt != "FLOAT32": signed = inp_dt.signed() # Go further up the graph, since the datatype inference works top down # these nodes should either be sign preserving ops or they already have a @@ -67,23 +67,27 @@ def _get_signed_from_upstream(model, trunc_node): ) next_node = next_node[0] out_dt = model.get_tensor_datatype(next_node.output[0]) - if out_dt is not None and out_dt is not DataType["FLOAT32"]: + if out_dt is not None and out_dt != "FLOAT32": signed = out_dt.signed() break # Special cases where the node has an internal or intrinsic datatype. if next_node.op_type == "MultiThreshold": - mt_inst = getCustomOp(next_node) + mt_inst = getCustomOp( + next_node, onnx_opset_version=9, brevitas_exception=True + ) out_dt = DataType[mt_inst.get_nodeattr("out_dtype")] - if out_dt is not None and out_dt is not DataType["FLOAT32"]: + if out_dt is not None and out_dt != "FLOAT32": signed = out_dt.signed() break if next_node.op_type == "BipolarQuant": signed = True break if next_node.op_type == "Quant": - q_inst = getCustomOp(next_node) + q_inst = getCustomOp( + next_node, onnx_opset_version=9, brevitas_exception=True + ) out_dt = q_inst.get_integer_datatype(model) - if out_dt is not None and out_dt is not DataType["FLOAT32"]: + if out_dt is not None and out_dt != "FLOAT32": signed = out_dt.signed() break From 744a43dc5db11df8d44eaf3ae7e08c21ac67d7de Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Apr 2023 14:17:11 +0100 Subject: [PATCH 449/628] [Transform] Update check for dt in infer quant avg pool --- .../qonnx/infer_quant_avg_pool_2d.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py index bd3ff15645..d2aaee59a4 100644 --- a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py +++ b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py @@ -46,7 +46,7 @@ def _get_signed_from_upstream(model, trunc_node): # Check if the input of this node already has a FINN datatype signed = None inp_dt = model.get_tensor_datatype(node.input[0]) - if inp_dt is not None and inp_dt != "FLOAT32": + if inp_dt is not None and inp_dt != DataType["FLOAT32"]: signed = inp_dt.signed() # Go further up the graph, since the datatype inference works top down # these nodes should either be sign preserving ops 
or they already have a @@ -67,27 +67,23 @@ def _get_signed_from_upstream(model, trunc_node): ) next_node = next_node[0] out_dt = model.get_tensor_datatype(next_node.output[0]) - if out_dt is not None and out_dt != "FLOAT32": + if out_dt is not None and out_dt != DataType["FLOAT32"]: signed = out_dt.signed() break # Special cases where the node has an internal or intrinsic datatype. if next_node.op_type == "MultiThreshold": - mt_inst = getCustomOp( - next_node, onnx_opset_version=9, brevitas_exception=True - ) + mt_inst = getCustomOp(next_node, onnx_opset_version=9) out_dt = DataType[mt_inst.get_nodeattr("out_dtype")] - if out_dt is not None and out_dt != "FLOAT32": + if out_dt is not None and out_dt != DataType["FLOAT32"]: signed = out_dt.signed() break if next_node.op_type == "BipolarQuant": signed = True break if next_node.op_type == "Quant": - q_inst = getCustomOp( - next_node, onnx_opset_version=9, brevitas_exception=True - ) + q_inst = getCustomOp(next_node, onnx_opset_version=9) out_dt = q_inst.get_integer_datatype(model) - if out_dt is not None and out_dt != "FLOAT32": + if out_dt is not None and out_dt != DataType["FLOAT32"]: signed = out_dt.signed() break From 2d42124deb1a679f0e749df8633090ec43443db7 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 5 Apr 2023 16:25:29 +0200 Subject: [PATCH 450/628] [SWG] Minor fixes --- src/finn/builder/build_dataflow_steps.py | 1 + .../fpgadataflow/convolutioninputgenerator_rtl.py | 8 ++++---- tests/fpgadataflow/test_convert_to_hls_conv_layer.py | 5 +---- .../test_fpgadataflow_convinputgenerator_rtl.py | 11 ++++++----- 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index b4a0374fb8..a22b5adc98 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -589,6 +589,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): hw_attrs = [ "PE", "SIMD", + "parallel_window", "ram_style", "depth", "impl_style", diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 77a435640c..173a157841 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -564,13 +564,13 @@ def prepare_codegen_default(self): ) addr_incr_end_simd = -buffer_min_size + (channel_factor + 1) - # sanity check + # sanity check for wrap logic assert not ( abs(addr_incr_end_window) > buffer_actual_size - ), "ERROR: W increment > buffer size, wrap logic doesn't account for this" + ), "ERROR: W increment > buffer size, try setting parallel_window=1" assert not ( abs(addr_incr_end_row) > buffer_actual_size - ), "ERROR: H increment > buffer size, wrap logic doesn't account for this" + ), "ERROR: H increment > buffer size, try setting parallel_window=1" # set certain threshold indices to detect when reading/writing finishes code_gen_dict["$LAST_READ_ELEM$"] = [str(h * w * channel_factor - 1)] @@ -753,7 +753,7 @@ def prepare_codegen_parallel(self): tail_incr_w = addr_incr_end_window + buffer_min_size - 1 tail_incr_h = addr_incr_end_row + buffer_min_size - 1 - tail_incr_last_window = buffer_min_size - 1 + tail_incr_last_window = stride_w addr_incr_end_simd = 1 addr_incr_end_window_elem = 1 diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index de31ef0f12..7b2793712d 
100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -73,9 +73,6 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod if use_rtl_swg and exec_mode == "cppsim": pytest.skip("cppsim not supported for RTL SWG") - if use_rtl_swg and kernel_size == 1: - pytest.skip("1x1 kernel not supported by current RTL SWG") - if depthwise is True: group = out_chn = in_chn conv_param_shape = [out_chn, 1, kernel_size, kernel_size] @@ -164,7 +161,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod inp_dict = {model.graph.input[0].name: x} assert oxe.compare_execution(model, new_model, inp_dict) - if kernel_size == 1 and stride > 1 and pad == 0: + if not use_rtl_swg and kernel_size == 1 and stride > 1 and pad == 0: assert new_model.graph.node[1].op_type == "DownSampler" if exec_mode == "rtlsim": node = new_model.get_nodes_by_op_type("DownSampler")[0] diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 6775498610..2f3ad0a23d 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -198,12 +198,13 @@ def test_fpgadataflow_slidingwindow_rtl( pytest.skip( "Illegal convolution configuration: kernel or stride > FM dimension" ) - if (k_h == 1 and (stride_h != 1 or dilation_h != 1)) or ( - k_w == 1 and (stride_w != 1 or dilation_w != 1) - ): + if (k_h == 1 and dilation_h != 1) or (k_w == 1 and dilation_w != 1): pytest.skip( - """Illegal convolution configuration: - stride or dilation defined for unitary kernel dim""" + "Illegal convolution configuration: dilation for unitary kernel dim" + ) + if (stride_h > k_h) or (stride_w > k_w) and not parallel_window: + pytest.skip( + "Not all combinations for stride > k edge case supported in default mode" ) if k_h == 1 and k_w == 1 and simd != ifm_ch: pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") From b056303b65cac8a5afa872c7822ea5b9313617be Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Apr 2023 16:54:38 +0100 Subject: [PATCH 451/628] [Tests] Update avg pool export testing and disable unsigned testing temporarily --- .../brevitas/test_brevitas_avg_pool_export.py | 87 +++++++------------ 1 file changed, 31 insertions(+), 56 deletions(-) diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py index 9c35910366..9550031b32 100644 --- a/tests/brevitas/test_brevitas_avg_pool_export.py +++ b/tests/brevitas/test_brevitas_avg_pool_export.py @@ -30,9 +30,8 @@ import numpy as np import os import torch -from brevitas.export import export_finn_onnx, export_qonnx -from brevitas.nn import QuantAvgPool2d -from brevitas.quant_tensor import QuantTensor +from brevitas.export import export_qonnx +from brevitas.nn import QuantAvgPool2d, QuantIdentity, QuantReLU from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_datatypes import InferDataTypes @@ -47,10 +46,9 @@ @pytest.mark.brevitas_export -@pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.parametrize("kernel_size", [2, 3]) @pytest.mark.parametrize("stride", [1, 2]) -@pytest.mark.parametrize("signed", [True, False]) +@pytest.mark.parametrize("signed", [True]) # TODO: Add unsigned test case @pytest.mark.parametrize("bit_width", [2, 4]) 
@pytest.mark.parametrize("input_bit_width", [4, 8, 16]) @pytest.mark.parametrize("channels", [2, 4]) @@ -63,79 +61,56 @@ def test_brevitas_avg_pool_export( input_bit_width, channels, idim, - QONNX_export, ): - export_onnx_path = base_export_onnx_path.replace( - ".onnx", f"test_QONNX-{QONNX_export}.onnx" - ) + export_onnx_path = base_export_onnx_path.replace(".onnx", "test_QONNX.onnx") + if signed: + quant_node = QuantIdentity( + bit_width=input_bit_width, + return_quant_tensor=True, + ) + else: + quant_node = QuantReLU( + bit_width=input_bit_width, + return_quant_tensor=True, + ) quant_avgpool = QuantAvgPool2d( kernel_size=kernel_size, stride=stride, bit_width=bit_width, return_quant_tensor=False, + float_to_int_impl_type="FLOOR", ) - quant_avgpool.eval() + model_brevitas = torch.nn.Sequential(quant_node, quant_avgpool) + model_brevitas.eval() # determine input - prefix = "INT" if signed else "UINT" - dt_name = prefix + str(input_bit_width) - dtype = DataType[dt_name] input_shape = (1, channels, idim, idim) - input_array = gen_finn_dt_tensor(dtype, input_shape) - # Brevitas QuantAvgPool layers need QuantTensors to export correctly - # which requires setting up a QuantTensor instance with the scale - # factor, zero point, bitwidth and signedness - scale_array = np.ones((1, channels, 1, 1)).astype(np.float32) - scale_array *= 0.5 - input_tensor = torch.from_numpy(input_array * scale_array).float() - scale_tensor = torch.from_numpy(scale_array).float() - zp = torch.tensor(0.0) - input_quant_tensor = QuantTensor( - input_tensor, scale_tensor, zp, input_bit_width, signed, training=False - ) + input_array = gen_finn_dt_tensor(DataType["FLOAT32"], input_shape) - # export - if QONNX_export: - export_qonnx( - quant_avgpool, - export_path=export_onnx_path, - input_t=input_quant_tensor, - ) - model = ModelWrapper(export_onnx_path) + input_tensor = torch.from_numpy(input_array).float() - # Statically set the additional inputs generated by the Brevitas ONNX export - model.graph.input.remove(model.graph.input[3]) - model.graph.input.remove(model.graph.input[2]) - model.graph.input.remove(model.graph.input[1]) - model.set_initializer("1", scale_array) - model.set_initializer("2", np.array(0.0).astype(np.float32)) - model.set_initializer("3", np.array(input_bit_width).astype(np.float32)) - model.save(export_onnx_path) + # export + export_qonnx( + model_brevitas, + export_path=export_onnx_path, + input_t=input_tensor, + ) + model = ModelWrapper(export_onnx_path) + model.save(export_onnx_path) - qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) - model = ModelWrapper(export_onnx_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(export_onnx_path) - else: - export_finn_onnx( - quant_avgpool, export_path=export_onnx_path, input_t=input_quant_tensor - ) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(InferDataTypes()) # reference brevitas output - ref_output_array = quant_avgpool(input_quant_tensor).detach().numpy() + ref_output_array = model_brevitas(input_tensor).detach().numpy() # finn output - if QONNX_export: - # Manually apply the Quant tensor scaling for QONNX - idict = {model.graph.input[0].name: input_array * scale_array} - else: - idict = {model.graph.input[0].name: input_array} + idict = {model.graph.input[0].name: input_array} odict = oxe.execute_onnx(model, idict, True) finn_output = 
odict[model.graph.output[0].name] # compare outputs assert np.isclose(ref_output_array, finn_output).all() # cleanup - # assert False os.remove(export_onnx_path) From 76a8f6338987f991ee0fe1901b4beb4758d5d469 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Apr 2023 14:49:48 +0100 Subject: [PATCH 452/628] [Docs/Tests] Remove unused images and replacing mobilenet test image --- docs/img/finn-examples-header.png | Bin 26332 -> 0 bytes docs/img/imagenet.jpg | Bin 296033 -> 0 bytes tests/brevitas/king_charles.jpg | Bin 11954 -> 61443 bytes 3 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs/img/finn-examples-header.png delete mode 100644 docs/img/imagenet.jpg diff --git a/docs/img/finn-examples-header.png b/docs/img/finn-examples-header.png deleted file mode 100644 index 50f8fa7761e10a958ed3567f268ef675cf1814f7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26332 zcmZ^~WmH^C*ENb0BoN#o3GVLh?(XjH4haxk0t9ym?%KG!ySuwekZsJ;Qs*V)%X#eZ}s~GwJ-vC^`z7XJQ zV_b>htH}KOS*ia<+ZmOhqur1aD!~mk20H$)%hkB>B!{nGrQGy~j_zjG*X34nkV^So z;q;eupZ+Ij9w=Pv#m!XUp7>8A z)5>Pp8^S@qWL#ZYaTpE$9`;L*2L28{ErsvS$c`+s?jIb?KLZwy8y)c-zoH6>?j7J& z%ti2AxKc?kr3NOS;@?9DLT9AnKRnX?5!TFbw6hV}3WO-QOymgLZFa?O-nhVT!s^@t zqot?I0jNn6lmrD=ZnX+_?e9q?*Um*doH0lHGkEzWAV7{`%nI$Moy;8ALz-R7g^#fR zr)&Y*4=uv4shts zLz+XZ_N%{c?XEx2(+lv{G~qy~Jr}+KYMYFm-CCLgBW>k8WjLq~ZBqSbaCuqbTGo`1 zXCJ1a*X|YHiYl=Epw4}4E_owaKy_TpjI2N~N27)*ZXKgNBl!GfuON&j^|*f|ffq9HQ>2k+*s(_WuWbmCRhn99EXI|iX45XxajiQQ4N%m1EQ}?XEA{wKSy56PH7H-CE3Y)F@t7(?C{YZvgc(M zI1*+I*jO!t;<^MU+rLVjWNh08e#X1nQv~>Kg?EbC+@dWX$p`RQ;dh_gbQ%(b|8>d?+t1jX=U-Py*g-v`>MEyJp0! zUvAp`xI^`2a{@q4JXw`Nun_t~no4fkL&Wu`hV%fu&oReIkI5+Ri}p=x9pIFuyqn_# zN7H#-U7`x6(0n@+*=%ikp|d`B~~6{hZX-0FLtm8`;enLk*pjE+{Z z{?7%Uk-wW1eh#9iiHV6JT)nfLd{blyT@>qnK(eZ>5}JGH`$q)9N{u9-57*L7gpZ~Y z#j;uBtozuwPUX;3RQ*Pk=X9lPU_Iepvv@KcufT`>Y}kxV$c#QTh53S?k76H+t?4z2 zV{vZSXySAcNy(kRAUBuw*(3g+SNmziL--=~s$%8{24+VJ;;7s?mOER?e-kH$7J&_m zd>x$m@M?4njNrZ!gUdq(ZaBcsrEUtO)N~$v^3JXUkAhC( zNyB5UV#}$#F7?>ep84)UZq&gc?34H`6x@JH&<#;z0&XtJTO0UpB@>$U9zavUi6Sl5 z;v+OP_-~}X#*^reMEJj0_z|%3Yir|ANgZAIj+DEUc2uVQgi5F)gDj7Ys_SBrndm%P&W2U0UH7T?! 
z7XF=GWpM)NEjU1*O9SS&2f^KL$c;9OJ-X2QzBDRD9e(z}6?kkVc|1lc|j_@$wB!8bq5auTU@|>?=`&Vg~l#yO!bXdE#$4TK8@pnubO0<}>+A>1%!N z3Ia(VPLll?3EPFpj5FmYRZE^Ka1P`f-OFwnlWF$~+K+~eGfU+t zO_YzptX5hh0+Ti1Y$GKr$zrcwU>hN!frPN6fJL_e z({b`(#9L<&ici1jm*I>>BgkwfLioeSm3OKkgz~*^!@fhfd)wVQA z(tfvu(JCex+?#!8s5vR}%2$zFq{xfdXHS{wNAEN<%it<1@{Ci5S#s8FQut1VF{>!wt!T2tNu68{gLPZ(jYpRTH(-%_ zRJ$I4z`RjbvMUe66kAh>%R8y%hI}r;E z3sc&+UsXn@+M~=D1CAZFCa>A|dt_N>BK^iP*7xy)t3<1^g^2 zOc{S+s*9~!PC$tmb-6=#gg*k3_}!t-W@qg%-*U?(kG$Ezw zj8=UFD8>fT@TVdSu;t;hKZ6sV{foBD!?r`KC+odEUf&J&wk`yD&!3((r)OImH%6Kv zbKd5eN*OJp;o^+R0eI-k=C7GCx>R<`%oFa5usS)T`!p#2m{WY|8#@tM6uvN_s@P$D z>nw@;9OWs{$pGDzDPbg$m=5;y>k5=OR#LN$MJE=`f%;w-O7GlH9RfT~gRfZ!i^h8e zfXJ4Fmv=4h1&>LAq03;(9-sqN475>J5aBPCZxH;gU|J{Wv#jhcbf;(rD zxn?Fx5nZ5lDE`(FAY6RJwJ!Gg#eGSfI_*T&g7p2zW+(z(u3_QR?s~7d5I8Tg!RL2> z_j{U>zSwg+?6MQg69FM`S5x0$ES+6Wsv@kTF^am)RVX_Lp-5xii^HsvWK7qZ6f@h- zcxdCA&x(0y`&C}hZ*HMKHame^R2R(`#Z^$VXw1RYBquj=lT73^&scHjB^!OTbSb{9 zC0P`g$0Ybic%yHoh`6rMg*f>q&d;ZRCZz~nY8K*BV>%!4qLd8yJG28H**K=fDbE=0 z45sZkpzG!&Nhw3R$={q~v;8ZXF6`Z)YZenlADOh}vwAye(Fe=kZeyA&VQ(G4{oa|F zP{Tc)ghAhV-O}bhWm9qGl>fBGy*`oeBE(vur`7ijvT|VvrL?hb!_*d1&ngBLdo}yl z&}mJAY2e#` z4C-}P*do`srNvEpO!wHwj-4~A6#`yxbsKYEIiF_k(BkRZU1H%z_n*{PrD9fa<|rR2dP2}`BrwWALUoH1$l8MZcwGEw2KkM!YQLHQ)N%v9lsUL)NN1 z(1%wbZvl;%0c{5Aj)!n3$NH@Dyuoj>g|}wCeeLXM#}k`gMDBXK3qCO4>lF-=FlhP# zuLHfRe?Z=-&#WY<#6z?&2weghZ?-}8`rT+}fiPLurK$iyW%b9!?E&eplJ7NwwO?}; z@|p73`d$Iyf?DGx4$();-QBb#{TV7pw((v=y>WJn0PM9tEIwUwxK-49QZuP6Uf}mu zUYS^0k0tINsjWc!+k#2*z|=nBm+OOcoL)Jh^r3)LBH39N?>F4CtL0g6`=zUBWoZoJ zbbI$7zRBg#;I&z`+cFzg=DAnum}`nCXtW`rgT+iy?f3vb>Bsyy#HCDDfiaIQiNXCZ z32~B9P(Yfx_^01*D%iVeJ!`{LIhN>l%7i{kzVZxqBgMcvow`xrI0AETpO}3dzKR7# zP>8L2T<~yjiQ;q4y?GJZPe|*)7YlTx&f#^n-(MCPoP3`F^M8mX9PTW%nyaiVn!)-; zh5yOGEdAZyX+uF_SY=gaeN3!B#*FcuTWa_!gmqZGc7naNV0l| zI{gKoF62RQx=Ly8i}@>vD0ky_OQX?}5E}9{-kb&PBPKvC(V6MPbUZ zf7X!^#|vyR(`-Rw>SQjGpEdFnGOce+9}`}lL{5E>xm$aq@JOnMLbiW{Yhjn}c?HXRc;Ib;p z+2;6Uj_QrFeac1cO=ADyV}Iq31LfD`b7b)zKOF8m zv%d8l=_~ZMa!#Whc-Z7~vk>Tvw24>b77G2e`+bvS-JB`xfCk|E#>X+Ucan3CE_n^W zw;Zr3S^d!dc25Phk(7LQ)XZF_S*|Op7sI&6A^eH*;a>N53>H7M-YmovVjs_eH-{m_H+E_n0z=jFd{zV+Nf|f=JW2b-VUXZDvW zYOc~$;0dWZqV&aP@V8QFwV+ zUQLHxPI!*f0GRo_K3wIkysPA-48h?>xFE-c;ExIOEpjOiy)GUsl0Y5Wk@ z1cR6v!VajHDe}=Jq-WR`aQHeMty$RUL~G@5P)W4AL?2l~HDiM6^4gg=nTF&EbY&k0 zke}#W7qfwq^JXzitWobDww|->sNPRr^D}nIu3kUAbPE%eZ(?J8BGmgN4vP}D}XFs|kxmrfU?;drsLP#4t$*fZyBmrb2Zb6Kbs7X zJZz%?A1U{$(ekKhLu64h{i{N#is;~zw*-rFpXxqfWr<^ozoYt+&O*@h1l(fl{%Lky z#=x!+J~u#A1;kV`OEMhK-SU@!rjK)vqrulZf={_V)cVo%F;%Z;Zs;MY~Cd zkGJz1xrm<>^vNar(DP#`Sq_Ltj0w+V0R->mMqoukmTGlk=(p{d@Q6AW#|!GhO(e2v zuACv%H~H6PH{}-SY14akZm3*C$ZP8Qm{l+e5!*=w0ly(4Xy5PIQk|PF)E>|kOM3*O z6;mIYI#FsC@d5iyyuPhFJGiOBgMC3sAesH9zI7}RUlMpMVPpPn4mFF(CQUR4Vs?8m zT76l28Lp07dFQa0zM^QWgEr!totuS=^hYMQ3jICrKP!W|2{7j7xUV>?x`%-u2E6Q#j@*D@h7v)^XnVhY!cVb)w!-dL$0V zlHUr|g#TV_u>@kmwwLRznweWqq`6e!I+hE)AFn=CgVNjak!Jblx6bR%g_fqU+-%v_ zjt>_!>gW_5^lW|Q`yfc_Ato>;uAK%s=4imqc6+>9xvkP*M3q z%$6Oxk?uF%S5qa;6$Mqp2TOML0M2)}h&qdVBT{3lK_eDT0hhVe#tFv?QPEz6PnmF^}iZNNV-%p5wJ^ zNIAQafiwLxK=p?pv*%NTgdLR}HJ#f7g*R^z`>ND7)H+KeOcljvlS?P4#Dw;fx&<4m z#267AqBz3g`&imRVCTA)g0IyA{7URYfb)ApQ1%6?q-9}=t4$7?IM|nmKh8A5Y8nC; z4~mghu7-t@F0K`xZdpm0cFEg%HwAW!_U`5>T;}R3UrXvVNBFwHC+MKQ$-b#kJxHxW ztFPr_1#$m9EoE?(3FAIQy?HN>nzXHMD9SUBcl(Z}pIo8?#drCKU@kiP#p(Z2&wYEm3brgk6~4uEOj)ByyWp`Fx`Ol^Ocymp zso|WW!R)W^~xW^s4>AO*N&P_w;=gxGO-Y5)PfYT&c)+zLCO?$G4w@`!U7I*QW zIt!mB7W;+Hz{HHT3o}G_+jW{%gEAP!F7z2cM#6X$M7MpblIFOAid~{4LQ_{mb5GBH zv1o?pi9PCp#qX^4->xPBq7mri7KXM{Mk?lkdLth+JH9NciuBvfk~SH+Y*LSuY(M9y 
zRO#w4`C9SR{0T0QqUAUrF`-KB|LN!3S5BY8j4Bgd=1JVcE3&r5ho#42deRHI;-5x|k7ymAAVP{0DpJhd3!j== zyJ;0fonSsvAOM*tKp3+U-j|oQS^^q8yp>ATtjW|wPVK>I(D85W=JunsRT+D8Ip~i1xwi-H#gdK>m)e zXj)w#x+^z-bb^2!W3Pcwl~WWHOohMi2(X2_&Jqj`7M<)7$~Ca~DOkFU1Q~42lyMwh zJ9A_Tz1$Cz;{Q0-$AyN5ypW1`;I;q3v7m?EH|Z&d%M z&K?{cNh3KSv5&n<=&$uV`=M~*PF%ZVM`V8B{cBjkbiw5Gx%;H&YM>F1`sul@drYCJ z8+3QBD1mW2uUljisJj9zb6bows3qgXueJIfQsvmitK?W;V}mX|i)nM?lS}JzK27VQ zhaKv>XL@jqNljCqAgUQUY1d!#xeAeFIQOsiP_-LZY(x zc56%&c7h*k&e!t{zkR30>FRT&?J)3YhkykvyOIl^ZrFYHaO#|?^N~d~Q<6+T;>TRk zC~9vp8xRJ^+u0qpEB|3WQgG5xkdj-CTcu*_nj|CdCf1N-QB*HTM4EnZJ-+>n!F?JE z(CUw0vp22hcs?WYtN(H5%R-2crZ?qo8-;z}%W%ep1kx01iT&;A?KZwJU0VvY#nU#xmq z#aV`>c;I0svCee7I){NsDfD2qv=L>)jV-2#tZpcyL zQcl1CZNX9Q$W($p+T=9nVK?d#=HyuqY+reX;tH7y&*!Rye z0FUBPVyxk-UA%gNg%FZjMo5Twjf?@dQX!8DEq0o#PczAgB1^IuNS``At6(##hGm^N zJhf_-4*^*W(;|DXKv@WpR$&wSbBJ3}FCW%1%jDzpdpEVlhRpFUFt${S>dL^8tb(qO zAUi=yN<*MzK%jLk<6l}V<&h8% zGw|HtsdMNP!2G!rMHD?#L)${nab2p)^``pXgY+vEI=A>MMKMPSI#6wwNcWodKw3LZ z3I1oNV>;YIOT)hjHxtbB#=!Qbb5{#OA{0^mHvWj%uujzdU|rVe*6swzvHhc`KTc#Z zG^+7~N-#U}DgN{oQ=IV@?pR+hzKyk8>som}$Sf;@4{|4lNJyN6_I&7Ougw90eVQ=q zUE3zrU;VW(--QjCZ6nAswtkHLp)Sio=n^r;Aiif=Yf@oJGU^VH)of|4C-AQmA9QudpnaXkx2*=!?!S^auA=Y;f8YkWoepjlAAaWhP1+*E~{jUK5B!5RD;|Bo@-Qj;5$U~EgGnW3x1W&79%Kv=*kMI8n$<6;{+I!#g Y8AIM+R=xMg*VCZLODlh$F^fH(zL-aJg zMIBv47+n%AhT(Tc?#;cQ&-e5FeV^a+dp&gMFm_3|A74|0WZxvZq5LpuMda;0B{IkpkfDTK#B_d1E}}_x&s;j9I5z!((Y7e z|J0!dbz}fA2ZX`Rn@aY8J_gd!p>6;zxCd(i^1!VhVBGx;07Q@cwZGzobc71ubVd3g zQLac&VGR=n;j{V<5H}AB;~XG+UPejzJov95Od({H9`qD1wQeisnyU`hWI?>g3;jp%VLNU#O}6s)I_F`e)t84;lvR14rzjKL`8$q5Wlm z`Jfl}8u|d${tCc)Py(EOqv-9A0uKS^Lx&C>KE!9y!L!%6g2Fu^#(z$kG3m3ie+EM-Ks-zzZ6xlK}Nm zDw?BI`{Q6I8Ta2)se@M1fxbX-haZUsprWRwqh~mDn2`x|fPWQI0n{{q36B6&v{W?I zwA6GA^oQtZnB_p>Q5xD4>@sv$j2t*ldfk(yKlU`eMH~#&y`X7K;&A?Eho*lnLY~Ky{4a_-^w1o>qAekX^B3|dQ=|!zW?)jHokFSX2Z8{j`?gI(y^`ZHT`@nsxT#n

Zfk< z9Pe!#YceJV7Ppik=Uf9-#kppEgJAVVqlvp*RyKotUmF!=?iHh1QKbT!ca}Gj%ri+= z-mrZD&q-$M5piphxMM*R7P@sO{!1#aK>1?jhL80L{9aF)de#O51oLQMI7szb|KeP8 zA-62?qVk9bvSHl}LaI`c$Q{V%sl=ig`D5m-ory9X3(S&P<+X&;u|z+GZojp-y?p?; z$0=qRA7Sn4p*|cCrz6?ek&Vl*sAXcY7;ri-Td)tPuft0=NfS|PjQK5Pc^k@>At*bU zQC>FG@xkLoJF6SA;`~GVfN5%XIjKHAf0vk(l=C$6h`8a$R~8#y)P_Nmb$c8^wUH^o zdP=coW68C|l`_dn2Chp?TAKR+q^%;#XcFR~J*O%Xs7Pv=_E|unr7aq2jWGwuWp{NR zqve+lUCJoi4Ad_QTrF9aoY?Au3c0kP#ET}P(n(swyWP2@>i(ue2PfvSE!FLKIGXe3 zq;be^gxe~7Zx9jC@DbwpipzIBPMIi^)xrU&nWNRhNb`tA?e^ifunJhY@ zD%7(NyvY+mNBw@DI}15G<^0>rE2p;B_kpT?pjtc0K#l2}7b&nYJio*FI;=plHdHm< zVl23Pd@jILDSRVX8x!0Wpx!Cm_gg#4R!>Gk>iw&PjT7tDh~UCDyG^@&0H&H+Ij{bf z}u^BH?z^|a@NWq!pQv!cX5`#JyYzi*$t%+6Ug^u3eeB*^( zpLyp@8sEf~tM25>H*9yWr+qwUz;N?WKMihfAMia28fc{z5*eq=DqYyH!d@Tpxn_x5 zeg%4C2RY)W6oF2UfcnTps#O*&>g@wsi@`cBYo)zf13ZLt`*N4lM zb}UKi=8B^v#8&;H*f2?&c(giO0Ycy@s^RBbvkh-*+0ZDIF6wG(&&K0dtCkV<5&a)l zOM0G?)D6;z$83AQVViR%%c}-1#w{VDikmO0Be*Ma$^`o55V6C5Oy-jKF)BR|uajbY zF5Z21eYIlDuBg(zMuo398)nt+ozGcOV|~|h{>i8I<$}ZlkCC~c9gL81lpa}Z5NXu7 zz-bSaEbXYUXi{oOZ*)ELE^y`jj+SOa22`ct@?;NkA7Gd(gx6M%LhpPZsKyFBHDY`D z;N(kEf=TJHsa+)&k(w!v;IeMmo;2(36({oIoyalby`{VSs-hK@OP=VE659lYWkeMM zt%fbBQ*_)*S`SLAQ?cDyxqrg@1tbuL$aKLO$TW?)R_?0SXNsK`s83o{N-tlo93~lR z^p~y2?uD+mX~vc;2ilCj&L6l4ER1!;b14wadZ4kN`yxk!(=ZdM?dq!LnTMMu(iddb z`-}v0P9@J)#d6p1J=ua|)Rrf*8`5i*HueG4y`z?m)w|J?hJ&c|!a|LTr&V}MO`g4a zIUy*E%L*p5ZyyM|e)J`u&$Wg}*c?bzCq`eMn2d>(g1JM&Jm?!Vw@TY!fkM{4mI%8{ z^TO&a5aDLXXDZ08+5mH=LL6#RPtW(Viw|}X)e4}{-R&%m~ z!D0@DQ9(&y1N|&()>Vr35Ri|J&5atSU&3w3eUcEfR@@&tJCI9_Q5w-u9dCxdMh zo^iHk_mchohpB2&qt3EIM?zA+R3>!bw>1TU$;|g7+bG|`F<%nWuVG!D6cj@+8t`Z< z-OE_(9|LNW?|vYZ=E%=94k&)`OAjm+D^5f2ZM<=?o+lc?9;B5HUJ7xaW@X(pB;u~- zZ}WQK)Y&s6F)Wa=&B5u_OPVa}1%bsy5oH4|h5Nu~n*>=p*tO+pve|XC;>$H3n(7us zM+KbSBF|)rbydw)$jGiAsb6&xYuX^Xv=3zOXa?mr?4Imu=7z`*I-w;V1TLuIBN2}Z zM;7aQG}4?&t7NI>KxF22TCd|ipq#b!k#uLk_7At#qR)1-LRYbtb%X%vjClEpY=hDU z{Yk8)`FyY*TH#jFr_T;%gUG&%%s|g-B_U#W@CJKJ2&AgG*=62iJ!L5;m&Ba8 zUA+&yPM;yWlPaCH^P`g6tVV4H%gz6=8qtp%8hf%2Xs@*IA@)+j*U5W31Hr{?uLTq) zOH6!KG$pd%^*Q&kHm?uI%*SI=I-x?D`+yE|UR9Hr|Gl&MNx=$k;B44vgj1v7(U)I) z7iT{PhLGLo>-OSUg1yPZJ7yyha*afWb#%jULYLl;5YZKJ& zk*}p~V^*zI5MdLMK9M7iTemk^9geY6{{{{KasDL0`Qk$id2yB8#hH}?(*xYV^JjS36l1KB8<{@|0}d8qs$g2_EY=_DeX4h3aoftaLnH9$;1NxDp`6(q=nS^q1de@ zSGuqDrAogDR$MaBTAyc7rf^5od`JCDL*{1im(+}pFWztJi0#=MaO}z6iTrKx?S?jymLQ%~kx3*;cRD6TjmWBO-(TPsh z;NgnuZsVfFvXgDdj8(%qQ}0j4dJj#-!-rm8z`a>otAFX!Y%}|5<5GxBsAswSo55?9 zkaud`b;BylrTc(MA2e_?C~dtWQ*KdPd;Q(`GmSTH5@pBi^b#te9q0-~reCJvngLme zJhZ#Vd-}Jc2&=<&jr9qA-^3P`f+}?|ab-e%iYAde?M)v)s%|>UCnatMt>(^?+Bp`r z^aqq{nH^m_%H;$XvOH=z(Ap=zPih*X8m>T8?K?IwVrIwzX7Qr-AkV7U=_#(`PbO;v zB7!#Iu;+amw#$C&Np3|5xk4AH6Q8oP^f0MRUMYcj!a&@1j-aO+N#@@&g)FZ>J$Y5Q z8F2@X6OlRR)xxLhcy}Mr%nrcKB?XSNotc)}2X5IdV#HlHFsexi>?HV%u0qm$w4qVZw`t~6 z@)#XQfUWx4p3+Tn0q0i?zL=~Vx4NS>)BD+K$?O(F8pqcpWFkBOd(b}S*nSqmw^-k& zqTW+5>~!b1lXG8L)YqjG(vDrMSpFVTVK(Lw{b_3%9Z}ZPlr!p<9mxU%CrnWSx24`Z z4)bacw^rYy-d;_f?W$p%CaQ3ZOZb8}*=$80IM zhq&ZSQn23R=qHOs%F&Db{tQpZ{jUD19&+>09|H8v@4~i{_evKf=6ShG+m}l=I=3vX zVNx#nCT`s4Jx~hGy9^r=Hpa`!shaCP63GT}M7gZQB-ldW@=FCc!Y+9{%rYl z-pM4Y!KG+-HRlx`CP={VMW)7(H2U5TJ}v9hzu;SEWO!@Ayw(E(#UaD&q3GJ+=R}dW zjXP%s2R||4P+zfX63o(=2c(_6kurE^{X2gzj_4~Dz;S`S$?+{wa(c`{kH<&b@DC$d zhwkmXTZ{QH@1wjH5Gb+e(TsYzP}IrxQK{=>2p+tnhX>cSDe=tiH7ty@W{UMeA-{c` zS}3gymNqNxo+)@&jHc=}ygoKrDmbdOn!Tu&EMi9{9N z(M%XaK1pGD9k!>Ngu~dX^C+HM=P>z{^MEZ$QjmCIAAp(#o3sSlDGU-unBaJPx$0~3 zIZvUK?KX*SJYF`_)(~+UpS{E?%x=4$vYUHuXyQq=>w+pnwojmMnaGGoP0rYDj^Gh# z$V)h?u09UN)6};YCD#i+CL#*DPA%mYJ68=hux$9~(iFR@Woy0~l0;P~LQ}InY+{Fp zxqBisT82Xivd@7d$G0YyyM*-C_R2$bFHkp=RZ;49rI 
zH|MR>IhHAY1%3VS8V>B|&oS2Syn$)f<92Z*&Emk+LD|*Lu_BDmg3{(j?GnE?qKrK` zlrIm}3OU3dtZlhjHe#j~6j)Y-D9(ApWL@T=ipbAlN~!9=&8v^E_oLQIl|vB2Rj2t^ zy)chnT+sOf3wol9RxA2cBUae9H{hYdkY#JDKfd6;8Q36SpJYQ0%f{x6{!YMD5N-SU zXk0r`p=BsPh-pJVcxkKS-`oe3Y$rbkYn3%LZrp1q=NU{heVfs4rC4<@b(YEAoqNDe8ro2a<=_(IGzyZ9{a z++D~{D?k%~n6>GOH8g9?%!5Fo1ZT;n>iNoZoeqKG69nZNRoj7E?R6r-4=48lS}eM| z@VTw}$%Tb1>*%j@BhGD75E_kyN=Q)Gq~DsaN9jzF^{APidrRAf7Yar{lJC~cBFRZY zY!s^3l+9V%?~2ZIAHQ`f8J`=Ru;2SJsOTg5cSo@X- z!q&&l*BaWbc`){(qQyp`Q6?|b^rR3SEtNi2;w2So?Kl!A_)?K!F1fcExG-xSK7-S56vPL zF4cjsBvM}ILg?d`1zU8D1Zt45=0i32Y!7T87_ zp%vHhgS{B7c%?{xbmKmN4cG@VGy8Gt((CqCJO_e> zWFK}t&iU44AHkkvNR*(yENqSmj?WGgroE3*j%bK4ko8~PPU#&{RYA1;ouCx!)X%#r zS5Y!&3za6REe3zs2QHLLky3iyYi}-v4ZGbd*`Z>US-u;rxEtTF-T1*Ay7YU_ByET`x3g!~4n5NXet-bAy1YwrQDz;Ogv{ z&3IZe`21b~&lKesIziyTXznbyqCF(X?3V6E%}(n18Fzp0Hm)pMsQ&;{sHhDWO`JGC zVFyR_$075-f8S!8ElpTdA#%pniNVuTrXT1vt6_x)XWX+uFguO<*BuK zT0-&TDNh%d>$g6z7Q}8eil4DU*omfT0HwCO>+?6)n1pxHX<_y z3$o3qDfS!2VDcs z_4UgMu^t;Yc-=<6ETpI$Pj9{HXCDh41V2Cqr;yJevAsj#-?d8D^jVj$#KOEVVhgY$ zTyOcvKG19S#g~-(Vc2BYOgyQ+x}b6rqQ0oxxQ+j89TgGuHJZkPq=5tTy0`O@c z?rpyKPU6|IomXK)u9;!f&9@iG@~=o6v1xcX(ku{Hi-iBp~hB?As{CaBQxA#xu=O(>&>x5pU zT+H6UCG8L`t674@m0M7kH4{M^Ia z_GPNd?<^s~H#eqglH0w~<}tYKZllyFEO^qwx{S5A3P^~ZJ28Tl_EW)QCi}zt%V#|P z4tmN-od+&n)zPOta}a<#@@E|G=k7qJd{CSLbBg>&td7ak8w`{hqJ2EkJ|L44r3G%H zd`ujXNce$VZa31QeRG*MNId%4`S6nz-GeXk2ykp&UaI|ELq{ZQy`_-qN(V z1+soL_Tzw?-bkdg3DgJeY3S$<0fo%KO~4yKf`86{Fkk{e0UrPjcv7kY7(IV#gQbIN zuEKph{SPksm3=gQV^4@n3jX;r~_ds<+cEQx6}l zQ^s)0x&4{0!kvWw6Wkd8pSk)GzRQ1Ik}9_r@d^4IXO zQUu1JO;?dVK1c){>EZI{AE~^qresJ9BA_XEDIjgx^_@Le2{|~)xl99l!1~M z|C}Ts`#^W&fO{Q~c6J6Oa4-kK2VkQ5larp1_>&`hz_$YPzjWjeI26zSlXFn#cd!gl zX4d|HOf+*8@qZNU|NoI4j0L4?a54ha2PF#Bg8~!zGac~G9~5XQID&&R#d?bU6py1j z;QY}4b1qX~2CDLLI;^ew+(FN|8nf%zlgjr3g?U9dvg+B83Wp-mLAVHmyZ_Vur$Kwe|K*l*F!=w<(MP%b>urhN0q%3l!R24HSRqh2{1)_% zj}GekE&UsRUHA_D#r_xN!>&m0fGcn}m%m1e?MLUY|HTG-&fo-fc0eCoh!6Why?y?J zy4hdse^qC4bkRV39p+@nA0zpDLH03HGuAU!B!eb@)-3Et3A(0^X(7~$Y0@xO4H z9S<&{|Ao&4o?_R3S3j76XDJPVcWls|)xR9jKW5vp1DfK(KV~XL12}F1W|Z0U0^Bq~ z@*@D;yAARc0Ui+8^b7d(3;6U4`1A|-^b7d(3;6U4`1A|-^b7d(3;6U4`1A|-^b7d( z3;6U4`1A|-^b7d(3;6U4`1A|-^b7d(3;6U4`1A|-^b7d(3;6U4`1F4}@aadq@dOxA z1i%hO0HH?;lnQ^e@B=l# zaDY(&+t~laY@wr}VSm%uP)p~A=0UT}*KfFaAgRGnKLSMbfUq553rj0uhAuGFe;7m^ zfI)o+2+C96M2iA}0(3Mrgi)Xph5j>vrf&}Zz#Q~RUcV_U{FmT=aeoxz=?y}AsBVJ# za!ybbh^C@?2-0vrA5RLO5}`fj2&2%{l-TSsZ_q%H=AqCoKWK>qogegN3hjjO0E79| z6q`Mr5Ka{OJxKrVi>3tmX>NdYh_4&e52X7+`ZOGka0BV@Abku0bpU|}w3O)YX&)%W z6{OFDG_&_DV-OoedkKVmFuVMNcKiqJ14i3HT|fir>3$nbW!*^)DI5z z@sYgg0D(DpI|*wb5uOen{s8c!&y=G8cFNcagM+Lfqokl9DJw<6RQ#3xUxj}u{SSM} zx&5KCW^{1QsJJ@*=KX!}-#lanh)n@;JT!m&&2vlufXYVzz%%kU@AM1MgC7DwMfX4Y z!$t8IS05iwWoczm@=pk5PpF6 z7DjnGK%l~s|L(;9+Xw!`tbf=cVGMPKdPBjVbP1b*UIqfcz|-yFr>bUO*5y1)K%3NV0$;paNV5 zQ4_j=A#e+@1nd9^crinONJ&57E^rSB0m6VkfLI_ANCUEfe4rR81uB7B;0@3Ov;!Z2 z0bmsP2+RPBz!zW>*a1OR3{)&s98|niLR4p{#HnPdE>fvcX;R&wGN!Vmx=rOmIkDxa!^s*36jRV!5w)iBi*)jZWVDiSpfH8b@wY5{5yYH8|=)K{qW zsBcl*QoB%lQ3p^zq>iM1PMtwrNL@kwhPs`4fchi#BJ~EidJYo}7tIM8Nt%l^8Z?G9 zRy58u-ZVipPiW$3GH8lvs%e^O`e;7VEYWP!($jL#3e!r`s?c7iHKm2ndeH{ahSMg} z=FwKrzNhV@oud6hyGO@Dcbra~PMJ=J&VtT`&W|pHE{-mT4o}xeH$XQ-w@FV=&qXgv zuSkEL-jd#(K9D|~K7}4f|C+vsewu!Rfq{XSL7YLA!H~g$!G|G)A%UTgp@E^7VTR%R zA*Mq@hhz_F9kM#)ap?Y`xI+bp>JRlEA|Bc~eB|(%!^(#Z4?7+9KOA*9>u~kq?!z;O zcNmW{iZZG)-ePoTyvO*Qv6!)uag1?|iHYeXlM<5=lN-}-OwXBKGPN*GF>N!mGm9~6 zFxxPrnIoBVnO`#xGq16*u!yi+X0c-NVTokPXL-jm!SbDzgH?)Eht-KSm^F#Dg0+`* zh3zog8MZ5Ic5DG`acpI5-E2!o4j&OYqH)CGNYIg_BUMKTM>dXf9F;k0cocCo{Aj_^ z)}wRm4D2H8n(R*O57{%>-?2|~&~TjM(BOb@JmARUXylkVMt@B7n9ecxV`0aNj&&XT 
z!pY8gfzyoh4rdZ)9p^_b8ZHqo9WFRm6jv$NAQy>SfcpwJl>0GvF?TQb77stqWgaNc z6CNDT01t^*m{*e*#`~1Fl6RbsmQRe&h|iBNm9LR+iT@bC3O|JZ34ba7$Z_i9V#ke- z2OiHn-f?_GKu|ziz*8VW;GMvdAeZ1}L3hDtg7t#)LL5SBLT*COgc^hvggJ$;2*ZV+ z3%?isa^m=j>nG4B(ob}q*f}YB()8s0lem-Prx;INJmq}q*{QduzMK|1t$+INY3%8d zGl$PyJmY#M{!H_kEfG->bCFPyN|8BH9#I|908y;y*jbjd>Sw*qW}NLmM}O|(IoP?B zb05U0#pK0Y#gfFj#i_*=#NEVG#Cs&@B$OmPBr+rjlFX71|G-9bG={nO=Bmz^%> zTqa&Qcg6im;gyxE=dXHS#b4dhP}aa`yuLkGniEV8i>{(BM)$oQv!01wqTZ()VmG{RRO?gg>*~koj~SdX z@Gz(}1PpZyV+_Y{ir)0PSz|m6A#zx*I#AeV|)b@^T zs~w*m+^)`^&ECPj>^9wP^VbsUo&zd$q~@sLHQ%TCXn=Ao+4XeiNH z)j8UE&PCNF#%11B-8Ig2$xXv8(e11Ib@w#)EtnxJ7q$;KhvN{35VsLk9_$`4kM~GH zr?_W`=SMGPuUM}y-g@3SC@PdK>Xpwi@L{S0eHQ%)J>{$Bo8?XP<4kTSo!X+P1ew@6O;*`>xdO5W?O&~2Sjr79p z#ZbC-dPRmvMqDO+rf=qKmRVMF_QmY{9KM{e9CEHl?#Ddiy!ZKv`S}IM3!<>pSTuIN z(5CQ1(Y2yi#gfJ8I4)e+OW-B?3V)U~OOPbQy6ryeU%kBA@x|iH%vbNP``^OXc-L~*FRZ`axVbU0iP+rPdh(t3d*1fN z?PiiWiMZpp%dq>Le2!ebr?)q@j{wouqu{FtEj0}dxDFba(SnKYFg+cZm<}DHKg_}e z-XNHmnZcFYjLG=sf3DhBpn?iJv}2A8#5c% z|L3&-7F^_w+Jf%RKNfj=4;%-dU1%tv>>m#-l$@THj)8`n>d^jsfFE2&k0Shsl!lg? z3S9i{uZ7Ky&>R7uigx#afZj6mBfWCk@-OO&r@iRc&k=6f3(ei0UuN6~+_*7e#|PZC z)t@dOEyiyo6VqECU)g%S@p2YcEDaU6WYnAPmAbG?e3mDrm4A2mT92iVDFlC>@5D4d zuD_3q@L0#}>+r^FqYYtvD6c@l%g`(-u}2k1M7>}3m6hm7o#*9e)a^2dnTEi)Oj)G+xlT9~i@hk`V>lT{)!K}geUTS*skO@w#9bP%usseNYU_E8g~ z&DiSGtwUAp60GZ&+=9LBZorTSb^i9;<3QUw2LR+B(4>aT)5 zBQ^!`VsQK3%5BZVo7`W^?Qbr*su-hZGA68)PK+U$xIY9MB(cSED z!4gz^`oh~vp~BfIb=4-sb4j^mx9Ivrt)4uhuN|wYRzhG>|BxlO-*L`5<8oB_ z8C#EIE4dBq@D_pRA#QF4CB>3-2)XaOC3+obvJLo4I*8-4P*XR;6R+su{y>jcrA7+o z-u+Jg%W1jd30; zQ>)=JpOl;9%nrz2ubuyPVq$pOthF{r^L)DHEvT&1+j`I1)hlYo0!#u@_Ii$;?|KvK zdQ%gwW~E$B4K}Ed%M#T{Chh1v2}IfV#V?2#2mO8v_qs*oqS^e7!9K3qJE?SShF20* zy~R0H4>3Pmw&79=_^_p>cXBjw)Y9u*Pyo-T-Ktghk2RU56884OXe8QwT;Fmyr_!&) zZSQ+K$yKNLd8%^C{YX;LWUlI`4RB47SXI(E<0{|DS5Ngu>|V}RS18w=(@kIYRiqD( z($BdBHLbgH3y{7I;>~et4{U3?eWZ}`H+^(hpzi*5OQovI2=0rkQA539`c#GA0Yfsw zePqIbtxFrHU%XoQg=;ly5z*KkTga?&-_;I0VKbY?uxl%F$9|?Rzx_kLG0Y+si(nPy zP0EAk*&(y4XLw^}xkar!MYTH4z!D5F1S0vbFno#j@tn)_)*e^A*Up%ldoM~Z?|ofEl8!|D0#cxdZ6b@JX7a~GcL zZmp5V6TeljbabE4k9#a}LgKC-x{b-H5tTBn`y@@Y+LU=5R@GXel3jW&C_~&bw=^;w zj+NQ##aVV%Z^8cZWRC3^uWTDlHZ3zv469`iyiA2*gt%)=asKWdToseu78yaQ zp-%Jgi`SG53rlBei#h_*qpQpqJCl-PnOpBI+jmAOG%Q{&cmMYLTK#8j+v*&G8mYci zIgMz3ndo+T#n2A3?)U6lO;LeX&rr)(XJ2^<-?syZ2&t1_T>O8#*LfF^*rAakPd4G`Njj`yfY-CRNK** zV}kB=OP>iX2tp6LIK66-CA(-gXvj#b=VYIld|tc6IXd%1KTaQ>h!rB@dPp6*uWF1( z?-^B&FD8s*g`+31r6W?krZ`t?iAJRX_%*?9+a^d{_z`E{hRv>xyzW1S&u=>K=rnIU z9+%yEG9qL^j(bBmrCSo$)I6(HuUW^H_!e;q%13Rc++aXTi*IRIn{9dOdI2Z+UbH|_ zCnc%Ru-741CitsQrL(QCJ0tXaQT}wHU6>{f;c0;JEHSz4S)dtwt=b2mU|~wc~xYcZnn$oUbL;w z^*I!0voL+X^PA=6X7x%_vur_u!#yu-TulZZHIHS=@%TvhT2T9Oyj_%yg_^xPmztxe zNAf@mB8dpe0W?om`{pG$^fp`TV)uuM(l!r&U&87UQfv91|4LQLt)V6(Th_MU>rfYN z3*dgE9p9oFuY4S)7%hVftHl37cvBxu7j|!c_JL|@^|sLQ?m3^0)vLEeIJb5n7Ijws zU+UnZsS%uQj;@(53A=7%X{4Y)k3N**TAfXrzd60ruw6rBQ@p+S`^^Uk3KBv5`A$(4SwH>#Esn>GZi z-_$953RFN9C%hj|;fNkREec&?s*KsG(m8%SyX>3+XV31am9f)EQET{MFKomq1kGmn z7GJn(71B_i-s)I@O&fsbyQN+Yc{O{hBS$}p+h#dlnSkwF%P%1sd`a>B)|z03AHQNn z_wC)9T2kjax;*&&fL&7JBx&Q4JG~fwH<1&LR11Ffbf(@ErtwWlFd#D2t_;5NSWQs0 z;-1pv!&QyCn&|l#>tBuJ3oG3#s@E1j`$swTS>&mx4i*h(FPR4BK3(*d92&4IOPRhn z{;gV8Tx2fyxu)>V&Nttuw88A) zBUOIj9#u8pORJjD` zUs_+#{k#uU=q}Z!StS;3-lbo@^QzL!aZqKjy=3}S$v}X>nMsk!5;akVA{qknO1_+- zoqqL9cs0xzSDY9x<*{hXUvGEj{M4j)v18-hjZK_W>IXHf_vWQK-r#;WzcuyVUez>N zx+tZ|bL(CwENnhVch;n@@9yXv^MXGv5KtV8-@0)AQiG=UyU2V)bqV*QF)urn*gB&( zgcfzX^FO`yxH8o&^Lk!T-EM@rnBae`KFe-Hj9q#gVkLHRcX+Pqj#h0u`=vYf9ulKZ zpPN<-aB^Q|^TYd&HlgixCZAak$&nV~0#%J_#LBs1tGg>XF_C7@Ebf~nVTQvs*)z#d 
z`azG7l+8C5o%Y*4=Kh3Hi$!pOgA+!o(Wj2{T298kdenDQwy;$2VYfwR+fJR`t4Lgx z_kiW7O*_9QK5XSxNK&^)qk_#;tiV&bi=D4wuYn!#x#+c8=xeEbk2?s_H~PD~G_m4$ z-fkrXerqb0R&;Ynz+0rg3^KYKoN0;Bo%h2DY@PRjr8S7dz7sRo@O`jCY;0QR#(TjM{}2++`ENPl4wIW z$>m+Qk6FFi>4!7#YtFg?*{t`CK7*4M8V}`PblR|%6aJnOf#ww4jk;PfCLexwsm$8Xk|msV;FLi^o7`-V<#ey}pC{{k}XK8%&{g`bkp#8*E+=>U%_}t9iGW3#Pc}>8Ob^QSBvT zy#kS?(%*c+%*4f8qp;>x9b5QNPG#1(OPM)3LRbCK?laS4#e6RA^W4un1EFU^**9M% zh;s!d@HM7963e*r2@Ld`Eojq1teW-1J^uwx}csQpB!7(UygOH zT`tlaNc1yldxW^K(C_2*1bw$>x;e<-cpAUJJ5ZUQk(7oF5?da?P^KQH4FoEerYVnRV0o zF`2xHyn-U-vgnQ9y=qh93?f)yMy+>R6K2WVUx)5yyFH4`dtaSrEnpT~TZW315d*ZwUx(i+h+#_7D;l( zh@Z*OwmV{4Q{dL7XBJmGAp8E@b|GZim6v^~M8oiG!^Ip}8`RfuwpkM0mcWiASDuYt znE@B1wSs)CF@+V+g`(t19+G@9T_m3A=k+(FOa+$gH5osA0 zgRcLvEa^)eOk@-?ZZ|=0cwAH0ZFi50_E9m>TMt-P?$3T2^f-4yUA?q+ed1!b+t=`rm`JVGD9lYpA&tx3R@UwduWwSRYyY+RW31>&}*I4@a zOnpf8uFdk*+&6Y&Cyw*G>p@AnlNSbl_e!&yKR$-%=Z#tJafUH%{V{8 z1C&_B2<%vP(Ea?(SK@kibzsF0x0$Zdxk!r`8{6{nCUT~3?Pc1DjO1qMB;Xb35lkVE~9+RP=(>Uqj{^GzSnk1Y3Pr*(q`g1pLPlga}BR;ENP%%nS z%F#NS=1C}1{gzDg0{FNbVK^mswL_$+xLqE<>ze|Nwm}Hx)xWUqY8!g8=p%mpdMqLK zd#$;77e2^R+${40&RES*rPv{loZ08pgwLbBWwY#E<6CP1;NE=1*d?;}a5s zjri}2*%SF*H`MkA)jZ0N(}iSuHxBLTJNqv;JQ4S6PZT%D4D{k_E-l*Nx07PC2Hfk@ zh_eckdorKa3pWgjBo%}~AjcfHzb+2Rp8}UdaP#NQC#s-eh)a{)xK63a7_yUXMp?%8 zI9Vw6rV{FvUe;XOdR*|51ZGJFctl8`F{GM-npPkqCs>jTk+Vp4m)z0--sAbMCm{onDh&B9ije2u&@M${LycO0l z;5d1HnSQ)>cWP^cMVxzkjD!jXXEIyaeLA9e$ClaFrdnW_u?%iC(n&WO%1vBpY)g(#Xx@_&#U5^3P6xY;CU#)#E_2i0e)CYoVW@nlnKXbR(K2!Zk=Wf)F z-J(rM{@@=bo5>Xy;)TpVj=o&xR#;LND{KQ#wxdTKi6HxxrA4?70o+MUtTp-7qBjjp}NMHbB8q5mpT3L z!g4;hMj$IB6Zbq@MTobsd(p!!?JUC`fM$Vz&SbpY$^~odPM+!vRw|Vi4-!u<$EU{2 z+l*febeBtbehf0T^$}noJoGcI)Rn1Z-h!n$n2tf?=POg%GVg~;cn~?G2EP}%bjMVQ zH#JiyW4AeErb}cJ<%9DGOA_H7m*HGmwwQ9+jMw$vL846E^1co z5$dugX_B{{AVFMfcu8h6N-5j1a|y~*l@2uy-2J#L>Bnm_&Mx086%#AFH?of*dRjD>?Gb{w}nmcYfiqd*w0s*$ZbF=vxQ^DFG=92DOhY zSj;^PbjMb&o6tlgR;z}+53_1hoE@Enx}3$;XE>?}8LUmMvn49{47_QKd=#Po=99tw z8MgpuBtlMFu4??~6^LAPibC~DNWM29p4ywA;1cdoOZHya{(`7$EZl31 z!evM9UG$6pLmakej8(X?{AFw|?%9_<)@uU;@C3+LzPECPkWOzN&0Y$V^<{IzkK}fAWiC&%a#y`1N7? zQ^!~BZD{K2zpJE1p9mHG8ZMCXF0`+%cI~^jc3!TY5IWeaYQjOFGn`G-f3)B8H2H}@ z%!<$idzF_;*WM?x%4u+X?tW2et$#reS2iUprK@1)33opw>pSTeoRYs$Y5xDP_SQjd zbzi?Y(v}t}ZE-J9pb(_R-Q8V+7IzB4wRmxNcPF@ef#43oDFlK9cYl67-}m?4nS1B{ zci%l{cFyEvR+jCPm3`LwEYh^KzZR}M-8 zz-6O(aJ*313eA;M3a@vrX!bRiv*Hv-5LN04{=6V=C2A%T%0l^%HP+sI*@LldTe;LK z^zy+NwWe$KhVi5ulDT<|Y;|4`#|ugs+(M@F-8xlK`3fLSw(-&vF5aj()2hTYeXk+a zDyAWkO{SJ*J$zgIrD~FGH>4Z6E>E*Ae_3a3XH!V<(d5^E#aaIoGx^%-NrQ(_5%sh8 zY8v}6?$hJ_mzg?M_w`~H0;ye_gKCQ6`*lyRU6;4OKjQJH<%8m2OAsCRS56z5?f!zj zJ62=**`XtPD?CS~1#dGnuYwLE%yGj#vAl4hufK+SUa+ZT+fCXCk{K+YNYY*3WOyaW zH5MrnE!D9kFq|m$AEa_w66XAPIA=~#lS&GDT!@ENw~iBCmRA%mtwQH+yktmQ&rGj6 z){Kh}l)m{J)=WKiSI}|KnnMN66Uqxzipn_e9O}2#y&o>x|0mS(-!$Ik6#Rq4fmjb6 zSNJtLVy7XM#?^Dj0MEn*EI-~ zlJz+|3O4jFQ)p9y_Ybqusk~_6+6o5ceA%JoXv_FuAi-o-c8aeHk{u! 
z4wg)c7{KR435~$|q7jH;9>@T`y$mrtf4U}?D(Zy1pSTXIQ&roy#$&Sfqse%@r^xY@ z|NH+g`F~Tu5|SJtB`#Jms4BZqAI~mN`U4(wOC>svxS3nZSPF@Xgj%Pi+#Z}Xb}3AR znx(DYe|oEUDQK4MJQgB~SH%pdr+nf+ZyxB%l?4}Zf84w5cs6<=jclhzT^witrNq>6 zDfGQV#F1JuQ-J~!4SX{V?BWVM*E-67PhyRXbNz#)Pb)$vo_J6{k#29+`k800eh^WH zh{aua?Q9+6^~dFOl9&Ou{yFDVU>m5rU4h=U@;cw^8Fi-yqwXcip7BW)!LdbgUx?nMdCb5cbojejxbQ{bZxQ z9#ysP8nR4H=oan8^L@?nez}p2f7AQgBqLSl${A97FDF#waU2qy?{T%Ol988{Ti)Js z!Y0WTE(z#ZexNIm35;H6!jP9;4!2TjD>8Jyk&vEVU(nXyJ)fK3;fW7BH_Y+X`1Y?L zNT?cDSf>`=_aM}b=-%A>yB;1Y%e}l;NT_?Ptm1oQGi$h_u`i)^C+udz)iCcgC+*kI zv6MxE=atKE=m69`9bU0S(glR^tL`}Awl1lAf(C(?Buw6kz;C$E8va(-rSBY(uy)_9 zi$v$EPd%;>7z_peMLMg8k#PI|1lNF3HAh^Grq8l6E`A%L3v>47BGp$SH>e^vYdk)5 zyzb(wes+1Y5pJY6`}iMbiKcZz12s;J1h!PLJd=Ce_D?`1Zyplh4%M#tw2C z0rVY${F^`_jV@W8%f*zh%!p?>#8aKRw&%K15&6to@Pf`lVWJ)(;A3S+6O*s$YxElv z0@sso>q5C~619T6R{~V+X^PdRx!Tqx<>z~Km027ub(;X{6PNz0Tifw%z74FMlVWQM z))VY2yd~Q0Ypd%QQ0sZ`Ec>kop3XTYD~>Zp$q-25;$7lHOo`MAYgl#U+z*O+;{`_T z%I*t{^PC)C&-JhWyYf*?`98Sonkt|Xzf=1oAmD*A(%8bhp#9;GsHUa<>lXtEUheF{ zdpYB@dhfEZCoJ!>&98hr9yVl=s}ge8QO&TM()F=D zxMWvp+Ar%r6knz zzDz+5E)P-e+$*x^)wrIc&&bdkr`To=s@Qk@_Ry~zVbDlCe%>$xQz~1xIn0BcBvk-4 zHMB=Nc4|2%g)UjqXn1kgv{0D&G>&fqY#Ps&ify%A)fDX6({x}=zxPo_%lMg9&F-{d z-GYzT1^*?Ibyr>&=1GpC(F@=!sv7bS9EOyD~(4vKFsz`rZi|}jT+0<{Z#u!U{^};d5jSl9O9O5 z@najwrM@ziOnwgwKThi}5^~4il_AM~&PRU2X1}p}ZMGjT=&0vLq5sEHxVwCWQ{+vK ze|F%e1Al(u)&5rU7(XXVNaFcFsnGv&?>%!iP9~8)7rtHn4-%xJh%+!F-fTI~Tfa#S zov2g(K4Aue4huhg*T$A2chhup&^|e|A|_Srqz7owN@OfH6L-;S*6lz1y4=>o_2OFZ z_yp;Sd-p7%X*7kAv#`E=Q|B-A;acGeuQZhtTp+&z7R`n1cA)>m!TN*5xm< zCz4-deHc`-XrvAv7NBM##+mGeO)BNX-yWfNg7pd^E`X`CWA#c8AMR1%bB-yzUmB77f5`vqWmSw}{+p1F#Fa_n@pjR7Gt~d2hvX01?)WX+6I+bY|FItK`al2s z;s2}tf4V69-mPlg)@R9(b%G#O5Zh~8Sw)xewZc|aQ|oj4$O?kB)t8nlt5A*?cxPUH zx;k+VmHr0`kIu?E*qB8*2{9I#HgVZ%_!M8yi~76s{rG(ov!=_I>$dm1TveP0_B!@b zW|!8PwWkhRm95zav(uQ%FL$9rcGC`T0kyb@d;D4g+Hn9~U>*WH(#Qe}sEeaR8W_ zxVwa-dIpv%OlD8@AdwxaXNV1YPZc8}!G37C5)C7UY!!x-x^rIOiZ?p?MWJ zz9cIkCFH>@+uM%+A%S2<3>_XOvCA61b8J>$YG|Q_!Su`uy7MZf&y3ygueSNt74MpQ zz*?s2H`&egBG=1!7thdhwT)B*M(OK7LK0X}LuPpSeUMAgL<8p_#rKud9Ep||%m2nx zxan{xt`eO09?)!DohUi!S^bMVGgA15rQ(;qCgg<>%4Fj=0-Aphe2yMe^izV%f60H+ zPC`Nqp&nf!z26f4_jsKB_wDe*2yypj0!?tT+c^vf;;8tPg4>FQ?a9O{(`6K-S44O<^q; z``j8hZDll9um3)on^W(8ab)!v(bTZQM_&otP8nEgO5^dh05jSU9(7?CNcFKnYcUo7 zLHgQOp?6lRWy#=LG@%pw_;6PKA6foqrWu3t&L8p>vmXyUttu)$++;VUUik7NsDkAJ zU)?<}=1*je1Eg&?2?*n)59-pBeKb}~u&&EkFZLDi7)dH*#kIKT1G6-Ul5f@3a=S@U zm(F}&qS%|0@u6e#rF!*hIQsz!n7&H^WtQDcdEMPeSbLRKwsAf`U9`1`7E_QToP$dU zSgt7xfBM0ZFOXphL#=A8?bUB8AFPXb@;5J*uVd?M|}P=q^rQHYpu#GPr-9JpLC_~Y;sP)&OAjC-7>asT&42pl3zFbb zYQaJ3m0PO!zzr6Ul=`lGW~~=pm2auW8eB&-)Qg~qs_wjgV%}xzX60|=jT8>#QZLmh z*>v2w*}iDMXZ*ES5u4#%ad{{3i(a*1!rqeZS;nDsrw0Z6EZ6kDSR;!|TWyJj*j{>C ztpNS*W+&`_P9;b)G-6Zv8x!&`Jw4ehzQ7?*HO0*b#_;7t!>_qP5RAEgZv4j8`IG2* zRaVpLCl1w}@orm%3?3FXJ?|^BV_*_qQz2b-N(3=CxB5(vz0F{r;viD}BogXLkAU{V z%31VG$+}05*zWRq<0n2t4WyIWdcuw~e?(1ycq2C)StV%&2}hewBJ2UG%1av2uuxh)^txqN<9 zZpC%9Q`@oWC0a4a;bCOH(uTwwVfX;b?KLSz(E%Ye7I9zGVZ|6SbHmZ!+4yFnz*k!iF1Vb)t+8QR~;~ea5C^Ftub(48k>5e7k46$Lvyp#1=Dl2Zm&F^cx6;d?tE_yIX|9t-mv(I zb9b7t>Kn)oc7%B?D1>QF(>xVwyB%x#_9ZyA^wi{EoEWvKB(^S3*)gA0p7+ksk&UK= z19w{=(hdZhBLe~pc0YLfHtK;rFN^dWe(0Q^?D$q6ZF`cPFC;yD7>%}FOmHkv3@&v* zPdDH`q9qkDRr&;GG18c~1Ahqh0FJfR>1j}q{ zHcW7s5-aKsvbN1@bIC;Wc#+2sGgL2O_8+y_3LNX^(r>shS9?6m@7PIt7?sqhUsov= z4^k(2+*zUstY~z?q@%~&O7mQI3K|5n`Z5%4N136G{JSjlM}W5Uaf^%U9IeTg#R9=U zdz1@up)sCGPsGDHfHzsej(&2kbD{b77($w@&^YJYGZB)4QvIPW~#P%u6xG);K<5>4PW_~Iq*|UAD!G>^}x5S%kyuD+vaYD2Unidv4Vx|DgaQ1LtX3LY+ulxqEV~5o{U{kt9_t-&6+Ab^Gs^;(p=2# zv(vz6?;k^aS(C>-3QNOw+1-X#UFj$|g{t`S>n`gX132U(t#FW>VGtU-*C;n> 
z^?_-9gA|u2Y3@NEJNWyK{L!L5@$3O5rwv99bGjkx_B`Ui{n87STW5N+AH zS{xQJE8r5)j7KkNvAe;CSVk2c_6P~icsL*0=WBd6QxUz7)~w$ZxZ2ZqydI9>k(vmP zBMXl|Auhbp=wyZHT0T;FId|up{~c@Db%q{P$AgnOvU!M6Z}h_0)5jG6fwcToEJ=9v zJl)#QQLLU$Px!j&6=i1P#?V*SiT>*LPxc&8K!V?iSt3bf$OB44wPXr1Hnjs?a_)z;Ixh!HZ-*a5V5GVqz; zi56?~IH!;=Pbi=`@%~)!Y;kC!<=gra&g9B8RK;3t0B!mbnXTxIhYA}U)~UDv9gHh@x@feQew{9{g>*yPgC>NCA zp|pZ(fZf(Hi;MxN;>!E>I?stTGbW5Ewy)0ckisHQ$NKp)<)U;vW^X8K+d98*#G0dxf=9v*!gN$W3NumjZeB7}xM^R} zI+Z|hygk|_yAkH?zg#eV{q&vDd;N>neS?!(weIP0Rq2ktg80{Dj$Bx+uT_WlvZRBq z(S(6dm}AJQm_O{jmb;{vcQyh0l^;sOeH9;|KE|iiSEV%)W1Q`Pw1rb^s5uiM8!(p% z_?nTjTHEQ;TI8f>tm$mvGX8-zbQSwkJJ_%0DV6>Mr>)=gOHBo*vK9yhH6}@+L_$vI zZdoWeMxPmk-@yEx;k=r)?6PB6Vchb-+DE{TSW78^u6p@6E{nK8-?`EnS7-l0CHwRN zC}R*^cqBd6qk9+_#p|hOXF$je=Arfg6PUs07rq}af-N2FzPOs^G?c*DvEm(ON0LYO z9+zW=AL-YXE`)bGCYt^61ijFN6wr-<_Ut zLSwnUEc$BgK%b-Ebha;05k7w!(Cfxrx5Nv4dCp;MCp*;-W+3y8j*DgoeQ7f;X!>d& zQZHbZ3_KE>tt3hBatz)ja!5*q#GP&RX#6?p5zIlle+jWEUV>O`pn=sribWCU@>5JJ z=JLbQ!yT09V<$`c+;Z%(w>G<#T>9Cb8f~|E-=4dkc(z9Rl8O()?&Q-d)wNOYZD$!p zA;a|q?V`(po!L0LPp!Khcn8a_3DJcjD3xC_m88(hF+Y5Dyuj(A*RqQ`dFg%K^@~;A zv*;frS$h0i_$bz=d2?YYpWd4@!XF;`O@TG##b8hEt?ZlYeW-2L(^TUSbEDHt6e5=4*z9SZ6#`+Mp_RSld@3tw4q-jp6aJ5D?ew!__-i*K+5 zKtm$#CV_$acEI)baK9Qyp2Em+)^}{d?3LS=8K%euuGH#_dP-iSA+sIZMW-gXJkj#_ z)`fzSm*ct4W~(9kYRv}Fu>HM2g{PLJkM@t0J!>{5eV!b)_CD{4j}soz#bLidrHCwZ zYo*xc@(JB&kBPgLj_&txyj{)SnuL02J) znrDI!KCp8vNEB~vmrG8zGXHHQS^YJ-07Ctt$|iaFJvH8Bn{h!O=`OMjsxrgI zy@b(3kQh3ZbRFzz9lBqT9jsf?SSKFPV~(1|BdFmwZw9{|lbbnCTHOzTSI6AKDO69m zp4l2_&D&OTrU4kfo|mjE;DIL-(Q4fr{w0bbK_4dDXgN5vBX(L*24SiLIrnvK zD$8XHt!)dECzY#I8fq}v=i^nnO52I#gS4aMG7aLH`9p#mE`W6!y4z5VM}E{7C4@!r z58?&z(7i@#%1^-gUzsG)N4*;uOOtzBgasU$qwSNim*2E6hnU$kFI^eT-{{g*S}r$8 z7OvyT+}Qoa9E2p;Nwsfn)<|-tl(NU5H2UoMY#02~<})%vZKUfIsxQRFPY2Xu*5kLy zNnoEV9MvwJcU~sQRK3)g#bBSMM<{}vcKQaV~1Vt;I>?A(~+`POHcZD+s<&n=DY@^Bc>A|p5r=DJ-owE)>dpNoZj6SMIJ}P z8XcTj=LIdaf$CTo9u$z2?$k@xNL!xGPl<`UjO^w`(`@TxEHKwat+HG?roeCLekv_= zny^@&eLERK+`xUM{fBlklm0AJZZ@lcDx->K@a5s%An|b(N+9q3bSu0j{ z7scYdyTmxHVOfhCa|^eo5UYX;VExp{VzsDA|O=qyJ)K?wCwH2n> zKxO5YqkoXTD_I@qE_$?A*)$)Ksfb1<9~giOu;0j77g?4R9}_{#OH_S!gm zY4zYAX-JSxY5TR*OU|8fMv#f?1mzjmNJpE%zN%%W*2ITJV?sfrC%KS>Lu+OjGaskFbQem&$vF|ni_+WVx0rOpIY z#Kbf8dv7n%!xZ(YPySII?MK3tM^}}DGWx-3xvsQ`G0~oXi-z7CW{C8wy5QVT6P5ns zC4kpX6SdeZ;qiae*|maY+d8lnWvl&diP?o^_exQ#x1(_c2n;dp3x0{1&DFHgly1^^ z>+~`Ij3@QB5?=$eT1ue?ieh~y*y%2 zQAezx?_8Z9a1JWV-%ws$0_@^j_-pGi+c-+q6b%HC`^Q^39j#SE`lAFs_lF+jv6n()t`9F| z$VkX9?V(=3eK~n4Nk)BzjPm*o(kC3;k9cSwK2vhwQ&E5U_MMHLRfLAqh(MT@OH}No z2>IMyY!X>jL64xuT~|wm;d%ONR>Hs zlcY59K{BMkf@U(;{tlb(Tf;K|hW*T`llPyB$sRh6D+B{P#BPEUlK@#7m`o~uJamjs zbo;p4T0}&rkArsP_a`(*H||q=U)kb_SeyvZV=*hz?cinV&yy*)EIdn%S1oQ-)aEnO z5nwHR0-z^Oo?ppiweqjAxsTLS{$b@7ky(sI65+_DC~V%dV@6XMfWZn&R$QxQ_Tedua?Qxsd%c~Ktb{^TnZiM8b6 zum~I?ZVhAAroBknS_L`aN7tYq4^xFI;S5Z}1*ADPicC>)y5Z9$zE1&5v0Qj%U3?a_ zS%X^3gb^jB#Ak;m?)N%K*g7_9`$HYAzx}>TnQc@tax&Xi_VEq6FqERDZ5@3)&}UR( zIj}GOE|i?`$$&FLr;V7Wn0}^1U90i$0qajvTYtE~Hd5bv{a?=$ZqiQuPJSF|a~+#R zKG%X-5UuNaB571hHKe5_T6GMq#mk|qf#fr=0-*MgbA>bVFul!(Yef&Lnxo75Tg3;3 zO*Kb^C6-GiPl7e{t+kgN&Fc%2@bUhxgvOU%;pZ?ahPze(hmD+SXVgp6T)zs#ljNIo zMA!0-hi6pBf)S%*t)_=&4a7w&E|PT6>kdsgC&1ix=HOi4k?32ia*T;U(%S-%WkZwSqnxpx=pxkY zCD~Osw*Tj&ZZx>ewrJsAwZe6NU?a!EUn)rhd28t}k3ht4b3^S_H<=r^8UpE9n4`}p zi+P#Y0t$s65EoqD8_-1Ctqoa<^OxR|S4W{96cpF>w_Q5UOrxv@1HdH8;haJC2HFaw z>o$>cZvkaG=;vIWxrEf>@$Q)7F9|up0E6s#NuSq6x+UL@_ECaAL;}CU4Ex2T_jBH* zl6YOmBZNM=TyB+3oK*^kuk#ZVd&wDQiS>Wbi~U(8=a*3~c)cs)>CE+263!sQu2wG? 
zXYMH4%Dk6rYb#~H_M2)$S3=`wxyuOA4@XpF%&k{!k*@6VDh47O412J$0 z^M*JH^qVLP0KikvD$_nsUH)c`IUwUGXLaLUOSmpXG@2Zp9Xn(1FFZfhk z86GP!SM&-tcsO}z!N&GV6+-i;MQ#XTjqtl4)?{9*y#UX8R8(@R63A7tQGVBG!?eQX zodeC+ed}Mrr7FRJ{F!b(j3K*yih=!uMEYy~O^Ew;(VtVWe*P3L4jNgG&4!c41)-`pEY%e)eHD`xNN|`?+UX}>ZaPn<(`Ql#&PqJLWl)7nk z{)o4BI0m@&WLEOKodyaugxFMb;T46)zn_~gD+n@#@YlTfIi(lA4u-==y4>;GmM&TJ zs=k$lhC{N(lQfk?K&l$8HlJk=t+|HFOzXMr@y$!fK{yd*3L16M$R(vMcI-K~u)|c3VJ>_#lPUO@?Hy~Q7CZW-cl64U1>~`%*|KMvgB^y;>1CpF zoTs16!p$m>^8_;oC~?c_HTYb_K?nh5t1727i@)nw3q#ji!C!?J5pxy$4{o`2E_g1 zBj3{cxBP9Kc`i#fGVUzVwoSbkn8@<*jSG)eQ|PSZfURmX$Mmw=p~vT^TleCCkq@nH z=Ecdem39vXHVdC(NT}n@ZrOq{9xWw+?hg2bs}TuK;i^M6U`b2VTs(ba9Pje0$vi_$C3d;83ZGu@GHr&A;h5ov=Jn&vPYvw%_zrDAoeDs?J z3CXdUt!e8mTcEshCS}IA;xywB*!PPuInhzefj~1;i8sN1K`9VNTj^vL2S+m9cTY?< z@%y?H`vX74j2ngtXi^nJyswQvux-GS;T>75d zORA_6_-&GC?n)8caUo0RkA>4KY#tAr<>V>Dj#6Q1yWi*O0nR*m=G&e2a!@|m%Qz%Q zr3UFrYv<;>#AhPCwh|WUqRQ>JUE-VX#DM?7i-Iev%+iLTwf8&ZxF4LHQt(1l; zFT8R9B^=DA&pqRWEfLZuHapA(goTO#a>&uwmWMwaAh+r+3bz3m@XihaR-2kjfG3#+ zM;Yesxh@=cAmWlXL$i~4bJl3L)Jutr!JhVOY*XgS<|-Gm6zk`W0z zp+#k)PmYmuc-6q);XKdJYP8P~)};b#EYmIO3W-@SMw@%6-hDMc`fzU)FtUEWeKshP34lR;RL8J#ggN zr);$)QJ!%Y%5GV|uPj6??OTv{zEXn$60U&w%%mgV?uG}*%A_Jnj4B3MBz1?*u58WW zHt~P&i*-^g#K?=VXGS&Z963ls;(np3A@`%g@X~%4;;_@+lYf+vwH{S&LOZ>m1Nu~y zt}*6oUNB3&`x)KMwt`VQ#5{{Z+-ZFVy#{8ZHlOj6{IJiI4tN7%&FCmRAB3tv9rQcp zecv9mOakA<6&Pd0vN85(3blo{5jE`#mL^E|dp=;ktvuQ+Yuj6YcYZ0ianyDHjf?0 zI@Qq6{XC@Ws!N$V1>*(pZ)j@!vXX_yJ)KnAWBqN!G7Ho11Af<>$nAg2C1cm+iZb)Y z(IPEo)uAIQ4r%+NGO!NUAe7(hEr8U5a$%d`3ExI`t*HvpJKAt4HKB`>RKLyE#9)K%k=C~*LGMeRM|07Yai=978RHBZ?Xu^V`h9$rxOcoC}VcT4z z(1VGH`fC8-G*DtlFK}ea<~CeM<@+x=EF89cPxd|R{!DVZsUJ&N9m+3#7Cx@t!)y_1MFu_TP&9KE)tI;pMj;7F6Um0Z5l)$!@yD+uAtjq%Yf>;jCMmo z8ZtBQNDZ^aD-*bSTnyFW^ewy#RW~Q1>9K1Yq!y>*Jy|xZPuZ7?KXg&#hW1yN=54tL z&{Wx@4M-oTWfpYAOHj)q`!Pz3MDlid*L0JgmG8U#gWE9o9}|2NJ{zyq-!;dA@rCc( z-0k~2R9`2jn)G!JJtdQtIq2utA}VANBO=veS#gpT7+GQE>4u8My3Q<}hZO8fk6{&8 z^;I4ivavH7mLd+~P^oI>nv6v$?(fz7G|4O5NW?3K-hO^VI^#BX5!u!?uTZGoCOk3- zi*hK)(S+%0$Ul|Y9g9_|8a>@!d@NU>{)=!TGe%GZ{Sq6r;{M9@XtPT@IdIh=zf)mJ z|8wuRe~w1VcL}k}eiauU^3VEBN`&IL53-cMV&(?UL&Buj=bgKni}CH>i)m6vt-o9Q z&HB#T@;ANWLgE~&=Dx}pjr$?7oq{%{SMHxY)(drwFf(L0{jgSx?vU!*RKZA;<_b+C z8wcTM(Df|tS?7KB56RsKXyd53^2yd8`FR^a#t6^?n=aV08JsVggjU8V@`a$3% zeojxB0G7d)aZ~=bz+A#p{Pn*YSl%(wCpY}RWOvNd~?LR zG7qN)mRHPjG%jPj3y5v9a0;RTGqY`*nGMNp{0cq4*wp;vEbP?$iAGqgzEg=(28}9l z(S@tseKPHVeNlhKiP&jg@jDZu5YV@Q%xe)_~ zy?VZ1w1{YSO1NaTT6L_4i*R*2YCFpUFB?{n*@8nyFkKJ4^RvyzILzQe3&ZYMd0pLV zRtmw!3)H_EacJrZ-B@8(|`{5zQxtMt**U$0GO z{Ypf#QbB=&9bu&luE&?3U`hnqfR{-<*=YB!V_i}{_0fk@lIAgJ*=K9RTkjUCmm^f~ z!ip4_{nRjMEzgX0oi%MF=liA1hZ>K;muuO^HyI)4njDkw0=vHQz2#L&iM~iD#wr^U z@7=I}H#IVD9MU{re_8+gIuGB1SAwjRvt2ELEKNN%_2Eb11_loQJsngch{!9UZy$!h zknuI!$?npwl6EsGO!01DXhYpMS8NyYW9-;VSM^+CNIXBOE6tj8gAN zUOqx-(L*xlM5ZIO%X?>IQP@YT zmN2Hbn!u?3Q?>!tUouJSR7JB!rrBRP#P-?ya9z|yRg#;-%sUF*@JC5~Sho!VzRZr z50)p1zlZwDAJ$fI?Y}z?$CPNm^24`evrodEI6nwxpDKph0q338Y#tSJ{$4NLCM0`a zW6x{zJLk!nPc%Lo;a2cBce+)0Y1)Gf1RwuFx~cGs?lQi17xa0j!y8rl%Q?6Tb}=Wn z*Dw^6@WIQ>yDz5JB~vAjnes8S>XXPkQQ_I9rR!xyB+u)AkXQ^YdTSjkIjKg=Tm!rN zMUoN*PVtxUT{RpxX#SYp3Qn!Az=g821#eSyGd3Vm*G^XThE^LC^`%NNl)qEEsq`Dq zr*#5tkEo1(Hk;Pk<4D!=ystW8plbJhfk_MNL*6Uz@HpX%YJ34cB>GcV-{b;Q)cBz9 z(sJj9_r6XRtOkqUP?*`qLQ`^ZL+}*Y@9Qe9ve!Z`(qy*TzZsCnI%_b@mwSFRWN6E) z-7l}QyaJo|qoY61#fFVbbD~+29P3f*=Zn;D)Xaw_-^RbEZ@a)~Sx*2#-=6WM^glVy z9tksgiSzJ_MvOTwY}2_pIT&^8)*J@}C`Gh!G!C%SH{nNoI!uzkEaS@Gk0{m-+Tje- zukymY3j#s+XOmRGE2MT>zp;FBHPI@xY?EangsFt=+atZMgKukXj@@4 zm!o!CqbU?BqbfSs>7VDqO@PnJHkFbE&hekrs#rb=Hj|4RDwy-&PtvDhv$>=}{_lHQ z(-WAL3KU;6rdxjp{1xC8vI}em&gz~)bS_S>6?4HuERHM%K 
z%CcQmlm0x}&yhx~gTECRNQiv-#naHQxPiU3Mtas(KABUynu=K?q~%PReS&~p^l&;w zMlqKNh+uxTLqe1&O}&`z2o@($c1|Vo@9fbVl+g|s;YLVYPZKWRuxiyjytSaXIIk6z zBRwyFnQhp~;_#`I;vb~w)MkUNyE{VQA{xRb_MPw^1|P7sU0t8|cB1nUfGU)LEf9)= z@666VHM%tAS;>~^r!rRMwf=CEh3vS^;*+yvHeO}G<~zKtlCF>SSj_uJLP1vwmjrcc zjG?`Oe?Fl)aQ;OBN*oDP6LBvJ8$KSf|0#2pz#Rvc905s8zQR{Kq6-%ak(~k)~fbq2b;i>CwFv3kM99^!2cSw^ZTA7cuIJ#=1U3yrHB1Zn!JCG z>_0CM-XJhWsE#(1~j}@Qnmvm zJjTlkpJn-xbl*+u?~H|fFxTPQukJoj^e)MPQ;39qcxa1tGitcDF6V-|n=q-o%bg=x zjTT;3PcJDm2+v|AnTZNt&+E*utpCnAe^zHXK&FC5(t4lLFV>*U0wu5pC7E`viM{?( zDMVgRXF32QJKT^^-j7wS#z{{Pm<|xikzr2le?{_v(FWDY3Q}3+8?+0Vo_s|??zCpP zo2JSVuByB=(=8?t;M}nKp|~UJEHT21aW>g=!)kiLMBB? zASr$FP2Ls2TqqHG8!XUF-Vm+7d_gpOH@x6 zlk&+Pd#$^4#Zhj1Vkgm@`ZZ^Z&)_xk zRn!9LijGYy2|@N0Re5F(fV1&vuhph>{lMVkP)S6`AG18j*}UX=qoZOW8*>h;t7)Z8 zr}0et2kG5DTW--L#J=h-huOVn@j>usHLB}~@4OWqY9xZp1t028h7q^1uJ1^kGF}xi z9!DYyWM}2o|5bxRFggd+}3 zQm05H*^CLaL#}7wp-eZY)ci+@3^;~ACyUbLB*d%7VpupStH{2Q8;=DMV{<9)9Iu7+ z#84WtpIel^q|z9rzg^5xT`mfqarD{>DHgEF!_>mL^=dbXZy~TIR~RO2bQyECOFF}R zEBhG6tFDu7gFK%!hN96P$v4-r8#ymZ&=f)C!Mo8QWi+3|lMsS`p4@m=Y#c}P09KHM zFx?NkH2|g~gS^8^!w^u;0@I;~nwR=;Y;CaKwaw)}Nc(5|sY@3IHaFDIf4%Yn7i5N9 z3i+aXp>-|ye}C2)Y_1QKSE8p4#@_6#(>}Nt^4wlK??#4b9%G|L-L*=j)J3$PeoHRu zYHW|~37737Hpx~@%B!>uDQ7Jpex-WK1;HaWrn5nFZxgb({wM()lhV@|AwsJG_*CO% z*5vqweX<(hE>F{3>vg)n?uVw*df=#-FJTHCHhrZQq`yl`|3q^I{b-9G7e8q$KJQ9Pbj(fwYxTH1k7 z+;O7>)gS-^C@1S$Bz@KtDes=1ffS`Kp^BkzRg(PHN_^l@hBDFPaVAExjhZQBDr`TQ zPd%*&u^2PZz}QZH9G#*&=ZwPRWi#SMy$VNryQ7`p zo37qk-`!?#fmO&Bh;KT{b#Fb&_LD+`DG~HMOc!ZXDXx7y>2-f_u9W3=I}fwG+E@04 zRK7xfXUb-`qQwmnAwKAi5F3cW)+QUQUSqYuHm^d9voOnvX8Izl?OP5riN3UqE||XI zh!UAIpArS;YSCC%?dp2S-UE`37Z{(U-G~C9A}xp<>~8E&%%tRMODfb@E!l}PK|^UZ zvz4!$jYOwR&0G2B5WZJkIq@5O8|2xn>gU#Z!b9)2nj!(07w(fTAWMVB1VX^zaqVeq z@DL@FeeqM&HugbrbnlS=Z*Pcp>N2q)S$us3cI(JRG9vpMt=k0&aA~nP_-gSR z?b3bNXN82M;>C!878wC1>jS-v?P+j|{aUI!Sb)U}^Ygy{D}N71Ks9(=RV5&XPcVK; zWb>oreE3FNXS*Gf3-ZI(-6L3a8E-?5i;e^0vgQ*XmI8kceLv;TSI1tK9jxI5Org00 z6qH6oKvGp(e>Jb~5dR>ZjdjztA1yG!*~g`vNxw38u_f0Mi_yx43w^NYsG-p`w|9N| zTyFhAZy9iTvVS(RyyAl40$$6x>#We7)b3?xt3=V~z4k7}K*a&hPrXS9l*6X!R$L}6 zVr3fa2y)wI;|8kgS9OmXR`z0jxKrFM zSaEkNUZ7ZUcPZ{pa3~VoCBZ2!0a_$D^yc?{_kREXy(i~H&c3_Z^X@ydGqW?#V{EP& z{AL2C}#D@=`BFMpNy(=?miOHL;3x0kCuZ5T7uW&|o|Q-9veQ5Z}4I?kZm^bV$RnpY?z?1!nv1vlyHGQXFtB zmEp`^Z)`POm-VOXj2=`>AsxBSm{veVyBhB`yFrF+Part98@;Q@!hsdSNhPl$DywZ& z!Ku_{`lC79Q^ntJH&YkLapQVAr(s~3MvW*&*MxidBng<3O&3(2ri0q3*?V*11oE(% zw12NuRtJEwjOwJ#L?&L%wSRzCozhU-9smQe*rdbWGxl6982E(^F_4CEaR{|z1-_Tm zxz554dmUt5=?)pOzw+s}s_k}v&G-jyn%|M}xM97Cc8fa|R@-$j6G|gRBJ^1cJd-tR zA8~ulWXATlXnNi}W%7Vg@FBqPa*rr`^yRndu*T&GtN-O8J4EhL9+1gj>~Gdd8;EaM z&-mxJ+x6;h>UPIQM`pD3M?J0VLsKTkjc1!Wz8?Kn9&}dzoE86{BR2I)aWWM@c-Te# z7)yU96Y_?JII#L)PuUw`m^$1Wn|vPgc;_T*?}48&K_7!px>ifqZ@Df5%a(q8z5!NI zAAEDODiEL|*CUe*4Q7)He(ytXdB3Gj2Giy}dYFo`v3HCcF*H(Q446~y%aenaz0s&` zpU>b>bbmkIy2IQDGa`?@t#QgvPWTuw91pye+!O{?TOQ}JT!R3tguB)0LF^eGwzr;G z95!JE1xgy~zN;n~UP{3SpA*SxUR&< zG?>|tDFbJw2()ul2dXDWX4*<%qHJzIP}*|`mryM0tgK!wjT=ih zmcQ(C1&#c*ATUIy!jQV7K#esuAS7PeiLNVi&tCv_5kccdbbWgiHR!y~R+8sc$i8M6 z+RzokxG!3cXJv;k04OcWX@QlBbXml#qWn9Gy6ZpbnsEWL;1h`7Ue+S(_ztJDjGP{pk?)7hscuyXTvw(#z3DuN#(>;X2-a+0%k78+GR#R&>_6Bux>s@qQKm{mF zI@t-b)iRv60CgyZyjn$_ukIO$oAt``4;W zateGw#eVtbHjy-Q3x4RT>m|nr2i8AHdk7jBASUf~4!lG~aD$~5o-L$MzxS(epYp_a ziOe5?5_}9OYKBbcgs$im55Q4&J8jTW5WiIh!)H2NJM77U=bPR%Rl*^rkPVI8&5G@I z<>HFI)ex<&tazM^q$od*F()V`zkp7V&Ul2OmT^r3G+oDf_BhA}BaxTILKsQc)wAEFHAg zN;T>BKsoMVBX1&8ETbctOZe#|_gd@^F=1ca%HP)~4S(dZRex!=!ZS(I8m2VlSN=}- zN{=%IO!%-v@gw;35Y{b!Dm*As7R7sG>IO4ctud>}HT-EpWIA^IvDW$lnk2d}wTW9d z_Pfwwa#hGwp^tqpwrOfVG|4OGAaT8>3Sc*ynu1NWjeg0O-phE 
z>|w(iT`wT~2Ox&nC0^-fe+;e$u5M_{w8*GUau@>j_%x8eGNms`P%zf)i!wZ1=fKdJ zlie72D1Wy9U|X`j^nAxa$UO;}HVKCp69FyaN=|Ir13+|BD2L=$Y2X1qhNo5{p8OVJ z^a01xrsSW;eR)z_?r4NC+Vdf8)i=2YqxqjkzA^B7kd_?~8&+2@V+>k)zp?1t_3I{#W{WEr$9^wR0Qu<@*@x(yeJLlVqspzsP z7*Zp%o#Jsqy_JL4s47-yvr9yJs3*dnd0B341+NYWj+-Hk=hd|39g{q!{v zTyH=7qWao2bsajZ0OH+Bwlw~Z(JNp?=*Nc1I}9Wwbl0Amc;g2s+Lt%@Xnqg2Np+|C zqId#?R?3_8i(K})ia5C=<1dNUN#{21vp-In$sXeGVD5pOR-Dpgy6tbhYMJL83pA4W zzA01fYa?@3et7udOs?(y`dDN29oco6#(ic8{5*R-4^@X#OjhBv;*U9qUz0@FZ z?S=YuLEpMM{9^1ibAWADO|Zs~TpOOLTn8C{T8_GlbcXh`fb1UXYwaelFst824-$WW z7iT^kPrP#QZF=Oj_86`ygMK676bJbUNO<|sKCzFkbS%wublqs~8YiHI>73vFED#en zu^JF!&;aZm^7#ia`)O7cuL{TI%Tk(dy%a|LAx`~WkfY%?jpW61Mw7*X4Bb4NSwo|XL5 zCCafY-}xnn#PX!W_3$1?{KlBmMq)vH%3H~w_vdAmYO|kMzvBlVlwEu9tC1g=Nl$$% z$0C_)!T$iZ{P)NijYO0tNy)CL7<`MhE>h(7Z6Nk7J(R{`l(T=pb1YN)lWJPh@B!U< zSM?yrK)KabvAcS)`33KV?Sq$qf|SiE{*d_A;C1n*XQF}0R`=;|3Difc`1+2GxPb*z ztZh9-IN~kBzeL_FOOvhab&*wtNSZZ&JhDHv&Z@yVUEfUeEJ+(xPqN_Cbth$K$6sS} zZ`l(3ymw6F!&F$RVZcq zyI)=~*;VSO6Gr7%NB zp2M*QyNnWZ4GD}%VJU!5?e{l{d6gK|$%#q>=&d{NR!FhQIMWx>EZSn1{QcA$vCkZc z^YjT|aWm1LCEK7pgaaaG7Zt_OZkM@#LLTX_K4oUD6|y>c>lN}~9oH9E9UMJRf2yw6 zNH)4n)8(ccJ7G?1JggghQJHL(v?|>0oLM>i(ky+|7Nb^vSxK_UvftAnBg8`CRx@q( z;rpR?4T*Syq4IT{gj8gnhu#uy(!jg6ud|vKt9#pF7GE$yQlEtutu8;{JipRT7M_Mq zemP`jLJtdAe`vX|IAoD+&aq0@SCE($&R39+%7A`GWQp5+^6oDfxjHa2-xmdbJfcUl zRaLR2?@>}5&2B6-e)oplN1^CI$;w6eQ^ZTjJ^P6VX_d(IdwagQkPFDG-yS7y>2v+| z$v8M>xE@OscCMJ9Mv@wSI2l$cdEC1RwHA&KbaS?t}4R#R$QfLCnrc$sBN+A8*l&@-4Dwp8frvBMp!37 zC`LTtR!w>JRjo@c%lVn2ld?HXHVKWvb%T>?dN;OZ<-E%(O_v)qSx(RnBITX^h~Y8$ zkwAGOdM|^@k6E|mB9Z2?qNOdR?1d|1a#-H4w^alyT%2M8EM>_FCQ6>HrksaZsw`75 z0><8{HUJ$nn2;8PpScyI^sVA7=CD6G9N|d54;g($fJI(`PpU6pI$vr(GS~b|Xm`8q zixpGmatq7$obLUU){5yz^+ZS^8|N!Iv#swHf*cR%mN6>_jucdG^;93e#IlBb_aG4A z)2^YKnGD!*}k z+r{5)rF>2Q9EQ~CvX_RT;oj)u#-epy^?3q*63C2R5}Q>c(<5I#zptI}HiSLpTls?4 z&fVpekq<}ul!6Fk{ln>W(ADK|G*5qKG1jD}LtxLOr!pnAb@HSljZ4iv1E0_}sc z1CK-g@%vv#hRTY|j_QL6Da_YZT-hb08K{?^^U&fQ-R5l8IB#4&oEk4l9F)DJwuKZ8 zP74?0N&7`(W}{VsZ?Pdcs-$fb%vnL<_(VPm-c#X2PaW%#aaPrtjZwQm_ER?98>7lN z(A4R9v5NffN4^;~$?cd}C2@t_VIbQ&JS~mX*s_=IHOzOPG_G91X05X3Wu$-?S={&Y z@2~HmbldF}+riTDFAbQ|S{6@gw9giGI2Z5?u#%9-b@|&rHxIvSeqqq`wDJ#tH{Zp6 z?YXh2Irt&*deegcD=9UQr1!ziIfFgUXa^GO+)On6JIX)XsE-A%{q{?_wMf(+>A1bN zdG6_f%nrmXA%b6$L|VVJD@CgIMQ7>H1*^X*Q(qv{QwD0}aiy1- z&lsY5dNScjb>mSXe;FXX)(gcT?e!B^mVUXSdXo#Z7#R)oBl+-F*}l>=kGy%Qct3S# zM3nbu-23wKYmA*Y0hE0{GnaA-110X z=HCz)RTs?iTA^BGF-72_C?Cub$0VLB{|;#~1XC9NXkqCj%ZkAbFFpFY-?;>VX>g+HVL&SNQC>t-x7mzvwjZq%TE1lNlOxOl&pE|C zP@o1UKnSmld&8aL)$5ujlxD#DgzR&SV4;lqZjHUqDNJ>HL7r1>UTVj~IUotY5>{nL zVS0HF-lQ#ITw|w?d>0W>)!p`mR zq)k`*D8Pc1CC}r=6pG%T?%L%8A4r}UWL*5Kp6)KMvHUOK0jHN<$f`BtQpV~$x|lJx zobmqvL6V@&{MMUH7vT$zpEGJ@ZBVi)_cbqh8>Wz%W!u~u6trtyza5kc_;oO?+&3AT z*Kxd7G5YGVQF6WOX&wk>)yq>S4iYT;T#*59Bf-EEStpXWv7~Bw$l!~^T5+fh7q>QNupM* z-Q})tE=k?=%;$AVHn728P4Vm&Hl4zDQ}}7Ca&0UL$2JE$$9Uov33lm*JzaP2uLZ$);ikACOF_&L^?;>UGe&}t2DZ4?ky zOz=#*qq_go8z!tjR_a<_-QY64mT5V#^mWgtswY$1Ff$5Gs22NG9UAYxEHYJVkW54y zMOc`juq;Fn!&?DY4`?$|*Yi8SS=_2bJO57}=;VF0G0}^eJ?FC=vcOGM< zO6?*Xh*z>9t6^#LXiPweUzg)9tI<9L00Tv5E({|h5q{?`Gb%kGfIGJGX?ti`%`exs zmTOy-$z>lZZ)(}U0AKXBmL^8#1dYnEq|km$*Y(ZkX-~XKQ-j8z(lJTbQzF3}00IV# zWU-DL@PR>eY30&{2nkbRBO5E0pCjIN+(@|>OotuK)wE8D)_uCZn0`L~S_yx|F4x5^ zMusm<2Nrz{KeQ-3J=Eo&No{*Jm3)53w`0~lhfgX4DtXoOCYq~mmh53%tIn!*>5iyd8 z{*b4$vv~t3;Dtln<-0ph+zG?D#)L`FnLoSeQ+~8zDjv^`L>2C8Q0H8pHmXkZ-;8e= zZ(kSa2zxeyBZH*d)AMccYtw^`yoH`!cCN(T3uay&i>;Om2%hYeqWu95u zT1;;02b3uXh!sRXHVv4L*h6&}kz)KfKw58WpzQZ0g8n40sj_o&vloc7`2>RbwbqX@ zbLM`Jt^2IRP#K(jrg~JwIZUd^L7jV*Prkr)udn^^-o;j+?rKcJZoCXMF_=kYl#RMK 
z-d@c0!1`kiZ!Y^tT7ak3{C&+35u%VMA!07PW*L2Nut`duxb0 zL%#N}EcxA94FQzV{mxzXq}Wm*dBa9Pn!!%F<1pHC+Wo0WfI8=p&`MR)ERej5@xoDX zT}G2`OK%E|A*}|Ld@USUj2wu+zWvjFsgtr6iOBlBoRg;B8F7BEmnY}`q{Y;{VE%?F zJ!(`9Ywf6&#(CY)qc_vLgLO|k;g0HZmMdI!>z3;;ahyi|(5>ZHX}e6f;ooByR?cDH zM~}FA)17^q{CiJ+>7{(i_v)R(QYbsmM1xYpKZnRgLhkXXL4hD$B+paO7wD>id0&0K zH?^wdaaPX((~Hv4Xo?HQc?%EmRSe%v^@Fv}hRWaC=EhnlHo&)Q%#Q;?zAK}P{kkPu z=*Y`x5Xx9$1u>dSspjj+VHRy-ZZyeH2P$VI%!KI5&6-xnh|}1+-f8`!`j!0QOKNL{ zpC-`V`sVr1HYbBQeVv@IA);j(^njFFhK7}|XU^<}B6d$kL35Zq_>%4T&yTIT16YIS z4vBaF01g||?L(pf+M?EUw)Qhojf!IKLcS0g=yw_ajqNkvf$#dq<`)(ZRymp*g4@Yf zT6NOvjft%*UdJ_Fv34DNPJTAn{2kfG{&B*Kq5@N$#(l!d?QTi!*q#f0lN|o^fj0qg zX4^XE%rmia6006(njDs6cc^1TD7;5Hc7nBW)ClDd0<3jSq|t@7eW!lu8V|;pO~kM7 zvN{%=zUg?7Vi{OxDUP|w`KQ5@?q$waQ3B%Mu96g@L&1qlDYH;**j`Gme4sMgn=XB_ zMxA0BW1PG3AvkDNfT8a3TMJ?yR!agvmi&ni^E?HpU+ zC!Q%!s9EkA2IBzb3+3TVbDjaBMQ)^&7;JaV%_VX?iPgdg4!r?)+oUrsU5|!KFY6T_ zi1&QFl?iTLHJ2(;%~oUedSBIe`FN_i)~)8p z)8=az0j0)wyo>w(w<)FOmuYdCvDe{EG%I_7d{${#yy-Wq=9S`r43$D`n<9i_M<$E8 z$8hw<$;I*Lo2)^jT%IF9?Ww*nE;{nA(e+<57HU`y?LTk+TZiTpiSzbZdhsC}pV3aB zRE=JX^R5SLdv=e-YMLfz>}iw*GaG zv;Sg1@rp#}yyeb!%yfUkjVy?$(?i_*zzPoSPwQSXJ*dB>v2sh0(z zUom=VebMtlyr2PX9e(KIJn>X%q1j5xYS^br55E5DD^VXc4F`GS+xu$tnrGRU+QMr% z9KG`)>{(AwwZ~NJb82xdBwy3(kW*7>Mu&dK{;F{ivw?@>Lt-gm3Oce)2E)QsM;QsyL;`n_*Py?-a)+)GZs zM1k{apHag1m)a8CPaq7O$xX94LB-`E8;}7T3o5gzXfwV!A+_i~Z`+Q*qfm-dmUKa( zeyvb@W>N@_FFc6vITQlLTrIXQ!gW{cO&)t>o>VaWCW#vta~E7bbz;PF)JzbK&Mxv#A5XV= zPm*NWPxX zO{A(dkJMM+?pGrJ0E{}Z8{srBL?Wxorm^!w@%;8mh}^hQ*!LZ7hA+Brbx#7gy{bL| zZS(qD?3o6e;Sz~*Ra*Z53xU};k6JA(p1}C|>}_qK;^J@Z0G4#C$QBiBxBC8HtXpFw z#-%NKUk(Ks(W|pUFDwI&6`zTyJ1Zgo0GSaK(;U6&yj}nrV;~tT4eGb9OWB314sU)( zGh_KET~P5a{b=`XeibPmeDv!T^|WE9vm^T}{lK3`y)1LH{tkaZpYu8C%Z8GJ`~Crx z2H@|e=sPG$g!Hm5z{4h=lUSVo+5!MoYS zc*{RF&kNY2mT2Ot^Xb(aZ<@nV=K6nHD8)CJYDArUNjnn6rsy#OM~Obc(AHxFmf}w9 zL!A%SN?WfY#=8^Pk1^-@{sBmJv-#6^;0E)ukfFlt(LIF>PC@+q$>S;#p9D^;#{7ab zk$f%p4{7V)tjmy`nR6Wu${ega&TuBoK5lHH0)49E_n= zz-UKXc>*PLvVNw0)ljcYoQ%!&W|J{X{kTKkE>1eLARnE7)9ZV}>KXS%f4{GRQj}^6 zh{migofH0m8W`xBQfrKv|Db17VoOuX_}BI1*4^2@aF_RDPkuVq7e*<-FjxKBFbzf! z-yYW$T~*=i24T)Zq)g3zP9;Mmn7wHB5x6SaI(2YILm%rPNGTU9p|vKRG(aURQz1s|il2 zb3a`2W^$vkj6sXCF%#n>SSbl4z|GHd{o-in8P{|7h=h;kiP+Z}pVKq*bs zCE@o7$WKadm7Rz(FE@*1;&aE9dfT-;@y3|P`vKPuw$#k$tmi2a+U`FvFU~n?Bd->m zupmr}s+Ch;vC4W5wD^d`QdH-dRG`6jW_*!9j0ZmCx!c5nER5Uv#F^E5be{QxeNFxuU z5*)cb*L@#f!mR5_R-{Ui@QlSVvOGQDUv_Ck5$ z`R%=11LToHv58vO^IcpE+NEysp={*C_?opED+>QA}QQ@G^($w0arqPcz z_m$gLd<7B|Y(sMV1Y?omdOe!I>uo6h;7hVfIN9UHDm6DjlNg-2L9V1%V8|~w#xCga zir4rD5_Fxn$RK1SBD!F;&D*J2K4EL4b_Rsa)1Bzywut{6NW8t^cDYp|cli@XM*6Zj zy(TtqN0yiIYpvn{%1f|?M5|fTk-DcvZ%Ny^$Y(gB__6HqqdCl!NMl&9Yie`{?!bGj zOt-a4yys691|Uqu?P6^qD5!$*9N^1aCyun&>w1viT$?vqr&6cDTXPbA2b@2g#`XO< z!be=oUNmYWJy@K|e~m=W?@#LWCNL0}KgF95m1+8>>KFZWdMRt8rXKTrx}^BCAbLTV z{y)GFXZAOR1q^v#tO&K>N!@Or6FZl_1ie1dVRow~C$`?@%ul^JCclq#Ja3V_3r4WA zMlRC#u5wf3hQCX|Kw=^8TlMt2g~BeGR`w(9!s8U*QSpHaqk4FA!Zr`YEv47)sJ<4S zIM^%p_=Xl-`Q$E%_P*7xz)KjNOLlY?RQ}uNa?6J4V2^hSAAtA7<@Rxwy^eoOc4>SW ztx{|(qkK=$h%bM9hU?$dSB%KiR=R*O^DP!Fsv!<8gA}M1 zMD^uu(1o0%u?CizYZ?Yi1@55U%n?&%i=q!xe)Q}LPsm14L`Rci3cpyd6K-h5i0oU` z+xpClfdFVI=RnbcWr6(sQCchKEklZm-qC|`AeB;m@v+x+AV+UP>4L#I2Sa=_XUrn2-qsU+GTK3>hAA8a;;cObtp z-3eIYkvN8h9F2u7gN5!RVT6#(ctPiza~-hDj~R^>)qJOSM13cMx%aJ7wGNBzadjo& zktQKu@P%Y{vj01Q>%Lvnr*1(rtHH@Foz{=3FWxmXIt>X;@R;|b^vce%UYYf?=2g5^ zuimTGtfAF#bVvyBa-VoTY@w)TPyh<@Xr!V_PU*OvXl*Bnqe$6lV0P(f)jkw8ZMFOO zKU7~amlmjEY&vU=B_>iu94xFL{4H_Q2r=8rANj;0zw(%KtY&Us;_>c+kNWl>`|pu! 
z=K_fSmWi{zy_W|j{k`2L57^*ZWJNezx=}5T5lBDpl2i|mU0-wGVJYJC>sD~yB5L7( z-ua?$4fO`wOaM~@QUq3Jg76S~AXJ`rtNNi}lqui8dzmV07~#IlZ=cEiq%bC@*reK2|Av3+3%t+&o%s2-NP zKazkwp&}x?F|uabM&Z)|Ea{hpB=2FZj}dUZHieNn*-n=W>HxXOOv=i;`dNNG><;<~ zVAOR!Uo(K7j=A1V3r{$YhD>~JcRID7RoL9mWYQN80KLRUc_5BVi^E><9ysrVwnxTT z@AYbteA5aBLpl4*oAvrI5gbklch65LaL&Qpwg&yXyDu%Tr%DWxdHk;haK$1^{KlPM zKReG{Q3Z)VU@I?DsoT_7@In_!U?@pt%$HqkJ{=jItg`h9}f#kiZoZYzr59E?$D&BAy!J+D%*h zdJva;m6xquKLz+0WeNHx8&_kc5Gy5UUc*kHM`UdT!L}+_aC6}=X4(*&xX)G9tx|-O zx8$EDvNFg{Z|Xt6>P6Dx`|J`fe%+E-Nbx6J*|54shfLwQd}>U2b$+K)$LbQk}nkE5z(jPS<}F1f+iJx*lq z**{`%9*8LfD+Au~`IquoiAhueA69#OP_5cu8atc_L-gF&HAUv zdZ*|11mdY!vTeRtj&^PpLKgoZD4eV5hiYA-Bql3MZELG8MxB)%Pps6g8-n{0v@(dw zp7rj4oO;G93R|q9tt|>VPW;k7OL*7I;kN11970Jv{sZ(q?iEx9y*>uq5z<#H3OEpN zg)*2cDkX0r76~V~_Ub*B7cpO zFdzKPHZun2RAga#pCkT>xuR7Mxw~g6tN)40hOpK1`c(T@?8cggkUsVz@-gmq?%6E0 znnD;CbI{P6^A1+~MY@VJ(qhDcLgFG14wWmM+WSPFrUIarI~ZhK6zRur}nNeDuhC2#CLMymUkjfG>Yc zoz_qHh<&*t_uL^zI6UZ2RM!Z8rBP@{Z;KMmLPB)h+*87z38JLle356>;9f+|e&6fE zo#XsSyxYfU+fo4>^x(fXT0OmO(SJFQ5y|U?(1)#r=wR3r^qP{O@(|sqk9XeZ{89Yb zyWnc+S?&B$=pO(m-dlbmKDR?v=opmt%>T<`W+4xDT<>)q$5GBICuC<)Yp)3fx#HkO zq4RT8NQ!-Vl`g^NHRkMK^;Kz+EvVk01|CKi;bf7#Z?+wW-fccd69>=mTR-!you&EJ zyW_fUgb`jGVoIGAR(*uJ@;#=LoKXcKOGU_wxLc@S=IXI#7)9e2B>Wup67d8-Y^YXZR2B_sK|9 zfiyQ#Ze(_Jj%LQa!MDlj&b4?yA9QxPb&ViZnq~I~UsuA8wV?Cmiw?c?a5rf-sX+@UJ18{Io&7Bl zzbUhF=6fbz6z?{C)Dmrm{=RFC=88)-d-mjwG$Isw2AU6hBx|g7ojoap?E!Zv4bLjUY zk-a_}MI)zcxn4aPNkTceGIN7E`r~A&m3k_l2lYILN^K&KmRbJ))%_{(sS8r`)D!DM zChCOMm>+I(iQ5yl!TD?VF+NDXM&fWxFHpHS6QO)yL6~A zfY9naoT^I_;pZStGLL;!f?!Qzuv-osp;>c$A=;|I>{vwha&Hjk}4c;+uZDs zFGNwh6|nY%&5j%8*5R$!|8bT^Gn9Y1vDz}Ycv#BwcjvR z@XRi7-D*ZpDVv!d-Kz7n&6LXfJpySDvSdyEn9BBTOQ+9Nr_W$Yi97gi?Zwple-wY; zRItQjyK4Oqe9FC$?l;x{$cgF6Kfq?m%p!sy17QgTQ@wdcwz#f8zPKsXMJ*xGsP+p2c_lDVWY%qI429nE3a`4s0wR;AL*&UMQ7)yRX_Y-4q7VBq_I_nmW4tvB zGmQY>Q4i8A&68~|n#D#%!j7q<gOsgWP*RzO_wrQjWfvkTybrB>cJd0xn);kw^YNb{Wm65)1|gX(DV$j@j)jjms|YFJT6 z>)Da+gP8jT*|>{=NvXRUDU)-{g(M#L>?*X}60r6hX9a*re44dz6P+;X>O zz|V(j8z2_85%{sD~ouEjg>;9}2xf8p;gScoQH&2tF!yT1GD_78Bk z^&)@}t&N12FjH8N82vv${gvsg*ptuau*&>6))iBF@x%Xe3cOxgyec0OuD?bekx`Wf zt3^O=_@zt=sYVLH{zjgei7(HWan9jUhfQxw_nQ<=to*-apbLLd&K+treAagv6dy3A@V0?+|ch_mx z!+jl|tMIOnTgywSFrH5i47tY4ft%8!v0e7CXSKNh_PNCW<8w72qTe>p!F_C-R(|wF zVeDmlkB#CB*1{Q&?WRXt!JN-{=hDw=eJ)Sp-dbY#@`npZKaT3=fpb3c_uB;g7yVy;)+3AjmX`3 zH=+vfmqK8df{IW=Am|t#{)abneZ05^Gm=YFcWY<~YWf?``%QyxP9Y0TUG3;#IS>Dg z#kh%=|CZ9w+K~LP9a{5&Var>w6A0w1|@vSwwvUI|LJ0%WcyG~)|^_a2TuhpT{ z$%(K8=pfkqLF9>cMo;+5CK3Os4clmswU`<%qvezzL40G5gQy=9Z$oEc)!ihA-9iri z*ewJ+F>-4w7Sr2(vB_@&C)8IoJOg+=5UtldX?qi&{W?+TGM&O|NZK|~LbglwP~I}~9fv~AqOr_?kK0$1A3_39_iSNkTPk3^}D zWPVzlCv@8EkOfGe1U&^`i{oxRnEK!&jQsohBdSoL?bU|W#eabO`jcRvm92-qNVV&& z!J%M@7|N5nOl(Fu@kR5M7J44^?x9OLo~6{z9hHR- zRN+j~F3gJIf#|81^(mp+X9XFnJ0#jB1eg5_cnLHu*ZO$Lq0_Ml|^`i#$Wo4IXp)&U*vnMCsZHm7*guVXJ+l?WqxJ{ba5GW;^;XM z{_$n+riH-xEOO2CZ<%||kh`eu5wuZh(JQHD+DY7=&+oZn4t7&4U%whh*@yTkPCv`k zhv^T*taCc!iF^pQoM&mJY;!=>O%Y>m?V@4#L+9>Q?H}&`2cTv1-c)dIwW<{Gy#}gH zm1RxeI*DriITXu%lFYS54*$5cQH0rOf2w-ZQg|EAdYZ6|`Q$I48Z6EwA=`Mov0fWFXRsQbhw z{0jnHmM|jpx{thdYv{-O366>*QAtVk^h5>gD|UBB-ICEAURb276>EYi@c5qe0dKSQ zw@n+Z?lYN(NDuY+l~h)TyZyQlzR?sXRXmC@K7P_McX8uD;ansnOJK19FI9sEVA5a&8`c z1bzQ~BN5dS-@2LOX3Hnin*D=-qwy8FHeLV4;|*EA*=KM+_Fb$4knul(W)bK2!m9vF z3~Vmwh^O}m&<0;rF5R((!fTZoyM5-EQy>tJc8TCgfo8D8BCfN&$wRFW{&70P?rVPd zp&@F^tAYF(lgd}5E;K^ZRbmXpD!(4bG#6Lpp0RW=p?Jz1cIVn|Ev;$?`w|vM;vC62PiP!&vmQJ6=klsRWoN-PPLc)?se)HcfN$qSXCv;f>|> z%cS=c{t`c(K-0>5>Tn&}I-_nqJvRmE^e6L6wc!o_oGJh+_T0+9ucswXGp$aQN%0-veoEcGCtQlE^VZpz 
zMAAs5dUJTH*0o%>UrI=l`<{cnX3rTuSz3%z$8ENtM73c%U%hikPW8=99d4voyNbYi z&|qKXTtYXiN4K3=H*Thi>Z2$3hV&%UOcukn131PfZfR|L;rl#HM{(Yd4`c^w@qFvW z*KV{F!S57Yn%x82y%&>$s&_$Lq*(PnwWw8?2^Jv-Dm8Xe%$ET7rS@(@`(0}J7yH$5sE`l4da+|_$s5D9ys7dh@TC?nm3W%owUkH{ z`P#W;KZQ-Io&h9*4>{{xY2u9`Cv4AJ8m$6|ReUa$`+T+k0H{kztD4U%j%GDyiFfKj zr;1f32Hg<$r7}SuPYwE4*d`u_pWcG2;S7o?Gh^kQ0jFNERskr?0fH7H_<`z>cCMN zeG@=ZLAf1hfW-OMF)KbjhiY9RgS>3ksri^_fC{#94{FkNcihh`~Mr;+w zilPfYvLc^&_TL{mm);hFKMTIwP$l`gmAAt{X>q@SY3&#eqhmy)uMB*08w>er_qh(iAW z%|J3;Q0zrS@x_=PA$wQ7QRz~ypX|(>#~$^8jCtqhL}zYZw(CSAzcA_#N-vYk=)?VJ z5O=oJVMUKL<^67ftm$xk|6}9$oJNnUhr(iCbz~w z9cheBO1E`X@~gZS+pRr=*lbCqbqh|Sz_7Kv>d%ytW77KyFgsr@!1DUxjB__j3dF65 z?OyD2UuqsDw_8%e1|^NDv>I5XjTGW!RqH`g2^gKrmFrrD5O76?3TuU^@R8!iizcQ> z!QRNhWFcQ zpG+5?R&VNQbb3aXP6}RKw&_8Ux3INFX6HR<$f`Uwrsh0pVj)#}^)=5b%D;E1somhi z2?e~(B>C8%gm$UGOcF?rZGG=hh?^4KUaqWiUj3Ses6)Zxbr$PQt_R%SgE;0|OhGHv zsE`f@io}4IZ;PcX$P*ARb?r$Co?>~uD+Fs%5f7b5A$*vh2bCCOEUOpVVk$u&MBCJw zML;eV#+V=^BzGp=sY*yM;{u~iTB|d2618+i;j(3Kx@#~4z5Z1aw78!5wW#qUB+C4) zZGklLvj+28MP69-H_Py#V$Tx7@%f6-o!ulN=e6%l?AM9gmZNDG-L!j(k}(MjBJ9i6 zXu%AU%Q7x`+pRLp%JDpX2U;tjnXIY0H-$e-j~t2)Ru`cbqBWI%@(&dqL|1;G@J7(Fk&ZQo@Ei$#la?g^`K;v4nKf`JvgZF3U!lMgsii zu>SyPBKZ!rnVA{AdRHk@Ju8@gG;&+n?xwih-8WxaQ~|d&!!fWvb;K=keXEhc750ni zSPeu|mG9=v zAL&}IA}ZW$LF-XLnjw_pwKkQA;wAZ8y>QmWiKf%)uB@~MmD`BkB6hWM$fNP8I5Py| zj9%UTbkPtBtM{i}JX}JHbB$^fRHuK0cCJ-w#}+1tiAgl_s*vyEt{K!A&Fw=r;sfz{ z*BQ*FJ%OWW>=^k{*kl`fis7dak+>dI-alw#)bthXs76wEYPHg^iIP$};%Q-GQdR3# zcOO4GSbBPj3?x2vZ9-i4w$w=0Vm?NOoc$|jAGx(3W}P(!d|s9A(mS5*L5qc3ctsaF zgeyuPP?o;?bQS&&5*KD>8Rj{cW{M?4Pu3e=7DZIt*K2 zZ?$6-(c^8D_NLPbBVRvSh->ubSu~c3%6XGn`waNeo9O*B#0<7McdxP_dJ5u34YIX` zBpQgr72R*;LK$>sS#5s3DIwFxh+X%qB$+$hFv|u__wJd81ddb zagQa5v9TZQ?)n{VRDk^d83odQ_Fo{DR-YAjc`+T@hz)Pxc>kZW_CUhYErFyR<9We-*H&TaVDD>V~%fv%X^x;9Ar-T zR6#~c+d9)p8M}7B!$$YHrFG9B*FOz4rMi4q{?&>NyuPT*4qNXIHv|pVow%4S_PrS* z;kq{FqijGLjfbJG6m2Eg*~h{vGFzS|9KEgQS4PI%r99}8{`1Sd2D@0FE!fuW;yZcT zrilk@F8fm3stXHtsAZkU8!$2-D!Q*jdtRT70<6u}{vq6VR!9X zo*DQ>h5FMppn$z^LP*rFF;Xd`?53yp0PSAT zk!l(-_k1^_2D)NLT+f|FpwO0eZPhD>tc}37Ns8X|XVFC5P}_nzMe9)%(A<^`;acI% zVs!Ya>%9+Wvoag-o1v)FfnvR40+KyJNY?uV9qJ@P_}}di*A%Oq{QRhJZXKY7;lBAFIN z3%SdsO^vty->B$op{-xEi&+6y@}&rXAHC;VcTmfAw!^J^q+#Zsb`Y;*mPR|0BqRsrBDJ6Q7NpJd=yPv5x!FnBaMzz4oSMQ6xn{{u@?%zKpbbQx6O{5a#@5(=6=h&=Uh#UN>D>;#>L6uh=-sHY4xLT5p(ZL=bgWyYAPr_!uOCsXw-q4^jm2@JcL%zI)|-P1U(TIe#HTOB(Ek7s&lTy$d1QyDVc*4=Upy-R6$<7>RV?`tdnaub)`34QcmmfQ-x65)$x%} zokSi?@bH%FYQ618-jQ0JBdBdLB;l7mK^B@&g;Xt{YFI?23`u^K?zK`2_6u1_s+rrh z>2k}M+i_DYoT(AAk@FWwdH-7dzCmzKO2vPFAnjoqkJdohFEtU@e%n7@%$_?-YWBc(iH`Rz7vdu^+lt znr4XM+L}Gq=dA=PZ_T~@>ODC?#CVA{bxrHVJ&`P|>VCmt{)PqQd! 
z9FA+@CwoyNRN23Yg4*@$)9ELcSoUcG_`PeJLqEu^2sFATjNbV$t<;0(T)=JGthTuI zt{Vd!k)F4%q3=@aBx^|v41Et2lYc5E>Mq8k&M4afa@O6AaI}MjLG3|Z{RyiqgeR2+S(yirG+UP@>U`$9K z%7#86Z1FwE>0+`l9zfUHx93(a#C-n%Q3nz*uZX&`^))L#o&uA4Zla=1F5`2Rty0Cq zix{@djm1X_tWV@{wdy3adW$H}e+jQ>f;^sCIn%Uv@}_%*w!{yJ)_-Kp@V8pk!-yA* zs_#Qj9!(?uLW{PoauvgFX)+{v(?`R*Mt40aV;JIhSr2Me)EKSs-(vJ6(`k_Tk|}-K zV165IMkN+dyC|qk!wazNC|(%<00Bw_3y*VZm@iDttHf(}V{cmaeM0PhA=Fk5*~zMh$CLT1Y2^ND(rU}yq%0cs zaC%fhfMJ6l#?)OxaOG}Og-3KrMeX4mRG0ue)**p3*&6`0W$9{E=xfH^XqjcW@4acS zpAO`lb~dHcrkP-oTv?gBcBV4RX#!d%bZJTQrIMbU2n~^QN#;-N$e# zYz66HI!lKC0C|qp_G|f2vVbw7kKOvRoN8!SMW(@g7aNo3M^H4$cI`s& z5qfA6$X2&4bJFxy9NOMSu#od9vsJby&ZpuP9D>;zyGa#e#g31R*^R~PhtGE3rqw4g zwf3VUJ(JG7D|7kKfv!(Y`&R|#ko?ZnuZZ8`9VnE(MH;Xtaw|Pdrpwa6RGLu= ztg!bd#p_+C{OJM%iAm7p`Ls)DUrZfQ(GjFdwJ02?_SL& zYvga%xV%>Ja@_dEGx1(E;f|jwk0rT&o6@G5Or<}1gj)1Q%J|7^Z{vtmcXdX=+$K|d7%#dT->>eMXq39V~U z)ri~`+UjaJ4@;j&a~PVQ=Dm-B=uTp7(PllQj$GL@$?H=E8Cvb zSPyp6TOrc4+9+Wu9Fc>>4RYIWQpTCNO}5^n4-5Oou&YX#HT*WHL#hKH812_;=Lo*1 zYSr=Ofs-uNiw(TRKCTlSh#PR7>6IslG^t^KS`nOIF5-e&7cGWWHlGf0HIJ7{7MlYZ z>?>5tGBJw@Pg)8~ys8Pk`+xKrD4loV1AJ&k9`T*+_bW>xJ1|a*e-Nn`*(}fDRjD-6 zD#gNE)!@`)i}(1cqehWUEDqc){2ESCUz z3Uj|smY`iEv!3I_Lmfd?Zik_w8^E3?zlNM~5kPCFdjVQGCDd|4I;iVW&7+%y_W0MR zrk{s_W(Z`*sQ&;;$u5jLtu|_IPf%%6DV{YV?4i2Xi}N)ISuRWTTKD>EJ9N&7j7pfB zW-EjX8y=>mR*2xh^PcsI>T8Gt8+H|vESJ9fbf(Ol-9YPNDVcPlR&AA~qp6T{vyqRL z2qTI!E?s=76`2|29}jPxaH8HqGTWtNsRN)rY2A0e^(1lH1!2m1lS;VSA=q5efc3{C zUe#mFn2S-4KeT>hk)|?gxxvQ3Q6g8DA=K9iR1yIXkmxBRGUFaLX2$ju{>~)8gYOSY zf#y8T=TKVUcVXp8!91!03(-=fv9~zw=~>GzQYxMbx0P`8+_T?e%eDS>9*ywgLt`#I zO*6-;k|Bu9d_&fuNf^2aeckG9O2!K(OZE9wv1)YOG+tqe??}kjx#)Y-&nXS*_i6Oe zwTsAkj>GY+{jwHSJyhO=x(QNZM>mx=npP4C2Qraf(&0w?UZbB<_u#lVElA|z9OZLZ zrP+NePSHT2G|8Eg{x3@6*|KE4vFTS6vPz^F!0tQMmbDY4OzaZY9V=1vuDp+E)TR-j z{{VKhxnA_z-?hd&qIbO=PuS)%;dvT~xT?)7fMS!Ss%c`5Ah&OHe){a=+5*T4pru&AY6t7^%B1>#7SP6Mlbsec;_Efk` zPTh8{J+Y1>l8aXkNb$FNO?HlGrH^hTWbU=7f+iwB`7^M`AyUb&wmD;ZlTD|Xb1zF; zwMCqaR0{vZ04opy00II60s;X81pxs80RR9201+WEK~Z6GfsvsQvB4nG;qdV=U_em+ z+5iXv0RRC%A^kEV+`=JPE}L}68Hn}2?DtO!VzwjfFEujKBC4=q@hu45%MM|II`uFt zyQns-^S?h|r(a7R8JGo*CB(%hDDp?Pd?J;{iCYJvR0%bb2CKe(1*RjxNrM9n-x z&~wkZv7)B+ujXYtD97W3eq_B@MksTcba2h4VUe}rvSxuniVKy%RrjdIbXt3ukv~c? 
z<`?McLY^gIW{hlHC4oiiG{pDph%Q0ZJ~@_TWg7V9h;2%Xz(J8X>VmS z>-28uB>L9S^)e#@`@lLk;NMItdHym-?rKRAzn*R~`wU5tU&?Ic;-|-nohnm-;#$eM zNNP~1I@lPD>}b$)i06UJ(}euW@}a2y>1<_K`MxVy%O6s&OV*p00jsouNZJkB@?yrlkzQHX@r84O z4H+y$N^S8SAf_q{_ZAo`@_U*nb7ZY2va;?DdKnLO5V%HD%AW3H$j5k`o0ys*uwPKL zGp!#JVx9W#I8%V=G;Nd&sxaC9WtW;1%41VcqNw$)2c?9S*WlOqg{DtaBQ_2rR0k6g z=VmdV2HJax6+oOQhPJvil&QSlF0&>Ng_RjFEYPl=~!X9wC`yyOzI z{7U#-Es)&ob+}~*KmeB4H!pb^_!_F@mNqN0(;6UHJu1)Pl@|n2rmr(KNpZEhuQP@Q z7hR(B)aHH4yXyF2;ral9N_&9GvqF~Ye<6KtV|d1xRe$Dm6p~xir1+&RFL7W>6?vVa z?wrbdS*;g$7vYyuGbP^i5;wbW%n;xTBpiYK%mS=vuTibM?89$}{}$k&@< zYwfxNpDBBpojx0tou@_p1T}7m2WJtB%(gMRxm?A|-Yz&0WWu^me2b5krDPJS%hYnC zf`OqtKwl-HXGvF+K-4}ypk`*H$Z7>k#w8qu*c}|y)y#u0ADQSeQ04KwkcEKDAHj2O zGnapOfVR=2xo{>%ayX5USLu#rr@WkF=_n}un6V*dcNF}=J4!w_1b(k;3}=DsC)p{GECqRR)oN@`IxV~Lt+hOV)GW$CoZ zbage+1L&75Le8HMl;ZnV=N~b%jY&srzLQ!u*Za!rxSWeWWUCZ->%YtbA|Xd8+Itpr z)c*i%Uoy`)qfJDKz-_->%PYt$adjC!iJap@%?JiOIm#t4Q)nhl5Lw(Syl~XD(Gm7< z`27sHJfqO8*SW=oA;X{x+jMlU+eZl=0C1 zBcm}B+4C_X)1be5{v~d<>{dKIL?#*nudA4t*HcGaLZtx&lfh^xhL;@jlYUYCC zomzN%mXx$^^&9h*h-%{SS(aJ1BUcF4PSm0MgO+@>OC6LGf<8i3oJ;zSgb^X=Dta(C zoB4xkk7`L*R_0CbP#i4`Tj~igKN6S*hZmPCl*7Toc_Q>euHoiAYEEj)b1`RoNcRu{l*H?k3~WveY)oZQ}gyjei)_Lv{$GgZbaa9q@=Gn77K zwK@PDrcAQQ&+bahT$}qQ3SqMqip&Kc67MqPW||rS*W4U+0*Z3Y%(gSlN;>lpl{<`8 z`joRNO!ObTw|pM&KIKxz1*4kv5Xf8&4V^NlYC&R)k)F^!^BOViK zv+iIRm_eD@*`VbbhkdVdntQaPB%KUc` zIl}nC{{V@(Ql);iDpU{XCB1+<%zCKAFWm%iRZMkIwQ;$&?sr>HuN4bXpL8%)=QCKd z?X_+tqfjci1O|;eC-y)>F`txOW;l!QWlDqwD{dFm6y?a(W7`zHu>ylYRGXN!*M;AyLsCm& zlCd*^nhVf@Woh|4NIZF2&xkBTWNGyU4*Z5?h}e{*M7>|(eMG8Z=;^Mb&iG0vs*MSt z?+7*T&1#sARGl}tFf8_4E)~2bhvHO(a-dwzsw|Pcz#Ps^NeCgnJ1QjyVk$;Rw6Y+d zNNYO_1CAz?)hrXl$QB&lZm zA__Zp;pm-uASq+vs+y3sGV)%&rr4WcH@MM}M(g&nWIhzH+;9y>WP-5E(9;$WeC3U- zH54EN=&q$oPb;BuUQk|QGl=2$p)5vG85AdmY??2aCzUe+bt_RVxb8A^fzj93Gfi2# zwOry+?nXuY#|TagRBSdICHa9_7oEPCfr*N^YZ>lcL4sT1{{W3Fy>lDIxXK8$p4~+d zE-nbK&}y)U#maDQ7YTZYBB$lK*eon6T){;leLzgE$)Z1@9hV=w8WGAm-Dh(1eJjgS zfTr4Oo0a$Q>h-$rKTYKw=Gem99}YN%Fozdp&!|wuNxwx(1Bh9M_bH5gGtAi=Mldk^ z{{UzNer?ljrImRHEWiu5V+Sk1US7nl4or&LdSw6y7XJXW%QhRn&wW&)xXQuiQ);f( zy+?Dx*Kpq{EcMtN!xzePu-`KTh4aRUy8tB0LKnz6jd79rYQGYJ5ROVe>Lh_IDF_t~ zrXNT_a}gs#W_2bEVgmrV?ZJPSWh`!8H&fZd0>fvOa|W>s}?bX4DZZB`;a zI&Z83lDaV(2D~tgiA?8@m;o*(I{E4#7Z43YQZ_p-!yC+o{Ds7R1kwDXDmqNvF&;AC z^JevNYoH~~V&r13mV(WZa^%p{(hhm=VRo#x^5Jb z_592N!?ss7a0Uj>rTdIDPXhhO5L6Ic350itoXd4WbkwU|4yHSs#D%Fdyva#n7S`0gO@N+Uy!5pf*$730qWXk}z zFo%z`y};WZJ>iv#ECnF>kL_?!o^c1XoT^p}^Xq$ycJcfPboEoH13Hf~{{ZQI{6J8O zUm)e&w=<(Zx6D{8p=ZRkai^2AUbqx)?TuYPKS9L5Tyymh0?Iyz5s83R6o90pjc`qlZO~UxI=3cg+Y_16Qr*vvP^u^a);BgDjaJPQ(7`~(zq(eVb z5_rYTmDm=+$c}n!?TX(1pxWWRR_ijreg6Q#(V-3V(Jva-ro`fxuQvdizegFD+6HPf zo%J5a0AD!f=pZ)6FR!#Z5@x0^iA46*&X(ROe2?jnaT}I4*uZoc~}FG zjrl+&!;^+642vpK^T}KBE%jEWN5dOE&6`^?hUnqi_RsKPB-^~6GVSTejAj7`n;Nn1N7IojTCQN+^Zcuj`k zN#dMMM}VMfA5b=BkQ}rrK)-QBl-KVE1jNkDLE3cy)c0JY+|MJ@Q4I8&9d{}=xn$-1 z#tNMPnOYzg1X)29=qYI|BvPpWuBt&{v=D1Vg;xN*%?-R17>6JO$ZUIM5)6ALx85U% zCX?XmJ`FD_O7zB(pY4}k*_Hv)TGok1ErtbLC6;=fN;8AiT%!v-;o~xZkXXM{G>z5` z81WNY=I@Iw*L)-zf4mi}hy19^PRkdz@ z5V*p8ahZwy=Wb%o0pm~ruX2hdl^8lP+@8T($z?j4r6rL{M?G<$adoUnUC~sJ17I6(+ ze*v_v7>}?D*Com!n@U-(C6mt+_X`Nd;lso`m}$x4CeN8!o4W<3Tkf)6u@ZM(3?;mX zSYjwB72d7c#A^G@t{Vfwa)@yZ;b7J|+_y$9UC1dY5V*Ov*9ZpK4XHsw7?W-C%BaHj zhBDoYLM$b`h)NjUBp2az0Gg3PmagThBH3p54yv3QJ5B}R)H=AJm&)SImACZF58Ud0 zW4#zNzEw9JFVmjmaE-$~PmDy7J&GHtNZsz6VjCIYR4A0rC&cyA3*jqzmqNx+#E)~& zFsA~Y+M=teiLpbNYHHD+vS-`?x%QPLFa!5M7lpQ*tKu}`f}>K#We36rUN#ZI04cB*{E^q_O65_R_)GO_oe1UI4$}$` z%^0P&^Y3>S!f#YU*q`kgLoI}tAg>NFFl)eG4(75E2j>JWN}4G6v*?5$!U;OoH>(@s 
zW}P-3M|TFcLm_c|LEY|E-}0|Ng5d3{zd3&tSXGRLPmjkyeKJZJm)`GNb;69Fy)n|n&h3Yr8(90DX+?wDrUfH{{R%wRBS4~ASnwN5ZC~# z_>bGRRaaXwD|Z3jlXkYl}H!axTrTs|Hp1oowo-SbnOOG*bWajrG( z#tvLddTS}N*ZGW6w{jB)u4zQXgxhE^-lW>|3-PtFFcUaG1Tl$uMNqCEF^krrFgGYQ zi#mMcJVP`%PzJLqbGn#zmnMOm%?020emfkNdyN*IV~!r1eSbEQqLV;)UFN_x*4o|XJ%!pv2} z*O_JanAz8fji@c+_%|()p=Qq5d0h?`HMo?qXytEtnp-S%r}LOP;zo^E>OTw0HBKn# zg;l(&{!)jOCpnp9Wtf-o7B@`L+FS(p@?syj!*S^*38OfsB=9!^`tB5VP-BbzqeCKr zny+yhP=_y4izmirT(ZIa!&{8e*cGn#^9lTU^Ld2V9nE44Xuw~S8n9Yg?9?b%{Y93( z!?nBhAA754b1rLp=f%w*e9LoF3P%#ByDBx>il|Y92zWaLQ%>bS66iKiR^#Oru6UO+ zD=Qnr=MZU%x?n?aGV5lWsEr;twNJ!V%GADy?(Q3<6Sv#?G-er=xkFe^f*G|={s}Ke z5N%qylPpoT(bQfl7k)_1ww^06DBl`De&W9F-bcfU@eqQ`@-@ADs%<+lRht7T-Fh9vj{JX!m2wM&jNaB{1M3m{Pd>C>4v682g%Mz&wD7`0Ab|QM+9lk2Oo5 zeasJa*L*)3M&bTVd_suUR2BS1ZlcEXh8a2_B6}(vLHrSHP*^eviST1m=C4jYpb$}G zJ;cRBYV0w3mq&c|^D`pDL(RZagLmgE6AvV>l)NSv8)3M;$FnLrgC59~hJT1T*+QZ! z?9k+uk`1qjt=MMT3I0PF>d&8u;81Or)g;R~uG}-=DzOW$ zV;C34=qlq-%rPT|^NFHd0OH{)UX|n}T3LoYIhWb^?pUmn z&x-zKySO#t`9oth5$tpt#nN7KOb6NNhwQP3+%(KP~ zmkEr)Ft2!zB!}7HAPsThuSB)dj5Vomq*GosI)t93-)VVh9?f5Hu4MqTV4F)E)t?YI zQZc3~#R;(PSu3X39NSS;M6dZ8oO&Yh34TRhJRT#xMd1Z3DBEyf4vT%gMvtFszS0OP zHClO0q@tMbzf+jr|h=3im>9HmE-XJRW;t&d#$9zNfMwx({{Y-LYepC2Tt#y2Sz`6T zD_Vt2j;nKMQG*jSV}hu;m}281)Okj>uiR}dZpPyBNVT~m<8fnOcq;QOCgl~F1&S`~QldmO`=f&~zT1}KNx&wRs+WRFe{@fXmZ$SE71Y*-K;6pl zV!&BukCQ$jeeBX*=6JSQNpC9G&Z88Zcz>DZ;E!xVZ$YD%u^@X2ge{97BKHS^40M?! z46-WmoJCmFZ_2k9G~HOHpa?*&arpV0gsdd~Vy`wJk&-}#2DoMmhHD)VT_egXzWJG1 zdz7hH5==@hqcL}#F4P+#wti4}n)Z;@r5h!yvTG$d#J0SfsZf+L+Alm1Wl(kSK*tY( zD&;ivyRvLV2I6@)r$)jiT46ZC;t6MkPYNT8ZIz z$vYNSqx0JhL81?Tf?}n0+2)AJ;5wfVvBe9+G2AB+$fowVhzz?$SDJ{f&hfY%2xDEl zm78}I4~m!q#iUTUw zv&5o^`r4~Io8hUyRul^d(**)37v^xo#N9>AS01@q{{VQL7egUG+%($A7=+Vd3lQI8bY30U32U%PuFo2;fWGas3Im{?{s&wJN`f(S^Il zGR#1#d2_jy8E-guA?%0}8+Nh<=y2_d`%my?Q3iHb{Yz`YIdv!s>%Dki9_4iz&~yY! zh8WxCcpw|1G|N4qU*wM!fa-Nl4&{1RufNj~h*uqK_Q4L;=~1L^=j)ui|U5*%b%_z8+X~VTv5mG9=y97#VPU#ygn?&u|%UIAa|o zC7usa?|-D_lNVvo7he|uk+Xx#H7lr_w6ag(&Dcs-V?TNgu?*HBJBBHB(RDA!LKFa! zKyAMcWmN^8E8IX12?xrj*!@aGVA%8J_X@~TXMKD^O2PF*#I5dX?p;2nmjHv$##M6% zK!I&ROLk#}bDhDkI{90}DY@<#)0or{+pojYQp1ehsyZsbC-D`b*v~-@5$9!n#ijQd zGjiR+m8iF(`ZIAD#I60H6QffP)s^NhVSV^In%kA;LHGI3?=C6W?(+4hU>;B(Fv$y@ zr>cR2N-mxt%nxs;P$+COH0E(IyU4PB!^{bVlM*9VoZ2BX3+}w1xMs9!USoyTTq3nl z4?CsFe#m@*bZFy;P#KsEHf+=+>R~?n4{8*kZ}=u;y)x1N0I=0T&@aTdQe4GOKi=g= z;qaYzDJz;A2+(CuXfNVy)m^%SY-9$wBBTuwV8$euWkr>gVc)RgQ$y-3xq~G|VdgkG z`A&Cj1|Hhfz;55{TzEOB$U~DYt;oL@0Lm|XwtdtAr42cImr`0Kt6#-~xk_Evs7WL? 
zm-w3Nh>9?WiV$pBuMo$0sn7hB@mF7DBLqO=*u5`9&fkc>;gOY<`y_7u(4Jfbs&d3M zUx`r*YSy6xI_POc4e1Q8JOs(s_sV*hA_o!S6_2x5FNDt<)A1bu_DXE`%q;c$B|E6_ z!PNwnrJsi2J0P~MtmY*b;OTE~vQriU%h<|yVP#pIxQlB2{LV>s*IuSFN{uw)WHs_- zTB@9^H~JMULf-jXpMOM}35tsY9548nGGd39HL9tGBQ^s0=46|Pv`RSZQ!gDOmSaB= z+$^hS)vK992~EiE8hxC3NJbCpe^w>B{rH1;jzOsN!i$s_04av~e^N{syN4ptjv@YH}04; z(m{vK@-Ye(kZ76e0#l_0QJ*@2F%Vpk!TKY_qSv^~9OLIK_X&`XUAik$(hTSy_5lnP zoMlV%gq&FlNT(8~l;I~Q+^(~NUOu6}>zrf3Xvn&;TeMXg{_FJf-!ur8Dm5IC;1?D%UxC_{VBU2Pcr`3QcLP^VuY8>tT z{K~7fXOJl?VBea2LXx#wu^Jb1?&fyqQDiUG)v(LVCnt!vWz9gKlgR!eR95W>_=q+* zm#LqM1hza0f-o}4m>+9l_XsZ;Fa9W*$~T(*&@60AtnT7d!w12Rp~TRf7X~&bBE3eo zM4IcFagMJWw1ntt?ArvhGthAgiu{{i<0;Urj){y6O^=6E>XD3yNn)E z%a@y)xopae9pMq0sH^PzhH3?Z=ZQdY(_wg=ano3MjI&g5Ww$NK(YQQDB{Ms#G7Nph z8E)D-A3hoj>WNUpvw?;X)@fb)yjY=8eny~(3Y=X_B z!ye_{B3yBx%hki&=TH+Wh>B3o{Ktw#ZZs7cK=_WF#zT?B9Xh}l5pwd{E8&4YgKM_} z-?^`tTbVG#T%U~LcL5I{VA3m+@$VhAr=@lozgql_yp zO6JuN=r+oQX>RIhd|qRFDwRH+%`Mm^&OUtg0}ii3wNN+xB8SuGh%E~8(f)|O_h{rD z@u*X$7AcLQt-NOW9>`f%U={11iE6@I?tRSurx{dkaWXNUY{xLrqt*^jGV2r&b6WUH zW6G&Y?~kah4!|k(Fwc$1c}AQrf+qkWw~GhBzjVC{LxWkWFsH3*jX4g%_i9?35z2a< zy?{}iyFl^3GUrEwKV3|zNp2w(V-QpYbvME=mljP-IX_CDl)R$(LK)EMkbV2}3WBsV zi;mWyAQ7#Yypn6G)2=6F;#!omWYAzdU1B?@!r zFiA?NfU-Fke8j47L>orYP>-$T^XfG@3g?SDB2y{`>*7`+tcD)gc9y!wFqT4rou#L{ z;LK@23eK4DU>~dMtkQtbGbO!DsBTE=x zMetS1;!;1|nV89o!HK=xTgOW)!dl}As=teXy{!DGj6p)FZ7Vb2&G27po=A}gn97i) zuLhyfAOXDm_lZC)N zoy)oU!;y?YzD?eWxWIcE0rfCVw(6j%wIb~Op`Z=SqTMm4`sp}g(U;&bh6Jt&K7J669?n4V;C|(3qfUa16K!E}rD3<2S`)w3iDt<> zBe`HpjemGhGYZD$eHmk3iA!7x^oA1j7P@CNu09c!DQI@|KqUonz$#<}UedJh5JH-a zVp?d?bF`0xc_lAmQ}UOX+Q-H5MatDt$jqfV)t2l2!Ezdd<*wp&w=VTBvz@HPm?%)< znvY&ex=83waflLauCmECw(pDTA9ROf%x+b7HvS-jWrDl`5Xe~DGx#u>0~U;v!-$fi z){3LKUb7}oG*P>T7y&t{`4JpfQ5Yj8!YGvFB*>&UXuyvwSk}HRSzChskFOm2nn!PDy#%Lj`e}szTcJiu1Uc z@loz9(AC;pj*qrlBc&CX8%4&0IF8s0aHWJte4;>VjiiUULj$q-h0+DFN$yXr@>;)V`$vmX!wg ztA!7wHo^Xpi`&QyDq>!>pvO~@L|;j`Lg1njrEXzp#_3`o4Y8#N8|Z3SS2pGdC9>ES zUo$pC5q~BE+_YU&_>W!&wY!U>16xZH-yGrqKLk`N^k0cu2qrs*a0+uhOY|Y=^UO|m zXMtnP=ccJ!>>E!{zM|aFH!hI9bJ!v>9PbTU9l<3%Qw|dvgKVvuA&~HR^=|4c8n7zH zhEq!R;g5ku^3QVc*vo+kR{#sg9^&oNHuQ%#j0QhYqQ+7A)}vue)Mn60>z|2emBI@+ z+}9^I&v~0DAlBuf`w^CEH#Dr6Jlh&no>KX{H0RV=oI4wW)~g&7s6U-pV-d0SBY8QC zrnQUAU{SaeKPhL+XlS^-Ig~Lquee{t2W)X%!6_DdC6!^TzF~A^&y*1KiLy4n^QK^G zAx7e5u@~d(gqoOd*^6+yc$B+b{{U$J0Opn@2B5Z7uYaLDGCwdw#u^8ibmY9=rP^0) zuAsn2jtNL5DSJQ{r1z;Qq3Z}Isj;5fVbavfqIbJ%hV?Fbc)ZGIVc>BzCyF2U668Tr zkL4WK@oDfkF+U6H%+Crdwh%xLgK5+OZ)LQ6OY${#)F>+Tx6(T~pX<4Rd68+`4nj_T z2z4~Hq8g|eJ_xpfh=UR4s*7;x>TG{7QYyPV)ht0O@-f7|2*H^R#LbVHFL4Q&C-&o& z!8E)=u;$}+=|O&|Z;6*3%3h6uU-oBf0?UMrfsV@>O}TLjlIR&?t(e&JDO<_hlRWse8eCt1iF{g3?acI zBq`i)j?~M96{y;qQ#J*+oJ+|=VQ1o9;8D$cg9>x5d4w#WtixEVIhC97I{7pLH3RB~ z(7YBIcB$o2(#EAOpbBIKZM5_wh|{U-5H-%HnR>K&j*N9dDF$s3a!q4ae(|ZLt95fH z>f3x#G=}kFz4RAM`qq8VOfF0WLrzjI~WFPsLGBaFFq#>W6U6%7w!B?GBk^i zL(B+@Cy)o_Wr7XU{gR=Njy|DoxVPLrtO|wtABcIf0f?lz)?+u*O9Lw8(c~2bA=4k2 z4!T8_a%^KMAAr&&3d zTNDp>4FGl%#tF&khxSmYEdhJa>4At^zi3?iunTo@VoEO>=H@q*pnOM;E#Z`+y7%S=?0w&_1SO9RO_fa?LC7 zl=C=4%q2vOE=b0@^ zmb-nOPLznw$;3)&_!vY**{AP$LshJRR8s1PhH=`fIvp@_fJX8^6GPk^9wByjfnAQ6 z6s^rEJybRJ8Lr=Pa}-W;)?>zg2X~hcQb){;kBOK#1^!{tg+533i3SrSU7oaZF(@Ng zmS7>IN7*Q9*>I({b5&qq&$eLUf9;)^9Fym#+KV&NooM4^M%cyt084) zWOV38MxlQkwSeW)rV>^N`!+6)u2!|W<$!j!$63o8n&!}O##B;Qm@LqyLl77OtBnx~ zi@d&UzwH-6$1j-MO&nnH3UmAQxkd(RJ!jlZFzPo?mMhBFdMcv)`j{8xyY6(F=pJ>!eNfQz7V&}ex)d}R3y7F|y%c{$_U*Q>^htWj6Q1Z**d4;A3-pKspCQnH5 z9!0%@U+;3}QE5mBg!rfl-*+ywtuOsz4@z-fx{o!su9Ec#HC5H2hf@UmY%jwxIxRZP zQ0-THt_svdwX)^RB_UMVlp1{s*s-oShKos;EtBe?rgN|xVdGO0H3SKhcX1YSwl^d` 
zHPNn*@Qx%!qjb6@fcDF({Kfao;9p+xrEH{j+6-S|r8qmfrZTj~W~Gm?0@MZWwNPMeo_V8ZdtxIJ&m0CjS)-M?@B z5SOGTMR643zgIXZf$<3!iqU_VwYLqQi+=40YPI+wT?dO%RUHHTJZb^zg#ib5`40YK(&65u`UMB2z{(OZE@`orMy7cwi;=_QoTxy*L0lrjvul* zrZhnEL5QtEL)PXXATCF^!LOT#u9HwECy;oQCy~R;dWpeW{i0jlU3+`7YD26CiX$Og zR?0L1oPtnFWf@zDydPA$0`n1kSDfrD?(9$d%(2QxZ<5p8YrDbyly#3+$AWB zwheBt;xB18onORPILGXTR5IHinN{jp{hDE(XyrX-RI-Wtel9h=jiYef8N&HY{#vp8 z!ifbrd;?L936cXfH%y~Z?*sXoW3zRB;|BTrXPeXpQOO4%Nl+_VTj~;Ttb;!0!UJj# za^u|!!7W)ppTR8!E^CLl<3|<5Z?XiLTMr`K!5C46OnRF*C~BA2;$U$LFndxtU1AEMi*Vot%@-?Q z#w8|8T`)>RN{sp9T{atE<>oRQSd4Q37lAWuTFNrFST*;mi=d>>W6CDV6aw55sX-;V ze8mlIw{u~26(NgcYlCqaX=<7u#38KAF8X|4C7IGlTkht0`7b6V`D`f9%wna5H^wUE zQy%z59R0$9{1HKjt?%T`>T4o68=&-FxsQzo#W8ToSj|zvR%;|IUabQj8iO*0_iDoVG#u-r^YN6!}$~Q&Ka5!dWStwLqSUQ$2+}VH3z1aNE z&gCjpE2(nXd0~Vp<+F05RF>EA8n&hi-w6hauSy^Kag}T`!?N$If2ki3o8}!MHgTL% zy_R$O4~;YqMxVsOPA;z(<^g2Q*^WA92wocS9wN_mszEKn&$w<6-W~Z{xriBiCguzr z1sL1`VV&ODTI@VP3Qe`L_fr6Q4XC`s4T;=-_IQrCYWXk?oG* zv7ajVmS#kU5S7ihvoi6leniSuvRuokDC6R6n99o$-rO_$JjNN>9%y}CUBx?m#P7}w zs_t6K@X-yvBB7ePlt}^Ga;OSX*rM*g5r|heeh*|jPC7o)xG=S7_~r;JWKeN!A=TG^ z*)jyo3T5CM58fPUSTON1p`)X;3h=}|5XGya*E~bha?BOv<~0piza}6B9EN@gX5DZ* zxEnF8&$L+23g9J&52gSv&X!_=ygvLxl=?WhdrB4BRsc2*E?w~{>G^vSLWRNUCD}HB6q zz9YiHMfpN|dt&!Z~g9-*$GGdpL200Tk2eS|>;O&jW3JkxP-j9sabgp)3+Aup4W z>SKrrcrQ(J+{ywI$(AkzKuwntCGi6mzN1!0aL;d3v9>fS{j(T%ELH%kr%#v`rUT2r za^2WlYU}eE1T|$nwz(_1>RNv>Hh7jj=sYCu2{8DU6uuy8CJAS}Lu2;ZR#!Wj?oI8B zDD|jw2AN$C?}=LLES2E@05BEp8~{{9rOGa9I!q0l@PIPxUU`dkRzAio=oT)tknJ0hdcWFss8MpHC=4{Td1fz^ ztCoLqfg-d4(|9u!&qHtj03*-3m{FM76?&gYVt^m)MsSYiz#lAGA<)}Tt0P#6r;=As zkGY@>(8co@P_l%umX%VsDU7OmHT`w{W;Zp|cpTcP6-!;!PH2u3+b!WcFr~_{Yi$%@ zbOZJyCq!!Y&hUAgH8@{iF#ClSdwb?`R8+5XHEA~3iiJlTmIynL5deKO$vB8_MfEJS z1v=xEt&}4&P4c4rN8j)V12Tau!~Xyw)ES0eVf3a%EFmQGS_mQh-;->0_T9z=T6ugy zCP{Zugn7+;A;}I>Om&ME@AM8`OtRN8Gt68Z&D}^;r80#H^8psdc(EH&I}Auyq#Yh5 ztk76oy%D;WirB(E%$a{%^DQq8napYFS9q^Es#Le- zIQg98ObkZbZSyTyJ@lKQue~KEUs0;e;~c})LFL@CDy}(ORx4GDYd7_C)Y7ICD)l$@V?ppvB?a39c za)Zyr#ER^x>Kt@RHy-7v;w`UmYtv&-F~H(pZ?+l8<=f&Go>pB<>hVviC2J^o*;pOu zOfEZ!F|dOi!ltEU_}s+ei?%!x?co3|g`~$!jw>m=U&P;7I`I>rV!2C#l`dC}B)otm z6nYZCfXFW~j0R451|5;J1?WB^j9RrJf8JC+7$+}Oq;=84*RJK~&4JyCX$9V)5S7t( zM&(L}!!HuD$_5KGX{Ck5#t5MM+{1>Umso)rqXU@M6$`~S{iXae6vKi2{$>`3Ecg<{ z_YBMu<`Z3d>NU3LMmv}bh5pkd@{W0Uc#L4MifsDJ2XjWOhWZAZIPE`sF85AiH@R+j zcnDl+fS{Il_<-|bA43()Jb)4K)2`r#fN65vbpY^A07FGKO9mpEQ1a-5`rsIA`Icd0 z7fQ#uMdxJ-_-Y;R22f}HWjRE8FAS$?TVv%caEXl~3d}sL&9JcMaah{yYLiam>-md7 ztgaYy69)I}PK9{*F9<$TWq`e?ySG^BF*c$}G(cs~)X3T}yU z1RMi9kCrtp@;l>txSf?sF5U>zpsA%C`b?MRrU|v;6L$tr8fCjS(!1FZT@-}6Y7uCHJ)Nh<{C!@3)pce4;)*u`E->u%rzMj!c z>{5*}gyfb?fUENqSz1ubE8>-)#AKYcQKOMS)D&=FO~LlGV;eXueJrtJ-r|U0vSDM1 zbMo1lL?M3(V3xyoZdh4Z?m8NlsW`Q-Q=BC?+M!+u<~=p$UzAz6CaWos>fvZ{v@a~E z;JzgfQ(=El3kBHumY!$u;l^I2uoIAVmIW@k@hvL<01{ZWG2E>og;;X-MK^#-%o>4o zyEhe@1$U&tX)-vHJvxP677|<8&J@K-%WeW1MzO2(-Uz z3q7_@527i|Zo8UzZtWH@)Nq{KHlFp#0|sWQHR3e9RD{nL3n9Xk5(Q{C=3jG8Jf?`C zra;ZQn2G|>Z%s_4^eNAoz#I$qipeJ3%!;p=qV*P}MDk|lsmr`?h(2=+cF=vyI%#R~ zdYRn_7&!gKfdUB9cszQ5T*jpewF8;K0~x{1d>~qdYEhOjqa*H7oR%B2cK*;-#Wj_L zkJvbi4$uq90o&J8B2V}yBc5W8Q%*#Lyu2CAq=K!Oi7|Mtv78=az`PoYxqXFEZ*ap< zS4yQZgOxG?zdS`2`=$5d6Yw-5uQ2=}CZ0%_ONXNUtILY!46hK$%=;TRadT)}kzLJ9 zMQDSRFAYto`$y)mrM`0+1dPy~VCq_4Qh0lpY)vsdMmk`lBYXETJS=QA;%Y;f%fUQE z2(zdf19q-M9i+nMuGD;x(v$S z^b-MqEMm>gY}AZ{sD7ykQ{e>JCF`c^xumE{fr)6H3$@mK z5iuNk1Y$W9UAg{;+O8ZxB?hhhM6s9(sdOc%4r*3b`ZF(fsM)BQiXz&w+^j;t(OsvB zPQLS==3i-_^jth)2y(}@1(ow06DQ<(^%?D+#alj5@imSyDSqRPg~whQZo+{4y-pLr z#Lh{-`BxW&;t+%zQ>c@JfGqa`2oBmqHzwW#XgIs0xnCmxYV0?3pn~OSE;7&8N&g4?71v^(+t% 
zBPz$&Nm{pR2m!icm@v7w6#~vB?ZkE*@mW4*G}yaEHF1kYm_($w4QMjBlUKNSNbc5igP7EkZ)2seaL^1YEvTZ% zr2*4~@9r-n%`CnLi>2ZPCX{I^94~u4I)4l!H~>psW82j#LHt35VUjbYLjM3seIl&Y z-s0e)X9Cq+kUJMv^5BQ227KZwfZ1s{mSH`q@<880?L0@kHXF0;5j#b_^z{mpB?NCN zr%+0_OcpW>mmZS~be*o|^4u(qOuFP8cf``~Ft2XN9yQa^;y78a71aBJZBx!GP^u6` zVd7mqYoCMMK=4CKI>Z)&RtxSM!5cYD)%$Iq-i>?j!YNOquIoAdqZw~>cq$9!bY%GT zEMJ9aJK|H|dzYppRt%K8$p*thEo)WoYkD@^;Qs)5g3ng6pe-$t+Vbu3!y63vh*^^5 z2bdwA8Qvw8i3y2xqEq45m_WSc_{BIJVEV+wr(hkk@fG)_HhT39S`)gvf)PaMtoH@Z zmdz^j{gL#BMHexO2Hf8Wb~2ib56#?n=#<=QqD8u6&!6@X55q>`#6=-s)zd^ z3$h1x{R8y45X^wQPGO=HeI`1%EIKEE6=6{wx>5@zDeDFpf_`z6+pwfi$E%mlW=M6` zAKnmlvc%SHq9lAxzlmH~+h4>JwaK~W9sc)^srCW_*YgMZi!+ZYWTm(Z4s(l@RkfLF zp7OJqZOeD%GG!HIXPJK$xh%r78jG^BoLWzoVgt?9r)u{MnSdy4=icW+wvF!Qz`Isn zJeYw2)SrUj{*_-2XA?=2o`{@exAldzzNomvl8lV4)K! zdXJ#X)GWT(!*bhvp1&%c-JcFiUs^#+5)yw*rwB`t7 zIhgA$9tZ}2cV%ea?o`5E;;C*>I(WgP)o4G2WoX_; zPpM=;8g?8&ps{cyp$nv}wH5BkhvN_npybSL3vBYaZq;ya_P9ja3TyKhkbVc`m0D}K zF@jEypK&C>C^hz!V{qVoO6a&`0+mLM{Ztn_{ydFJHzAGJ;%UeO>x@misut%pDlId2 zds~Z=)L@=1h>WhG^eoz=t6vX@;cWg5VxN1W^{ht1{{Y||J+0A)uU;T9V2ie13yc@O zyNp0;MjNY2jiRk=xsdA&Zfim+P!sI0Bwj>edsh{93&h7{>B0GeQoCd)w}KT7Jl`Zs zP|vFE@`3~=?~yHuF`+6m^iEJl1Q%flRi^a~w{Sr_9Fo!wqv+ZK?QfyK63+7lPKen( ztOwMjWjh#mbI#*Yp5yK4{FA%feWF9i?N>GTo`UXcK^;0SRp@N)M+{uvdZX@eTNr$2 z6Vv`0c)Z5}Opn1Ds2kT2xRb?>? zLa}UuEY7Ih8V3An(CO>M2-ZAgb~8zGEAd%dnSjho_KyuIINqCzY?X$Htcxy3wsRGR z6GQia730WP^bpYu;h!m}iVB~2#2BjL&(&0?LZx1T$GL5@TUuUdCsxm>XoH{vwWzK% z9$p&mJj0KJu9(Mxz6&X>=1>ywFgI0BXZY?eoi%Pwq5lBGpEWyFEa5R7D|h{m=%(Wr ze}&9J#%JPJ3`+A`r8%6F8RlaKjR8%}rkq?Bnu=BN9P{KkfC+e@n4cWy49E$+LRC%( zxO%9<`_NU+YAeS;XzDyiMl4P%0r`$8%JN(ihDU7|vBWqpk)iCB*KGy=6J{vUR?7p#8g(;>RD6&09Ym-L~ej&!L$d_;N80C4u`i;wPbRiq|#v|hGPf;YwaPKjC-Q}2Q z0Izn%WpfW?dCcLTF9cR>4Eje^j>Ft6$_wyDz7!|5L~_Pk)ZV7;wUVkQ3#yz9Oet-7 z4BfTyF5f7>6-MH}9kPUVz?BfH?*9PFIzYf$^>MkV0_UW-!tb11KAZ|OvNo4%#Ny@Y zjUpdR+=6M2Cjj|A&BbqxY9ZX$9*ZF4lqytbmT?6eT2+!+8B-n%z;Q(_IAy3!iL|p< zGIekxCg%;!ob*g&I%Xx#R|9tyrQ@mx_nwhb3?eOnbrxzQv2V8#w*|V9K>q+~LFP5M z%?%#rRjGh+x`XVZdokZJNP#+T?F0hF>rq^hS3f?c1j$N;>6us|q#~rh!ks~;sTwL( z3ZfwtRZOeXDcEisHva&$tec}FnRrxQ4Pu=z)v@LQ-W36@`+;{)BNv*BVrbOnr4I`< zA4CXOofV?b-Xd-T1V$KgM=5P~^)94|&-PB!kud&dwHF{ z1z*?o6M&U^F(@~z{!-Og8`>&ETK9ptsazaCV+4xcWLok{R5XnG9m}!^!99Nxy!%`| zLtZmi5T6WZF~2Z9mJ;-wAjH;u$NIL-RozN|#SYVhj4MfWBKP+U0vFW%LidYhEMQnn z%VPVN&>W8uXwtU1l|w0WRT$8XEoGn@2mv*!KKc{$Yr^gxVCV;+b=LqE$Ill@mOQsoO*8y>`%B(1GtJg7Bv6|kPYz4kg zXPC8cMD^+t&iS_}2jzk-yJCBH0ZZI`@kJHjOsm@Q+GxgZnSFOClM*UYS7@QHE7ypzlhZ9FmU!ub(Mt%W$=nx zyue&wEXlcO?p|zVVD)g8i-DBIm@#+i5a8nH;yFezoYJy(JRC~t1CW=8rv&!}HiG3n zb05G+0^TQ_^v+ioFC3xjZ-eTFTDkuKDi{=+U8bfIgFY+VJIh`Vq*et|GMHPw;DyJ8 z?p0EkqN;KRL;tmT05pe=w4((EQ1Ib$A z6mJR zY*)pI83h~{-LR>wg}k#E2Z$^`IhimOQ0~J~X#&dB?oz8rxAOaoX$69vK$aURY2ei~ z7--;ZWG~d^K7arYTWVSrI*-qo_zEpeMvkxqq;4M%sI-qgjLbsHRxv7YC)H8TB52#! 
zh_o(fK_bL04k5j~wEJDgVcbHG^6>*Zu#g|jA(iK(M?DolzvOVqA5Tq6O}mczEbqfG;y4$&rpjPiM}>lSC8iKa2T z)749Q02)TWnL=Qhf4gR9u3wf8p(LKND{%rQEQGHDT#v~X*8zLib5dd?z@x9xLn{aFHT$5VO5aY zrLqTxRA;O7MR&S4^Bi+Yqsk4eubC){UYg=(N`Es^oITJsJ|U6fzi?$nc3X2Z^Bra) z34TupsN`6BF5uonNotHrC?Y5=V^ebkx$mSO(!XRYi0GlP#`*CO9Tx%9?pod)-T7h2 zwkxNn%y+pCtb$tjT=R!8P^#(OL73Hye4kMWky!bDVP%&J23q9c-9Y#tsguevqZv6n zC8(hy-kEXIl0IfoPKWc#u;9z$QxDmc#*=|&dm}p+I9hBN2g6KSx+n_GaTv6Wn_)uo zvwT361uhA~^SX_4Srshy;w%u@x@}!C938h?{$_Mcri?>Ri9zNREG#Aj9Cdie>c;f9 zX_k`q4DTOM)Yof|`l{Z=LgZG?ZVAy#8PqD_yTNhKZ?Tu16z{oAisjTa%-=c0ONIxz zS9^;{I)Wg`ddptoMFToEvzv+@>4MXc9qtv*!NXON9S~d!yAv<__1wD(*mRS$)!)eg z9JL#u#0x{a0)_lt$`cV<4&s2ltUDGVh89 z98mALA$;%d<(a(sCPyqKLZ_HjD6O<6H(&h{(yK@q_vo)3y?FQP1M0 zhH4Y;Ls;uyxApt3(1(EJ`dr%u5WqBLa0}6&3T**w8m9AnK#f?`BJW573!&y(1E)ly zTPc8wjv=wVA4zPbsm(@o-Bv~>lB4>OYE{W%Ei^LmLt6DU1|9o*iVOn8((?InE_5j^ zswG%N0q2XT(iuh6;T|z8LpFFg^=z?kF-1bc9#U3v9);;D?yFAK#gHt5R zmpWspib)Y#K?h4i_JW4Ipv2v&?gI)B$m%@@eYo=;%OJJx6jsI=!W^MFT${*Sr?wmx9!3(jPfbVRk zC7edU9%1HvCho>D(@CiTo*>VRFal!Y)_^ycW&Pm>5~}9cPNdf`){kraN1Nl5Zpczi zn;aK1U5tcN^ zOPADIY-huvD$_Ej(q#nl8-;rn6U>E_3|N@mltZo2 z47xb{{ZOMOx7KCf>dgN*sQ?8HZOV6 zaZx!Ssh9CR{{Uz`tp?_*d>bZScy#!SyJ3lX#}kQf&TNnLSg2u{e8!kwVJ*j7$`=q5 zVEy4`d$cykM7(B7RmC`%0RovW)LP@UcwD4TVB&&a08Re@rerz*U^>`~M&7+dmrX60 z9iHg8YnjdiCHc=D%4Tt zlwQ@AR|dwc%;*q9A0CnTN((^-p2Tx1{{UO_Dp7N{{LsKY1V`I3SNdM&fd2sBxo?== zti=m9nEFd1J74oWEMR|lW{@L@(K|=6*+y z^AGbm${$Plh5rES{mRgFWr};;T(>0{L*38xzve4teg6P{a{mCO?ti#{GqK2phc}gh zeZ(8M^=0pD0!-w-pA#SQ<)#gQA{u4okNeq*4zHI}KhysJn3x>b=ZvC9OSq+$_XldN zp;FFI%RF=`wO8Ch+xH48V32YR&w45LyYTh7BnC!G!0d|Wr z+2s{4^Dnzg{{U^n6&-57xe99s`tuJ;8V`azOO!;#jj|Wi3{_d{XT-fDKzrD10k!RT z++K|uGcPl;_Lgr77?pz{RZ=iw?Lu17?GW;iE{q~s?T2ZXj(d#9)w?V;#H9oMp;BqL zS}uQcTzzU-^(F7AaT42gid*#M9IN7o2YZ>0Ky7a^tI$+~2f{Az_ZG&Sf_sa>!5hFDqh2KyE$+}><%cv><<=Q~I9keo2!N?&u(7BNdI;DE zSCHi?7iERp#81_uR~?*l-8G_H3b_>Fi%{(0`hNWvgZ{k>$-Czv1M4H%z+LHO}S8f@fOeZzh(lej) zWuv$5pj(Tur7^)6a@1?M%$7J}nk&RgH;WH~ zXz=R!jwm=4S6|vX{*nv2C2FIVrEX}?QzB+XD04y~ye;YRO-n-zp|BgzD4_-s>6}X~ z3Tq!zAzDh_7Pl!=p>jSdI^q?f^H3MmZK=9rvUo9#{KeiEVf&f!#^9#~)IQah-`Zbh z6=Eu)2I6dgw=u;LcshVxfmw%~<|7-6Ont+DrsyXr)I{9zYIyM-QU#liHz*c3l$bTi zFxBda_Ef6qSXvmZMexmGo8|^1PEK7R9M1m$!vSW^Ga{1TO!ld3MK_t_9n4Q35}mwG z^B}K}{+_6SXMvf8R^SiRC_t1f*N$P{m-5x>Q;oZ9{boDJ$G!NL8QI3??MvGd+C9K& z7sT88&~VpM`S$++gbOwKhBZ*>eJ}K2g4xQ1p)_;VYO@GzAI|UQd95<%F_>lXn&9l0 z9^-@J61+bU>To#fA(m<}h+%t%{{S%%br@enbSUfor4^s=?k|R$ZTXh79CLSy%*-&jlOOjMV9DitLH6-oCLnA(w(cFMc2U{k0%(1ZGZYp;!~#* zp4cs8Cuy|~+BaK-fH*fkh_^YgKTtm`@YD=ubK)GLgBRy_uwNZlI`x|`B{{T{arr-TB`eNVda12qrrz16+A?p{#+2Sw?I5?dm=Tw}c z{2v%)CfRD`X0e683?N!W7hiBcwa8P;3{0q3Wm2zBLH+YntkUUM+K5cpb`NYk|FDG8JyQFj?jWZP|903|~h8 z%choB;tv%&U@xvETN{GUXK^krl0Hg-rQp*mi=rp9eM*r?>5npu_0_h;kU1U+SUv4x zQ|~p>W6K46+_lRggwxxC5OHbV$2V|+}z2YGTaor!vP(0kuX8!6?TLfu}OZrF6~&_ExWOjlGr}NBSrJU``sKm?@g5D+?12S*5HY!y$FbWkW|jrYcw&%YM{e#=)fpk&VrQktHbSReMnae! z(5-*;^)Y=e#awu)8eTyb>h((raVwoCa_%TkPxpo-WMyw%N&q26lb$2bZE)kKxk*q_nQ&R< zFNf62LayuP267Rj>U<*)_x`3!Ii`84eN3Rg!o@} zh*Up`MPMFKS8SRnltbjnnmvBVMf7P0j!-jpo%Jj|L! 
z47*=AhQvK457ElP^9^yP@2R){089c^gS@p;(5+Jh0p*B6Sq%ZVQo1~+XQpLXE<}B0 zR2@wNd%2;-$mR-<{2-&rNKd$M45v0#+;j#Z z74kbKlS(p?fB^u8(^$Q0!?3X1OOl0T}%%bP8gSz@) zkw|7eHvqHiU6Y*#SL_kZmbkV+UERreSuS$~&lQj_;~zGDCV?Ies#qn`#iK>Y9n1_P zYdT3q)+pStL(ERl<4i5?3GUIiBi*`yhDPtLRwuh;{BBV6Mp>mf(NGmUTP6RtoM zMd))_-Pee11~lI<6IfSR2v>%Ad=D_5{E{Hfkr1HB5LOIzInc`yI9Kw3UD`N9zWEIA zXI+lF_j!xX88lEj?nczmTzH+ew`3mLypO@IDe@T_na+XAsiwSm{of>5 z`$^M6G9x7mkONC2lU`*5Jxp+7NbK^>BRKqMaD*&^Kh>1Wxq(sRt#SMEyD0bCO{3L> zPsJB-+*BikK96+`3BcYU;drj%lxcX)RI)>6O7~-)dx^~X=-VL>?Ztk(FZuHf1{;TF z`A&E78E18j+1ksxC0YWP{1^$O^rZ#5?|^lkHv&!!I$$@ zby)W*MUxWtN4f!v_N$!Ot>Uj3<-^gAOIPnOEL}|@G1}y5D^7~9%G#uF>lq+cJ6m_2 zR6WHMjTL@-gw53wYb*4;P_P!dooOG~uiE@mBi}xNCoGd=095tYZ1SUxfP#>mEa_e} zdnPWYQrEw9IlD_AdX>s`>+;bGoy{Y-LKUEX;GXFn7r~ zZ(w=K1;)aKB1=HQA_krOo5Ey$u88j3d7)|4F{u>)oBpEsIJ zTo9oVTg}!b{eyC%C+!=PkuKZkw%Muo zh49Xw%bIpG1lbv5UQlsqG1O0Q%>HEwLtA=g8+zZY4Lc25eAC$IzEV(WlzzQFLbo_o z>oe<9pD^12PfnhM2%z{vL>~1CGWwZf&`aQ@iZtAlv=WZg@QN-`fl$0V8@^p|;vYa$ za60-T|NBpnu*p#Y+N);f4elpQs8ny{7kk83823x!eQNtgO@WPxX&N?cX_M|cv@pHh z!zH|)8jNJU4E#Ur?wV|93`fS|vIK;|3rm}j>$^PuwCBlpG}(+AeJo=*rc({?}%DRp4A=fN#cD$)+`#Q z8piETT_R)d6{YX;iLk=}G~69o+`KK5iG+433&t$+(bZBn{V6GCGxK$XdbpmxLWebz zaA+ehrQKfnRIqzT!;Rbgc~azUNNlnEbZU8E009?_@m^1{QMeJ9!b)b%)8RE=>LCQb zTYlGl!vSPZcI+<;mU6S({S<$C7F-mZ_c9eQ1?+?Vn(zBHH;MhSl<>U8WA#hMOsVFj zQ~Pfd6U%wud|XyKGF~-a#N1y3OS=Q-=q~-J+}KwmC_wui?Jk{%|GUx|SkURb3;j3s z`z1RgBNGb$HV%|R1k5_1j~fK(xgtaFopc;+#*So-aj7%n8OUu{T!8-v5E3&u(f-c2 znPvVvo#z zy)=m;;~xOCf)G3G)CwpFbWO+IfOnaZ6F>EB=0Q|9QA`bP(eveSf%%B!BQr%TPP*Qj zReC@PG8KYR(q5XGL&Z|f$5N(A71El{n!9}bR2WI@XlnqX0E1V^fB`2 ztOra6zJTy`l&A-3xM z7QEnv0YS2pGd(F~_VZLL>zDfO+I0Q=H{u%&ovoG%3>w7Z%~;4Dpp%k(UiOnoy^i)$ zU+N`!&a$HyTU3=A5?9cWLz$nDxPH^S zF{p%qpetZl>|vj`UOPEVz>rHxxfRej;Hy3N4uEWiKrGbH>KE-U+6c8!kaGM2Kv6>> zN$_Z~RA7w+isN)kW~8NeYeyM%pxX^Ph5W3_Y|Qcx>vHj+5`CPtJZ^`?8ip}2eQ%Hu zHxZxJI+$1eGMp&LBW4+s%wgRiVGvlSz6U3zAWaDOc8`>>r1+$$$AEY48z7g+MXh#OKl+Oy&w1n*(**p@s! zhcb=e-kdr81LXeScAL^t?YNQpo#fg;+vlKyc>*|(CYHC%K%qC&6T@o!V0pne>*MfS z?GX;m>w+avv@1KmkgTt4z#O_zCE za__g{9lNz(gG;9)=S=B*%QWmbe*18(=;|xL1_6w777GZx;aJo=#AMj0s-6ztqX*-D z_qe8df^s-6gT9KbJwk)sZ|i0ZYvfw6+-S?Q)whHK=S^(MhbpPy{8Lc>S=SrZ-S$5K zSam2k_$`3?sf! zo~O%ASeVxEnHFvMOn*Sb3q_yf_^UZhzEKsr9f{1?7m_dF#|UfiWE{aa(A-wxp23pK za`$*D(go?z&*{0?>=wviUI9-2@g&NuX^WrDRYK{mSfv4t{-~K3wJvIBj$isubAalv zX9c%PJN<4woXcCylS%i9$vIW-$o$+#`4CbK!5e+hxeH@w?h?iKadrgJB_m7mN1jxJ z&zdEdvCi6^ogjz8?cvT~bUq2}an{vfzktKS!%N*lbnUk(68%|gl`k-QkKYBaFBymm z(&ohETe4%1I=FIwWZ%FojVi?uTtCI`ju305HI*&EKTMO(&T5u^PvwyJIy@s|$vV42 z8JT$M$rGnHd2w22Kgq&oH6_Qa?*OoDE<0wD~TW;M_VkO!ZX zZZ5wj|9M{Pnhy5e^G$Zo>kLZKv;RCrG}&*6{|;Kq$Ku)B#@Ze%>_j9e3JtD!RS}i4 z{ljajlkEHL+2p<27vhsLeEX0Vdh z%tz_+-XvWde_~X-;phKm=5?=U4d<*xsMtY$!QI~&s0q}Y^ef}TKfv1Bw%!QSm8XeC zdQC|-P5)O+?&+pp&GJgWs;m(@O6T0kqL)xJn_22n*gp##&UFEzD=4#XI=k;J@5?xC>n-(o zhC;iNzLqhh#S9cji&kXmqsR|XM01myFadHq2BiGZTxzd&_h6YGc4qu7eeqGkr7~;K z-Q{;@K1A3V_Oy5oF!nKX(JsxeVx{V9!w}3R15Ku4@rDWJ_@`|z<~=V`_GcoTRPk~< zqze>(JG`mS3|6!={3Cf|-&3Yf_0Kpsxc&j)!Q*{cT-*)QHc3wmS}NKvUk6s+-P1&{f!THxT|05 zENFalA=S87fHqIMU$meN;8|A6r?kuyKZ z7EVMtz@}8bZ+5(TLW}&(yMF-S^3-K8Nc9orAAq0D{$R6CtfC+Sn#Z4?JD(L+5ZXEM zzQ?9?MCbaKJE9)coH5!+*~YUJ^4o#t(6}n8)mFW*9~hRxJ9FL1m?tjzBGQVTIUpwF z&5%`huf1$=8~JO~ypmenHSyw8Dq7@4jpH@XCI-5A?Sm!6By#Mxe)PhyiM-v7re9k> z&MUFW9j8!33k<8K*)%q@Q$g^e!XaB3wG=gGk<)y!0Hq$y2{d`_;`kLXaD-6Xb z9`q)ve-6Fx_sGd|FUOyJ@MA{e3JYIuV6kvy0(fc^^0lmeaOY4?7mH1dqmC}Ad~HT! 
zUdE3lgoP?D@(BsD1pyh%*mm?FH1pboveljGL<&qpmk|;xQ4NO)YT;YoBEz zMxy*dJLbvA=O&lDqU2~$4!RoBbZ5SO@()N(x(SON^1KB)DtGDNx)WeMRdTEH%PQ?K zqsD&6>r{i#?+0e4di@7L0yT8+|1b1QG^W2lhnBH$TWEiy*aps9JqM-1A%31ANV6O~ zeI|ZZ5|epDX2hKdgf=7&6e?BAo(tj0oF~%ni~)lr(K$L<{{TF+hPR4TMev=jZ#czO z!RkLx{IbMy#4gOuhFiDi=2U0jf;W7`7}|H%ve^FT^XhkCm*z5S@h zclr13|Go+`%XAL@J#(Kf{dPU3U*3y@@Lr%e-PvKT01KHyrS>(zXu){)eX&+QRac`g zC5tKjb%(w^%Qx7o*qUBO8mThEP$fQ7Dx+@&G!4QQ6RZmvqr7lrB76z2ljZs~i|v7ZBr^D0IN4uHw}ML9JvjeRZT!C! zjVDEI6M^~x^qXIQD;QBOB%d;Z_xN=t_4hT#)}1)N-qn3OPo)&qKLU}-v6cQTt5GCc z>r|gP1iNgW1WZkFh;?H(omBD+%8b5r8uE1?KnT!HZV831nh{tlu~z8Y=5DALtIF^;Sv z#5D=%q3f)1uj)87%BsL8V!ViVA_@Nhit7a>D2w<*)x~Nc*3J zZM&`4+u-{D+4R*b$+79t5v)1YX}nlgc|sP`-;Dd^&$GMr&m2Xa@7?{c@{g)en5;fz zQ`Y3#Xj&1W$k>fL4XB%Q#I&s_j2dJKpuW3-$7q^?|2brmtmVu;*qB4o8()ImjnDB8 zHw{HIw^C@LJcb;=2Lq6erorG$RRr~afRuzD)01n0rd_IsMRC-c<42Vx&nl+VWHCz;Q&4 zUJl!uTBME(kv5FZNZvaG1uI%Qa745B6rGFbM5)>vygy5$_cB)89_Je{$ybKB^ybDK zck$i~r>MwOvNdY3CACaV$6$NQVOw|dXL`3oq<|_eE6|(OppW=@2o`d>E@)(3viXG? zz0XTsOjWa-9}M}mpFTwZ<46CagRA$mf?rHrJy)CuPEf4k`Hu{IQqg_Zr-Yi`rsoj- z^gv`7sUpfcQ8+;0C_SO=$JCr*rU^2wK~10kU(_K}_^JS1yvRPk$NP!D(GH}L=Qfl1mYeXOxCqD?_vP zXSj;d$eHTP-D9^u(k4D}=+abHndssEkv6@=`OUmIYmm(g1Wr5V&xt~mNDu!3zKiXA zgs9C?+fBg*3d+xAX2?HZB}&?nZIQCybU7qT=$=8eppwWGK->E zX8&H*?~1y*GskkxW|AprR354(2gK=C@16<%+-+1>3mJ<_IdtKxmK3bedH3;A6XL)Ur(yhe>dx3RQ3zAfpu5|kX=Mwg>8Mt^Ywx7NKYd)cdCgXXqQK8znm4$z--@1v-?-=Q zkRbg=-5DRJIx1KdS+l%Or|LEKsD5xoRWPhzf+J4T9>|jM{x1Ff&(01hER{S<((*=E z?f$2byXik+5X&JAD%kJ$dv9((e~+(lxgl_ismT!A!+R$Y+r$2g8)KGyi+5G93vJXa z#dovh9%z!XJCC9Y8!V0xTIc981=U%u`-#}~WDI3e6pUe@!ZC*z#qgz@J1q@D17ftD#I?&Q9sCmK7Glz2?Z?2$ z4(|2{5T0p{Un)SvkCI2TYlvbR=yt%?(tV=tAQzwn&{-SB1V2i|%h^ z0Xr#eoxLHue!c<9Sl!W1m!U!pH7N9isL6<;cpUQ`0%PPsy1ddUU+DWqI?F%qZ|WxV z4Ym;g!GbU;Z>+pCnhiG7MCWMf33KF~4gu$BV3=*XHKTi{#K=+sC#EV~&#dg?IyS~3 zHvWu!jW~PrMKx=>LZ&Dqe@GASkpI*S88NV|l}T=4#^oUs`7k*JN=|Vu(EJ-*4@cs1 z`5|%l+)a)3__l?_>F$l{k8j8vi&ic_veI@B2*^{*dWuSc#JxpLd`##M9Qjr&GgHs~ zhA|8)rH#7J4+8dr%_HR!lp4~+FKoksp2J~QyXQ}Tc5mAZ+0(b)lFuL6{sClm?_hRE z^@4#3mowOVfJUqQ#B89fl{JkRa7v1Zb7G@ zQY&w13gTh=smfK!mfib`-okCZpB5Yub>zB~yVcKlnpSU9>}(hPosP6;`zE9rK91yX zqp=T3blr`=WT-Lzebh*RWl$`1vBIGXXGebBQs+4_; zgBKv50R(PgTY#Oq>eVsB;#4EK-g3TY2;B4xplOqQVVxOgf1740)meSa9LzE?R(_9U zI5>n7OJW}t-@%n9Id?Foe|VLstCCz#_!W=2`ORA+A+)uNKLt~x{DDi%-ef5gfn#Bq zmM^6zbSULpj-8_ePaPIA?+&+O00< z!}0{|j2-nSJBC)*fxse;YN~mr-!?BXs{-%Y`EmB>f@wdY^=A6bv!Y}`{o=v>sCN)F z)BDOQ2ICGJjEdD}6`=Ed!1nO7OH0#DH<<5n=4sn9y^Z@=|KTdoCj49k_eyc_E_IlH z@69ghmYZsqaN$jH`yK~|?>aq<@gi805Z#>-dcyZm0DT#Df}wf94@%v7JO0m7NWezk z(hb$=$toOoHDagO?E#OIQM3P{QI&s}A)7=%Tc_j&S7FUTj8h0`v+&FcQ2JoQlTyME zh-XyYGFWm%M0~>n-^RYLe)SfbKcxN`cOxll ztDk%AT!JE4X6;o*p`|_Ui>uogTE06d20j-PG5tp<2M>@!@O!! 
z^kWt7%ds(TNY}%iWpQ2^5ILrz+rH)rGLjso1*6;lC7soqLT_#YJY^TDu_in)OE-0i z=1J*iDGZkpH{B5~YrUPL>Q+7pmQO=xAdSAqAy5cWgJ5&E=G2b~{jQ`|!Of@8y6$23 z)cxo^gTwS6U_=aB@V(kv6UTY*vZ1R<_;sfCuR6LL=UNC9rFR7PiTX3p+$*ttenX)Q zFK(m|k59nR#Z-b;yo!OO)4SQkhr2`E}PzPNZKcI#(+k;bjgMC^c)ftm?+llE0D>*T(g^QXFVDtV#6NRWg6A($SFteCi`!_~EVzgk05<)LZQp zdUQ*G=IOD~B(AA9{EpJqY3gANvfYVOb}YiU^N>!tj0(CK3t|>a_Yjz0fg@w9f95*Q zpC{hyV|sKn%6^GlRld`&;QIwJhzce2`QSvr3v{^0M!IvfV*vAWF^`h=*`Oi@X=dGB zSP;_6&ElsT>CQc@$Ej%}E2_-nokqWx;z1tQZU*%RDJBSmPir)&O{dwJ{zHw72AXX) z99QPdUdHKL7)#*K9m>W-%9W1MaKpoT2w8?ykbVls?zqUHstW4JpDpW#R^Rx9_Cyy) zH@Kiou!|kG-RZUY%oXSzw|eaO6$-)H7~?K(Kj-zm3=&|IlHXwNn6!W7y&Qg$l(H#) zWpIGt4lfKCM zHrQV#h8;#-P+Y|^k7W1r6F&}6_#CW(yC}q~$Oi-J_}x;e;GZt(5Q876pjToVF*X}X zE)62!zU|=PKmNAykSXwu&^S_@4wd3!Yn(8VFYV+Jf0!a?0c=PRpcO9?zpIXLA|SKqzy34EgYiRJqS&GgAQq<>ApU~R zll-DLV|Anw`ev^!BPeVP5w79r*yC)5Nn-20+@ zdbH6H{y2%+xtRq^X`ln9dB_)!P~_Vs(oBpje}_|txN+^DOOzLemO#1C<;7<$c0*n7 z*T)>R;0Os>^&WJBpCy_s)zw*wIe>h_IG+~%!w(qpIHM)M%b%5WZFPFNL*pm^Y<20O zT+tEsKGT&^3LsRUvTu}fo^gkpodk`KD>}Bn!~6ULEIH{!PC$H=)yopT^7n=-wQKpO zvN;gt$Dk2c_mK7DrY;-)T_cvSg~!9`)iDX2+W7@{5_JsoY3A<`0t(Visz49@ROG5< z0VP8L-N05CqaEIN;$9Go@otKnL|=0FKLE~)9?C|JeiVl>VpL~8cXXF6wuG_+lnpp~ zmb4DGGH`%5ky$QNL71}EAR*#ww-U;;LKDIQx;zLRY&Al>XnjU6oS8+htkChaA^3gQztm`= zD71Ez5^|Y+!k!YSchc7NP^v9o+ldED)>m7#=PjTkj28>J{c9l@{|N7Q8Ywy!;FXOW zbcuncsEo_nn)6H{n0ieItlH^i;*~NHg0riz+I9 zgI3>Et1i=)0k>oOre^0kqO>*YG@w-modqqtdf-ge!7$tfIkeX{PYOU;lTvtGrL!Eo z+)n$2{=)#GwQ|FRZXy8=Rnu52J))65tW&xV+B%{*#0$8KZ&nr`MuM%)NSQrbSdU!c z{#wH+&#z5>)D_|(7WWxZ3JOVlyEB@`suDKYOPOEo9g89jU;@GWb-5M%!F4fs#W)@Q zA;6(;4g-^128`hrPIBna)M<>vam+%R(pXIr#hK_LYooc#`GF}irPRn8XpN5jvmXz+ zmsbwVthX;0cV>n*?T)t;0r=czvyeuNXu37sVKGSMsG<>;E+dw<-Hc7l*5=~+qjt&t z_s$JhGkVnvHF(yKR9T(MN^5DU2AX^AQLy26EX)jKg1&!tLcMJ5%$r`1I2Mt#wZ}~H zQ1lgE5~adZ!nGeZbsw-$Z;jjZgGtt_Hkadc6KD)g$%;&Kfp0VRl;jYPOs5(B-a`3n z5;2wS4+s^Fg$>b{rX`#`pZcb%B*@P?fK|LrBr9O@k$!S=H&HIFYt#-?e-B@?gDx>m zF)!o*jS}doKleimQ6=p0scC)A)aZ;=Mv9*6akD6=1MUKPxsx`ng6Cm|9$(_Y$TN1p^ssjD_)yjk<5%Bd72Qm<(h!HI((YfPxzb- z_$^T39*gD;8J|BHw#rEH9GK*hl|kURU3fQI`2*RyIGTm8S0#QLCF*-h=h9AZaXp3f zGnmhP4Z=X9Qy={gpu%X|s%V6fEMyVTwjCoC<{ED?R03#i*dCGTRxG5xHxQh#Fkt@H zT9)-3r^jdHo61j*Y%IB^ZkO

$?-xq_Ih=z(EDxR}D&|h>wgqp#)ql96N6Q?mfIHJiivQe!vM*dvOzvzh0bjr!qI;D|* ziK_3Jcv=(j+l0S^EOERd%CWZrL&xP%44^a8Cpxww9b_fa4Yu!)5paAdgOmxpb5}|% z+8O?#q`_UsD5>Z!Yaw*!=|vRj5Y=Q<+V*~m@pm&Z|M3XnKoSbsa_5}Bz~o{J19#3I zF_t!eFU?^dC>r5Ypy;uF(&{|gk_OJZTmMzgb!ALs>!v(BDKapQ*ev- z{w!o|OkwmRwaERM7vkY3L=*&($YtwoB>QDmcy;Vr%o=$yv5weU9Jif9+D3*~bE)Ml zVAT#mR%hzPJY$S?Mr80}9cC5I4_=14FN^J~um&ovJkmc0m1o>hA)?+tzsc7>+d8%{ z+x1{SI%5E3RArg=(@&qH&9AAhpuXmnU>qfNSlqLPg zW5Hj>SFA+>@uHuqOSlAA5@>h}oRiN5e;J(KOO(@|ciRWCkj`|xpTnTumY?S;Qsqf( zKxE3d(q$>1m5ZMl-?YkI&Me;Wn_%x%l8YTPis>EGcs}Z_P;c!?YClKl)>11=Uh*9N zT6U;#ybiLIR})9WRfSnm*vlav3FlRn(DJH^+#|3>)t<(Yw!?irmgWwN)FPgUM&`Zw zybxUpCY2vzv25MzS53%KR4R#m);d_&?7sVWJuowh8#4*UE zP_%EsRHMO15b11RN{R$Yqo#U(TVl{;_WBwo0k%R(Vv4Pi(hXJNC@+J|Ro$ie*zvE? z*cE+Vrdzu-dU?!PdVzJXB9DN2axwcr1)3(#!m^;pCgCQjcM`QEVmy~X+(3})Pwka^ zZK)95cd{6QHQ+ID)&rplOWg#Ov6-eSujl^%|W}ThC=O z@iZEhdRJSM56&D#d_YsPhB`AxEb=jF8wpe)r(}vZYNI#FdHXypFV+X`7Sl?TG6eIW zuc0lG3^eZyA&jc29A}?&<@hY@WdyM^B7Tn%oifMzmcTZd*WJk7r(^eTs(ZHE?ytHM zfw{?0-QTG_z028>@3m4}`)ll=o3@3VNw%oOZmeRiY{G+}`RJD``k2 zojP;~N310khO1%BXWHA3l%CXfd+NhZa64J-!O?HpPU{NONL}w*mF#{O`W0=A5!Ah+m0hK1C)P z^{&?l}H zzTd`8}XC|dTv=NP^$}5B(AEV9@G8`d9 zT=z_}H@IdCGnd*8$hFQ>@OF^TU)mAyvTLfZLJ2HetBqyvcEW1}v(n~?9>S7B<(HnB z(^%l=2r|*)*uC`oL2ed9g(IfZTLDy%ObbYi&wiNjQk+xl7$%34XNZX-U+H5Zcz-p4 zPdMyP$jJ*$4ZRIP5Zwuv0M<2v4h4qfMeIlvH+Ed$794!Ji)(Vt3)SNGmq^c@^|2U{ zz45cxEimo6Wtx-kj7p;(Iw zQs77~rtm|Od;rr>3i@2aAkg?@mSS@>lx07q%Jz#pu@<-=E`8**lA*P38?u4o05E7N zi3eM7meD~p+(a*Cm8J)FXW62%Tqyib~<|Dt;tD3`!l~=U%}{;l6jQNORip4#sq_{kj3cBs|q$MprtTQfce`5gzN zW)#S^9Bh@q3D=cC5v~ZV3q)C>reyBmUJMz!AD_R6ezCG+L8mezk+445QVj#j&5+N$ z9et*6RrbyBct8+K<)$=6I727t|I+M8rlWcjjs7i7j;)e#+lVSV7RR#^oI8kSt3*b} zrK^>cAh&{0H#S@9Q}IWX{w2%&YqLPFpod$4DU5X`rCQY3Fv2hK8-0?pbZ!YIBxC`f zfn8Da8OjUpoV1d{kNo9^yly*S@$bp$qS%YRl;0J9Iw6^gQmd>fS-MY+F z{_b|9uZR0Yx6l@q{~Uv9-uV6Cby|U-m*ptoj66oZMJbka<-?<3W}=vn3?93U_U0#> zZz5VJOiH2$`a%)si2#Q%@_sp^i&%-6<8XF+(`(&Hw@g*}NTt8Ok<6E`F)Bd&pQ8n-4){3Dcij*&s6@Mjq<({?qooa znQI#oC*A?!_p^_U4b{4Jv|3hYFme2)4r8uA?+sX|UZyb{s5zR}oG&@GyQI00Z(wCF zjANbb>x_h!Y`fD>98xR!G-lgJ?~uI6x3Lx4Ql-d?+G0M1SGsU=F^wx=_@fmB2sNF4F9M)pDhff%XiM(nXmDaM_T!Ue}FkyiZp8>+-W|*aEeJ~=WIoBDvg_Trh zy&`cqn*Hlzto;61svHBEJya_7N#^X}jH*q**PjT0PcD}yZpDvi{uGh%aZ8R<0eZrx zNEwV|Yy|tmLCXD*Z0(mRwR-Xdj3yIoLn@rl!?Lat60G_x`Zk&T(avddx=;&5~|pbkoxGyBsRoSpVKVi)jom= zx6};zBp>MeNb!Xu@!wL&=7@3}Ts}*X6`_ATs#GpOl0`B#lnrFD6o7nOZ6vY%sqQf; z{K8~2d$c6EMYr9IOGqL_XOSL<|4mwo&E9xTWH^Lc21c-AYG9I|Er0y*5+_Y(By+D2 z6y#m&IVFx;kHJ??GO*-u09P=Iqqn{EO16%oWnO=1M%yfXNcj<< z9rEcf5$6UDF{a2F|58*Vvnqy2A{$4}r|oOrt4xxwSIP}BU)G=utqauiACOF~fJq+s z(SRwbL<``{bfqi$817oc zRBj<^P-X0NzYMu)38%mhWwf5l-5=egdOIo0aKXjFJq;UHGs5<)&AnJA>Nflv&mlRV z6RC_96gxW;uwgP}-ufjCF;7_=SZ;1_2jYcm1MGE2xUh=YIZMpaB^~u|I0XQXu9U!q zz^e67K2QDTX~~hLGNMbq(0J|+G2G{@1`35YhVVf4$mw`iAx1H-FRM=5h{RszaSkep z#fbp9Um)0l*LgfQJ#%8*9IPAUUXMy~oMLbE%l8FH3Vqz(`;D;@6pfznx(b<^ro->} zq&q~fkib#UQzo+}!l3KNiJH-E=JrFf9j!*yhhFni|0^l-RnVR~FfDI#&>y5t(oz`2 z2q+{B1q^ew-PvGEbYbv$uDxbBMWkZk1nqS! 
zfFQ7y%*-D?kvtH$RWLI}$@l2!&zOp3;&k^Y|L&Q#QHowI6Y)a=yKzirlZ|LaltlW) z##RVmaosjZgl|FYU?#1*qNUVb=ya?BWy!qiKL^}_zOjwoaB|!hWJLDfVTmpk9T`nq z2)nGmPeKs`exeWMY?Jw%<(QqJKq+@}b)0O)?KazslU;Om}f2$8Mic97QCsfkVQn zok~i_a!=b)#SRZ>((6_6{4&&Que+U;bw=9cF8VEEV0>@5EVhhJ(1b*6BQ=zglP%2dy)pzo>YBJds4R8DtjBIwKUp2(zU;s~i}f(X z09|@!3Z?~Rat_$PC=#Z}LW*?p?KwaJbCqR7%`u7K3uVGB9exK>q#Gi@&D;+n`yP&+ zC0u>8p=g}T-#MmWipxbRC)ozq;lo$0FRx*0l>}mL!o-eG8Ds}FHB_--q&^F7Vfqs7 zu??O!XyB65oJe}-5`8lkf8E%FCVgVtzUWNk-iE?8;`9$ZHPuuQ+UVxyi*ZXl3c|IL z@8==`a^?+tB}=QU*xSbBV;AV=4vRzDu1^d)#E>Oo)NAxa*Bi1DYL^WRmkOtJ&G4rTsH@iht5Idpajl*1@6h zb2L$P)H@5O$5%Tyf8ar9pHNtNR{UXHtI>A}ooy`M@?nx~xxug25l@p}h-}MQ<`dD` zuDYhD8y7lMTB$hI8*k9Ke?fFc07cywo)vRMdX)zqI2?86-(3U>`y#FTaKoR(o zUFQ8J3KR@Gy~U1%65(f$Njq!$I7}UQoFHoF-`ZH+@2h5(wfFVp_E}|D>m|Q18ieEZ zf}DLYNrFQrOH6^%cQ6C?4UZqHUXZ+m6c5f=HB9dnM{ept)NzJ|Z)HCBBWwnghm%UT zrvmgY(nAdrny3yCZfDXOsJcEiV(LVPH6#5Z+vOrMw?;J=bAOl>vGu;W?WFCI18ly1 zXun_I4YdG;V$UwO!26%LYmNO6B%wtRB2*7q@GV?!WnO!zK3KhNk;GurW`(QoQYx`<%cXT^w*e45kyJD3;AN zBS~+XWY+fs&Jk&ahbfY#hzz(*lG8DbHr^_<;7-{K-z|L{Q4vss84H|VTVg3Hek{54 zNs5jX=^eI;3+G4LGhg&_sMO>lk7|Pli z_}ziH%D!>KQB|d5wx`!xLadyE^brBQggs!OLzeY@xgy?MY!mRzg{e1>tmmtDW`wNH zMu~ls%O=8T`YYpSHq3<}N9w^f`{lyWKfuc5=d=-RD7$p%Lm&Kx2@)2@8K7`Kb00{` zKC#7u536+X4!ks`w3Hi>S;n+tVfz@{m90q~DkYcE&QSc}U0rXriCiEBH2 zL1H5xTJ=- z_nGG$2q%bwKW|*6%42SnQx;P;*dt(6k?rXfgN`Fx%*K^jUw_O&+ae==)*lGt!Wrr+ zgSFnGX86MFe&ABbKLDaXn=RpJrEiQU-cJCFEu@TVp;&G;?@BZBSwkyk$nW`P_j3f z%azh&)P_yNGq#&b$qm7W3}>@KHcmC&>QJ3zoV+2&(p5`xEe^D;+7oU96GXG($ysUz z+lqnBjMxlZ45Q|}B+X?^Uzu%@nJ40=q-eTLjg67BE2RhwH5SmBWQW^f@dJOtpc3-! zkA;Q~HI-GtxTxm*Una%1bU5&Uor{nSMwW3!;GbgDSSJox9hJ5`DKRuCogM4VlIqlz zm{KNZ7I@-)2JJ zwG+n##JV@%So3ZMSKP+ueaUhxzA~;u z;};z6dQ91V2)G_)@l#idZR&MsWIjzTI^2Ci*Uu_J;NSgX62?*kGf$0#h|T+WI`%d(d_U9<(F^lVd-Tjr(R4BZ<2QkAK zC@&NW%Rf>h_?J%}5c1nHHWtot=u)dE{-&jqX6IHt40%TCyWZgI&4)8TEX5#(QQkv}>5u4ScS%3^nVWZC2g-G5Z zFDAw^;Rw(d9QuZxyQDDV#Q|BA+5g2aM4k zIf$z57Zk;)MSo^ZGm6TbyMYnpPBgI<4A-nGjC8&#=%dfkL1I#73aBGdEX;->&~`vH z-;x5Jfsc&$5~>gsWZ*v62JcEWE{^aX-tk zFcVpUBQ+ixk3TQXpSbRNlcp=v`keSlaWwMsEv7E^RbBA>&#<9@@Q$=`~0F=6n~uKuqeHsksJ$pIVT8XptY|@3v*@@==q#A+!NK` z)8zp%}nytcHg`^WDfoN}pkkZaOU5~zI zZBUt9PpjERJQU7OWf+pxpg?|@rA*4|Fk_Ew)4TT5M=1VrSK26Zo!JD-y|X&RRWSe$ z&Y5kz+D@zEHij_AU+Cr%ri4E_?aZ+B|1tJfacwnG-)QjQ!QG`e!QI{6-Q5et-5rWU zDPG*&CAbs{6blYP+ETPYp*$z=ch1Fk_nx`R&g{J=$=Z9(WPX1cq&J26&Fi2=dSpDT zdDNy+g5aE6d|ZE(wAuBT3@lAaE6`W zeWuTBvrU{6CJ~qaZEt`gEs?PD^;_T(fiss#2(1tCIFD9cvF8+7vq>nAd_AqpN+9kH zbt?>(I7Ym-x>xQ+ZL2kh+WI5b%*`1Pa*QfOPWp4#oWeQvu+{+f2->0~XEHZd96B;k zUzrlAYIZYi3huDlW~{|2P~?W?BK&2KjY97>v=&*Bok4fO@BtOQs1o~oVOxw7-$!)T z20OrgnOe~my>*uhS`HQ%1KMIa`owsy$iMQkJYV`5qlX^=c{y$0A)@vvX=Ay<7u>fg zn~?dSBSS+YY~n2Li+l_0C1R<-1a^mmmyNZB=VGTj6pCVubo4>IS;Q@<6S}sqyvj26 zuIe`NglX)V?wz=rzyLFDl{U5b63VzosRM53Ad_<{4(UwknZ(1=tU#-EN~#U}(t9G;#IcGU(D3WmNi{YM+yjgd$ zC1Z10!zB+3<1Qp^bK;#Imo||`;8ii&H_q?+pxz-DsD-1$|a=bOqC6Ed&Ocp|46_!F9 zQR3#%*H|MEduw}7hpvk=nCT78Y60PfCedxBeSKEkKb+has!Q3y5fJf4qefdchZBl> zxCc-*O@N7I(dexUZAC1Wej02e|w_h z=9SxnyG_hfarlBtCr#O#C_s*7m4_6NB6kE@5@Z`Av!d-7BhE2r`chg`(%01vETt%X z^5d3KcbD3ufAC^&>y+t562ZmApR%(m-<7WJC>zP4E7w>^F@E7pN~k1ulz}g{`GoTL~Xjin-=n?OzUzvzFiovnFx?ie}?G}ySQ5= znPl4G@};gdpK^}2$Xyvw(D^)YAq|{jxf1)W_2`=Zg;971-TSeN1eyIw*%$*kL-Mxb z_sZWHR%?t|FqT?w0$K;${?YY{9DaQ%Xd7gESEUU)N8(e_tkK_xIueha=AsiC*J`vy z)}L-7wD#81*Xh-s2iE%+rx&;tbhOq#Almm_3Do*Zem`2}tPYB7wr-SoF}1==+o>yu z+W@C^7%j{#rsylm=nj}48PaP1-f82FGkDI$z?4Y&cc_Dmnn;N%njMSL<-y$z6x?lt zkr>Fhhs}BMl;1F#a@;IJZ^(TIj;3YB zA&%S;9<;aXa?i#o1Di%WW_D1Lp{jHP+f?EbMjEz>^A-H`B~u*8O4zsQYVW)!nkJ!& 
zzv6fi6#1&>_@;hjg}B?TAMS94vS7ucBJ*YM1GhpEg&tn7hZIB&_of@F<~F(=rw6k~ zBi|#08s)P+=Y^!l2hpAWiK!@bBv9CLf!T&3dAKVeg8?HvyKmKD`V310NT57p4ixXp z(~tM21)WBUr7!8)+$H+@*lu2RwW#0CxDS~9YV5B+$maao5dJ6dQp#qWNWhB^<-99c z=Fd(oF`o^{={hnVy1}}JXkwS1mV8V=rq?}jZREqV#T2F5r&7%V>yP!Ut5e8nm#@uE zY7qRHlU`TU$jmr0b*^oH^)0LtGU}7rctW(k#F?U(FT10|Mzf293v=oaBNWl^km)Ek z4BM9?Cx(rIJ*ZzjyS|SHh z8C{oySu^OkzC@|c#oO47%H{Jhthtd!GvfHLR!|4!)`a2@nzqM!TxSJWd0nX+2BDh^ z*gaG+8VP?16yCno|8x#X zn}#eU7q1g{J{l)*rqrO4e(8KBFeaF?8bm9=81s)RSSM1QoVVckQ>z^IdE9c}iC;zD zSRBz@dQaOGqnKX^H#-OaHw=}+b+fOK+-4^D4mox~l!nAd%0Yj@C%(Q`yvJyc8m1LZ zDV7_z=Ab%KsEKFlXiodh8NE<%+QubH|3S67Xh5cD(@Tix%xi)0dp#CAN6+qP4K+v=bq^WZWaGJdphJbMdcoBB{h!L)<9DpJlgqHyn2& z1KzMtCQ6bHj7Os1gq(O3XvXFM8lU2T`M*dGxChRdlEX#PBKCMebw1la0Wv`;-$KxM2>@UxCvUHN&W2 zsPw&cUX-DRAX2;}EdyO3XwT_3E?P!kqJoV|a7cMi%YL|znbYjbUhLED<+36*!GFf(`|egHv@}K(iE!8KNosA2U4h3?zt`N`U0EjPRqEL__39b&uLQi7 z`Lhv~v<>S^n`|U@oI3U`l4V6}Zn`9Yl8c}XaBh1Wndv9tz(+>t%}SB+x$7nU!RaiHG#LLrU}C!e0( z2Q7#SfY&PD*KD7Ch>S5*gS_$yIZT7-n(&dP2`IsADt{P#*6H@%*oD#LrD@t}D1|fF#3h5?cp;n?xzn9l7 z@<@zwR|`}u@0@1mk^#*N!O+_;5MZJzzlNyB6cH(ms0>Q@Uqwq5x*$vSPbBWM``4Qx z4N|Z#`JN@7)+w-MScQqd zTg&HxTnLzKJ86lvz{omMJIdd@{w2p|rg{}Rd)DW2!?m=iZ?i`p?{15RcbG&E;eohM~CT-D>e++g=-^E2+f z!HG<7K)?WeZ%LFlZc8TkATsG^u{a%jrcByqR?MlEJkmRX1<7x|!EuE6yand-6Qx-d zP8Kcxjg6r=94;UCcqeoe&@y?+i9$PCsSE+NDQxOdYtp*0%tw(!eYfjeqvn`{yE%2S znR_oElNv9=QkGal%P2j-ItwWpqCA2kPytPF2rjDnxz?UhfI1?YZT3+#Q|h z&UDHaoria;3#Nz}1{Y<0@Y;0RhF!hA=9PTE=JQqGItuXnm7?{=vYGNH>#_(8G8+{b zQ~awGwIt{%in@J=G!7gAmL_H$DbCN|plKUW^?bXhTN}f~J z*lzOcqolQ(jerz=Qh%L z)r_%7h%Rg=SY_ro0plm@OQyOQ$o*kA9S~dBz zv|dna#|w}8T1{8bNOGJ{jv>fFkb|0^U1LfY%sH8uK9RAU3Mi|Fnf*Y5%o~-i>r}~Q zemN4Qeh57*zj*_55gno&r^LnR~TQYB0@a+HEkIxJCv*9*_{sSmxc<(>KZ4o+wPEp~|EKKc$tuA@{hl^vsPD{4sk9-XV znCNDnMLO0QSF+kzR5dpMI=q+lIkVtB*IK*bMn@K}lq<%6fDW>Z`0_iy+XbeX)!C@C z7+gOVL!+dUF_hNs+;0a3wI2G23Lc@7eZqS29DszxaGbmS(@ne)(s5` zy)mr?k(z|fS9|<2@?BNdi~ZI}qV$-P!FczJr6lqI4s4tB_g@hOZ&cVJol!x`m+FJ+ z5w87BLQSfMyt#RMwOa>wR>8UZwn4$?I_0F50YcB{A_k1XZO-~B?JI5al=4{&4kMO*HZME;YyFvpI>9R1Kf*%7Y?E<}ENl4HmpHmwRj;dD6Cy>c$~GZ8D+4cTzu)H)vA`4$Q3@!K+g#bi1| ztLMk#&MCaX!4SXm>7(Bm7ujv?d-kQ>uXF0$4U1*%XrAG>rJ%a|1MWaT4~gzCWGolG z<%_cZ3s0QkuQr6B=unKxsukh^mIu}h@2ZCUQNljD#c2(+*a)RF#VHhp9U-=9?GN2G zq76Ki^Tz~|ts@(f$y;?Whp#SYgA?6nEzma!^Ulz4AAD2gR- zy-R^^9TD@Jxv;a0&lDip9z3Tcf;%jg13tx%RPEYR#!PVjh!0X|WgBp-Ohg~IZ|F~M z&M~bVBd=qh@Se|nqB(&=GVefF4C8P;1sINb{fH5kj*JEOV-dMCg2BHdvXWJOG~Qo= zwk%pnnN0yrn&n?rzlDRoX0<$!k}Axi1o64mH5<7(AsEBW!L$fyc|!tj3n=mnN`CWO zqsnK#eajK}hdy7JA8nqTyQ_|dq)wZkuTA$ps7R-4o$?){1zCgcF%@!WrXKZO*z6E< zDMCLz8)3h%I%6`&)UttlU@IxX$D_}?dGngiLr{~$1j|oOwSR(e%3Ai^-wIdRHfj+v zHQFudXPP4~-i7rTg5MaX!yf4aspI%Jp<3!+>Af;XW^$8&-ZC8vs@YGILiT=%IRCwC10VyLIt(Wby@OZ&NBU)yoC+|Nar%dBWujEN-S!y#g2?_O5^(tI4i}YJ0?U_(qOib5Tz$0x2aZH*550Y` zP}kgS3zIEKb`{Na#xNI17`e3aPv7;&;v@b=Ygz23do7#S)j3d{^d~YGB-V01Ndw+R zy#A&yu9|b#e1Ps&CK5HyFIiM>YtUHSjx^eHM8B=m96EL$q}0L2&DzlD+sBs1A>uLU z$k8C&8_C90bO(YlyDzc5Hv``w)vc9HZu^B{m7gDTtCXb5Pv@<~HX~n=F2G#E6mzcZAR2=nuNZ zq^Zv__6XMAP)5h8Lgm)y{n=`I3IVEyTv=2^QGXy zd1P&2wrnxaj)A8Xxp2b!f(R|MVuM42W0el0(M@L;v1TnCb50X@O zcQx?oA=B6A%BipJpDMYfuv)ZDru4E&>D!jVUc-gv*Cy%BPM_V5Oh~MVy{(>0=o>Ov z#eIt@@FW;MR^C?Vpk!_?49&Fb6f`7^)(q55hdDjRmlGL<*OLMT-w?m7_wyPC_$NQ| z;e7fS@Gc+!;2>T9>?@Y~G7RS2rG>Y+Y|>d56vDJ|gm0OFGUbpoM_cYQ6@ml|{G~hr zplZbIi+a5$5Yz`%k1+??a3nR>sS0Sk-)JAyH+v`irsatZ?{e3 z*{4G{?2_(odm!XTc{e(P;<|VlAvt@r>~9C7Wp9SKkjYiT7X0*@R-}zK9eO7@TVU*f z(LBUpP_TJUDS*XzNRpX975mj<&EJ`+dB9WrQu!xS*nF|5-@n7qc*7X#ykFkQ@pf5oX)`&Ye|HsB1^uvOvW%`5lSf7^;JAzM z{$;rLGrSJUOS!1(#0@2da?drJi$m1rP_iuSn~VG(*cNXa!h%!ns=RO#J*$4`Q_Jzw 
z6kx2ci^&2C?&J#PrCoQ`Qu|Onsq^5)CwvuIbCT5WcKPrrIyTpV_ajX@*jCu~$yyd} z(riSH5r%c3+5J{IU2H~Ix%yYdlii>^GxoZc%qi_R-RBi(M9m=@Ghql_7F%=Y`aGY0 z>xa@)QgORU6;0EU$$uNVWpis_OnJ%`nT4FOE>w)`Mr-E}RJ2-_Il{iRPeEN*oR_77Ztg<>Vmvkt#)Iq3bmWU?>&jM!2 z{QX&C5;>LK&A_r|c&yr3OCRJa#Zw*qc`$SVU6m&sxM(qm8%+#;S)n~{X9ZkvcA#Y= z{5|AozQfg>)rv8Knpdgaj3l)$`<_l92^JlBFxL%6xOm}-mY!u7T$+?wdp_PE;1N2N z)&wp9t0ge2<$FUlo|(`i(U<9tRNlk>sczVl#Cd;Gq$B+bzQS_NZe3GUWEY(3B#`?c zjOiAr;P|?DG02>h-R3U8%SasH_d4q@*EY1UDDh>CDl&*8Y_f?Vk-nD1133C~Ur(F# zOOkX;>|Ai(?dM%_O$)G*tYU{nuKmjci{qo+Hs*=NyL{a&wcne{j*HxM339zA+egI8 ztWg=6{{i?IHsb6l|K_#>KA%KRN?%K#9yz&Fm4nGdtz!mt*W8>XY?w! zYG~&R?WSpEcD@l-B;*tex#L%J$=(M9uHE>y+P5BM%t04sc134h zK?Xy=tw%OgUBsqrWDd`8ri^0`cB$(7x`3xg41Rwr-75^R^qXgkJy|(SA}n&|9d;y~ z`gg@RPuqCoG=0Z)JrYUFSHNl`^3gKM2SDrV<=bZx+0$K9Hd4N@bwvPp!Z^sGtQ)uE z)gHd>oG;=k>lcO#Z`565aGFike1fjlRO6C03{L#BBc{8ISjtmnTZ~H?HB9@i_olnn zo05G#==P>+l8TJ0yxaT6Mzs*z;@u|9_WNaKAa5OKHyter%2zi$Jgw#3Qnqv0@MgQ- z_Mw=Dq3`z7BXNH6)um}#39M0qc77r;$EMKv&?Y%ZBazf7-}O4GP;i^(lweCoB(Q-g z#QYo4JGm8#o3CXpy=kW57dB{dzlRS?G@KHC=G&b2pblqg1T^CMb=yKCz|o{Awp7nZ ziLr~8&_GFRo!kn`{lVtWEqZpvPd3jvI4ePs>)0j4JXFX>X7>x~zJD)c%1oGRt|yKG zbM}9NoX$9`!c3LV;+aOd`H;K6K4>2MQ%S>oDoxjfNu*5@Ws7e~*IfMz&m>{w)UDTW zX9QGEgl&tiFlES_ubxaS4^k)3bL0E2$!$bI-X{A~QbOdFav0N>wM!`+m=w_**<4B+ zGK{~LCos23Ag`?TY>#lv*u|2%+CclmVih`O8QB*jj+TYF6cI@py$J~2hw=EQW*XCm zkUQYHgo`-jhrAB+YHxpH|JV*moXGz`22pf`h(FgTPCzQ0Iunkh1Z1MgvC{3vfP%J_ zi;4Y>b8rBbg|>I=_ba57Gsq2C1w|SrPV8-+^DEx;74^rU);pVjE{3=wH2D1$fyxm!Sd zdkI+P_&cmC{{cYP8-KnzC;#q-k+y94$T3vOic;iwPZvDMIpMzGAweU6RzZGe#DS$h z{)9wHXRFRQ;!h+ls|t%rNNcDgM5CNL{Y?CpoLWkHGymfW(k+F{jdM`O!tSm4uBY;>8|NL~8e3R#uZPY8 zaK=+bbsg$C>Z&hc1d_T!(o?#ysr)x`@s9gH0HC<XtO@)OGcVqaI?t?rV_`duu zx<}o9^AXE%2z&a_L$oZp@e0Y3sc*R?^Do|eHn?4<2(1;p5f)obw-TcViKaJoGzUdj zZMkd-f+d+dyNZdrr{GCUFn4a!^dUY~s?JeVC=cp)1ykw@E#lT>qc*&M_k<*HRF8p; ze%Lhm__NbOINjbl$2ZbF(c#L`Ep3182`6GMLs;{~bKT+`$+K7*7Jn2w>V(waT)vK% z)Tt`*rKdEjE&v};*o5t6o;-f}JcGq1U(%XCC^$DH5Me|&P2@3^`-g0$A=;j>ilWI% ztx0TuQ&4K5&&MZ~>NO{Y1K5y(!vT zGb&Q^ok}!ihXLX^pud4RfNSQ}T-Mr0p9F#3fs^?M3x&N&VHZMt=V_D+I@_nV_IN_w z$s|j!+<@Yg&$KQ^%bG~B&LW0%Y21L&kVq$Y(-j6F1rpSwrr#*f+$zRM@J}J%5sniJs0F z&Gi%zOcGCeM7Q~Vt*_!>DU=0%=h1EP(%*Tzv>)(=s&hj&QrBEJ^XrYd(?&%-uC_A3 zB%|NA!8x&A`+0NkbW75h)fl7L)*D!Hcu0N@h(drExVh98UwuX5Wvs;~31~2mZCq#L z^q!JG#e|j~lZAM2Th2O7{M3&4-2fe|ZyS>{;#2#%V@=st5kGbO_X*DJMUl2ALq&@wv~MXGzucpU z8Z6+F5KH`Gbx~+@BkgaY%5B zd3Ncjls&grL0!$G(;XWRGRGLX-L6ybgbySwcq_T)7ZDf9#aeL3V^OoOXKD41pwMd2 z!NdZsVz|~LtLh_{nYWoVm6admSF5oCuF0)M`16{m&d2wFgMFr9$PVRwS)&gS)ns}f zZ>VO;`ACFj{%oSiNnZUQ0RIvN9K^?s#hMw_W(WK(_%DkJqo}#i5OdRv&!obxOdgdg zl^dz>Bdqd{g;N%}Lz-Lhi}+%b57P@T1P6IZqq z$%CuS9jt(AGW~K;&yte7-0IFp_g>cWuky7#6Hd)G0=jGOQ&>KVkb9EVnWj#gp>OL2 zwT+gHv|F1fH(1rShsje43gXob!ogh%+1^DVwu-jOLhXZ&r=pD20J5BOnt5hVI;oZP$3v+2bx+&cl z>W>_ed24siOGYsrgM=JtBb*L7Pde;@7Fr7Qj;v_-O}L3g_K=^}p^=mte;tbfr;i=k zOrgAacLjnH3}Eyu>usiNB&}AcAH)ixJiSw=DzEhE2vpy6(tO8yFbVCnCQ5P zVBX?V1c>f`08U&&5}r1*f2XNU;knZZJq#u_9A%ZWZqX=+gE1R-H(a-SjI{;T(s+JT zmCz%cb9AWAw)ubC04yW~03rYh004jh06g-uV_Yg%*~>2sFbqj4Q`}F7+<$q`yt&TT zm{q0S+qSqrrf3!{qAIL|M=nPzw8N9u zabZYCJ+JyVz$yN5zQ+t={!|Ie$mwDXonYWFE&(hpi!4TY*()5C$n9Odng zp{|~Ep?CH+h*4)g<>E4=2nljjF6tRSBqCN-g2!F$(vrVjPi8I82vr$7q)JrU9 zzP>X%>eJ}4a^2KQKyybSQRieh9+zbaVf-i4syCTU?L( zJ=*jh%b<`Fz^U_*{AJlREzx!$_&6cW-JI)ITx%#xxr{0bu+^A#G&iI(7q3`J`RNN` zOIMB;@rGh^T*t96VnmLhK8K#Vp9GHnOLF|Zjvq+)x>XyOJf|P09XW8R zBT;zOM|BOp%!^=tD!!l(xFnJlS`JG*6NDHVlzZB%(e19RYMDuCQ6OKh1P77Z8?Xz! 
zgRdR<#XOg}9i+7Ob%KEKbRx^>?*3T(Ax)4Ke}cM+`6UZW8)5wx!FQEIKdN$~3?3kWaDT9;5u49KGP088rU6fzDD4Y_FVS=K5VE)$|fZTZR zk2t67$R~3QbXuBhHpwfq-bB#Mfi4w!Ua6mea7}RdKu+N;ecJ7a-+gS@SUYZFdJ28qTznzfLIqh+| z>}ruM5ESO4m+Y$}Mf@sRy0`}9cc&>`4T6x{o_m^jgMa%O0@tMbe&xiT~dGyA}a z_(JO{OXFFh{K2ihXVX2BRfyp*C|Rt*PS{P<6apy$9qLc<)rch7R;>C5P%I}P1AnoF zs1Wv2Q5!w+%N!uvHF;14;@#%N;|nA zH<9Sw*_f--%L% znZhFweyIB{BW)RlOoT2*bnt&)E(eH&A4dP*Zvklk&k^eDK+78%zYGwMS^`A^0M9Wg3?9k( zi(h>)5CBLu?-@7X_(LYl#Ff+F5={UAfXf1!0BVl`06$HUk;Si5NVQSn6$y(@ny?H7 z07wSGf9^RlLeFs!RzpqB`d37*tXBdPKzQ`?SB3`&(mlKzmj$3i3ql6~ZWm~gIU*y0 z+K3=pEa7r#!fRCk(kOfkzyn}EGb1C|Y2=Y$%z$=AV)|!d_IWesq=24ea^(L+AeaMW zsS2B*%)uO`rPT;^Fpwh=C~Zaw88H682udR9BEO1Q72l1~0<=?w`wO0giw0d`X6%FK zum2~)Es*~@ng-vez>b133z1(N13oO_?I>Xx{9BL0Ujr0eXUTf^C|qD44*+^~*Y!mL zP~%VEKS^~$nY$wa0Bn{c1x6WERHjgedhA4`d0HKLFbEN#JxUleMT?~RKM}ltDQ@2D zmx#ymgR!)Iy#s!K`;#EJ>yksAJ&yQ4{lG{7KOF)ZpLtQ?Z*9CeS_CXG01ZAzU;z+l z0r2#}Q+yq0797rt1-4F*M4a|h+{E-H=i9pyD2cz_{Ga~+y{TSaWe#=a)-9c1!D|=F zy>76p_efKcK-XulOkLXl{d)KrD(YN3Y(s{!n%mso0N^M2UxJ!B*nQf%EqzYlI~jfiIbfCsp&k$*gMfT63W|=Ul~oeX zTm)9eVU=U>>&erO#}(x-YvEBjHQ~XsZvl}KJ4E!H%!w6Zi>MwT(r<{<4>3+hwr4<& zPw0CT7E;|oLg$fI2hTGfCe_T~##wKKF$8?AgJE@CxuRU!r6~gH1VMj4{TK+!HkBzd zo~=KE?>b${GFPblpicE`Hr!|?z>70@JWVT_`dp0iR#;XEppXY=rEovx*nT&>h|Oce zNd3bngS0UE;Gw>K)W^kj4wCoB49<=?zE*j*z)78&10K*pd#^@DjjxrNHX>AhE(QU6QvxYv!zc+@UHMua26|b|z28POg zr$0z6T{71>VJ3Aq&pTFR+r{vaMhP`7rW@aWE;5V(u-*9JOecqfkha2S1ka;qHy7os zVJ-*nHW3uHc-U-1kJIew=#R>{+;-x`N@a}Uig%pTw2VAGj171b7ebg45r_$GbDpd* zJPa!`!Az+-N>GFM*oodKU4DDVpZe)`R`m0kPWG0bzxo{HgsC;q%7}fx_w?;4Q~rg6 zthY$;$eA7qz*-!RWb6O+#4H3BWJhb3NskS94AnS2dKTcQtN9E2HGb8Nw@;7ofN5bn zjl5g}ea4On6WP4ynQ|;Cu1}4z{|dl)oQVWhcWZVaFiS~@S!1i}yx2t%?5_S+j3aKb zv9|ADScIqh)R;1Ft?^J15))jM(cMfYm$-5Ox-_Fzu2DddEUh zN7Eh6KMX!;o(5jT4ftPpPAVs^LzYx-!!VvoVi09s2%cCnv_GJdcM7~xi8wmk@&eF? 
z0RZa9>9CvgDD+1}WVBbbO^akaJ5Qu<;fFK2W@=x`q%ffd80f}y9jJDknC0+PuhPun{`?SHXu&ktqkho#szwBLD$S zPC0yqbk1$KA49mgm{59^8}+I z@a#AjX)2Dx-g=zk72LC*4&Wc7vwqmTYv44o>iBplRZrfp10{(5{sv1XYdN2=_L5{G zBbd(r>x&_c6%+npO9Frbx#G<)?sii@IsO3y2mb0R~ELdtyVbl zGD%5EfZ)LiXl1wKt-^K&{;zQiV{cvcxl_9BrSI;)f&9xm0jU?bI21qgjg- zM}r;8iGQg2p2>`XGmvdbs9UsCG3e4c(A|}#2E$x}I;m}oTHD-doPWPWfQu2kUv4-$ zATj|-+AtnXS;w{_hH`bQD4pMKQ`1|>U1K^gb7qyj75VC3Q`O66)6kZ`6M+gM1jf?g zekrU$ylVZ{qwYe2gGonzz1DB&jEKR7rWlLvsw>IW9fGJg` z5irMXw_EY;UB(J~L^yA2C4MBRW6@8*C8GM^eQm)6uGF_&2sm4#Bz{R=^plX?VNUI$ z>P-MP6$alBoYS%&ndY1bD=9~%5LrDH{UJy-8Z&Ay^B6Dq%{+j6E-hK;#(pwsZbqzE zi8&apH=j#z>8#J;4|O46-UUBwa_t7@5P#+wf3-ZUCJ3$goBRI6H6778<2hfhpibQY zgXIh&qlkx~t;$B;&j{WyUpVeTQxL6@*&b(xY~l}5j$Mv;^9Rx6$cox-e`6%Dv2}9A zszH=m>muB&jd<$yn5!MT{=jAYyHNfeh~RGa&;~!{!x8b-v3qZ?hi;N$Jh8jBvR45W zM`n{u3RoB`;t|MV9N2JYw4ckO~?DSNcZgvDoM$fEWNf?9 zX@pUJ+EQvfsay27cSWKz(ISr3JsuJvzv?gj=f#$q@ThFBLGqF(`pwSY*S$$XG1Dcp zy_0|b(7k5Z#NY=vGLYPL%Gf;^q2Wo>_q7WN>7=~xU@R72iwy>`Ld zfgZ20{*97=((XV}b6H(5kDNt^~@sol3WC*v87ghLjGdmut;~a^8_d zb6BbC3Z`bW%t;pmDc#U5;FVJ|UUQX0-+D+NLb00Zt9|rbdt(v7C#B@~_ET2E=s8&& zFU5n#5YEe0Jc`VPfj^g(Z;c}XiB_Td7`^f4-~CMkNV!nd#F{Jdj@PZ zA=xTSL#A$N0cs5xP!g<`jY_Dr7a@>v^&t3PIkMRWo5KB?_+!Fz9IzV^w<5q-Y9&`Y zPH!dy0i5C>u8^q6HL5o5RWSZA!pdsEUW<7TxqSB~Q-84GzYstvUE3<<4+!#;2lwu+`vjNcJd2PAJjB3k(3 zCSq~Iq++{Y?PqxL^t*Wx^~pPh78EK!3cG%Q8<*BX+CP$>KC*aMo+^$V+iv>!9v+op z=B)okIApW^#cFJfP(9!UZ9)D1f#VypE7vK8bGnUmy_WBontOwGXH7`7v%s2f-cP4B zkpxrOT=SjSkaH0>U{(V2OP=bIf82m{8)+s6HFIq3>e~ayN7qZeV>|cVzS|9$#fK2} zNRQNZ(eY~FZd6&l*IHd`bN4$rn!Y+UMs2#xqD(|iN+yUJJ%b$yMWYR<;C@mya)5pg z4v46IJ^2=V)9RuH4MtF;KIT?_YM&ZK3&lc16z1|HkAF(Y4oOL{JtwZGZOp1n^)9a)fl6iwLXp3tG9^_!DXOlIk`i zob+geTPubt)lT^{)v}z;t>A|>|4coJKdLy8Fm>RG*3ff#F;{X9>sT&XJS3&6xj#vp zVR`Hdd4?YW4qJza12hi^*}o=I)Vi*HW4&V|Dy3O4ZN}|ctBH@g4T_}ThVp3rELjn* zW&Lte9v3fuHsJ#|N=N>1&|oZuNFmAY6VCxN#tc zeMC*O!gJQob~?M6)WS-qGb>s+xv9;l(A}gfxoHHkM2np>za#e%smhjkKtz#aTTVDy z*x{fZaTmWl;4;{HZ3NRSTtPU}`!zYA@a{$diWEm>cH^Os+3Q34QjEIzQ#R3S=e@h2ZcY z^ukyA06z+Bd&8QyYDXcVv^*VoGShcoE8dTV{LI_r^YgEITc!F| z1fv*wtMLIgFE0saSYD<2Si!7rwR+3dJZ&dhV@KzKTeb!9#j^Wr!v{G{^P(m9Do9n9 zEc0Yg|IqzUtBGXz&kC;|Uin!El-jG1UYA{j` zF#eW6+pOrp0Aq;3E1=!h6C{-`Uw@9bG%J)2Lbs>hfgKC}6cntEdB^6}xO>3c{U2cZ zK>--lHIf(!g1uyXq~$GJ8)ooC6!p|@W;Ij8`$@}{_3c2R5|zciWmO9tEk>UfO@Pd9 zNsCXXZ=y^ABSGP~Bc(6iWMI*{iiEPAHzd=CoBXM*MX|W7F%YHx8;~7Cq~&ZwGOyM;E@k$!c$yk{tu9lu<#x8 zkRn-=DAvXg`t6-fNQ!T>*2~%{+U(jT+@o;klJh@6nY0lyIqvw?DQ;&&-#81+{$DiF zlP?0YhSAI2t#P$C*escO*)m!4;L;t9P9?C0om!1e7ts9WCiZ|!FY$^_}zEW=0xuhD-J(BR}Evi0~ zR7Sbhkx8S$4oSe5i`2}FoyH6Q{1!7-FeOo?jU)VvwZkzDrtYF2mv8l`;8B0x41oenY4kJ{3HP)BdU zYzvj$CMBf+BfBjm-TbqT7k`cMX?pm<4M7=`U$ISs5yXz$hA8A+WwJ6 z^I3esoa~qVTFZtm!pp?MFpH4)`zud87nFyC5d)XL|0N2g;1K|*@lKXloj&4FY1K#D z4g%gxkp8?^szclQ53rwy5*KAW!tkdA46=Tk0GC~$nfr7)%jt@yM0c8<_OGl`_hj{vtmW77&-_NH z_-axY?qLd`R4`hlF4$0{7PIjlF8|-38W;NSFnpbTO+rl?ZU@uL8F~NJ?=fThJEg9e z$mhRU;-u8ZJ#W1utf2#W2#_OXmIOlUEf2+60M5Gw2D7ih%}mAmAoID*F(KcE<8%gM z#khL0T!P@u#fEr1b-<2Fgmx47q9Z3~(;uO3h5?KpFDC8a}C8PV`IfV9(B01?SAy#X6nhV!c zIlX>cM%QaQqW=#NAF#P(bQA+x#V&`G5j}nKu{ZQG@nL0kj5GCH=rbQVy<^+3S_kOY zw)tjE_#*pfwPv>`fOCUly!(!LQ8m!KiIEqGbPFN8ZwiC7NAG=^k-rpRwYVQfN2P6y z5i7prF6Hj9=O(ODj)sB9t|JDI_{A=Du>7>sc&nej^tL#^((>F(>&tSydr&Y0I=#2Y z{C@z*KsUdF87_0o?20JXiaf<_$dzs7G!{3i<^c)rAr%^>DOt7K`Us8pJB|Hh4eEP= zTMux`Y18UgV1V6;=!4Y$7$NBpC7-lNGM(2lvegz`U!(ovDyn;r+8z4(gexE_-H$mE zqK9xthvMEMCoo5ePg33`qExi%{%rWTq02qf**%dS=2vh|AJJvi!M}?ol9=bsGuaBO zmQ0k&Vt$%o#gL7`>Tq1DRJa#2Ht&XHk&89rR!%{wUBfn6ILevVB2wF?0e(36p!pL( zQkoh-*;V7e%*9Xe>LV&c&oY@$Ek?!~Obc(hPAKN~4+c2*8$8$I8*Sams6I!@3L3ZX 
zFQ%=leM>~C*5opd^GJ8c6N)ZNZsJD@dqIMO*(!uRupm8#!ZK^TuH_O{&C!2}C6W-sh--*pHC^oHh3)``Zut)X01z=OCd`tmW1|3+NrbSL z0bC%m20U2unN5P2PkNRvPJl|7LFz;8uP|+O>R^myE-s~&*FAl~Oa4T{uZsw)@f@b* zb4?2ME+4Yi0<)_8M|r36fl5ETF;elqqkZ83*qm2zeR$uvwPk*86@B=eSmZ01!yy#3 z;OYx_xPn)|bIDjnjn0F_Ttj=5=8SC|O7sl#Bq;<3`AKD;FpZt&L^6X6U|gt{Uge8dS_b@zBj@PKtlX#{-T7bEd;dlOJ!)OlhjP4Csr1T{q@IeD{D0uk#NK47V<<*CQ zL*TdSiD?&(5Afv4p+Mff&4||5GObgzt|qk;)J@c`;LIW#T!Oot1MxI0WaipzBIHi$ zS=X2=2K60th%F9!mwgbTpfr#r5zJ*B92FQ{LBQr)EKIz|IPb&_33Q!b^DJ{$jIfLM znY(2-?_dFIjrV0#wn+OUs34IY3q=Aq@S&ZB-Srof;=u&3u}T1Q5epV= z(Gw|BtPtus3cjF*6?Vj2f=6;;#$3@pZdB7KA>O6K8y2x)_)!)dTct*nk-zx*hg4Ot z7Lu`2E-?B*G={3?0n;Ft4_D(8u(H0wKiiQ;FjyB?V`Npu2sUd)TaXaVF+xq7%o2s4 zyf9;&~C`0V=2xoT0%AB_U+U{Veq8-ayL1@YQ*U$0S2G z$S?53Jh&QSzo>vCglU1K@hOUYA0!|YoV~A@4$!`D{@f~#MMg7#iiNK}qETp_(7mxn z>@l(wOTCIk8&dj0^gI*{>4(0uLSX#a65#kfOiWVyOrcJUkOu)M=pu^gH|8{ z#EAj!2y9?kDI|L8;avEPBf*@BP|MV-dg3euOo~ue$42TK3?LM9D#8btSe|v8vY=8! z(8@t9^7ASSTMI~0aR973h1E<%EDebb!rPi*8)C+wRV=Ukl9z^B7nkubtaz88@Fl1f z-A6$g2+!=5u26stlCl2)vLYHF&zhS%`EH&bextNd5X>m04sqkmR;Xm7(Xc)9FLn`Y zf)p72> zRD|L+6GdVOO~i&Kby8*~i0KkGErnNI zPU!aefR;mE;%k0I0^lA@)oW!hJ_(4OE@S;#6mr*yHaIB{bze@PA$d=fUm#8?gsP7^B%1pX%K3PiZVT}N1P+u+&2oiMG&GyS;P(!;0`57_dydcW8?^i z2V|p|Y!j%kE~-`8whAnkibo6BIOB@RZiYnR2^9wqQM&8kED@;DY~6^ErZAnHMS;D` zL}@yR%W;s!HQdhLwzf1XklIr=Cr2m&UhnNQkaZ$cmdsCI}*6#0Q^ zF%dgr{4v{?+Xk_a;lx^Oqy;mh_>OC^D|bXr0M{J)K#^W zO@P8yMK(2WQOs(XLZ~jxKq_#@2=O5snZ)2Zh%(e3s?#9EhIbMRBrjwmkt+(5k&B(H z{{U)J#n=L_;HrE#f+eVBVby5HVY}{MoP?%-|HJ?)5di=L0RRF41OWvB00RL40096I zAu&NwVGwbFk)a?^vBA+mFyZj=|Jncu0RjO5KM;wVD$I!AkMKb&M?UQDzJD;F;r{?p zF&r^0F6%ZB`lTn5T?^_RsL@KeN1fP7vD9;t>{|p#CRdqpZU(|RwKfvQrRv^75Y?i~ zM%!>WN{xoc&|k}MnB<%Y+R>B>s`m>Mv;P3u2wH1lyZD7rq_hfM#329-dlt%z>6Kqe z)+f7B5#KDPZf?^2MKv2C4Ylz&Zc}q8uMeY(G1yB;jYtZEDdMD3@;I{g8nD?-Qf;Ne zFayVC;=$*{A+=;4AYvI>MmHB9rNgx~{-QH}a6(Gnq;jqdAoI^LI#l!%E%rGP$VgKD zVMGOpfl>Y;z;EUyZ_K=Uh=s^xP@*kDv@33}~+)&|n&|P6GipidJy^*T_)M=)ln^J zDYRD;=&dn$izQ^B@+b|29e+VHH}L@qW%4)-L%Ne*&_W?{Xe4+!=C77NW zEX#yA8bLnz;8Nl)hfx#K{RF;*XOodHw->x$%y6)3T7q>huVfN z0IU_P^9AJKSZ6(}{$&egd*q?ozCFT$$!+T8(P3cnq5%z1rcw799_I3hp;AseAgy8s z00Ux%DK?k+lugeQ<~aGCeo>Zcv`$=wP`H(ZL^*QR0)66a0w#?)1vlaR$9$iV!Vp51 zh7A2L>FLR-=V~A=A}xIgxhN@&3&9bZ7>ovq_6SDx7E(3lkvj%bP$Zf2? z<6-hV4#e>-^S2G_--t^z!wYg+!n3~{FL1xSv9-@MLRnlFB-EwCwu2I#FNpa;vnxF? 
zC9wI5dKv5lD77!9Gqb*>SUga|m2Bd<5)uqOBGmbt%S37cjs>14 zM&jzi#w9HMrF0cAmV_e+9iGT`f-6YB>9%M%!i@g_FVd1uM%XTPnueP0UOw-LexI+; z{Feyfo*0~=nFtCH1z;;#9rRqa!oGrBcmw&KQzw|nBTdT{!~DOjn<;CL#74&>OU{C=mYkb^5#Zo4Zo9PcUZNp+>=vjU zq9BMsgtGkPwH;ibIPk*UlrzvuoBKpGPB#uj%Qw!>U0=9i?GoP2@g1OTMTO=AkzgT1 z2hGbNvs@E@#a(Z3t9dOF0KHq;3pqz!vuvlX_`u(HY z7S@DNxR{%&L0ulgWB85FPC+cBV{>O`oCF);2=k@LwLCpj3h;<11em7E;e_v~6K^6= zc#qoXy7wtTi`iBUczTFaJbM~GPvQW4j9xVUVzLmI1%4n-YM=||l-E@NZL+FCYs6c{ znF`&nf>#6teITYqwQpEqbxjrOZB|MRr~HQ{N|p5s=!eu2*~t6vk;~KxyU4rY4yEeA zRB!DsQ#*!C!k>O65mDTg;ZMJrMrX?6S$7B_x}BgRMLQJmA2NcGm?}^n`<3h4{{ZtG zQZlL%ps)v!@bVBQBsQ#JlN$E|9+8)66y)Uz+(<}+pd4f<(0CrVdu?r$#S{BOuO3VIbHMhV@Cc&9VP$O`9fVRI- z7MAk~fP>D@l4VJB9wenoD;HbKVekM0n3PhLW>`bSPXYbs(w#2osV)qO~WsX4( z@xCMRkW zfm}ZljJH_rMM~7n2VvB8j9yHY1#&HVMG801sCJ7g;xX0V-L?Qc;_@DmitmKT5ntKr zRSt7Xfdyh2?ige95wmxYgpqJx5G4X^7K^CSIT#hcDMpDNygM!fE&W2Dyg0NHz9L9I z*jMopKM)Z{^6eakSK5VEn6}j~+O&-VDj+rNK^nFU2jh~}F%)evjbX+u_g|nv?Lk82 z!2q#KFB*SCM`iBCP)AaGk@7rBnlO7s#x2I+v4n*ol=4|SU;#1Eh!Y8R$B0-L9Mi3= zc{dAiznOX@gt!wHxcK`=6n@D_d_XGv5LUKdsYlFW9T+a8PB2vf;@V<-!p z3jIf;SK1=5FamtX5huXQ!4I@|5%-BwCGml%M2xG3qFd0MZ~k6=0023%24d!v_PuFiXXTh{*GSwd_Q$CyCO)!vOMOusg3} zjYoK)<{H)R5qK4^(r|rlqV*YGpsmEWSFoDy3c;`F9&oKws*dsxY|2ZqKTpG70t*pI zIER&%%h>IuB%cE>iD?n$m_#xnYjIpPBSny|8wr=pReVJh2~>1ND$q!Xii?jJn4oY+ z_?6lc_oCTpaDXOD-^4Z%jyYTVgPOX6P<^P>W|$ZFKga!vEr@+rlyKi8%Udm1m?)2M zENTL>4^d$*S>nh5yXHFkD7ppth1rvB3c#E_!{nHRayNcZ55wE{1WV!*SVxq!xo$^8 zox0}| zcApS{J?*vs03jC~PaP+V>2l)h1RaQEuZcitzcGDMJSbw;6L|5J^na$cYXw%K)M`JR zn#ib%<+9QOVYKlAoR2eD*Zj@gyvV-g<^_^{16-Pw82G7kYVC3;&ghGO+{jX0)Yy-> zl!PzW-=lA|Iq5|@-(I}whHfa83? zKOqW2rAvl4fmXg{8q|LcL2R}2AMR-Z=Q;%N(*tTc%O!`-SfSGGL3UiI8F5Qw z^%)km3C|$QOt;7M8MBFq=&IikX+4Ib5};H55lG#mjI26*!8AK5RW3z?%n@5HlRps{ z{o*-cUzD!gJV*O8{{RYt(IBbr>N}M7b^7#D5{Ox)Cta!2APnu%I-0Z zdWD^KS_2mCsY6)#f+5)Auk=_8>H{8C&5waSh$3zA2?N4{2B3jTIW22(&F}3W=iw9L z^!be}Ju)ZS{{RqokmR%ORS%hc@R6sg{{X}M2(7r`DvJX-4t=v2|U04{jP($8krKvY7fPmtJ$JN(L7ND>MMn8|PEE<0I& zuhb?Wxq4oeypKwhm*&*WraM{c@>xUAC2HjM7m4e~{{RXCF`k)kh*!LM$-oe=>@{^lgs5CCqx%NH||dU%vm9|C@0`CwK501}kLSz@-f8{~iBb}Xl(_bRvCeam>t zK7aSL=%{@!{X%*AQtxtDSRz2%=zz2VXNXnOzL;(*QBUX`d}S%|<|#xv7t(=d%7PUS zkC@~_M)WWZnV!9Wh(ZMd?1av|L=%Ux{)E@TltS(I4~ALX73H9bp6iLX6e!0CGUGq_ z#-r$^LFj?`loe`0ePjONWme5D(S_la{{Xq8YN+%$UUC?V>|y9y3g1&`#5`b5%K5}y zJ7v~MW`K)>vHItkak?f>4#bF|xnJr7VYXTrgKAI=?TNft0y2-8-_Vlx>CQy31WR zkPxtrjrgO;w4V)cCC9>lN{qS~E!HJJ@DG}{&$IMTpz}0CSO z&j{1OY$$2^gQMudxL}pTnpo`$A}n*~umps$l^+A<6%b#W z*%YyYhiz;ALAk34@Mu6!63B=5N~|T#A|_;`cNP~s;+6dwWW9jPeqo}ue&slJ4b4mk zm=>ILsde>57tC1Kc>H|E2n&xKfP6lD!)R|d9HRC_mRI)!>*T74P*paiL|+fXG}!pq zEq8ZS;ueVyM`inpb%~VAg{`M$Ef4oQ;N%A_S7+sMG=3jX6=2t?N%6@`u*x|fab;#| zu1sH%72uBYP0HJ`4ElxkFanu?>LpO{)cBqlTo^@OeWEqm@%|;64fcQ8*0v|F^%Q|D z`N(lkc{n6Txu4w{8OjZY}z=>3^%2LQkch$;4 z$N;rJXR#T!S0Ln`a-~Xlk=N3=9?E_`w_J_85LJWDZWh#xcWC&7?H~PiXgz$(so}A$ z_WU36P_jNpADGb!ov3YB0QvU?_56M1&tamUB6|bil_wt%ig1EyNxHdMFJV!~eOh=)%*7G#MRe?|)@2+O5p8Up#UZgcGr0Bg*$TTQ4A^`8D?1zn0D z^g<9fqwN0xu|+z;V=3-}R&ZiDE34ncTI!BO#68Z}7(uWxm@&+)9^e#=AYdh@#AO38BEXC=iaVeat@0>r zRYRIw@h-!30jhQL@#c7h;r{?33&Rjh-X&ch#(tpCt5tF#D5y%PKKl&pdFcxRLG)V7 zkx-XSq@qT`sDPn7F{|wr1hCbDUO2OS#L9;@J`(&v5+dFurIpjqn*RVXUQ^;N+LW5$ zO6Y{nrxGloJbMnXxK+2>AC2<|R(4xG+?vS#V^ts76{@9LFzfPy?I4Dnm=`Joxlu18 ztcCZ4J7Hd7!qq=O(7MSG+c51!rMkVB>3pMq_$J{|2Ej(QRk91L3ig{!Gr<5w=h1}& z$aNtJK(T|X5VR0A`%19(m#r8WZd`7(g05h%@I;>emE-A@VcE?og2_`{KKKEoc%*#jCo2CTVYgrDy>GBxFE+gGh<`9KLV@3}S;AJCZE_9di6MQ~l&JcNKCWxr^BaTIr_>47>b%m8&i*4*jX_cJK z8i-Q{Jnm}`huQ-=tXRF7-5P)>L3toDiCKV6Fu#=P`o>@naMt+*x+cM~N(c^$Dk_5& 
zBP!^K)(tfJ-9o1zuhY;-=nWm0Ag<@iUwz>{X9p>jaFy;<^u)M`J;)Q)o_rAjiz)BW zs*7VRJC;_*2+3=^(g@bB0d^jREAHPR)Z~=(T`CQM2hKt<+x;VY20U zG3De-dP_Xq9qymBrf%~5!~@Ic+723D=2OD`<2Q1m4E7paAet-@YB2`#9=4&pC*o99 z5X2V)W#e3lO9^Ts(UQ}`SjZ$WScY(b{zzKgh_10tCSLxcEOOwM z_8!?PQE*CN!0HdlfgoCyMxlq{$#8IAv^>%oWk2rzuVeC2` zdX16RBC2EjsE8eXH!o2J zb2-AKDnByu2GmB~1~s=hKtnWCWO_t*a}G+%w&M(j%Ew%m=E@C;30iV6DBXyQiiP-^ z71-&d4WZP^{{SYS2Z713Jwq%x7?py)5!s_mWtMd0a61Tyi;N&N^-+O%P6k~W9Z#6S zeqrw+2yDCXT&P4$fB(b)D-Zzz00II60s;X90|5a60RRCJ03k6!QDJd`k)g2=!O`#_ z;qfqFKv4hM00;pA00BP`!wJI?b(@ZK-x^iqXg22%Kmn0!B8TySsMgy*#y+u_Wlv!X z*Z~iC&nbbRZR0O{qk{JF<0@V%rZ1G5+sT8yU|kjs=EB4Afp-LAodVC7j3!t%AV6?w zIpxP@h$=LLvHdq#Y4Rr4z>wdbUYu^x0=m|&-Rt?twbwF%M~9|8r9z^2Q4jHn?AmE5 zUY%)p=ZtpXYZ)6%&|wDS02KLd?=*D7U^FQ}G1mo-`pM_e0EPqgcd3XX0VL}YUb(#C zTXHaibvWZBtFuBj>-{jnP&6Hrjy`b1MxE{_Gw%ec1og86?tNrAH5=*-CDkWxInL~( z&Zj@;Iz(U_POh9H6%{*0`Rn5t>wfe(i`$o+#oe@Ycxw<6SQ6RUpjh)Yj$|oh3$x!G zv8$7jM)pYj<&25bhMyDO85w>lL&CF$;gzByIyhZf*)es-K!x3TmlQ=uk|1`|RXxn9 z``|Uoy}L<(9mUbizD|Me;XD1sc`Bu`9SfBpPEw&x7Llg2Q7FL&HQvLFuRxO_OWhJ#;#Vsmhkc8PgqlRXt_;fYqokZZ})kI0SS&6BU38@>v?Fl;GjGX}Mq1>4^i zC{hqm?w@$gkWD8(3(m2EiNKFPc^8r$1dna!C$gZJ2Yqvob`XNn-ZQ=~aiMD=*gQdf z+)HPSGl21ptD&p2gBIB<8{odHaS+V334q{P|nT{s92eYnVSh?ZWP{#@0-6cifyB|Tz{)3pSy*!wc` zIuOs?@t4sUNRx{eI15k~o;7@Phb#K>^=F&b^^rn@1tzstGn;Wi7^F}NMhOR4!jA#V zVY&`9Ya^f?6)_Y&xEeu4?Z!p(qc;MG}aNHJvm zW}*K8*jft8%{g!jsCId3++HIBr%NC-!DHJjKnuXrd}6~DjVihqbCbc9-7y>JI}>tu zF>5&zSBC4zu8D*vI|3b70+BqotWZii>_FAn3vI?mjWva$!Mfp$kwvaUuLkeDj8s)7c3aKLg)s34 z2sNbR1A~RABy(R))yg#?)dmO^p7(Hxz!HrEO_11Qi!+5b7HJKW;|XX6O8AlF#7$t1 zzikluM~pZ8%J0*EZg7xj$Pc$$FskD~J!#4_uCm^wR>X(l&v`1cFbaOJI56ghpoSVv zlM<;GNyu<^8F#ZQy&sIakipj&ePH@O8R+~nt%mUld}IFr(KclcO{hZeRw&rs+@Qox zrktq>zOG54f`PCb+HIy|oncxI110Fm3O|l>+j8+sypa@4J^3D&Zz|cFh@ra%%FuhULBlm$`D6Dy(0477% zl_6GrWwHpJDw{g{n3$ldNv&S-B$BhQ3{XduI|TVcVmZW$TDvthI+<3aAVrMtZ=YC$ z?@HQtbBWMXM)ABfRUKUjo(v)u3R1_<&J1Ghs!cmrHIhjoUS2VO>?VeGyuZ$Q83Rl7 z@L0HYh=FPl+Ci;}j`Wcfh9`|i7aJWpdnb@O!=+m*61wQJ=By1+EQEqI=OrOpyN!~7 z+@=WjE#~za1=+;oAomgpHJQ&GLQI7ww~(7|+p=OId(djc_}&ySg4>g|R@!&GZPN%v zfvw;hSRS6D8gX@M`2Y{)OjITEs@vGy^)R)ig)DdQAu+gHz)Y$y!eJ@8k@^oG_2%9~Uw+87K z3&}id0KKIrQh4Ly`%}!rhgtpVqjGO(Bt|cbuCD!ogQ))>|yKr_lY_51c zU_T~?HiwKqSELe#k-g*o4?qNuapxd$TJtG>Dqk7OD+G^Wd?G)1QDg?5Lm?Iz6IS9tGlE7v5_Ez_J7f#sHPLFpc?j>lwtQP|xncb4-FC^NC`# zB>}vFzKxUU!`uzfjzPt+DR`R)&Qc4-p6Ac&JpTZJpzGVcW0zPMk2eBINT6D6#%}_7 z=reo^*ge91VWRV5LY~44lqPv>ITGvV2T_i&0C-EktXyjasH3JcLD__~QetKZF*`%i zpx>N#Xw$nWc`);}%PzqR<8wiq?T>IXmkS5)9yk@mEaPB?h=aFInxqLAa<2T^SdT7+6ry zsATo56k|&uIVJc$@d@%?6p!Zz72xdryyIDh#us0O%}=wU1ZigNeB&=_P4wp5Hu1i5 zQ4mEL(OR#@HPRXnmx1=@50RyWLv`xp`CT!$pwO;pn~4{ytS4IU1&bjv=mAspGQQB< z4l~*l4AKkMpK1WZjY6QRE2UB!$6165i%L%&<$Pj^9j6>a>mRTxA~h2<^{goqYn3RU zMc2FnChb%}3{;*u&9u^2t0k8Ks>~w8Nu!BwNe;iIP$VZ0j1o5L%XhI=TrkKYPWtu1 z$BZUVJ3}XXw{7q`amc>6gJmyP`^S&K zVoARXI>fyKQ_k8$)6N-08)xT-VcW^j<*d|&00AbyzBfOC!5Tow&MrXIX5prc-j@kL z9kQGi!VbPsye`XwvOxMWR@}Nc3+n|!0^<-umWAQM9q}-gj&<>Xq|oWKVSDL~$ZnvN zhidBsJvCR@3h3-I=saU24Pa@K$`C<-GvF|Q{K}l#cg9gGoZ&*UAG~iL?a&(XpclR4 za}*GW34QI&CMpPcBHQ@Fw2sgXi1o7<06>XYzDa=O8+BWpaKri-+{S|T>kL!{L~?T1 zQPwTPxz_XF(T^zafHgrS({WllZYvTt1Cp!aLobWamZDtSt^tN0C0ifLggatOx|S<^D#Ku>k0`P>v^*n94ZkMZ=Q2%rf63k zHm{EIsGENMJ&W<<4s_KF8%`cWePeCSh|&Vz_Wl!8{{X+eilNd}E;1 zU5>-g4jvGTL>dk?U(GX_#h5L0=+@hE^awn?hps+zZs8n^X}w%nF6G;$tr&kkxzV<_ zgB}2!3ZUV3ZOQCD(Xidqb;cY)eq817pJoXJry#05onX_%3WzB6HInOM1E46MgC5?f zR}F$j>4kz|yr^1TH!?F~(1Umbx%G~gFm$q??^(rQM)9wt!P4+_0oQio{4t^e?-cC; zG+5tS9pk_wP^2fVWZo13K6kKaS;cCdJ0p(J! 
z)(l-RDd^W3w2~yeO_L+idJdTf9x<%mRvsHX{pCd@6Niy-;$lH*%E!j@wK!^a2VG^O zAsfB#vSrVhqLX{%t;5VjXiZ&l z_kn;-p!*pA0IWSy@}dimZUEqga(I4owhn@Chbz9Bw*LUkBxx~u6&0%7s5La*IATyE zz(>5}whm{lVLH4c1Ya)?StlD<_kNhXU=Iy}zrpj4#e!6{E7VMx5lXGX=+n*K01lv% zi+NT!Q*zissvVc(oUbWpR8;5__Tr$GT9g%*igTxAcvOXWmnOgv^)Q^Sr>u1fldRUw z`uod9WC%T=-TBC1MH)?jYn*%LoH3xCj50w1d2sTS)B>0i9T1oz=!J-F0lZ`HfQZ0Q z z+z!<-3<5qcJ=|(I8m4RgFdCZ^ongJD8t8W5Yo$1CPY;}Vk+TF^@CFB^5S@5@ zU>f;Vpor`R#~})wVt5t6c-aLy;^mF_7cg7j7Yo6_Y6cQm&an|*3QoYJ9vBXZ0c;?J zvkQODQjTE!Wr*QN;KOD!5IRH7Uh2rROwbzD?<|J>If&(^u$MGa2Oa+a z_Y(ZvDI67KoNu34`va-~5__&Zmp*9URfe)NxqUOv~k9ku) zY$k~vzHx>TYE~0n-1_Usf76x^1Va2AVIkNO1Eg)^CH*8MJe^JAtVViY0PRAvjCYvI z3BK|sxy~9AGof6zWi!YWu}@(~>SdA@5LS3G)>wFDh$mV=b4B)X$m!Ml^MV}f3Fv%0 z>!rsbx30Rt8BKtiL!{&95=ugW`L6Up zF*r$C(C2{h*bXUb-n z==dzCzwE(0=VmBg;m_j+-*~N3?_lH&&6@_x-jRUX145}^kYSYK5*U9wf?Gyi>ZV$Z z01fT02lmaai3f!|n#H%{K-;3@{_5xm6<`gE@D%@8yx6P#I}R)bWQwW`?*A01MwtCUis!wpe|HCwLFm)!@9vTOSI#%OaPi0Z}c0GduQd!ikN@$fLGoj4lpn^NY5 z?BBeB0PtzWb0jvW8#A@`9#E$=?7{Igy}PHbWx#bvZB<3={{XC5epQGN7m%9C;; zQxT*QI5+0!0m+);j;r6C0OC$0{{Z2+v^@Sl&S~!H1`6t*Jz~~cI~oK}yuQm?yME5T z^OhPF7;)YK0&$%7&TtV`r4e8@EblCZ`G)ZJq~izosHcn9#!Alx)4bw-{{W%Rc~r`6 zA&iHB!X%OHt36-~822-v4j_x~1P7T^ynyd>U6?o=+7qe=1C2~t3%Qa2XJ?F@sG5Vo z;-3vYVXWYbjh3tR2O7{W)u+Vp);lrw`O!(C@f!gEs0s zvaMuj9PYSs$_~Q^ww?RM0q{|_ys72)l4yu`U=sxn>={JEMBKn`!arCP4vI=+<%}?QEEWm*!z+wCnmZKM9d;nCr)-*YoA&+4^FCeuHT^{IvLY^N<}Hs309Cj~E&F&B%)Khf^m}5lOd`2Qx<+eEBepQ$cimuN(upFJm8IE4AUK73@d zu%}oj`{m|)z@43V5^J4gKzlAg(cE&~LP46jlw<2za)#|lpNtm`x}K+jtz$YortXY2 z-^Mr)05{StG&AoEy+;)pdVA>Lth&mB)c|wLy zfV;;v*5pJ7k2uqAKvzZ*hH3(rL*F!v@sf>FuCViPQ&Rf9dBgc}6)q7uGIA+Bs6)Zt zNd?j}r)HA;_+`@ z;KhadFlRJKU`h!ClLWATW1nEXOM-|&4#YIv{bM$ap}c3ckmfBmqdzz`dj6Glh_1!(8DB@7iP&~4gY6VI>ZIRKtXk0*WI*v;Or#)Y z+->U>xe~E;;dj3B6ioR%ec}Y@ieC-GP)}cN{H6qU)-RVv7!RkiBzdkd&Wfad-DNAP zpyrDC-|?2kJsD(u9_|2@NJ`~+b|K5W<{8T%j;D89xKJ5DhW4wx27u&|MyHdhmP&wD zP+hl6{9u}m;u_`BF8vrXeg?vbc{qIGH&dvd4_P0Qm8c@<*6NN4 zv*aU}*_uJ{+uA$L4kY zd3jeY6UjKLfSX9*#HFtVXsz?=;xWie)OL@I*_H7fiuxu&X$Uf^>#butO3inD%q^Iy z$SOw6^e*3{>au1TgSk2^?(8%81_E%&rXl_gSQG(hc^s<&QjWdLh~;c;5Z#DSjzG4qJ^#WezJypc)Utf z{{U>T?KRmsIkR11;^qL1%2N2nFxumtg%$UTfZ;6!RpJdEW*-SSs3pCGyP7ZtILH7a zQ<};EbwG;0wmw(YAnM^7RsEui+RgWiZ{kYYsJb@c5sDGOpL18n18k|xn&qsy2yr0! zx^lwvjo1DA!&KQ0uXFUlW{qm95O=2R#*lD`CcEp2?=Ezu%5|py062GR$ZJu57{s~? 
zryyOEan&nCjh}VAM(|`Hl+vTBdB<;DZe0(%c*RF@L1c=YHa$!<6e|)^ou~)Vhg5*` zaHpeK>Bw5YlNuW99gI7mXnN;fG7O8A0O<+KyhN0*a}n}|#zi^83So7rbWFQsZJ{(s z?oS@D5Y9Tiy5{kO(loKVy1X3Wxzu2C&0RMU7~KJBr#c&fL;*3-=fOSU_-rPF`)0It zg6dFKh>OIR>lPu%=Bhic%{j?U{{Wbw;E}!YfHu{bbw-wdQ=BDZNqiX7YhCfEy<9(v z#h&`kKwS+7Tkh}UCA5+i1ic{Nyx?0|g+=dSnwUN7Cn7p z2_OXUf`;|G=3=}s5=Mh*tj18SfkIqpnQN$+(S|QtfhfXHV^aLm)_OmPqT|gk- zd-@#Y162$t{{Ul(iWMtBzAWn(Si)|Pp1V8VCDcO1+)Bfg?~IQp2jET0CijCwT+_b~ zSqQ?M)6nkCXi%E?ihr9zUY>*WdVa_wyf++j-g2@y;`Wzi&((5~deUNVRWS~d61A_xJq)d4T-4!G!C1}V7;3P;R$J-ZG8TVFVAnME5#ZD5xJuvzp@N3+ZxKx=Y4v2mlz?NJOeb zL`K7080FkkZ7X`)b7!4rt|g-*vj|Ncf&krGonLrFn*|IT(X-Y~t_e-hqK3DNN#UXd z9(<#Ka2z!j!@Vv(_ev6kUGv5j72>;9d=~2@@W-Qu`rp<#^rXOGyay)mAU0Z%?WJ|h zz-@cz4}?pT>akRoqe+({-a?$<3|{U^mUy6Fg56_SOdlw`p0WP`fDzd>!NPZxsbW=F z3wZU9wn8jBX&2rg0zv>@N1S-oMwZyr5J2!@kEqxlmG5AkCO0iZ=VEP0lTD`93i!7gNmvRl)tsqt)4C#B-gy52%zI)R!(&v`>k>F2 z(*YT&Rq=3glFPYbaB#1TyG%`}+}d$+TJV$>pCHO4?MbAw^R48C_z)`oIKog6K`7Oq zOq0?nNCDUDco>D%fR^7r@kKcBqs_eW!Gf4nMHi3Hk9eL`ls9*JTx?|esp=3eOfBjX z1U_6G;N`VxsigbGTWTF*Kv_G%754j%9cyW#o5^T#agvmBH-0dMcV0e$ynPtf0JA_< zhi>p>JOTdztC7LBg|sWMdd)1Rsui$Z^L54>`9(G}8`81<<8HB~LQ95d(o^$%YAXl5X$Z&a;zXw$a5Rb0cLP*8cz)VJh3$ zPhn6ooV_K3Ks#qSy<1;%MFzaD20Av8Bh?OGzOp3%yw2#4#%Z)=Si2Wo48{U!D}TZH zVx?YWDijbNJ>_5j099f&dwI>KuntYO@z)t^dF`RXcuoa=iX#sahW-9=#gHX&SVFAp z45v=|N(xsoYEctme>ht3ass(P=3-uaj?lXV7zIYV3Pj6G$_0H%#k`^~phnY;OF z>-=IVHE$47=DoP3hO9&ZbPue(u7-xRaa!`j#&b5rxdQKWXAVe10&4+N0fwG3TOt|_ zG}gKM!Lp#qi9HJbbDkn0f-yb~{N<@`pkM^+ca7pp1c>CQYVR0D$~y^U^7D=(-tAIG z_AlNwvUe|GcXi%vXut{G*D8g$Pr_S`<>EJh`$qPg4zxhL%nhX7JGgdhjH*E{@Bo~# zzcUwIar6}lDw8@Bfa=rP%kL)M--2Q51vm{wA2S=tb0gNe!ba56W}L^awY(J)v{z_r zhn;xMP-fs}#ALun=gvo^tSl}Uj~d8D3?O9}XplN3m@8?l<^kjE=LRIa4DAo=-g3)t z;{rNuzyudDlf#KGkvAM1lgo-=qXeYwINsS4w>I&L&TM1vFY0le}~|8kY9Fd&gY) z=h!>roMKiKM1)rue>dF1w`WeaoSMWMwD?%pcG$IIM_1_ z?~H>L2SfzRbUY0OA0_84KbBj1IylxrV495CThz)yegriw+2bh@7hv+d9qV@i@d5^R z)2?@{1)c(&bO)D2^NOC)xf`~l!y|t1aLR698t9i?Obm}L zeHc!4n^%;}tNYv!p=+WtG&SK0Ao<=vZK95@Es`yz`4kVxU(*Vc76y zo9KLTpEh1>9<|fPoMpbv3z0r>O)_O~&@)E4$LcN)3x)4Gcf)}eHq@-uVSUk!NxH0e z)`oahDqyc~L^5;Ee}o-$S2RxI78%vELl!?5O~L9=m&S_E)L8 zuSM&;N`V9uUxJ!*yhjZR7^-&N=J6z~tA$$if|IQlkwRHZIo=(^1b8nc%dCY015DXV zQ)UO!V`vJtTY&c7lqp^g@v|thV0kNK*B#)~$V4&G<9o%lnc6(F;$r|Ike$Kab|M)c zaWh9^7AtGOTutyUIPH%&tO%_H9f0bc?+Jxk7c!o)ZzGb4!Wua7ktJ@eRn5^cD!Z?} z(mndlKrnX+x!W@L`xq_bBC4cA#~YIOa4>dCXGzl_^@@;S)Bu?2rx_ZtL&V6@GqEbt zvVidLmDi7WUz~50lxnlqSjN^I zF~h~;;Qs*Dmv*?@z1%2>U`Pr96gwVq!TUb}4FbjrZNu= zEEO!{03wGeeV3WK;#um3Zj=SFhj{qZ$5_Boue*%Ws4RXH<2A{GmZ2wbekMMQCM0>i ze>m%e-p~{%^jyDC%trZ&5Ale`lqU_cQ+T)e2=DTL-tnmF#N7~(TQ zCz8E#;Eh~7Z^@j$p@|VStQ`yiTmXUbG3!oOXYUvWnFN9KxlEcWY;bv@SF|tlm2^4a zv0rP7f35-#M_42r4G&A}B86zxzh)T`h_CIFKcDJoE|>Ml@&i{aH1Y527+iq5l{(_# z&S)fv3Fn+XKmbEix5goSwBn$vBy! 
zH@*{VIr%U^l2U&p@07e?aF|;Q06e)*d>00*=x}($pd+?8o%`E@NLXtMzAyzObsz6H zE^>~byk5Lw9gN-z!&@d}{&aMFEG-v0tl52ZOf4Hy*E)q$K7$OxVI#glb0Z z>k40tz_5zhH=G&>Tx$0K&ED!MGZ6=H+CpZy1aTxo2gu)V^G;wa|r# zLJPbDz**bG@th}*iro|B+AJ!?^4PG04Y0R&oR|b@evb{pi`ZBLr;|$Vb#km)c~2l4HF3Njx-u3HtU1bott`lV8T$;#xKZy*%S-^tJ zH}IKmE_n{a?;Qs0NVAvsl%K4kAQP#{#0O~}Ss#t8kV)H@8&c#cUFk2B!7ZdN+t+Ri z1-fWJpLqevChsHiOg#(?Qm_VYsjS#I+&^XJb1_ko6;^aP!E4)su{Y}r)*2;%8u!J= zWMBbO608niIB2k%jU~FP&BP~L0RbfRznpOwg#{7NyZD$M?}1$iFL+ul1qkWv;|Oi+ z6|j;e=d27&(|HB)>p8BbeF#-fiq3yL;*!jO0pCvn~+!n{t@kCDNt`FfNzPNxLk z^2wSxA>*%%;pi*}p~x}b7;b^VCcbV2NtL8F+3?7$?c4!$9~!{bnM0So6UG!^2b6Q5 zB@790Xa!Tohj}Z@=pj@avSy)}7hJV-a0WymQ>qdkePv~MI}{bsyeMcm1|`HEZsBM5ow>9~%>hJ&`d!e}57p|io>6##ANQm=pKJYNmnkFxpRK^D@bt=V~vJT6sH zUeNedSAQ5#-lRr7icW4e%0`QF4g;mY1UU=sJcgZ3;h9AR_ElF4sQQz$^oHBMGn&pK z7SF<`)0TUg1D9ihGzFgkAv2tdI1#B*?k< zflo$Sm{MMK#db>}<(oUdtS4uHu+C)QpC!FjVr_pIaRhWW$M>pN@CnLcHa3>Rz1-fYxf@sk_KjyrCIZdZoBm*@claPJi$!4VRy~RajK%cXmOwRsLkE7CxZ^* zK|ir~iQcpX`Gd|tG}VOkZszi&NyP*&4=0gZLZ)^nb62dYsEHznY9i`y z6)0=uBs$^Aiej<&NX%B7v4c<~Cp_Te4OKrsI6&biMI-eb96%I}dg~gElr+A!!nJ@q zg|USdp|y{nPA@PMinqJw^M&RJ^BxnU(=n)On@=|=%Lb%ZethPkpZ!SO9QINPMWFn2nYx3ZM?ls;mI6Re!x@rXLTTw^lkWyu1uu|!#6g9e z@W~AwWkD~>Vb_GIe__WW21^FD&fIWm11RXY0?x5O`6&@Q>(&N*$f3XDZv(2Rhk#je zoM;`d2;ZDYLW+Rb*!*FtBpE8w@%4%cL;JXWFBk{iY0fcz7ZJ1ytE+k{m~TZ8tUFFO z#mUeiw>V+!!psEN0C`F`6s*I0E^M0lz%9z?DpRx3)y{p#DD1kkc+))t4}c1PzNK4h5=nI$fEKH@rrO5ii(GxKX{mlA1L&s%}j6|WIc1d0@ez) z-PyuFc|vZ+AaqnJlj9uhfvD(j61vB$OPY_i5g366hYZOv9KC^TopNa!u{e-z!(>i#~cHZ4T?9sNl_FG&?RV&u!GQ0 zRDyYbzABO?jqR=C97^=0+5P1Vw3D_z^QWp!HbWnhL~NXc$L|9r1@(bh^vz*g(eYo0 z^_t|9D@t$Dxp4+_GNTU6RaGZpy?fVqQHoOuX&w2;qmJK~&~b|BpwBtI@9N^x!C$u3 zW8*lcmS~#s#~%IUc2p%vPW!;5pg{_y_O1w_H0+c6<2_6O8(Y6PD7x?e09oV-J4=BW z{{UL&dFgXa*~OeWuRo0(Zu#f)lo%|~U7kC}Z$!TCcYs<|6j6V+SRew|^f|!R&|ey6 zyY~UINKW@~pxzSD&nN2|&`Ar$Yr}Feae>+idi=6nxuHruwMg-e2D~f+L(24GpSCF< zE6dZGFt4)?gWbDv2#odohfjR5#k&SK4};|*|t zDMT||{NrXI0|B8dotP>lneF5XXH-Cywwb22O!zDTnlAGVPOEz!B$Qh)_-VR{GC@_qFi(>!h2;#?B8u`!{ZZD70u$UiL`tf zQx{w0v=?5Tc+EXwdx@LVJ?jgis_$cxnnCAzO=>%vkoI4zg^m*XdL`rIC+`whY-l(< z>l+LJ!YJ+U3n|cAdArMp%v4>p;fjMDQXdXqFu{h@gHMg=`7-g8@NBC+U@?ZNYwr;2 zDV6eQaA*8Nc*l|Pyc%g+DeZ4~RB(X@$HL;bWLrU_!ZQTcIY$8@!EOu7$Q`aVdO$_J zZwfFmac-Yx0aCH_Br{s)9J!!=pyc|*c_|f0mOW#mntRez92p}QXjU}4*W(|vT3B!+ zgPZ}{Hw~W&?>Xr{fqFHad$`S#MR)Kx-}9E%@GTf9>>L59gsp{|3)hS$(N|#aO;x{~ zq{GxWfgRHn#}H2{$*e?`ZX1KH7^;qNNbPq)wOqX8gPcTCQaf=Q*{V(!{{Xtj4cIL& ziHJNxEgA7YwH0v2FrFM0EZh4 zoOy|-xUg{ z4NX5cDQVZh7f%O_dn}3+x1?xu&OX9L9p!5H<+{mOEQJwTrnoD+#sq?0m>Q~M(({C( z+q5qW>Rx!jg>EXY*}j( z0j{)1-ZF@F4*;hTm>m%SCIg*t!X*irS7f>$gA6)z5*bI`xww#65!@*!Ti$5i1f{PE z8~kGkd>jWM4Vy4+0A8;=9`jVnAO*j|tLJ4e8cF5Q3?GGF1i$*9JWA{st&-BN;Qi_{&^T3M898 z)uYI>r>7Lgftm;%YY$;+yub4k2t&`m9f^vOhqvm_x2&=wBiqDky5|r)3DVAwrXpWd zDB%thE~-I?$8Xjtv!j(NB6#lPibBUmo;b;?N}ZFZ=P4I5Y}b;#i|GThUJ!Gxg_VNZ|fIg@<>Mmg0RNRpp9U!oCQ6KE2<%dsm>fqR8DzT?{DJ@&@gXP z3**i^Na_)IDcV=(Kf4e$K)bFP1j;~;G;h|j>BT5M`*PiK%B2I{1vM~eeDJF(-_9b- zMY5hxoII^ClsPnkg4rw$CEwz6j?2T~@c97=>kYNi0?+O57D1Yb+cDnTxz-F6DhF&p zbnhe%-4uPn^0+}%Egi{t!3Pqcd0w$rFvRq1*PPgZ;3r7Nr_a1Uf++wM?M`xF+Z*!C z-d?*V*d}9nXeoqjZ(6wsq~|s9dGC1hFxyMRtK%&(%-cOb&IM9R3}=2mwU!)iyl4`; zsO7tM!%E7UX|#M?PM>J4?Ee55qjxF>a2(edsGC$c-1g|fVr5e1`bmEi3@1nuHlB~C z5EOtEI*#6OSHD$P022~9Dlo)PhXf^d;}@)A)b7SaR5+Sk|-6feMXcB`_@J5d~)gHtky>&5dP%2v2OJ@CJF&#Q|^UrLxLDK!zzEpgW- z4sVbk*0?n1#&RQ(73_X62~I#NzW`%addWha$Y*IWFh!~U4wTzTe*${Rstgsb_PvzrH(_Zu07!I z!y<10cQ!ZH6J(c|=iB?>MX<*JF>Wit1pz`m0e8mm1Ve{;gU&83c{L9`(;pLw!n7X^ zHR}*L6o6EoOE-%Hus{oPyj1F*SoNbD zI0Q%r`oT<4AK}Mc-vD(Y<+ZN@T7q>0AhpEy-b`1E2uk6{DK&>ti4u=*<2E@+Bi%#G 
zca$eoLO9vp`peM3mM-0H=VEfgbwvLF+(2BUI6X z_~F)0f$vU~_-_IDRU>lGgx}s;Py|{y9enFHBY&dw87OBJQymM8W6iOd@N4mpPgp)W z(#rYl#pDh@T;=}&0|#?Ba9sP#IKu$o5k%9*FC)96v0mo+C-hOxgwZ!S9rcEVNedl2 zedV3lwtO#NzgXk6jSjSh^ZLj&c@Bud+k)ig^id3UuKmI2F-a!kV^Eh=5Y~_MSD~Zot|U z(eUFd=)j~79c!*N#vS&2wDeyX4!DRe$iTxd0#o-BtJ~)(GgSy2n6wC%aO``;b(^9G zPih!U>N<+b_pb38xX1t&->f9iulfdWL@Gf%MpS7fwZrZvcGb8*s6}ZDubf8~khl+l z3G;}V*zniOmw0gNS!aM5k2T>_oQ~`>TBQm@!Gq|b#T4(nDJl)36VDh;Bd5*{(KS=j z=Y5`x0)bGGh9j+RVS*x+9@Q~Z(`(-X2EvyVN7XiI8@VwksjG&OeQ@~2lyxzD&UJE9 z7EBPFp7_MBF<1=$0Cju^y-?-z)O zfO*JXwh%_8n6L!_MsO%${nBkJ`_(?N6`+_0NckDOC{AmFZjHY2)i_quN%CCR_U`*o z?f0w#86ZZGH+dNxS%_Jib@tCf4s)hvSi^3|M;N|hU1>-^215Non3c&TARJeV@q~uh z#Fl&nj$N;Sa7uII-fPSjZf+SPy~zInISqIK$toB?Y0<|YxByw%#u2c0;&__N(5PB= zYlij4J)Q=K8a+6+kP51l`@qiF^E9LHtZb|tX=!KHvsTm^SHAI6VKRXe-xy%7<<+FZ zaZcbz5WC(TVFXf;ePA6QMNWhL*CX9xoRwdg$@((IXn2o@^x}ln$)4yFtgB@(tJ&}*%jkqw`h z2&7b;gL(lTnw!@01{#3aW5k2IgVI}y_ZyODI;F%d z_-_fsqmDHfvRs-FNh769aR&Odjhd)Fvq8ScP%vuTZOP~%IS@9pypTf7*+$8*7dg>X z(dBmjFnlrxCjjVqz|v<;()Y>dr#-GA$kFfK2+05eyza1!jBI>s`}l}mDYRyJKhvgl5)el?H-uuX7aH$p7bu(|*lj+t zLH4$R0gt!i00Tg3gAR;xU-m$Mi~^{vheaI=ag^|7a``(yGYw>dEgK2kahQDDKj1sX zIQU89@xK^E-t6iL?a6AiQV%5s`rbifTQ>_t~?IZwEW_f01|GP_0~)G)CvO~+2U#Dm*cY>1;z&nD_l69iX2xP=M1A2!d^0B<{P zIoLyy92!Y3Cqw|`Hh8PCa(i=y8jrf4ta}!RMXr};oU8?Pt10?slY__r+4{jgGqFrl zpoSS|T4oUNw&CKuJ9w{&gEf$sT<;vd0X_v(%O>s&5UJPBC}g`i?7X~KxUvqS3M=+} z;o>JLohjJ$-arq^{s8`5VwSLoZhk91@mg;RQA>6z&v+C_SzoPuhCYEL zdE<`mJ0OSqRjhkb#^SYEc3GshP_@%dg{jodvbZ`LS3 zRfo!lLT5DW@9;Nn*LkjFinMFwd2!FAOtqVtuQLo)6nq0QJ(%A~E_ARM=;w)- z&YN>f1A=dBP4|xw|eZT4F3RYs`0)s2UlbX4~$wpssRgcIT%qJaod9pJYe~pJG^MEh< zB+`xru(cDM8Pb53P*iHgv82uv7z7??$5;}bxA-?V@Q{vyHJWiGMU)3J74~9q3X$&q z^O0IZgvi_k7iUt_bm!f`u8{*rJHv8qv*Y#s9)I~6 z%qcnH%WyVC6;sNO^ckETSjAU=j8$U^r1Lh-<;tlKMZ%33uHQW25CclS#upA0puKKm zt-=zIgmF=+Ra5y}goh^sAcc23$m|wH6Oi=p7#2w|u>j~7Bm{)G2%<3)ZzmY2tYwJa zr+F>G22Bxc+q@=L`3@~#FN~kyD0#p?92+xgVUSQ>j4>qqjwo^AZU7YQ)R@0BO4!AY zOSyqbd4xz5h*|NL*mPQv0Qd8UYCvT7p%6RqfbEbeAAm7gk5*WD+U5qccikY?o4lNh z$bz=huNfDzyj~cL`CL|VI5Fk1#>uO*QBl*@JxYaI(63y5;xxNwP?MCL`@$Ip(5z59 zdUu98QrN;7A+qlZo7#TS2J^4uAFB)CdF{Y^zzwKPXp}>Ew8aT-Xqx^eHUy;j@!kS} zwwqPpFmgMv0sTWK=P{qmFB--BgK=3I-;1mlK4QW{WBFVrP(fe@jrGUQD1#u`AGYcc_=k89m`P28s<6 z5;j=U1T-Nbyfe{+ADUa5;|V9bakpqndCP^QFj)O#hGx0ljBvNSr=0|o$y@c1VFje@ z5D$zj=VtU4$ZY2uYLL=Bzor*j+Pe@qX8BKptrb4qW!wiT>maIa9q%2rBh}qmQV#Nq zt!OSRS>p$T8Z~(r`*E4YQjqQE8`~%u(zU#gK!Xvhr&w0ButA~jdgGq)C@BOY%1CC+ z(Ja#~p=o#bfCDCSm>rcLYoA_>1ON~d=MibH;G7$q>zrcrIu_8l9sY16R~ZytcQMHz z9l-Y1yYB{XEv~MMfSyDIP4}Viu5do@cVKuF;FzM~5cciqmdi%Tyq3-6X`dzCbs+6q zfND+>cRm4R<+(Z(x>ycTNk`6D2!EMT8#hhKfhPwZ91V;(XDo3I4dLtw$8UJE`=VgT z#x%U^7$~#=AxB=kxcCcjUvanV9VS;0?I~x3J!FVhcC3nT-;55?2vFd1zZ%VE9d?aD zehy87k6o_Wl6b&t%>3YQgD;DZrZ9cJ&%0UqwO`Uqfw+J)4QQOIMXkL(tqLAr_8ky~$<5mW^&wTn30-lOtiJ=Xw}t1u;Onta3`rc zvRFPaL}nQF3X5DDz=Bu^FClutD5F#!wSNvDP@=&(WsVfnxZ&=#)+Yi9ZyQmh$&gru zBT(Nw%Hh3-2d;?cc%!~TRCr`3|FqTkZA~yKCpBR9`jjZr1gwdr4 z&5xcr$ZOX~8v^(-No1~?wcv8YrE9u6E7^jEn-!Ygb|Zo$okQm@ZduX3$DhtT zhX}aA>{5<&f}tG_)me#U#l|0n(esIUqk#4H0zR-&7F@3fE^?ihPviZX)-g{Opa{m1 zdc-VDA>IjLsGm3&quvN@3}?mAG`eJK?F)Y=D$vSJ8$5cRt^nPZqfMl9g;YtnIJ zb0J%qD?7!oB#`;_fQM8Y1f^YFYv&7PXc2bi>((|lS9Zzod3?{Ev_rZ#CE~DU5DuXb z7zKW8f&!1pFeo!-qJ2)!dAyMsRv3fN8(E~+)>?MO%gC=S9BM>?s&j?NdYBg_lqs71j6_Bod262G?%rxG5 zoZj#?5>D5%3-r!R*qYE+4Lykn<@NJD;XDa?S;K+G#BRe*{9zys4#9rJxb0?%M}dN^ z(zab-Z4xjPZod)qSF!Ta!DrTV~RoCQWo9EdLc4H4@JU>3Qkf?O5*l; z@~Up@CJzFDYH*g3sP8l9yhh`XY~jb~WkQGxq05tEh?uGQ-Ve%Ji=5n0T9U%=&kk`G zRVTIj#n%K)G(xP~I_o($Sj>hwI*U| zFy+>K z;bOG7goFDmm;Lsm8f$~I1V$4tpIIVt(dS#EL#Jy-N|X{2mpNy 
zm7*xo5dDMRCLnU+h&nWvBV$4D2VNodmm9z>W+dV#jpOBjHVPZ>4&#l4i=bGV;}rJl zphHaaE$b7CuIiE11x&U+O|QDE<;E(Ed$x%UrpNii8P`CRt@+)En^OH|hz&!u_&&(jdmC!2+G)k^;75kny}65Ywa*-p7nV z+fcD7>5kla$hk6Pf1+z9C-q;PTJO=ua96QkVTVl+?R;W+4vpupIa{4dxN<%|@t_}7 zJ&pvcXoBKCwT#b7(cm>OALVBX_nQPkATuC$vT)}HPCYr$5|9Dj=9GIDEgfM+%XJ6fStSJMzBX} zjX^{69Sofak5xOws1=uH-c%HDO`LhlmykG%{$KrA4A1h7g zMtL$v>F*cGk+$L73r8Vjs2cWqW4o{c7S^m0VU#9UOE|gib z?+hm8YMk}&8gg28rm|`zV7E()NQ8Vp?|7@kw4m-L*@{WpUV!oAH6qH5no|}I&P^}N zfGDPbeVI@WFs@IfPIHk=%b|D2Etie6JX!+_)j%h4F^Rf+oaNKs;e38S}#d{Na=W zM*Y3Z#mfGGjFmeF#z)Ixi^(3Y2m2G4Pq^;p@DpVR=^@@9>r{?vdkXKY5x}a6u^&zq zGj0MPr$5^OAwslH1>S2xVSQn2e|Rr=aJX$1;dsUqaPzdAcB1`nonh*snoy;7%cG7Q z6dcLOBlyCmL9sSN?FkSCmZ0b@&P@#~C|5hYKLoNHZxahKOw1H$a_ zbi3C+@Ge+TY~i$7qc%u6?LRY&D8RWQ=-Y>0vFuc+fkKef;lYr-9mjKydvg2fT~X<& zSb!Yz==mk0_Oeh45uRK64mX6tv}TGNQal*_UK4wyytB&UWdn+N=Nnl9&dp!9c*`mk z0#A*fEpO{2@y&VJI0oI`XD5 zkEF1h8`m6R^@^YjAx`z3#!__w7NfiX9N}x&ym65M>WUP-2WM=`P3=nhCH8E<Eo{ToO(jKF%1Z-hx{%Xa~Gr5 zB8#*T;Tu!VvE-u27gge=~-EssAyFm}v%k>}NJw}yLxHVCcvza~5a zYXDw_1>UgmHkHsK0W4jojB+e@E47%g{9~rWO`!6nP7ULO6(I-!`9x!Ke(X!kSr0PG z*`4q}1*OqrzB78_CV>^>Rl*ns>3uEZ&LHF$N}@+vj|{_aB|xtN8$+yEL-(4EAG|vl zOM z(%*Sh=@<`$m=yqyJ>W|LG&1`5bC>@1QFQ1(89$~|BZZ>hj3HIPx4s@QC?du2B=Y3k zE12~>{{T1=r)7q`hqJuVIT-I#bK8d2;YbS()^VFNEs-e)Wc*_5phKoOfPAa;-`Tut zLQc$GP1&i=E78J3cJ@?HhX;EluzCh@_m}7pX90j91=LQx zv9`?sTUW?&<1gMtORB06MVzKW?FqPZ>yG-)Bjm4tK&}Pb7o=ALUbw;-B*_47@?P;; zLM}BrYEf`~l#w)~w_Cs#r8On(HHH*)43a%K9qx>pg68BAlb%d=n~dk?rd&L;)>lEoLJ6j+YMjR89yE)ii+N_w8c?1BeYXLynn+O|Q^y#{ z14qDq7ra!;tq9;p8{-miuU;d7a;r0tawA?nVFEqG`@(H(cFz9*^uS;q?=&IgNI+L0 zQa&r--ZR=$h=$FIkIl_Y>GyyLY~u60pq?~KL9Z-+Fq{K*sM?d3FhZ8w=y&(tI5R|4 z)Zq*77ZLfw1D%!UFC z0eH~=Wt^Jhg`S{k(HD9d!BNk*VUwrd}3 z=!_7a&UoV*#pqc)gKm6epL^j8naU)*E)))oomi^}64CfgTq8pEu|b#rrDdEvqXkYUn~V{fT3^&J6k zC&}P%01_?(6i+@!9)G2A@7Km(ldwSmd}Gb_K#v8(>RdsYRtcJ0sb{!98a;a7(*!t1k}6N zjMUS%)HA)v`f#RHa)%TH&CVBD4KHf`gvU}Bag=-rli|qgP^$SoPu@ecxNuNCIJk)aZR{GH>Rd1?9|`#n7b)$8 zqAu^eRKbBkPu-sJdIk-OZ`SZD4$6ZHH{Hea1nAd6*=|swJQKb>kK-Ur2HJi<&J6@X zbvc{EHv-VgJArm`Rf|XA=D=+~|2 zA4Y2cVu|N>6ogGqi+wvWj*>9+x7$DJ~5SyyLBpm>)kF+>yJj;5{xNo>!4i-jHy@ zb~dn=<#Ha?gLoL*m7r@r>drjr@5UlCjEbN?o#0%Ii1}_w(qT;ObJ7rb zyM#kWV~M5rg{c4#HsB(HcdOPrf2Ptu1H+VR z+-x3?#wjUuu!3`%N2Qa&AB;W+r1Rap;Q2IIux8(Q`sI2T*8FCQfu~;G`**D3K$|jR z?vo1+#LdnVRf25?&I40GM%OCPF-Nx+_`ZN* zcLpF%(s!&&1C#6!dqI9N*ccR#4z4|_JrQe7oJ`&5QZB`H)p zewfnU0GF-MJ>-aqK2wzb`4rRkHv`)7F|abwD_{}qb%F*SMwFAzTo%O<=gCn_WKpm| zpn-X7tW@x;MRj=ZoI(`usyggWJz~b$?*N|igNag*66tL<6s1J*(VMTyXc?~o`oTt=3P7mkwfG=h@)I(btQutz7^=j0++V z(X?hbz~O^Sd2027bm~vRO=R&gRs>RhNO_)boT3Drkk~D}9z9?0n!w z6LFnzalsHCkL`-hYzo-9!+PT>N>7mMl3bm2P#bL&?t@!#cXy{a6xULmV#VEEf))4R zP$&e7ON$nl;v`71LUDow2oxw@obTq&ohx%^XZMet$t1IrcjtMZbI$W)wY00@#@@Kh z{B^y{sAq%PgZOwc827jIyiqv2eoagX;~me}wju8vm8(pa&9bA=0zI+>%OwhIS`oMqhA*HNf1A>v~Q=>(*0B? 
z+|+hQ|CYbTkGn)!HSvX==qH<3BAruOxuv>gFB~RwD?+~}Of?ja4NeJs{$2$sr4>>& zQDbX+NoXW(KPY*v8IQ=iBAc*vBIIk{(w9zjJYAO58q-N85`_46G^6}!)SM_H zjan+*+!k(FTNGt);+hHssaE(9X9wK|=Ek?7Z?cfUL!+^)?OM1r1(rzbcxxB@3dLLC}~ECN;@Z@e+N{#Iw4s zpKmUEKlt~4B+OJL>%ms;VBBUg={OCp&TK}S1U7%NX^7Ta3*t(+gAqL>PaevB03(!& z@vzSAY$W5BudVuM9w>m{!v#kuC3Mc1zCs8xksQi zEDd@bwR$I{s_B(Y3e&p0G^^r3EoUcL^h`rCbUbR@Pe zqzSd*7isBHyebMI$VQyU(JU^EfBU3;K6J?h=3hX*;qh^Aj-9Z&){Ddlt_x->2s9UY zw&?B0#XHhs52k5)KU-O~-nL{;)xr=$xYz90ou~C_;G`RhHw$&vV z7e20mQ5K}U-#&m~2^+E6P0iNJ;lDDk3(K6Ycg%)rz$c1$7Xg^W0nG`VMJk@d#;ZPI z!ptm*!WA{5hF=I#B+pi3HGKKUf_MD24Zp^5WFR;fp7hSIM7u=Y{8^H|z{Rl1?xKZz z7RsA|-tPB`8TvZj`O4Ke8(#_HJfUC-O1b1F1R!tan6=z@Z|I%Q8&QJWv3tl#cDtYJ zcNW9?%DCqqBLkZ)Q-q83sr&O*$QV?}Z_C*w0 zd{nz@aB%LpYWZJq!x;{zq%vG*p_LkwHdHKNsb!lr7t(2}_Bh?rZ@=y07II8Lre(w6 zxysbF%*7Xy`15YOQf`i*4f?7F(>G8`mJ+ilOEX_xWf{nX(kt`s#1k@9dERL`keb=k zEt2`?+Pcr*OTTJ>=j+KJgT8q6zI{!|J8vQyKKhE=PxHJYOE#`eL%}?4un&0c37U_uqR58kE0$X z9gQDLJPx)tP(AM1-d;PCYpQ6t!Y>9uBv`~W+n#Cz{0w+k%_|OxL ztO+)}ITleB5~osMosOVV&RPRK6v)4mZ{p8<$oA7?%T)Fw08aWtsPI39+Q|O>jnB15 zcbgnfro}FBeRC|p{~`SMp4Q{Ab;HPxug4Fx{F&;hjUpAaLDzasS4W=Y?x*cJc?Lu1~9?m8X$%9-y({mtYc8p}^88FP#JFW3ey-EO5{&ircS@_l~ zJjHedUk5^Txri!z(8_?}+Xa76HJOehjtO82Y2$B%zWz(@|MJgPY{%;EIAJsp!s9ue zQAj-~tfOjp>K}%|sqK`8-uMlfbM;vh)jg!-+>MQ``B1#zdJk$4xXPx%tc?_LE(U zKEJ3DT*tCS8e9uZ*yt?hVaix%73algZKA|vMn2L0gBH5PRula{7faTkLDEsZg)LVp zE8@xm>H^a?g{5<@F;|v0uatg98OZpVq%h6LGXmMv7Fl)EL?{~6Ln%1J9mU`(Udass z79nVtKMW)|a;3jp{N5$g z@W5-Ah_Nj8x8Gz?+D(O^_u#sJDiNk}oy6(5z6(q}7tt`*0&t``5UMhFv>(`oy&a-r z#@GqW-Bx5K%3!UZVMk7hn!(*nM;cwkj{F%*ebji$h4T!R6RHkvsY)-Ll8y9@v81*>oI(PP83qbmN09s) zt~~ta95AEiW93!{yUN67`=_qM3H}nlMUaan4?IEwUE#p5YAx$$w2N`bZe(#uen^d# zm#A%lTqb`q9K`TuJu!^l#)B>l4W#ifyz`E~Szr9-lPS$8m{75)&sl158MJ{7SlQg5 zn24o>N($_tbu0Aw*)ua8pgE%~Jsk{N43l#zV+@+P{OyT#@p-qkq?8E9KIf0^7e2%* z-^Na34(>xsqWLypifVW2x3^qRWFC;YawrN1F5fj^wZdFv0+_1j#y_TFHZ#}|R{3Nx zB-!FQT2WPeWMXPFR|tVV0C=P4JUM6$-m3GCc`dM#8~cbeL%I8b=>ua8_1E9e54V?a z(x&U2ptB`Bs=2cks7-<)o(eRLYY{!%>*9Qa-CB1m#1>pk=In*V#To2eDDu3+lqKum zS60kr`BhL>&PQevdsAyu27ylL-m2B^_TuLsWme2W=oBypKqX?h{3fa3^3i_`A7kAp zB8sz~qg`!aM2&nQ_;qQ(lK9|>vUgDvl@OCsHBVGBzSMXhX0|u}_E}5V`mqd}F8ZiQo$Y6l>R&pV?CMU{;AGgOYbWuumy zLl9bNb?oSTUmF@4tQdHjS-T?9&XV|-k~{HMbBJrHq7{PS+#PeL zwUptK7zK7idobv8FCoi4YhvkGkTqDv>~R<0t?v%Wbe>+}XPb1a%8DLHe-2e-u6}Okqcz z=#1Idcrvfvd_&j5$Uvo}C-9*!3sprVrNU?nj2n*;vJ{diMM1&6h>E5FZimbRK|G?bvN=D;m#TU$M4<8+HgKZ9W$M{Y^+VqRPwYvo`RWgR(s{DIGz7kgd<_zbb2y zkG^5RZ9ho`*(0uJk)(!*<|#6{#UYTr@A7nQMAaQvpcE{Kl2F#B`R{p z#SlgHT*Cnd@_Wz(`3%!aXB@g z(9E5{g>AA6AEw_GgRup3?(9%&Cwup~=u!Ar_dDr2iYbgzJ1p!5T$taz%K5&J}m zepbS%XX*Fhqli}2yvnb`dO0x0i#=>tf(+_*;aH9pHxl6V@T-;{K|vv*+D4A_(jdEi zO+BnB!ZaK7Ux3EMVo2d*EWBPKO$OxBnE4YL<3k4sKLg%DQLF zh;X=Rq~Tt6;yxuIhZZ9*;;_ppf8^*rf3`uzUbd)NeJ2_Fq#d;#csKJJ2WPvvVkhm~ z9_KzvKf<1myd8KSMMa`NVHxxz+$KoK2d&oL@rFV@@sYYK(PLfl*c7fTW&uyq5Eu~` zzK)@5hnmjtIeiZKSk=-lCYDdRliD;vg* zuhMl0j$S9R>!n+2Er)PAU>>&5EbVap>;Bfn3X)#)5Vg+7?Lsf>K#y}L!~MCHC3^+& z6Dk#>{a4bS^3j)VkJ0q&4{Y@Hk;xsIZm#m`ELW#ZSKp!o9|o@Hs$MoX_RrkD*h_P_3Lat!d^#hS~EA%3*7 zxTr4OkD0i(N}$3xNXA_Tk)eheEIXzOCufsDBsQy#R~PgC>=@9ZB{lQIE}hBTS3tuW z>}IwY^P{})CRsH%O@UiU`ix^`DTlXxeW{r&D#to+Q=po%*sD0|ml%^SP%Afrcm6Lt zQp0cn7*rliSlu#U?Z$dO@JWTV{P&ndfhES{09b%~z;>brsQpcDF9W3R#NKhC4HMev ztvgIx26bk}i24yWe2EVa-^=si)6}4cuO!Ohl->r$D)HxZgao}_!c)i-m*4Qs>Jh-1 zu%xV$9z`uA@~K(!-rP}&4Dz5Vd?mag0pd`_KuuH*?EkJ2<<;OSoz5(4|D8y8Jw12P z^q%oHL4)Xh^%VbP^ZV4;2)PPMsM5C;xg4~Tj9$H&G$2}~&XE6VPAA)rs(ftnf#40_ zMt&o1;qCKEyWy)p2osXI08;*U@x@NFU6`f9R7oQj9s&C$)xLn=Wyjx+nuhVK?v3FwJvBO zooA4FN8R#ac#+0CiZVavaKdJro3v6D%b&{YQ*CU!Llb$ot~xtlrvm}r!JBe34=3!? 
zVNyAJ9Y)BZcp^&U-&K1R!%+8#j%-N^hV$9PRm)1%j~B~fS6@Zg(iJ< zLV8Qoy~@EOu}#7nQ!rdB3QRL1Ha^Ccb&ZFA+{;<5!nI zr^Xg4jh`!+_4Ju2^dgfg6af$|sBAGwSxDR7OlfV1t$Uma`iFZKEb%}iT^F2gY)^#G zz3sbFXOz5d={_40uJh(CnzdT!z>%DM2iH?NmLj z3_a=82R~vW9T-iAAL7tsYB=SNKt&o~nMEf(5PemLrEV1e@NUyN>-gE!?6A&j^O|oG zE`nlqpZYe`N>?@Ch1@2#2FBQI2r1Q`+GZZ_ynXZ192<|IVa!j?qImBcG}<6);qbe( znR&cYs(>ulD2^6PV-bemizbHH^+$oZzL98h{@Oqe^y(8<6vTg&D??|7VZ*)n@zlrW8G; ziC8Vp2|@hog-FnvUOW5t?o}sek%698z|h1D=ByxV0;x~AxAIV;Ax@&cdTYxchkviF z^6n6#{Q$8$D#l3%oOO+wfbD}T%x z24V3H#MDI!nx4Ynl9uuz9liHTrD= z29hyMBVRq_L7 z5QvFm;$uauK2gsW%V9j1H@7(@BQk^P9rKFyr!LAikapN+BA`{{Hg6abZTqxED*k6R z<@(WELha_nIt%t7BqdOJL+TFWQp)cPh*}h2{HAtvPweIC4gz_W2%xhBV>=`3Ws=-g zp>`vO%*XAra~ux&?cjH5oqplk-iYLR2hAD=&0w{bcL z%okDYHDeQhx2Rc5?u7_A@GFt$jA(3(=f{3aEh&diX-EIQExL1CLV+2d-bJHr z!`@KIdCD~YZc{sbWt{B8850a?!T7CZgu?Mk4R>)3vn|0JyTD~wkYiQ`x;F|~9xD3M z8|`$=tbq4v6HfFd49=dwV|+oI2(?}+9Q*5*fRg>*%u5jqwKS`MWz~sML?w~x`U)g3KL2jZNe~I2Or`RwgHXPmN5&G}ROvk|$(c%K(RG_%Gk|<4)&K1MthUbcSt?f{ttGUu9kafU)y zXh1+wai}I*W%@@58VTyoFpz^>Q~I;jabMd4rz}!WvPZD|c=Tzx%NrRJdX<@wmcm^B zY=(l)TvU6h|1U@0ud@(|;M^b_yz}B@=Co$Y^mj_#vByx{mpsW+Rk<7Mi>dl(!A9|F zWH7y112rb^onqIVj}7^!%RJv|F5u|zqAXoFC!KSmdDX18!ek0o4OTaO-4kMmj&TNn z>LbEP38LHmirkHtS(3!!TQU>~;Ngg}$SL}S3s(4+hQF%{j;xUy2>5hYAO~8+RUEv@ z(=0J(1Q)s`MTQLiwhRgR4-gWju=4s2Dl{O)N~A+HhfG@5fd;BGe-9m_sW>A}!2J68 zDlPA0jt({5i$Amap7YuUS+eNZC|?cu`7g<5^MggafhH zHX+C}a6#EhC^AclbJ&g(@0&jH3K9$qOk+_`yS^KYE$({!ko1f#{X%~x0LK@|LgZ*LXAUbaJsT{XAkE%UI4KHs?xN6!Zh%^%+6uHzoMXf zU7{AKtl#?d3()3)t*&Vpd2U-JE1d3&WRVF3HbtoNR7rSw@AowxC5loc`RRKk2i15c z8Bz>jehimsVQV(k#jmoO@I^~b)dC>rhMjbof&j(iLs zb>8gESpw;0@Na~njd4jx%3dkJCjd*`SMEw1I4Hd4hwg1cez3Paj=ft)5STek5>>YH zI(}~mvw>pS<0d#oyzHxfthJ=A2G7@Icu?pFnNX?u-1&5g7e(FDUJMY1@1XMQ@*Yi| z14CRvpJu-0&%LnT3K!<4&P6_V%xvFh6h3xypUrNz(oNTuF#H{Z*QFSnp+fI-jSCBDSk2Lu=v<-Y65s9h4041q5NS^Wj<#F~OsmH6@)=FyF`XHxq0N`5(N zH0j_EsZEED0`eEH`o-!UcZ{Y&g8DMzsm z|HvD9M-KT*sY%DNY3llFoU0TIpZ@?!KNrVDVXtQhuX|t?Jzvh{d`kSz7;UdvKaApI zN4-x;l^hxrLX{L>xr!F4o~v6?4xFCJY?em`j=k^nFtMYRKu|4{5YsCX74ICH_Eo-> zS@)H=%_;z`_TfXR=1tX0yo-5bg6Mv1Y#5p-d#Awk$4L8KoU?J2%ksj|X14DMfJ6B( zM`l8F4vMuZ_J!COS|6?uB5FZC>*#4JI&svv6sTI`>%A4)Y_qg9bY()h?&06B3}sfP z&l%3=Sx@DsW)$~dT))S1%I*cM#%QOKzTX>1sfk-(LFxA?38FdKT0cgbu4IcnJ4-}P z^;xGu68PQq`ei`;9!QJ#lZ7wn7k?A@JLXdsfWEcgXSYLjnA1>dNl+^*r$M<5hTWya ziF4>5BNSJjr~cQ5&7gcc9VQIoHs6e;xH%TgqNNsWy~-b}55?>KNC*>d^A+uI(Udu( zmKxU-e4U8ku3%eq*ByZ4&7*~FIdyuQ?(~=a`61F>IQWajs)A{majN-MR%O z>K4`F7Y{gAnb=RJ`ib?W*oL{N8tsKz*CL21{itlD(^-GQ z_BOkU&DW1Czz{-@{=pVCyW(@KTA)H-p)bf>V(ULZiSU0&6dFbi2OXa*lvwX5A z;i};Z2&s1ticY}C8`a)RRYEDNGryZs;kHxJktuM5*W;}bB#153j_YB0W>8gdpb1rBJ<(}31)>J{RyGH$}_wMtWWmTzoX+|AN}I*+P6D_ znJ+3l2>%c8CP%k7k1?7DuMxK~s8{;I+H<>-bpA+jdwGUE(}pQSLE3RVuBri+9S}C0 zKcJV61>t}jDf9r3sae!1r5PHV(hg$da&tc457W#x{RengHhq&f(}@tb;0hznlQ6Q? 
zJDs-E5Ic5}m(;@vYol#te&ck0$#C&6o(RMBFGn9dQcm%F*M(+_p#1(eFHCrCk*`5p z+@V>BJGAMGDHh2;q@I8qHu_@!TuJCBd)@&{2QZ9{@bMHG3C`8D3$4Vej6+$X;*%K8Ca$FTta&Z8)qiJ_TWk=-t^K{QijJPOpk!T4a*_@K|=C0Ft1 z85N)#hEMc8^BY2(1vB#kE=V4MX&)e^P@jQOl7pyyE86LON+o}aS>2P^*M|Q&2bhd%RdMoU2NU0wJzouS%O6ib|0&d(-)@^ENR7}DTHSCJN zoF(06`b%~G8+)g6Qmf=m7n?*zbw0qqO^17mZTv+ueG}Q9yCGi<9UN#>e2X<+(sFpt z)0EcTQ_p;Ox-)*qME_A%QZiV*l;$(^hWD!IVA)UX{C73$~Cg(xO^4;574(gE?#*>tmaqrR&u`~#BM=q z38mgvQ{T+64*ZSS={V8k?ZcpO*BEvoZDsK>2PS}p^HbyZSz_>_jLMSNVIoGu-q4kD zyDu2I>HSumi=U5Q_qfex674U1;{R1x` z#0BIsH~u+3*_W}F_n^u5(?_Aw#Jtt`eTLRJ_G7UoF{2NAPQ%2aSM_lSw4KCgm|eC$>5ES+x}IhE+YxftSM%89g_&X zh~^$F;1_7`Lg6}@i5mOsPO&MdBiRP$BG{-jPGdgbGu|XDkKd995s?^%>ATqXikClq z^P}%!*4zRMOcj9`T7HSt5<@okY$RruRgV7y2w`4s8=c%m&w@3ddKvU1VCRBHNm!*l zP%I}R)~XR0-S25L8HY~`3{w$8@z1tjbdk%`IFK+`GP?%1|-&bb+P{ExVn+FZ`IzbphMPn~7%G^eT_3S0+#>IkB5$?#%{FXJ3sP zyV<~Y%Ox&cGUf-FD;z}#+htzAB8ybS-$QYxct%JwyNj5eKx~Gr=B>>SUa-5*VhX!& z(s=%ureIOBP-)EUXYY+cA=z`Rf$%-Gtf9EH-K%HA)n|#1R~cF3(jBM4A&3`eDcXUJ z(cIpa6~(KodiBfp_tShCEF1g@ib`#4?hk`GF5m(QH~-L4fek%PCco6~*hzKf3su)) z%n>9xRzt-`&&psoO7Xlo@xAB?4EJL6Bg;F9q?+uvE!xRq1pYlD+_X8=QQhs8tiv15 zb)Y0R9ITSfJ+>6R$uWVd!|A=)v~;ASo25+pDxfCTq>mqug!YXX!P5qINgVSp~k9z2A_1&Ckb?pO*4lI?Y5C&Zq9;JyY=yH!}uQ7js8F zUZ!|%spt|g97YxXb_@XGRn#1Ny;T^@04;45nJX>D-=?5t6(&7N#(GKI8DoM?g&J)n z=<(}T!2GeW2@P35qWLKQEHp5u&9p|re&ggO^e!oge1M8iV+e)a?k>O2M~F?If2Wa;~Mgp2_UWe3yiD)Ry^p$^( z@%W;HZ+Zvlj&VhY0#I7~0;b>uKgcD5=1C8BNeDU|d6Q#glu@1rkuUFheX%kZTAzQ6 z6mM7}LLq=PG%%aSa(;SHc6GLP_Xzn{`I-Kn1v%jwzDUN&{~h2!=;~kMmrQ&b%wx0 z{KW8W;pGxx_?U=cTZ7`zAr;j%ckLh@W~?UU{zc^ahhL)jvZw7%3%<-r?<*9IOV+i@ z2b+UnVb7?jp)iq&+IIOKr?%NdC*xn`Yk0Q_Lq$&Eb(q?Yv`Czut`zhMDrKUB=kM%B z+OVGzk;~&vNHY$Q!-Fold|X((mRS-fW3Q$9R!Vk1%#Mp;HgRA>jEaRne-9I4luN^9 z>`f{q(l>JMj!`Mqkgy>}Y88S${xxPPFrtIwGqqhwi@S}gh~Z&E+8O5eH;Sdw{8tde zO1W!mJKh_i?y3$E?y09|#|j!Y!yflpaPg>lT>3C;2%UM&>LSIl_WrJekp6`1XFeD~ zpYNd7D@DgIeOOzC^z`MP8|&|Wt169Fv(9|Za7)--?gYhP^beoB?bL>%+zr|5hzW7v z8jr1xl<5S1x(v^>1)3K;GnPqaDH`@)S%q_9s4&`QJ5R35;2X}TmAp*;w#0+be;U3e zJsLyC&OAB)2N>jUi~e0J`{tm7)tk_pgR885QacsS4G{NGBB1SKO!(9^7o1j`)m(@e zDVa*X&D-9S^P#Ky?;Vu+|G%*qjA5&p>Ns zy5y#sP;NMrho`>cYVVbwf3xFq45iLR)Yu|r>9JGD7HH9HvWo0_%nD!MXh>9WKbJ6s z?H5?XcPOhn=+)aapEHo_AlvItHM~*6R*EQP4cl||i~SPS$8i^)#yR%gb@sMUX@tq*x)5H zJ2fG5x=l+dkvByh$?H{oyEc&%#IJE21y~rG-;+FoYwEcr-<(kDs}>>n5?x6p=EAyY zqq5edTVT4}OgD8-`RT&ZO&T$b^|fm%mv^Pp#-Wh z&;#JKt(p5#c$%OG!44pP6NR~W^+_W6)zRJz5?2)y`NSdPGY|a_ps=KEQSuWRTX;r? zbi%-qb2Y=^XXb3m3%Xt|lvR-^_C4;OsNp7}~i?gypU8V80RmRll%ipD>!ttp*DN1!;xX9Mq732>Sj;NCJ~mzpXzB(TOR zxM+>NIo4mh%M7=iX(n9>gS4lO5k{+wQ+qd>u7DQ5M5>oonIWBiPOBgVxD2t_6L+!C z$!VNu6D0MCO{o;Su z8s<4;nPE@d4dNdjf&5$Q?@{ntA#8p(dzlWTW~M!TnJ6>vP+;AFsH}|q8EfDeN&OP8 zV7RT)8#Dw%)&PxaSr0PhrLk~&G|=fDs-ob4-kK&eNu)=F!C}G0H>=9`4=?9G0B$W< zyjZ`_ZDHmWc5uIi);HULy_-AVB4q^D6?ISKr7DG?20P}srU8MEX12tFu!l!p^O>IkBTkd_pK8g>6rJ%{4yMD8PA zoc`ylfw zxGhO}jzcNp#r_HL!izlJ|IfMI@fA>m$!1?)oJAjw{M7}>5JTj)|NpuEuRH(WeM#?u z=ojC}xUOG@XPi&t2V%@koK*h-rd7>oZ&P{uxn9rr6evMOP98u`iyz96=%5sj&zu^& zr!zwpMfK~Dzg*83sh(Z#Xs9>1w2}42_&Dgf9vf#cvW7W|WWqvD2(IQ&bUyhAyQ(va zncN7`5JsuDenHWX)(|e8Xpfcw*%^^Dsquu=x~EfSLL`~J&r3Yz}-aWr?FIV7TqW7zUm}`_!cW2N+JtIJLONDlB=FssH+iTzB~&W z=6c(cR=y?rfZ9!bG9Odm9ni4`vko{k}2UeAdb@!TDWFGx^`3@nMA0^Fq*15O2 z?8K}q2+}B2lpzxf+eGd;=0st1C$X~T36DFnV~z=Y1@Z0%aImlcr>|Iv^x$K%gynax zvPD~x(|{ze1km2IL(Pi2mTqOCwghvxk=S|2XVY{AlWeVE1W16ON-(Bl8=e~0U7?=? 
zw|T#})Y2ubRf@%A02-Wkr|~auTgZ1@B(A4JmfX6@o{y z0(CmGIdJ~2J%rxme>nXQuvctFC)Zlg*K5_f-#6fW-0{7P7DMOzy}W=%(&1s@v$NnG z-M$8TYgzj^03vAVNI#HI9&MB}3Vp%h@Y)pGwk3%s6L0EMwr)j1G9ie`(aT2e#1L8; z_jLPo`6gd54LpC6ZUP}q$kZt_qPmCCLhkU_<XfnAS;-YQuoen);iG^F|b29izz3lUOb_ud|EAg~B@XUnfk5U=7YBt*V>@Zxg!% zMYEI(xsR#N^vV?Xj?BYt?~8g+s19)uVZ%lm^W~nQn948ClCMN8NIjN zznx3Y4bx>9`gKn~&8zWWWzmv0yY*FbkqrU*a>HNan`Ca)+HiVa1+S{W5oW>}r}5`6zGX>|*!Z819HDVqDYbASNJf>-FXO_d zTN49%y=TF{@_|R=cW?;tuu6RqJza$_`JRFXrYC;^v#mRWSjYwe*zCs8m-x-`E9+LI zSP-0xq<+kR%dg!>TKVGq`w2%OtaxD7VASw@6pLn3LA+p*vg&`oLOePHA~mN8GvRm8 zFL!iy8BKp}d*#oEu={rPa|KE2N}nSkUKd;S>bpXdU=)12J!_eBs?v9g~n1mIJMO)`_gpy=Bn-BZ+u;B`OZ?7vM09R~YI058(RZ|6FrF z`|k^aggv2q2PV63fq;#?Ts54I^7|q(f|4s^nR=h zbSk7xUi{_iK>y z4=Ba|!TQ|#xs=W%F`7awMS>>f?YXV*w?>C~lE-7v&PtOPj!amnfdX+ANSy#&3lOI=wo>pK0nbelx?I-*L*}U(x7;vA@Y5zcS;+ z3B39qq}UL$q?q1r8vqom-=U}lV9##}T)FM7O@^+dTqL@AdAbE`{c&*rSXH@KbT{^s zRnb8qjLT-uQt8**zQG+OAw zIo28MfkO}d(1GV&&v%^O$<;aAL@hEj#Bv$neLpBf=F#~dfMzVv_PQ!`WSdo?e-#RD zwvAIR_g+0Fxg&4rhy6Sl8^k1{{LZ>J=@-IJYUh#f58gu{oyGkJLb$x|lk=ms;BhOb z;87vlA>k5HEp5LQZ<3zv&mdnBYzhB0WOd*{65nX>Dhh}2M8{PN=R*A^J-_=C8PglL zEd)d4F5v_I_PIb4jmU)$nPowhxsX!v7inaf3j6mQ)iyEZKpM8R>CH_*D&K*$?cR}8f(*Zqn5)jf^b}RR5yLWl<+R{;@9FR=drlxd^m9ovA4Mn zSutwR3<%HZ(6w*OmXy;uB(qlPcKFcMj#_!~2EmEVvB284#Ado2pztMoXOlT<m?FIPgM}T z{G9?}0(2Y_>-bB)Td^obfAceous*a$r+TVUun{*=S}%G1DK1*%ZsKca0^kt7hM56& zJg+a9u71HsNV#DYN4J{5OmcP`>l!R!!KsI2@x6z?r5=W*AEFw-;lbH=TyNe2BMyGb zidyqO?V_*v2}!C@s?j%bqZ%*WBK>P6?dC|$#5Z5knNP5x`5~C~$Z003m5ASCH*Ah) zM3>dq0OOv&aQg~aX~N6hh`LmCb&q>}$nfu@q1EWSiO|m5IS+=%G3==cO6?%uOuw{Q zLT}RBG-V8&ZuecczZ+!)*|{$Gi8oGJ^ma*v#VBJF8YuG%0|*l4*8th~d-qdzs-ylE z)f+Kf70F%T*XCPb*uEYo82tjXN2=*6F>{lDdDF@N_dHJzwaP_c|@4GX?L5t1m?|>tNzlYCp0T zhXjCnASufGU(Uj9)~}wGIR=_&2hI@t*AI45dNMitA(7F^`aGo38DGrw6zh$bk8|&4 zO#Po}BD*dl^`Y|fUWAEQ56=T-ooE)w-AE_&TI<;xgmj$23ruZcGlQRb*&xP%W~XDk zF$y_&0uMPc7MK607PgEW6853r;S3hM8)W$B;DXQmW(``HJ#a?UV`VzyoS_U#C6%+0 zy?5{5si%DV&;-(Y)5^mI6%B0J>CSv}*jev{$Col{c@c#l&!lc)7Z$~iHv$xk(vGo_ z)ahC>*FfjFY%204(nc*eKNRRi@X_Q56tMhk`9A-6Pnse1?hhAA&$%)sk)!!JR^ zOu-iR@Hwn&(DUuRPnWcje(1qF)|3aU8^~L9bE${BmjsfLz6n|p!;hDM#q~FiZvktZ z+Gl*lJ=|V|@6%922|N<;EOmTbOIf51L6v)AoDrB8OX!WYWT7OSwj-cFqjOSwB+q9Q zpMrD2EWwhuzH+aMr3pS~FcQBso_u5mXY(^%{$qHY)+YUI5@|g?X8Uc&WooMFti+Hy zrbW(4r}i>Kxld6sj3@BL4CjEmFI7BsT>IUI-!aG6C(JD>U2{#U-qJTEHYDhDGcbfrqgKu5}_otm`2K z#sJKc#FSeIMw>dDKWPn^wn~(=RJ+R&jRdhK`<?zH z2ns{LHRU3g!wI7WztZ!mPJa&$8^R1SbRPeFv=lgvOnVAEg}78On=mnbVDP5;dJq_S z|1tJ%b|aGdWqInM~~$-CCiW`;tM7tgXP) zhbC$&uW>A%Pp_@>#fj*UPBUdmafZj>g0>+-Fai*^b)ybtJjVzSyov0*p7)O9`VPN+ zK^ie#v(E%1Z4FWHcAg`D-_PEDq-w{CakQ};2h7^$)Vt@fhTBPyv-h0=#?zhf5>-+=f zcg}wHvja0xcH0%dd&xO!vHl^8Tf5(eJM!U+;gfL^1_j1zba>IWP>m?6h zK6kjJWv;u#d@urqMHLBsK1#WGZ)3l_6nw`2+mnxw05!|hhmrJzVb!-~AsO?aWQLs& z)JmCvY@8K-u5)nwDmi+!7{dAFwE9R%J%gIW^g*mt8vI?j2Z>Kzu`R;xxQSkNk2>@W zC$o=I3f^*-a9$mnR{-j5vR5C%t>v&}5f_-$@m(3zxfNRbwadBmn}nl2S_^!utrkZB zU|1~M6$-{j>PZ0nZuf`b$O=#`U*3+yaCOoM=*d&tE4Eu!s#^%fy?2i+f*;A(v%5KAfW^;RIO%rc>48ELS<-Fvd%8mKzgA_|&Oy3y~4h3JA$T zn6XM(p69UIYGZ&dmY-$=$`?$7A(>rEFD~4|`WFMmqwmJ9t!}zt?{Szog|gIek3_Vn znkvZ3QEu4y&Ga@w5`#@ciR1D+AG`PZ49q9nAfcKDyAPo!Y zthGF{&%_`&`_TVA3oSd6;VZ*Dx6`c^!(*75V6v8rn{)%l_P}^1{T=+MJn6Xt%%={~ zBPx_tK+#A5@Y#<7(+DT7{?C{}s^T^64-iitROc!WD#CdLlD^lDQFF6brcb+@RMETy z_d1>L3nh5!Y7}rR?&_$00s*~QAj}lKQmEi3n-HCcOZzt}<~6xdx)rA~6)V0F-DeS2z-JW%RE8ri z;o1Lr_VL!j>G|2Gl>_;6tA)k?Ju7991owI`430PaP?nAULKin`WgXOm{;&2(qSXoO zDPUb9rBIAd5Nj{mbJ4PN~dRH+-BY!TfrWWoI_v~#{yrE&5j?aeD(T-|rS7Q(jE*E_G(qHGz! 
zKpqd8-kT)LzP~6h8n5~28g9#%?@J6tXdhncE>tk>i+z1up6SL{RL_;YKM^~e8}9&g zg*YVHGq>3z4|{DB{mp-<=8yRx>Mp~`M7P-6z^)QQMrmcg!_D&$e9{)E$NEkQt<>tn zg^!^GBZ!ED+9+~Y47?2DmEn!phbAoHC87J~8djQmRc=}7`U+|=$^hpNV(=)_K#MIN zJ__G$YmOi{g^7ZG9a;AdW?X)Uv>oEo<2z!zYhx``bUi5c9c=gWEq9E|r#{cfom5wa3NeXsk*qcj*>Lal zeOz2UCPFKc@R{;IjFLX5+KV7Tu2*xOgpraRWlLG}z(&CiPk+AfQC};s=n6}B90U_h z7vFau!U4)y;l&M_;P`QR12AokHg&$)+;rv3LZR?TKK*IxqT(cC=o9wDpT?Pfhi-be z)}lUTZwEaX%Q!c9xngUs;G$G_dxI)$48^rN_4$x<1-S_Z_4-*O^>T)%{;XGQV7=uO zFoZo}12$k{Mdc*H2nN09vbQUVC-ku4uDx^?pT(bBQRylZn0vrJvFwZMuwfBgF{HnV zS>UiCd1kE=M`S}c0t4OyF-MXmg ze~zko8ma@$lRP>&n1uKK0}NC=ti^IY`~$qiTuDApjq&=RqzjOJB$$y25!7!G{!mEH zs7$3EGxFH%UMKlKhi3i{jc0%NMSlM_)cJWs{_jr<>(Zgipxft^sHTgus4GFVl*MlR zwwGm+?hQk4BOBtEqG)|#nPWi{`oY0e3Ru;YTFX-!r0xZye}k8&F1E_IG-jj_;>9Uf zpX={>>1e*(RqsMSWQvb<0xtC8{}xxA$s}eGY&Ln$*~EB9H02sP`PK0Ao&^g6AUXXt zh)ws}3{=m^rkW@wqfKQxf%E?8$lcAsqD2|g@E)V_XAJ497JjscnWFg2Q>*!z$X#D7 zDsh-YBATu4;*h;v9MZ09>i69k(qG!B2^ZZ0`7%F77sHFj(Q!9>GCk_aBy3ezCx3N_g0yCCWKP;{VLc^gFU+oUe zynqC9>Z$o-7^`Ozdm=F4B0ds%Q2rpIm0qLcf-GyVMf~|m=1Fo|ncNCViqqPdt0-^E zqV#N+-*trYo z2p${~&#x?FO)409U{qN8o`VH>RqDdBek-xcKZLs!m%)hXWcq3}^V6zmR>;Z^=L)c) z$XMmQ^mIQZ7JDy3@*m*;{%UlbJ5Wv;PTpr`w=~%S7Ex~E#Afo*4tt~-O5=%4Tv8@g zt_RihU`W;0)93GmV-w^h$R3_kA2FkV`KXw;P?W|}@I&aA>bsIrZ&ZANRrJgI-6vZ{ zz=DDdI@O<7I3ri}RFTd~q3z<#Q;q3(2bfQiGh2`$7;oGN5HI7n>bHnd#9kCpR8?}& z+)>$X)4lZelxgX6qIKVMMcd8P6VB>8_}z2X-}!56v*(*hzjsma=N3p*+xgTzXVur= zctYz)B8pYkx_aFh;%dge&$K{uB?5+y`1$Mq;hXTLZYoY5hi?D=zmwram@;Ge7JpRG z+nNMvbLr=2HNIlsF{d?bzdgP5pf~$ut>>i?+DNcz0h=ui|PB3L?D< zfeGsl(GBx2O=!VBKLxHQqo$?XC}oz|GDRaTU;r;OFf!aF<@_7LQ4>IokNJT~3d9c; zA}#${bzYQDcG(H2r=*{umnhg(y3rMs@j=!HNC(TwXYoxoGQCCLmL8i8I0kMfM2i>e zV&x=m(o@b|*{3ev2ew@V1^VIwn{@i!tffaOBN?b!JDMd&sHlFH@U+NLE$n?v8f8$U z;9^#K2|#+V$C{&`kJcp7S=CT)XFRiOp+!jFqbxtZ>AOm|rom$i=|vEE>KC@ksX-a> z#gi8!2Ze%(a;!S9C%Hykat{C6jg!!KQZ&9J^=N$~VpYfsXZsFww+XE(LoxlrGN1lB zmigL4=NC*cnbY<%;tmpiQ^tsIF6YD4{)#0gg7RQsBtR}N3Y};Wr!rV3Wa2T94FfB} z7JHWbgSkYeaP(u*2{6&fMV~aYR9g5+b=W`}*4T_&7?(und?Noir1vvD z+cVntkqFW=^}z!Uldy5Vr&%#Q@sDox=s!JNM%qE!UKDm=R0pCo61PZiY&^A%NkR!` zeBu|jVfg-C3T#ah%|fs=v(VraCUNgU{}N|4aJiiP5Waj1vhM@C%vBbVJ#9+7Eyo?-w zyYL{M+J)p*YFy|6LRN!k_5| z<*RKF5zaP`<193;*f1Qz{3(V;g%9rCS*yMn8g%S~;7#y2((yT@9wuG`=nzqo8uc5n zyWG_hTECNZW>Z%&99X}-PB(W{Y$0MDLuosR9^&?xK>pIpHQ>ws(yP}>kQP(wclMhZ zHAq~Wc}^HE<)e{7t?`@eBqovhm!XjrcSI5KCv5tzzR#As*u<}B?*#-5>vw=wzNL_{2diUd-wW6=q-3W|A#EZQS>AOM47HJS8RAp3E zXS|cJ4?{UGo?;I)tx`_qAxdPZfpnPL1rr$9CG8EE_@?y-6Y1D8DCis>5E{SjIsQf< z$k>H}7S3osP-f;bGw~X{`tkgaeV%HjvdhQBbUlt}p4I?DPMux(mSh3`2RK_g&RZf& zoOuX?`m_X+S_DE=b)h>j_zcR6n%#)Wk(C586_L6tv3}PLLaRci%oqPrUJT)>=qwUY z6EOR7ds(m$>MS;(EN&c5AmcG2pje`))>J?F27@OB?S$fds=?V$>cOimaMA=>WLI%A zUSp3`shhSfi0@=*xdaiIO!Ef86{+u{2k7?<`PY z*O0)mK{SFJU$)O@QERe8QA985yhtpi%rxX5U};YhHNXipbpMcS&cUiwPSnpiUh=OyoO{s@0r=~W#4 zFZGm}-b&jWR%(NBH~)C`vO^Ai_WcK_m4}4%I`hj#)&B<}!N(X7Q@;dV)RdG#o`sTJ2j!jr}(> z<@_IJs{Df6!#vqO!-B_|ZuMPv2U)g3M{}Eo2Cb$BM*ye_fukk zOf*&h#rvg`V2+<@_`6Ka8fx8W7aq}t=T0>)P<*3~w?^dY`^ZwYBf*q9)WdZ6DZz2G zJV+Ki3fp`R!wSCa4U}Sd&FR2}H49&Lgg;hqjn!#^4gCOR? 
zaK(it3Frd&ky-40Nf4cm6Jf1qnQyD~_|hFA=+jgf3{^DMNAxJ@CS?$HD_-Lwg;)Qp z3)9TBU9GtY34GeOv}`haH1S9yOB7oH(+#y7LHdW4)MLUxYOdE1^9~9zZ3`ne^8!%5L&w&Fug!kp7YjiS`%93w5Zdhf?y49{v*8H#kWy2=8)l{R+hZ;UMN3!^2F z01Ie9e$U}2-4p1slZ;n3zDy{%Mw5~gS~n%F_K$h1Z?r|bFSDrhS9C-~YX{OYcu0Kg zYP%uoT;~KJ9&&*alJdLB>I3&D`Kdn}!Dhex*Whhq(_fLEH>R?V!v6to=BMPB8E%!0 z2b)>05kJpcTqi@kpXyva;2_~3dux$@00;T&j(-3P`Rle$hN$kx3fN4=lcZ=8gbgo4 zPG`^Yiy`1ps#o+6I+tA-jMZ>WO55ChCWC*4UD;ZNp+-U|PRbQWFI{3u{a_<0Ti3S| zKbz9!m!shhePI$tdV3SKbP&F&-S>43&M*@EtZWu5qq%J}x8#ENAnL=gF=WGY3y#;1 zwt+1xQmg~b-{8(;(j!Pc?*%c?g@^*_HlvSXIT>U%yn|U=#=nB4VdRZ&1Tz=+WBotc z96DdbtCXC^FpN+7&*8v>y*in?V4cM}E4P+anEye`# z&{1tJ3H;EB&)CNcOG`#gkq#)acFLp{bM)R9Sdyj_!)Lu<#|Da%vC+ibuak3+&XCYm zhJ%~fCe5qU%Lt&K1+!}+)Y%kyPF9vcG-BmcOC;`ZS-RM1&VE>!X*9T^M zJIrk+z_N?WRu}?gOwz#uRmeX!?2h8SyU68n+coZw+QPkEF>H9wbc^Jb90v~jMlt!HT8EkPkP4$Eku_rGmRMM3% z9Yy&Oa4AtRfewjK@808l+%XL0lt;I@mE=Zy-*i>+u>HUH{LAw{cg0l+1bt!oj9$L3 zq0;k?T`hRTr6*Dm58!9q4-S)U36v08y$%bkS%*4H`*h}Jv<)sF_X5h2`ILE1|_ab`8}c$eXyDM>IZOPGQY)Mq9)x6Iaq708_Mcm z1!N?Jo4$K(8*%DR=T+T6#xpj~wn}%kkpKC_%gbvgNX2L3A;juKlk(H%Po~brv9QD9 z<6Q-`-M4oT>zRi`)`RhPrUnLKXQqvc(H=hsE=p!8jf47>cgLrUI#6}y_*=8HKfy&QZ~N2g`>q>C4~iDuFU`k2J2W1KXxX;ink@ru~9`g zx{H0xM80lw%u}M`XjI{0WIc>y1al+5^c5#hx=yRMd>fCpbJ>Nqol~j5Iv)hi#`;x_ zV(gY2%+CPb2`Ai`wp*wUa!D=md?yJi2@m%(yM6lgKUlN`_dq$x~|S;Fx+roHKST);8n*7J2+gkza(nHs3r^2*CK;0-v<|ih~Q!_~jqZ9?^smS}Z4M!BM-BP9wS(-Y+EUXcJTgL2Q-0aLYD28-ASL?l%dxEUSJgt7b6W z@(@B^J{L>H(AF$nRCQ{ zdPt}8QMZg@85Dq-DO1<$41DN`jDA;~`G*T5<O@!U=Y<0 zh6G=+)s@wD z8M+}tJN~`3?vBrV99jCow(ypZCfgTlXc#OamPafWodlDe?&nv*`g zNxTHXeHRXs43vKofv!28lsF`-OlE)(O=mniqRd!BeUERU*1SaCzuUw zENtB;k&Pn@h|jrg$61AL(r#(RWBqnyjCan;5R#sXl|N#(U;^_6M$sBldZpRyci)YA z#N2H2Kg=A|8SaCzeLB5!_71MeS%S#O2HcxluPq7H$Y%SKnT^ChWnkL)pP1;ZhnQu^9V(KlRMS z^mQ0t6};&>UPx+`I;+_ixx4e^j9_rYiK!GD2L+B#IzCA?)!$lDC+0;~y?)aKc8GP6 zp{*ZOTA4f7r1;c~!HAy9NMzz+^>}zRQa4iGtLuw7sU;p#H9si$MS6QW@}d!`p76L! 
z?;=`UpA39JVr83LXa{HycFo#b9eNUM>=<`XsygE;HOElBxeOmxR&>OZ{9|7{O5H6m zS@&iHYa#ck<12&Ni9wfE5_gqyM|XdrarZq^7Ramk+*L3lGc2%VY@{)9N&b3>P#5e^yJZ&p)G*v`*)Xg$ppXF}niKrKfbNDubXrLOPX$M{Q0 zQ6S+>F8)sqrL{zwQtlac8-vGq0=+k zN)-lyPU2bb+hUbM+)N~Ccb7P_oXB z2=uNJ#1iJpr_=&H>quRJ(N44!O=A|To}0=v`*%~wY@*RgSIOh5+0x`tq5XrfIOQkX zMug&e1YD|2;0Z+b3%TV7GUl{xC`r2T$K$n>Yku@3r5Ms~jYh6v%F4W;@JaoJFBxi< zgMN^yUmJO_&`#lO;qHSe%69*b;L6KP%kDVEvy-Py`{;n3wiki8HXKBZBSUged8XI$ z51`Z35SKS|AsA36Aks03mbU&OUI*B^WEdpvcTS4_+4E$qLAY1r@({*QJ$lYFCz1OK zAzG2`eiL2U_R(cAbI6oV`23fHL_JN1-;zr;AkGjr~0*w zHcd&l>>HEPZK^kxwUP%=^@Rro0V*YPcXhm$x?Qiyaki!i|CAd2MZO*4q5@*EvAV}c zZ}B^nF15kOo%CF8yoECe^>>QlK*yiSp|0m^Atk+^SW@j)1Rw%dc#5UTf{La|qo{ zYE4h`p;@l5A$axd!QKt7-0`}X0#uP|JA#ep!>`8uU7|Y}ff|d%DjJ-i+qI)l@+oWe z=c{#=ZKKuDJpLQaW3URocGAMLpd)^4^&KzNI6Yqf6e@IN$5?fYV9(82VjmQ2n5h(6 zo6de~x35bTBEl=0eAyv3biU5Vz$JvvwZN>nT|~4E>et1VyNYUjL%)kDm!Sudozfmca1gOU%22xZD-cbsg>6ly=^0-VF%o>v(jS*9g5 z2~PR+qE3(D=A5qwNg^t%=Ra5qA@QyUV2YQ^+GS?BaQZ8^4E+P>5gNu~p|^vS(m%AG zZXGt`mE_RLx+8li({s!{`z!o$>-Jm<9sZOh$|iO!02Vh2CFPWf>0YJ4N|LjWk{tfM z(}^FmX6f-a?=L~l<4y?PB1ux!YQX5I%65{Bg9;WpZtpZ&gpD2t-s|G7C0rnlobPtw z=I6qTb9E9mVsdJqpdW{Fv!Huu}k@sQt$S%fNV>$Cl+2r;Ee~?wF?Uz-6 z8KnV<>LT0SM2PAtVafsQr4MM25R4%2KR+dx1nZv4f@5isdrEe`+I^eTz$m*(DQz9W z?h?ps_3@)RcX@n-$uiOj>U}ZXSMwSNe#~(;A8E9mi6N!u_iEp%8Ugna%lQH?1ahXA zHp+$NY#d27)=LuwB0urXA#p7?z*HZY1hUoipl|%z5d;EhxU8tAc!Y0vX9MTW-rVLH zd1aaAVrjg@#miYcyp{={N+z-Edpr+ zZ$YdP#B`c>5paq5sOMs7BL!prfyn&_E^uJTlOWz$^^9_)MXJUY5#4=~3Fa;a@j!XR z$>{CtN~gZP7JsfH`?Q*YOZ$1b_MAxDriN`Bq>P=RHA5x{c(hvhp#*X+ z(hp*nm;%xX6O9aumS5a2Wv19b7?8R%!nut-1iztY@rs{V5?Q=A?X2lr+vWD+R#cQY z!o_H<{eimAWMaB%E2+#~*;Zuhr{hr~5su;Ku!Q4+DJ=-H)wE1j4lm1D#o+s3s$OT*MgC9l!KTyLMUepF0vG7`k&B zY>P2z%JPvH12!XcX5%VRp>&vUidO*TWF4GXq-!BMvbb?Jn^$Kn1k@buUoi}AStsZZc`e_vF)dK5NgcqrM=V9f=XwxGrS`VWH5%Ix zBDr6t&#faxE}T!69$*2>Qw(d7VnDBSt?A0(+d$@&4=_TizE>CHeZ7%Aj0q82lz&$8 z$GI+&ieiC;tIdX#GN!aVov(Ordk5MHy`oX$VSG@MBqU_KD@dLH%dhm${KNX#n^wWF zqBl?z^KF{BqRDBodgn%k><=9FFu3lfIR_4cDE<3I@+%&;o}6=gKQTtSg2NO9j?S}} zS)~5`oJoeoEdeDLud2*&lv%%zz7b&XVYE5>mGC@%|7bGkbWX<)D8}A9W&d+XM6x5E zILUy0e_9U7A>jMxf$O|-`38(|-8ytv8xu=50-380*hmD@wgrF8nJqqJYI(deDdBXogu z=k786&GK^}N;bT)lZ0&wbiRI9N{q?(52M~q*d9bjO>-|THt$TYe%b#$lcFXbtrMYP zlW`11mY$prrKYeysfpY?cC(d=TUuVFVafvN{^BUhqvhPEbjDCIjNJQl&E_3Tue~9^ z+WasSGK?aDDffy4s3j&XXnwsSM4oy773EVsnr*{qnGNWtgAPToIzxY)XWiyh7F8cX zJ?hT=Xg~6~xV%}vLcojafn-rP9EZU4_i9J}lx4nsGGFH>7!Cq?7TA#JE6jaxAzos&GcE!sDRV~~NnfDwuSdKr`Q zyE&8je}Fom{qJ7|AW96iA5n6x1)vWONRxjPWxAzV^+G34rG8UQAeTgx?P8I0@>>Ky zT?GH+tEGRxspQuYkNF_7b%wUogkxpA@$F&a5;hs67xS*|&8urnuHe^sg{45@FOOemauWF$+$I7N6w~h_ zj6Nmd7x|xUFR}t*pbxlc3f1Qae^)XwaDoBvgdsCSjJuV}H_^7c0+Tmi?X534d1VWH z*E`t0u351_S*?4c5kB4s9FJPmAfl@j{V}x7%-bL+v(pcA_Od;yK3)T(u+*selRN^4 z&thXCqPYvvKSoD0{9qJ0O?7cZZ4%k*3h~wkw<2~4LF_YPFIWr|+7%QV%q!1T}w|5rLY01F?CaNJt zW~jZ&szi+rO0t7AlZ8LidXqb?X7+wsM#D>z5MMnpl3XdtHM)0FBEVMCBi{Qr|Enko%;=Nk^b@# z7p2;nVPlcR*^A}p{YYp1u+-F2i@ohq%5@_zSp){K@GeAr_wTsk{^=q4dIWZ_aO1T@ zvXrs}&^CuOvdxWjP^?6CQ1FWIxSqyWK+GXTZ)+%B!4*{P^M(FRovtT(F@%EoFRL{y za}d1O_Y1*j$e??{$95HeaY2vz5rccqw!8LLWaliyKW_${b5W|G_oCNs3Y!UfbP{|t zTYnI2XZIvZl%lj2pI95h5txF*pRWiXH?M+k?mmy5m6Z@gB>uXCp;%T=Xp-H`$*xAf zo{|3Z+Ap9?+VluE!mdP8W*;{E$3Nj?jqj6P*sd1{m~Q0{w7ttJ8UGZO{uUzeXVovM z;YSwL=J%q(q**~#3WSFWbSUbgl!O2R-1n^8}t z56siPY-V>!3R(UfZYsB3kp4F!T|2aRSU)(nmXdPN`h}VH3E=g^z4sXDhv)WQq%s?Q z6`noJiGaO5DYXn#psaZ-X84}xrqR_g(CN`0FTh@am)_g8LiTcaMDE-VRP7)7P__-D zk0(mbk!w|B$uxPzyp-zmVWLMMT9L>wIPU76dPM!q^S!eqrF_17#*5{mdh%EA_U9zv zO7=pwIf9xiIov@LVf=6WV=ILFWh<#L`UX4A~-%jW9~sL`i`9}sAZ+2CW8UDeM) zhYUybT>p5Y;075IK!Q!}@L>7v@CtP<@AuQWig9B{;D)l`_ 
zW#T`;;bE(}Mb|5D@6Yc)AoCCg4ys<{zvePY8No4Pg=!EJ(nWxrq}s55)au_!O*-^z z(!JIKiBl;WDj)Rsep+E}4ZUdol0h^uN~-jg23Ixn(I@}*M|zYkS7h2ZSXr5WbOYi0 zgr8xO#-qz~D9wDk*6z_%nje$jTx3|QYq@zw0oU}~vzJ7(e?tzE9Zqcx{a@+0-;^na z8Ay24ec&T(iK-(`c@%6W4@-HKd^hpyvyDu+(!N@zQk>eKlkUDX;ukXMwEyZWp<(1#hbsi@7 zuN_$PD?ntQ5e0TXc_-|aY^MQ?2o?oJ!JWqCdX(2SG8s9f7-!_!{x7m7#Q6vS(c*QeWkf7ad7ZfcNqVsFXTJ^_C zxLOJXNM4%okI^UfHxsGDC`4`6MdZpdmus9pbAzwVc%?%TWOW*RS5BA9V4p^1R2q3x z!z-;9|53=g!g}!ln22X?)jAuSptH#L=gMGlH%!zUNiAi1kZxDem@>;%ps_+Nn|m(h zE$wGhGHZ##T@24xKqc8PfjAe9F0GO^Bc3BwUm9h6QwlOV?W&(@(LyGBq4Z)+S+q0| zp8%saS)5Wu{NxwitdUw|R2H_oDfkjA`^@o;mo)DPmCtzyhRKwqQ2dA+kXo)4M(~jj z`MN}(T%6s1Ja#gDh>nV|)xdS21H(J}2|0J%b)sh(K`}f016UZLsi!U$&)I||3bq01 zR6m1R=IeQUDEiz4Ac`WA$;+fq_P-R33q6GD(I^gg{5q&Eu~#en0J%8dnR@tO!#qv2im3qON-5xWY%D|pk0+?5#%LL`rhgIuWX;^XL-yAFglzCG`|iz zSxL)NsgfOG6Ut;hh~(6I>y+^2WjOS5cWCvLjFo#7L}_y~MOX%P&YNnp?(#LgmW|kb zd977~SXKM`MlK|2XqDKkF#)I?1&jGACN+{V-8&QgwwNA2o9=z<_1mg8{X%CYu)`Yi zR6q{o*Ees8R;d&W$hu7$9hLx@h~v_mn4qtqOFb_!&Y)i4x%^hnM@!P+hzB3pXF^Za zI}YG2P4qzg&wauEZ1v3b_)=Ym+>|pE{Xq`%iwuLH#q?Stbua*5M)awgW)!(n2DMGA>OurIm`b<@YQrwNvuX3(jh^@B2`<00v zyVN=a$1&92mc271FWPtRejN2<4D9vU^FKgU&U1nG?o7%V&+vxfdC1hIwz6h@S(=rl z3~s;b$f4-Ntmqrzxxwv_kX*j|PmhJz(6V1=4nM%uSgm579b!`zuQ8)-iS~Rge1Kk36NQL$U=^trR{hXb37b}agqC@)ZqSI))YHxyGCBXzb?XGp z-yBqtnKP#4-%wQY22wGxyGc8IJZ9;rZYXnI-x{_=UC*e+IX9ag^nwvtFeB{xz1;mv z3goR1(qlWu8qGOZ)(_BpV{4dgqF_(cC2p$wp*`Tp-a@VfjRMH%*wx1`Oi(< zkr=&`pu~u;M4=MjxUTXnscXwl5T0^Es}UBAT8r`^CNGirnJrxD?Ochc-kHDZVxlh_ z&ESk2BL>o==i4|9CBy1G8wBX$C#~K$OMKqSO1$+Y%54HtzeNp##pcrWQy=?zL6_ za@sD-0(qk_HQ(Jo3Sw4!XS5Kkia$(xJ%)`{b|vWLfulxcDo+p7T*|j_)?gveJ>?^J zJw3yWL~LiDGX1+0E5;C2bzvnF@gbri`zc%&ny0fHM2%^aj`8LxJmIDd-UL1K>)H1gXwM}3PwFh zv?ssum(2-Gd|<{!r~YA8bR=784QjLsY-)|g3`Q;wnW4S>VVIRB*G#2{%d9Tox#W645){6#BMT&!y&WD@8QZLIM$nMOFY*P$pq5CqsSGtZ3c_NeWXorCZpPg0 zKUyWP!Ok?55e#B`arnp4^J0Q>`U{B+UtxFk#o!lf@*1B|#I34G@y zwi#>L{P@7l9%OdP-~BzYqJph{ESY-Tf-~waG~*a zP6(mkLz|k|lQROnLZhxpI0HKrDkKFyylY3o3S`@mmYl0dNtL`HfV4?=t@-JnW$kIQ zdL=|qHqb80c8PHome1#cYgHkZS=vx z03~cXOTZTU>6A{T3W^dpH#22;GljWC5m9w$lZdfeeC|?$@A3vK?!|^t}{ADHe zd5y9tQ>3+~Bys%{2UeqkwoMd3o$lSg}j_6kNm;B$2f$P2! z_hW>@;a0V-dyY9_d#GV3Ae#5?awLmA9Kk7Av8UPZgw#<-S?V>_=u^=gfKLAafpIP? 
zBu7ziO%gcGTt`*>ss>-6w88y?dz5v)MB4Z4~>6t8F%9~R;cUkOKFF-uR>0x+n!3bB=u6TkUL3E zHMM&B9XfZ=R|VuUwmU(GqQ)pjjfWBRO5}#bzo9eSjN1w>H$D!6FR^YGBjN_ux$pme zW9QS&5KHTZ5C_VVwH8YcE%Qy zj|IZZtPgPnRK7iN1xKuBSL}~ti#5v;KcDAGpe7YrnjzaTJ^PZ@WthM7O>0A!qI-b2 z>#L6s9y0In<9or_*9{hl;h%=(r?;#Ek?SGz1Wk60-l-LX=03gp~6z~^g0%?d5u|x?jL)yc{ zTwFl7l(xtqw#ktGvv%dBm&RhduWuuUGmx`8f@JcNc${u-Ma~ zZg7{NjLJ7`Ek_2Nj7iZ`7wOS@3RMifMv4$-DL9_Qmp}5^Upe%}rQoOv3sqKnf@A18 zU+n}wZ-cf|n@)S8A9u0L3i8~8X0+q$>Ay<~{7e}MLuz`x+X25yqCwIAgP!3JPWPwi z*iYMlw#vFr=TtX>_oTWgyaf_DLOGRDS7$Pa;<%(!x(^5nR;)TZ%u>eMnMTpMXkATBLoo3Kk4&$mSV#JG-zLNyRQySY_j zJOdz0+tWLZPOS9)PyC+t${;4!8=_+xJr241E|QDe_iL$`eiR&?b_Sq zUh0dRn~kRGv21)Yk+7tT`ls)g-KV_?OeH?T8O}y4sRxfkOCEI*ztlcm-H2k{cW$iu z(g(vx!lD62x^ABEgLfmDSE8vB#9rrS!bMhJ1s65*3(asNNW};>y~AcEsQx?`nnaqM&9ia!enOeF6fgf~=pyg;5wY0ovyG(4xZ^djqoUE5A)C@@ zR&%K2SIf7QI@zKeGNEI^0p}ebSh}R%3fCm$C38+k@nSY92=14Zw`qChxbE*;>9*ep zEFsUoC)6<4v9z`-7;Hmn&Gr6sEwl6Yb8#K@A>mv$w>9tg;nW_+rQscY83&UYFq0VD z^5)oG7%8&!SCpk0y3Pxj6z|{9i~)#%m%(JoIw~o9RrH=aEgO2+c!rE!Pyk$Dem}4q zd4h}~SkB)*^`6Ry1~WVuhK`*TgB$z@*XrWRzzwC{KlSIODldjWcZg!Vvev>u+xTi6 z7@ioIw>Sgz+l)I#a4TL$hs=u6-BIp~p}2i((IK_fN8Tr`?(+){g-U;^4H!sB z+k|7`SPRQY@0RF=K3zi;>k0-e0~(63xon6)ASW*5+7DJK_O$M%ahf=a3&G9Tw5*qb z$QT`G$X@lf`w>qI`df+VT+ic*KLGV6i#XlIjWddsab`S{54tjvZ9g}RzjSuS zz2#k~LV6Qv7kR{5L4K9m^ak_xCar1SPRyU9E_Am_mFgiFUxD8*yNiZw zUKNq%`$$ec<^D5n3(xCCUMP=_39x0RUsTQ%SBaP^9dmm70tV^8i>?&P)Mo z9=N2^c~>YVpg1J2KMo`nmZpl(xLXW3d<1qf-fZ(MX0&2Zwu{xFD) z$4bBg1hHgNa*J7FYWo>!=h^PBn-mpN(8tAY(oF_B858Z_i`YLUBQwWPUW5Q+pn$TN zNfwX%n25=p72+CiEv?vK!X^c_V3ISuh+eck8LlRv;YlO^cftxv5`7ALoy3{YG524* z70pZ=-zHX`_a$VMx{0)W0j}#7z>twFk1jvCi#^*_Yr6uMpfeg|K7f?c$?&EE7ip;p zio)^V+-d|H<}4u3s0^_+DfKCU#IS{EEbt>5i3%{b*!qV7|1#kV>$Ww|aRC+C*{gP+ z*`AE_`85#u`v8_e%4NWE);h^Up4^@kTv+ZS@$*5w=kHw+X8J@_&c%;aVNQ%t2j};P zTRR@C9$K2r(ikCUHzQ69G=o+o5})dcQLdxgdl(wOv^X@O?1zZ(&tTUOFcQ?@>y`t= z!7(3Az22TZg#auyWlF|8$Wi(vA|)-3mK0y_ql`6XMf(hYYDz1!{Q<28zviIKN!zSi zkQ%%nUNWW6j?0TZX-4B*k%-~d1nm^l*3NJKvj2ug#7v;j^O2q0v~Dx=YRsI#`Z3oFs8C#W$;i(lur_@l520^!UI#O+kx5if z>N|8e#1I?TVuo~QQbhS^2>?_X#bZ5Y&Y8B_+Lz*}OBJ1*WM8cM9=hsk2hLN3T(0)o z4p0Y%jeSu9(Osh}bUjUF>pAT4FM?xo+>(4lUM37^McHQp)!3Z_}vS68P3U8cL>3}Z_@iLT% zrg;bFIo@j8JWbi%#r9b#zEv?lCBk1iLlGX_6bDrc#UuP?=Ar|D3eI(c6$lKe-=mN+C=>y`q{r+*C0SF?doVLW zAdoja+-O^GYk?;b*H-}ZBnWte%1eFV-flJkcGHJncpqbn=c(-d;l~?$6c(`Fu;VaQ zFD$FBG7!9~uQLAZ6F61V!H^7^aI3NPl9RR)80jOt71*N?hPQ#+@?h4tMQQ10^N6u( zUKF`q>iNV{0nxmK#%SEO!krHHaylMR?x}L1$Ooi{tXZ-p+322f({sWOhXD8qhDWm` z5<%JH0#=DtU7VZC8!&B;T6pKIH^2O_MbNfB8KJ2`rONG4Co>7Fu#yeo?jlM0KJ02Y6}gWjljw zw)eb2#NQF%P!0QVTheAm8gF;&FFjy+p0u7ZM9GEZ<$A@FmYB{wn{$2_3BR{p<1BSj z>ce&fKC<=%8&}0~k7VJ1g!I{(*;1gP2Mu?JPhv`PJ~VIJjb6~1<`8t^@8E6q)(_X0 zk1A-y6i&W#evIMk1Ve#P95$#N_T-SP1v&{T{p4{(?YQkPj9#KFOMrg{OdHonX8mF; zD+!=gxed0_!?IM{texX$yxPij zD3a)kKHM5PA{`i~tS%zf!lvgIhd5V=7K=gvV4A-F0H-`eQ&VLw@f3&_GttEP-d)yb z0NH=rgPuZ?ixoR?Kg~T76)fO^qYdEO9Z}>3~Ux$l7t%IuZ}cIk{J+Scp7$ zFeae|8tNXfnn#5e_wG7jgOQmGZEqROutudRJUY#>tw8i{OD)tWPo5@e4d;KO=M(S? 
[... remainder of base85-encoded GIT binary patch data omitted ...]

diff --git a/tests/brevitas/king_charles.jpg b/tests/brevitas/king_charles.jpg
index c1400a484e686c3efe045c56e4fe02f3e0f8d17b..e9db94acfc0d48b5e8df3a0af903e7c2c9169794 100755
GIT binary patch
literal 61443
[... base85-encoded JPEG data (61443 bytes) omitted ...]
z^$G;JMMPX(r`i=>4Ca*eFxKhWAdZ>yT>@4pzdFqJS?k3d-yUG&2cP@KVsSunjawlZ zU{VRaNk=c{?h=JF^AN!at>5kp-p#t=i`BC`l&!Xbzm%y3tFwVO2$^xBsl=IEM+s^T z#v|F=D9<*p6wL561{E6wSfbk(}tXt-13EI4>mT4wm7m;+u%h}?Z`6zm~ncSO-3 zTOi@fB3;%|A+H-zHId->GEfICY37^Dl@$l5(Zp zM_RmO$xn}Jx}np8bVh=4K*G(cRpzP?^NTpdU0ol=kDvbg1vvPRT1g{|3zQ!q+-b{LXqHukL+l+~RE$8--4i5@3vb;1iX~$seg*;qIJ< zYpI8ZJ6NKqAjUF>z!Dyo@*~|nvF|)^v%;m=Q&5ucX>V62t84jQK!XG?(IOzh44{64 zLx9FCxVuvJ?u}51^>Gc@E(c{)?cD1oIIZisu;9c)PCw##qTvutG*RODjO!Ds(qGQL zJ3}@QU3$T^x7_y!;!`ZrO0vbq0|rDG_-~IfE8oe{%};s7C`yl`UR0jk!4+#JF`fuz zRO++GER~Lstc4aOk?!Jfkm&;s0NSQY7xWX?w`K;?*svyd)@1YKn4=gLp&-o|yJ$RsrgqkexwD}1K_bPc zpFiULq6e)t28_cMRmV<}m@JT-za^(EC`emqXTUUs(6Eb!XbY?mr+TIAK@XD$D@PoK zYzxtiN?JtxIYHAMy&*Z2<%Mb{=)^yP&mxcMwwBD>B Date: Fri, 7 Apr 2023 15:09:19 +0100 Subject: [PATCH 453/628] [Test] Update mobilenet test image and assertion in testing --- tests/brevitas/king_charles.jpg | Bin 61443 -> 44529 bytes tests/brevitas/test_brevitas_mobilenet.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/brevitas/king_charles.jpg b/tests/brevitas/king_charles.jpg index e9db94acfc0d48b5e8df3a0af903e7c2c9169794..d3639a69e953954008284cb68cc6da1083e54710 100755 GIT binary patch delta 26776 zcmb5VWl)_#kT!a7cSw+fy9Rd)F2UV`26xxIIE28#U4y&3`@tO&G&sTCg5+a&zumoc z>;Ad(&Qv|q)m_!^RL@LJ&od#LaF?sFaqm70nMixvSb{)GN+2c>2!sqm1fhXo-w@qf z#|#D+1owtv-aZfv$N$J9VL1O24h5ROd41Ts2u zqB}C(TjS>E;S%KG27wUc;Qreh2OJ0gKbSos{y#AsKMx3$knq1au@?CqEDCnR2r@RZ zDknQ9mmoX0AUhug2e%*x&l?8)f63llK!kz&zwRTxxlas2_%Ancl3+;w2WKVVyvHbn zh5he;U~EXF!6}CO3%YKY#2JMF@UDX8c8=vc_(2YTHKT=xrciU8YjUo`EO_t$C^bA7JEf^57n7v6OG3BzaHF*Je0+ z8SOui$B4zTD}nf;$y!S&>^Te>hF#x$VdQ^QMiF6HlO3OdI|Rf-2>+<;e02Qn?hCOA zTn}s8J?O%Ramg|#`A+^(G59?-{L>7aLNe6t_ew}IxtBm5$k8-0K8iQHzpddv1A4M%j8)Ez9bc2I!jnsZkYS&B15@No*j> zI2Dxh5A>+8EDBI(w}~vQxbb1j@BU@ z@VkmlgHZTsKPY-LH))5oY+}vCHNXZGSbKb}ekK(A2{Q?V7@wImB#bQ}Vhsa>bPoQ? 
zeSHFWS^aEc$3BB2dX5p=+Y%o+7Y{VK{Rg_$P(JZ5!5=2U1CK4|2fZ7wBjSH;1iu@= zgDeCZ)_gJx1{vR#1v;+y!sOaIv3U1!gVKol>~r<)2#_4-Hf}(LxF~AL_r>Jm9H-(K zcL!uixx5?yKrs|}0$?(m%03yRNjO3uRRf?$L1x$D<}?EysYL)bC^}_Fq)vSBaDbJt zcjw%94s-x+p|34cyY}dNn*_;@ng^sdTAA6cOhT%#TcX#y=1pRxm%+Yffq46HDTXrw zV)Pp#ZlA<)dTC?Fytm`R4s%2p13QeL@}ofQ$Z)g(u?^|Z=)ZF?;xs1M>;j^V*#IKU zB#52~6H)%m8YXg0Zn}t_B)W8TSc$>%ZU`hX6woZdS%Dox>rTm>px2bd>#2DvS z%R~!GtkR<6PmPvNRr7cid&{Dr8UP_aF8q%Olf3)>`jz_-3P&7I<5(L{2583@G!xa`!qB z5t^Q7=uqn7_JmPz@`xy}400sLfvp^UY6vc4qYFB(=Qyc}dQokH_FaE-N1V4Ds8tO( z5}60-m=dSx6w{+`*8e45Yif_u=9Fa{Ce>+TMqjJ#jt=IlKKr9$?0fS8E&nuTSx!b^ zN{~1u)HjL#-Y(j`%%&<(640!!iVk58l>QX{rPU(t2RK7Z2Z^OPC}I=9QH+K#f}Wq# z(5`*w%tgER1Mz~rX*^4Ee4PmB=lHDFvlqict_ySaY9Em)GZL2)ud3w(Tb_IWKVKBz+T035VaXS>74$?-?T zeF8Xmpx6$2%eYvW2*;wL@3MwI&Dw5GbeO2)Q1zZ9JylG9M*0^HwW|)jhTV{{Iye08`huqjV9p*?CR98xt%t~8}L7!0+P&)91 z$cE7Bsq2tkmbLU4xZtBf039Sw>eOF4W^zShj8)*prp-x{j58^8BuG2^g{Yu9bIfSK ztbUt96m1O2;gUmv4^t-i2-SDQM43KR0;R!auVws46myvh&wi4A#v6fuBIez6gja>F z8!`GF#=a9X2B3oYWMh82V?w*Ag)P`zW$jVLKe~9K*VM?&$J5BKae4`Y*R~$2=^aoS{?>f&?-p1aCS;!Fq-2hVx>$~ zSIHO+n)>QEM zQw`wXs12#(#Cmk{&JESNgy`I?%A!tLfO3}0|4>a_j>MZ!0$lT6_~ZN~TxXBn#+@S4 zU({CU;&iN3N)DH<3WNql9XoYQx4fkx1rB=_jLsZ}Oa=Rb>pWwA?ye5a>R7%$?Y%S^ z8-KH!g)5+z!&bO#X@0LKG14Hbfh>dQ+$8c*Nyc3b+Cx8dQ2VR3*GB6a zlBz8+(R_HUwZ(y#q{=QE*W*I8le+w={LjQ83Vck_%luN=guPfoe2dsk#J0&$E5Oiv zyfi(L6gf{4S(_Xn_+E8nt`hQMI!e~Xl;4DBMdIiE+x*%sk+QO*WBZ~!w$nG`kD?@J zQPUJy3Q>`duBO3S5)0CD8yIGqz`afRM8cMQ-ArtFJwpvV0%)}Z+ z9-bDqg03tUVOpR|NO4;_;xP3_`yuaa=wjN;+QRJUFwA}v7i(g>=;o6Spt7Z8b}W~s zWWQ?~zLi6$?aD|tRP6r|S16WP3r?RRAC>}*oXU+DF1;y{?ZUZNA$1BOod%OL*j#LgJ2kFlZ>)5ku;1Owc&#K zd{v*Jqj@0LXP*jNjU&8lU>WU~?^3Ole0sygFi&*OyVAz0mW^g9ykATq`z^Fe_8U~s zgW!aYhVv+R)pm=vM%OOcsp^^k>UG1(z^?x?M+7Yv~F#Y}B&>(c4sCjZ%iG26Bo6mI5!#V`>XGw9NU{ z4HfU5SUIK%CcIm;Gx}GAJcJ2@sMi*MmnFz%q=gN1CW3Q21C;dAQDxh*O3@OS)0@|) z8Y>~ZuO}~M$wpbeRzbRnyt-LtUHVM3!hdTvs78y>rEM)#z=L4Ctfi?iXQ?X*mk6*` zU1_|cTi48Im3nr7n%V`l^uvsw(Qxv}zL`*=1zr`$ZQ<1&voGjn=W$j}KO=I_Y6U-R zy2voz0>>i!v6Vmy690|?q!t}@&@{eZ-_+kdN@MS`fO%_Um^kGTBO}{RLYft{*j7%h zn$)V+6nT7gQ-wQ|5VM3rpQEI1BEw%U!RJ0sx-_ycvMc~lHS%QgwuF6XKE>soaa#yr zG{iI!Dm+oWIW_Qg3usZvz%8Yj+9L!dFomjLxp2tn7~U$o8EJ)Q9Bv7mcSs#Q$w=m#_#LEN#1BwAbg`zoV1G(4l7Xh`AtH9)Q%(-MQi?iM(AEQr z`4V`k1!#br;8v$7-m6tR1}&2Dt8h&INleUs^@VG!QzyFn{W$Yhp4763aK)HIJZB&( znTao@xQU6)2%RVX`(0Jxk#8rM^+kAh5 zksRp^6<&hvN!0g^~&i30Ggc zJ_WQriXpECu}n^_?AST{rG7zU0*^_MuA`UmJ0LmPWlDZX11?Y3@fJ}!)~;?#_m=0^ zgV+G^(Bfzm1N=84+V5mBddm9m;r&D-^0RptsXLhJJ^FvD;YW@KN=tE@XSVJqaPwP% zG7g?K^83mUuM*1Vk|;TuR<&tAAB=say#R(wiVPwuCZ z#hd^J2@ZcFH?rKVisL52RU%cSvh7aH?IWA*-;9rG*ZO_9!{5V|IbB*ZI-G=+h(A4J z%NZ_(hG)CxpT?V^lSJQ@IhI6EA)&vI0+n%Eo>udHJ#iH%znf2OWpTUaRRT%2oSOt} z*c#~4Whm8ExcZ@H7IMcLJUDGJR{82fvVCwN zpK$hb#Afq4W*tdGBEY+NLB_nFS0DM<%Exs0M?`E7LuXM>qH#acH~VRMQUF;&Q8U3F zaeZ)sU#)g7iX@xS|0aGyCM5W3qj4T!jdNm*4!dE^hy9qwNH6(2>X5h56<;LAP2Sdl z^t&pfp39$`xsu}QIm7Y70P^-Ud*+(S>F zAn=kkJWBc_Q}+%z&PJl(P{Einn3dcwTFhQ!;Dliu_~F}P(?7;{`IIbCtjCQ#dhVdT(%x@ zxU7?j-gWlQPpfpdW6J0J2YScU4jzpUcA0KcqZKHbT(geC#FPih|Ar^b9~G#E!A(?L z=U=w^sW}TbKm`M;ip$%RxGFVT#_kfZ()?bSgVVUL8$j<}MsKRNZ@4U2T8|%8tA=Mc zN+p9(y^okb!m%h)BQgnqO)L|MZmu(JCtU$DhPLLec_ThBSqWIn1Lrz6no?|KP8v(d4{p-+x0N+vXDiVz`ch;N|#HZ=K$=lW+%r6cPtFP_5){f|oX zwg61IXcukPZi(OQ+SK5?sz`E{+2t{^3o6nu6^xOa_f!znq6OMq~7t~?JIh?7o zK~L4S+TSu~WIgHzGe!VwJ#Bufw;(MwNB&_7(Oei#=VV`#pC^P{x<9^c&2Ls=5BczF zmpr}wNyAKa!z4N@XtWA_EMBVKh=T|Q&@bB#@PDupZ(Yb!n)?T;_%4$650t3nNasz~ zQuf(yt{V8^E;&uP;u(9WSC2xq6@&y{TE!kgYw+OV_A@2P5gCdgI{hR;W7_T7^P$-G zxrKy6ch@P|2j|1bhZ(NrSCq)-)nWGX=DujW&-0`!0$1U8F2^f%M`{>rVJ2~(<{X4g 
zysF>p*`*pa&j_y8t*C#Df&S#4Aye5gqLM6Usbv84qD+%qnWvp_t@p0>M@)y2t8i!} z$E{R1lW$1vS}%Hn_ZG~fPE__n=(ITnqi1^mdJat6@mI0onX#BE&-V4!P(IXcez&fq zU3AE93AuL37rz9IOFc=sHUYWn9@N%wU*3P)Ty+0^(qVc< zea<4Q4gClDT;_;nK56b_KW{J2J>izXucimwbB@i4Wh|SIR>h;+GqwdW#|fgglfIMo zOB;Y?2mN7;Gz%Q6Mudgy$GRv#_`v2i${l!H6TUL2uQfQs6*osQ0D;bR@v?r^UG>h~ z7cp3ATR*cgGdRa*@!W9>-n3%qWX)sE+Nb9!O?5vEXFpFz-C<<2dja(X2p>_cy-qc9WSAF8+y*q!O!cL?o z~FRYzSN5hG+zWR35|?Ec+uENsz=^#zmNPEBIZ6bAL?$4ym} ziW)ci#N6x9sCWOd7-taP^qZ&I>ZY>mS-yA)hWl1*ScjBXLx=vI{v1B$$VD3n9PRlu zVMG`v!lk*@fH(Fr8Rnj!AfGaRM4?W~*pbQlhh@-svzCyrWvR66n0)Qc=d`ja*_97e zGK2`Fen=WK_NSAD@$EYhXJ&=FjRQRnMLYKyVX?2m7dy{(>0L_Uwkr-dOQVjCq(NH{ zAF&v-BQo-g;ngv_kg;>7k}GmR-^CZQLc|xpzOu?JH&xfV<@TUa#e*R;MzP~b_0%S! zWNT$!GxMxOFd+{=8)`+-U=^QpyQ(ib%^E)#-yS!ZBP-H_8V)sA{yj(>mP9pd(jZlU zGvS;2X-TxG^20KHiU&@qx+0$XyO~So@n4ulZZtM+=~tBo(E-f(QzOp+nFxksGuIKv zE}Pr@N>=?yMW8DJJG_5_iaxNs{L|HKis2Iq8E!mN54?`#k3XrXtmj%kQ3S(3zr1CP zVlC$vE8joyERZugS9W1olV&#GnHrwENojcR$}$O}GUEHX^_NHIxvaGP#vW@- zo7By|XsTN}u{(%dt|XJD2&9rz<7D}1B~bIU#i48q^!8~4#roPw1s~Eh)1&v6L-G?b zOA1%Z?7YWcHd}c-hnARw5`)%$a`6f3Zbxk;7xa;(7xm0logtkANuET7IYq1x$w7|Q zA#kR~#8o7O!iR8J<+_w&cDu8#{)nAV{&DddrGPDfVpWBzfW2>6)|t6*)&oVI8n4?k z4n)6Fl1413=D|EvSJ&vu$sBfhR;|?OYmW;%$7^hJe5ysNmz5nqWc+9DklZ3cdwi;6 z!or;ZO|N{VYeg35#`V1NC0@6;ps#fP370(m-x_h zjO0^2cOEt;)=_-TpxnZT1J%Yw^Oi+=#F^$L%hPjJf7PGBpeOrxp&}2rMG|4{W}3^N znY-Hb`;Im=D}xNfV%e%;UQyTty6>PL72EZy{((FTCd|V|ju^isv*Bo{8}k6}V?ASK znK22iF-R#;5LtL>LPa=>EM#}UzadZJu0F2VPzrp zOcP~^>y0)bM$LB2ht%<4sa8bRE`#Q8H*FnB9&Y?)FxoEfLewk(p!V~uYE)sA?u^wCUs@9mHk zXTD>^)8|x@XkPL4Q&oki_~#N3@kFB(TajAeIzxV++r+J+&=Xql962Y)eN?j@)Bffp z3i>Wka}m81np8MYX+pLrf8P+Qv2Z)dO*NqwDkMAg@=`H(BZeUMt@GWuJ;4Y3M3S7} zG3fRORnO$x$ko(34IGBOP@lNx*c%?QhAqWqr-HT!73)IiR8Xp8K)Fxw5cQUlgoD)D z+G?RUMLi>7)p=c2RhAmeRVf?~XS}>_qI424;IcP*kJ2~kc_(kT$0_Wy#y>J?$!n=% zTQ||Cy0cNPT-Aci8Bne#XOh12-3G~}m?oi(v2~|LUmB06CYK2Gym70#B2T`dEUiwN zHK3Qgz^3>-wd?Tr@^j{vbq{dej7oP4Tl0e+?lSm!v|NArd|U>NPqdY)^X!NoK^7hG zJlMOb`caqF!pzwF==(Gu&B=twzY=g1s`m|UfTV`4DwxT}LK{(T3iYBtkg?E#XN)Rl z;)ET+uPf_=sTD!Nk4I_{-?M+T zt`vrIOgEj7+$vPz8eY|~`K!i)c4Pv8MBd3f4;PiMmSF-zonejH&@W2rV5j-6=&sc0 z?BUe-%&k__*-BN5HPq2W{>7nR$LHtUf(XR&coOKD|W z0j`AG0x_C?vNnZPFTC$kSJ!2e3TvhdTu@F@aQ{oUjl5MX^{<7xz2VENCt^uJ+Mu+^ z3IRdM2HARwBblErIf}0JOPCirEP_STO~Qn^Y0$4lDm~o8Bd5WMj|cqKJyIl2)Q*AZ zd~>CVp%2i)LgMV~Z8_Ab2&6*@^Mtx(jEZ4A#Ln9O3+o7yr1r^a%+Lh8+^hbGx8)zC zD%BX0?>HsD6`k5Cl%1f@oNXsB$(A_fr~=o*cBeM_}qs0y^xuV z8y)R9%rqzc$)hp3so7oYrX8ZEO62NbKZm&n#$G6FA|p0~Ljiw&o|V(~cOm8)w9iF% zAi3|b0n+}x@OGUy>VxQu30709^a8KcL)xjXBdy>>&A^4a?$-G{bMP4utV|g4dG?}T zyKNTNhwya6Gx+O$Tj3qfvTd90cZYGlmXC&_d4uG>qi5<2Vv`Q%TCX2Gdar`23VuDZS3g*Q`xkeXDAfTKyIBNNhZ$NtiF;6c%7;nu&YG3fVd zs4?Ix5DYDU0OXY3ONXdTGqri%nlGib8gsrQq}WqZU?{(cZ1T21s<^8`4fSFz}$h?4i%wRENba<^|md@5h^SP8gS}MVSD`^$~n%khbchjhT;NTd^hf z*QBt8Mrt?I`dxw%9u7JK9#{U)7*tUWxD_aoA=jJN$I9uSU=)+ljgfNo3#C$uw3CLo zhGkB&$E4p~p%b`VH<(i|r3HIWm`ap;`<4`0cq>ujo_~`=lpHPeP{|I6YG59Uw!U!}8+~LGUd*oTg?ua* zj}>l68V%~DhuBVJnvuL0iqXJ<9jW6bj8pObOR^PGda)p?9}|%P{i|*!_;egB&?m6z zn=#Znzu%6K#>96oKPkxy_LygFT8PqM!7^N=%a#W$S0T2jG}j~~z3}c-S(mJ1jv6!O z_!#NR6Ww3B1>G_rJgluZhN{R3==hj9ZbRzxPf#;pX`?o`a+gw_d+e$9a^DzC-LoLS>YAhm>&SJRe zxvmH^nfcQ@4vEdvgPgaWIxemIY16YH&A&J!KUCE#N5s1908^tPLv<#Zey`fjc#g$m zoz0>x$k2Qj-kqoFmXcg(^+8*g$M3t38!f_cRX&O-+R8%MXzmR@T-aiA^aUdKWeeM|ddBjJ|5j zYZp(pjlH|bh%vx6I0o`v3YXjNl(r66;g;bG8inc?xB#Z5e>n9wWN|2cMu>{jR%Wi# zSXn6-by?^XoxdJMXWUJru2mdj%Q_|^Ynm`9{rK9XVul%P`E3o!LsM;pJ`vEOxAV(Q z#v2bq5;z(zEJ$8(@HFIIq`+KE%?<->^(0FmSM{OC$QacsrBn`3{C1uFr2xsyQ z633~uKoDKn!T~Px(BdUZ^IVRUI6nLXc}w{JP!?^}4JodV$D|qA^C2*sZJs8^_OV5k 
z`Ofxh!Tn|H)qNx`0@UoME)Bp(nQxRZdGn~BBy!WhdO@=woy5pyPI?5yv~*fR^3;j? zv!e_xVS3$`kG6qT-sxy<%6t3-x*WKO)BZT}F$DiW3u_lWeh?4xFy@fGZ!Pv=st3O;7E%)7QIj(6 z!i3?EG9Ga?T8)0@1O#|85*%d6B6uYDxX1T#neY~L)NEWx6iM)|;WARxc^tuQOk$xS zkMm~EM!}m&z6oJ%i4#u)S;RlMdu5%dqVW?QMPfMhM1H4&H!SReLG)xzT> zV){SuR5;W-5}Bazkm2$*>EO`@up`o3IJZs*+=%?bb%tLZkiq@6qqU5o+`^sKWqu_J z(HCJX$>FG#=zu`|Fz%GbiekU20WY_sF1kZ7Yu82PVk!}5ab;^~3x7LFyTFf>cGuW4 zZWL?8AVk#>&C$ZfaKsN2!Fohsbv%D;b>tqlZnD22a8JvQb2c0BaUijmmT;N!;mM?2 zpTgpflIGI^Sdi3i%Bsw+_5{utG521-XSoY&+Kba>=KX-tOWRFwOB@pNR7mDtW~r#; zd@1G>Ox~}jp(puRE|oCARrx(6xT(d#d#2OXCju#CQf^*|gyiD*KE&zMtLB3)+R#A+ zc#b8N${=WucT;cyPoh^2`S98-=w0)W8@?+n`v_bBV7nZ$OW6<>4JOymm)XlUwuDV0 zBD;A@*2o3&A47Z!xvgp@!SQ6yoCusho%N{9=i?ahg&{up>aA1T2L;+vgD-jLcRVsy zJY^l>OGNTM_#AKkE2iBO|(B|8{p^&A+nO^IxW2W zyDzgFfmHLOPYAz7$YUL)-A(r8uBXqAz8z6njSg)f!Ip$#HIRugVh{~{KY-rbA_F@Y za}2F0Pz`Cy+y)-94=}i*!rl(ff|9OZOx>MaHJzO7Mc*igv#N)%zl4>i$La zKe&Ue?A)yEe478wCuHO27WjV{{+FKc2UiPAL8&ip=B_r*?lw-24PVHK*b}i-2?01* z*#D?+1^%rfAR{2a!y}*~At558qoSjup`xK-VB%t9VB%n+p<%zn#=*lUARs`;dQbQc zpAZ+H0RP`AND>C_%?2I;1pxsC9|H{o|Nlq9{V&~ry&xQ9&>L|P4u+~>24@HccwtM# zuHd2{@r`qfKU8`WcPOveBkS{>uR59Pb0&?#j9M6lPsQjMS6}Y6->zq|>5SwZAb~|h zL{jkumIs(pNY=6%@J0*cyT=2+&=ziD=V#x!()4<;{R0ia0L-Kly{v2WGOfuobIJTI+l?;Ze*>)f(t|bS?`f6AnRdr&69iS z1}3j(0k%O`XEYX8G>p%GV3o?pyY$RSJ92x~<*`z*%V9cVc6VR7KW%7!pE7=sh)vSe zaKud;q3y_7fFVk+-LPRR2-}J6FNDYdx6aqmXH)zo3UdRs3ThD-)%*m2;Tqv`pCOko z83X7#eRYDy;q~xXZImNJ++bhpPf3P-f~i!@7{nWYWdA*ae-m|&2qP$l$V@?S&@Dmb zs?eV$mnzAGQvxE|R+L^8!8G7|yLn$qomNVZpqb?+ht!}@uDp$Z%U>c3`l6GxX&&vt z{f%p46QO?>?o*EX)SoI~=9I$MPrU=?m?MR24>r(?9yY(F^*VXYOg!5toL!{M^82Wk zwttHUDT7tUpGqI8=qxZgDk$D5zipFpr6AC6zFQqVfdJmuAvi zmTcgIc!Qol`IIeKG;s4Jd`P9{`20N@=J;VD^OKKw%)E$ifRZdRATx&$X@5Sh6;q^H zCQO!J>PZc;irW_8P2d+z+S8!M4itbR2`U|6TLQ5gZpJa$S?_X;&Bo(4SaTOuBDxg~6lb!yy$Y9#`gCeG{!tM#cP z2DqZ1Dym`BDoofs zviFTBmL7)iibIh;X)| z$*GuX0m7dKHei!`E#{au*(^w}++vG3rDauB-LlL@7^x-_>P%qohUjLg69&dVXSQ3W zs#2z#U;8IaK+Bl#tvzBCm>pxVv=MWek#)@cCs#hXsaBgtKNd`+{w{hC%UJBrAIMd< zrKq}kPh;votuCal_;&l#|Kp~2NJmXLg7zY$mVc5J=xt~#7(rt2Da}y%fX>@+OWnmc z$pV5V0V_9U?-V_hYOcvLL)jJ7`p&2Q9 z-KwR0BS;&EatJm=x<52-p#(Jc-xo|{^@I811%QpeL2rrKmbAeg+Z(9BT_OBxXHT{+ z$BjOqMv)*g^$*0s54TbQnzcFD-$)9v$odN>^^hP#I&6o-m?)n=_EGO<|~z)(Ocw`@D}e*mF(dPF~FG=v93EWW(!dx<%rH7p@td3DmFD0YrpLM|i2+Pu zGS`@bQ6OWnQAdd~r;l?54%f}^?f6uE%@W=EtD5-Q$bCvRlYdTFs{aG6O)&;emUaG! 
z>&W4|n{JYIp;2p%9GnkY>|$Jnk1sZ`v7G*o1o-#}jOR@sh1EtWVmntwqZ~0i$2A#% zT0NmGQ9VEJAj>oOuC_zPozg9#12Etx7(V)q(|9T>V02cXK}cI@1$TW@h-4x#(uel4 zaT8si=Ij>zwr3Z4Tb4a3JoqU^1WbQBkv#hcN^z}t1#2;g*?&N4JOsC|7krR>n(eqM z!#Q95PWfxF_U=$?Kbb1leV#WU%*UhVRQ}@c!-)?)1pFlGLdRib=qbK;Pti`RqW4z1p1{~@ceJon7`GrB&j(yDy1O0us$ zJ*Z_{qzkEv)>CaVU=4+u8u(=%MdaTkgh)#tF2bR&{wH;&AI*RZd>y1oE767?3f|c- zyCqzo+}qoGjJ+rw5`-tS5o+f35ltFeJMr5o!S8nYGaHn%&h<$uU#@su$_r+|GKJ{N zWAqn&f}_^`w3!>tWOI#i2Ld{HjxV&Me?9K=kQ_o^ugqq{z|~AHM1W}=F9$iJ@v&BV z=LACHL?XE}UY zDiqa<4(4b=D{TX=0ESZSDS41Nzl6`~p|mO_*LDaI&T-_#2mdz*>}CZb)X*}GM)xiS zUd(*;vW$>B{)EPXo+g+}6`uDUKstwPrY(tjev&X3A*=O z-jh!JA?*v5a&OrXh(>@Td+;&z;&dgx@_Cr~Ai*X3rCfy_NYiYL(h=Y%ATff+NYLej zi$?XrAz~nc9Vr2`JKnUg#En#&k3iQ*8RGiKf|9DIVty@owrwLf@~iEUSxlkG{p=P@ zl(VAWi3QtyekY%ErmNNep2NaP?4xlO&{%TH>a?v^)@M6znb}U2{F2wW zdX8&U($#vD(Ts zplju`K6P-;`yr2rLjLXGx^hLb?Nk)!Qmdh&W`&vy=K3Rbpux+JoTl5@UO(>%-o#Oo zN<8oxlxfY`rzWED1(wxz1D}2Zr~8~kR02f=`R1PB9_i$;st=tv%ori*c30C_sXgbwlm$zm&P2?SS5AvRt_P!&E{W$je=AIRtBH9X$@H$)D) zVY;UNIJMeb7VWjh|9n`M(jT!4exnM=!8=s`rcNRn<`^wYkIr?*mljh_X8O#{0`I~C zgn2tD#3Xk%2Rt7LM{DQ@F2`q5ydxr9W57dr^t6ER;Z2h1LxX9?`S?$jl;Z156N9p} zKKCCn%abYZ33cp81ywD7;~x&dATSe*siL0>S_LPYFNJ-#eGFQ_vb_`nR*narQYYpe zbzT<#fsPk*A8gN&`9^7z=W2|TfxovvL}P^OBaI*JV7S!x37UIVrgx@coP**bp3y&3 z2c9&&*<`eqo!fR;LZars-$h>-KK$mcXjzmdn-duto4PD+UfsXOrN3xB{yT5k-W+ef zL6j7mYEsSGE$mHv7$P#j37KfZ_zC?0zq;Br;bp;e_1;O~4*zjy&A;-nsRl<6u;H!m zJZ+=apx)gqCAHhkU9q~`$q+HId41nxm^Pv4#`yLo1utp`u&r6KGrIKr5m02ETJyym z99{LsZAs9Ur)Ay$|c_dY*O)Yc}7?To0_y-~`qLM5%q_O7$%O{hS5LCWrC_HmhgY7xw zQy3?X4xwDZn*Rs-sdIYE?4Xw-Bu69BrO=|k=P&N~&K&R4RcJtw?gds}b#C8h(j0z^ z>}oB&-hQ}fzn`*->1$_=u~@H1LWu%FNmh2~v11i>`bF!NPAnvBykOh{zy`Xd`?n~0 zs45-t%+1|bcHh6XyqlzM!D0dacPEu_I{wWUo^k#B5;98%Wz50^K~kjcySlltB^ z;QF0psJXG%>v#^A@w;rcYeb@ifJREjE3(X9mDH>k>DihVy&v>mAngJ}|EiqDVcqpKcW)zQ z_5rb?Pb+u7b}jucl-&I+8~H_Y)%>k?q%LyeT|9NBTD2a{rx5*g4rGUGR*p3j)mx*- z;U+uF^IZb50Naxu+n$whAp;`~={YuiBV|4mI{eoYvYmx%=u(du!1HiRkf8XqvuD)9 zQ0)tMY*Y(LwZs~^RWiSeUgSwJ6&X|zyBL!k>R+Xk#?AP@TrVFFl;B7*W4mxUfL;q@yd35Fzv5}3vVPHGe5Mj_7;ydUYwEJ?aHd2ls$1Rf6E>+QS8@cGR_CvrQ??hmL;4V z)3`H2(TJ+A5P!s%!cMWv%#VC_!o%Lp-_*p>OUQDPrx~t*46=nhM+O>_>y#3{v%)`u5(Sn&=S73=6PT z4OTE)k$YAHeRjUi%R*n1cYmFCg^zn+s~7(yx+~{glh8S<*V++IV0MtE^v!Nkmxp7Q zr~d34h2XB0z`qwA&pTx|^~&hI=`E)G4-^~DHX0xP)SX;#sWGhmiZ-{nvtk)vOT8X! 
zw>usAnQy`0w3H*MUNQ^ewz{3$U}4y`u5CLOSMenTxa;4_IgErS`*^10B1(0sVWc~R zYBllID8hGiv5B#;5wiRn4Tqj!QYnCHD9&|x^RG@B;oDhy^5wn$)qw$fW`?|s!Rm~T z8uvfYayOkO^o%lD^Jz2CF8U%#ZFuD`8l2RxC^`R+$FoVwKc(NCYD+J zk-e&6GsgIn;6bO|skq6_+>@d=} zIvziiR&u222x4TIa-i}gPuFw`ZR*&KIxMOYlDTpW`RNRZMIegk_g%n*h2X{dN9&MZ zqz>y_MR7me+Bm=`tmoHVWx@&cLYo4_w)_lOIfXo>#0OatI7A2bUXw3e3O?9)281a9 z;|U{Y!XHZk)5EfNt^{5gJ3Qy_c~V2qovwq`g0H7sbjfkMDZ3WJSJr;4Ux6@H^e>H9 zNVaM3L)|k)cXDvL=+k(WFy0mDDCTGd$8@eOLjB$CGNreXWljjv_H);=jWCY|I zqRrRPby)v_kXd}2dRA{q(n~UoJBWkvq+apq95k_Y9(!ls37?2;%LE>vA&m?O;6ktC4ra% z1c#!mmw2R>*7-S#$)iGE=pR}Xu8owPIJGG~|6XT2v5+K&?fP%f%5d4z*svJ@0u)Oz zG0UrUDic`B!*yOQc?2wO1j1lnvkqmh+5{EB8C*T`mz?0Pio355@fFVk3-A7c6tk0P z0u{;RaBF__)nr$C&}%bIwvr*&NIhs&avteut29D6E#8TFL=?Gc>2v8iel2_;xf?X- zLLd%6?kpFlwb;qKtM0~HSs5t@I{$Kk0nC4(*Cv0Bi-_!|Rs=ugA)1xsIt)3Hoj%^x+Zo{X&3^DAr;vDB-Xcf%8iE-FLS zSu2I6NCW@Qp3<&~#NV#GF6Dq%xQaKdju>j<{pumno(#SW~?`Bzc=5FUk6<$nzuNR7KSYIE(CU_c4qly2EHQY5lh-a|+OLV(K zO|b!R$85UzIdmSC(w_Sb`0&AV^v*(CcQN&kpy#O(_fL+{M&aMvVliyhO_76m^6q1z zH_P^Z33eH=!|z=UX%zP+W@9sPg7GtE+oSDf<%LlzXhOac2A%5aVE$!I@^xrx$@JoC zkY@^-27?0LhV?gLg&z21=x48&CrSNoBGmWbOH8T_#pX@*w0u|-U{{7P5h#m1&QwPK zhpFXTqYgxl$jo|ncvt%f!OjkMDzSIt-F2fkJ@2cJ7y6hVI;3gQU*TeHMLMP>68@|D z+V~zb+$99aaVGO%x<<{oQFZ-n|HMI^4RI3f9 zUkzbmzcSOULyKau0mW}sw~WN=weRU2`9(Kw4@EK3ybJpO#(bAW-FS~H^+Te89VrOf zqj0TQ{Ics|5<5}$gbg8p4%jz?g;ImLa+Q6cT>(f> ze9vf?Q(YB2**Q6>&IVk<339!mahoQ&pHTO!^scU&9TYNpIAwbJJ|I4{D9$FV%=LS% z;!7}68%AGu+zth3McSdLJbooZ~_k@4g%qH7d)%P}Oe;T6fBeh#) z`9P7ujuQk20houw2-KDTl7$}g(>kc6`HNhT>j*Yw-D_WQe|heoSPLU-x>h;eEBI{v zRWuOlMA7&zil9~@g|y;k+iu)W8;xb8dY#kvXR^QlgL#Sf)4==68K}sy_-tP*TnGfi zPMAm3fX*B&)s`rR`+ghi&M6XwXLXzYttIG4=V|V&2{R`ZNz3* zLjIGf_L~`st)Z@@5e&X=+LpSnMxmUwhgyo2-x?kwssiADj9SD}-7kE%+=jh+qXv;? zLbLy@0(C=%7gIS-x84g_@uF7dv$AE?9oly9f7Q0v_ZI0vFFD@cF>Au~f-AqTv~)6i zw5UfD5pg;bg8xqddnbh05Ni(Ufw*nGa%x>ZK^?$sW15PJ@neg&r`=x&37DT1TT6?2bw8FV`+@v)u7e1-z10SnUq%XYSX0+HCSh@*?D_<$$dCe3F%$hv~Mg zSn1Mg7mEUtl1L3y0p<4wr%A29xz%nl&ek51tB77b3 zCcUQX(Og=|2Z(e=SwFJfO3Ng9!xR|j92)DQ)vjZIJKXc=^_eZm1;|kzNa^0XJtI`K zn%3U-VYWqPSi^C~2S1)`lAlGn`&GnOF}<{qjJ$G;Kc+EIHR7ysMV-qEZB;)~f30^% zT~UGAcrRRvJ#K4wtpuwfE9QR)B=t3S#2U4$YDf;*(kv(d=RDU2+Qhc-qFb=2G>hevkO}oZl{U7l3XN~f#YQ}{yFR?t{R2zC)b2!f(aK9V zAyAR^HTJ*kr~4efxo;M;;%jJTw7Cl;Fv;_O4Y()f82Wxy_78{u0A&p;RKAMOLDQ{l zEunDNGO7jOb_W&b&l2MKV6Jvkp>@qCWPGvv6aLQnzmGg|X)dvEblPpY4wy5A-hp*t9x6pADgLnH>i&^*zl`;O`09_#Z)sPP0Wsh;;)W%D8`uUM;!sJY!XV zxws2tx!v>hsMTvFcEu?vX$=w|zwO72F;ZhKemKg3Iak>XDY z>Nf~(CWdGjl_x6M9X-Wl+3cft zWoLY0cMgZvvZZj$3a;98(w8NPJfB*Y!o{y9D4#pyJ4dB=_0;1oTiv#or%s6f0KK@v zk~=S_@~JK45b3-6ysf0)}&76J}(BDU87lUB7fa3XOEju@-@PS%% z1C*XNhHN>>ARPT^_WuAZfO^%PVlOV~H*=Vf_4XA*C-D_GTL}FP{e)wS#GegyYcI5j z+TQG+EsrNEI(}8_{{Ux_#_6DZ0I!VxHSl-r!SQGQ70#)$>&iuw%`hr*ox-8nZA-omd9%(N)vW*#TBP<=UGhfNowWe zJ4xF`S)VICdJE|7ZRCi5AKq;_tgBf(%dD=~eWag79w9n$(V&RfOOl?|PcS`0Md;z6Sg;)x1-uL+9TBEp0r1!@|nl&(H#DmG5Lj z*!ZXRtnhx1;qMy9;=3I}D-BLSBcDLLTuKfP&5$vSW37C-sw|qOr8KIm7npaFI0Gj< zS6}f<#=7UltJwTatUBE4cTQt(EpT|oK3_`XuOqp!`6Udmv~C1frv&1x&RG;@fT$bE z>GINB%tU52I4#_N)N)+OHVdAE0<>*By)^{47T#=nL14_ei-O3NZZ*#s6|1$B1G zCB2*%G9;2jgfii9g}%M<^sa8!#bzFHhcV~wgUPO<`&auun+>(hnTip)EMXW5ea~8n zYEk;B`xX3ShvK$}XQWwy9mSjolI3yR9ANub-JToLlf!m@20y(dpE+!SlkH!cKk!SK z?StNFl?Oc!YV{8gUc6d$(q7AOA#t?En9F@QubBQN z+-kQmi1e#}38K=Ze=V)L) z&U$n;;vd-)X)?6;dla0oP;zVQKiY%g-P3r=8_hm7hW+6JSmVnsG19(I@h!deo#D2> z43i<+6^1y;_jRE>&;jan!g}elkOJy=! 
zy}s!4f~}FqIpV)3ziKZD>RLyPBwO(xn5@8Ua7g6vd9U3s2>rg}L9)@cI6lt=Hp>(F zYmh>Js2xpw3;zHG2mO|`s~-*C_{&S4Up-&RRCwuRb9bY=9Tie_Y~!|k;@!p0h*vkLE+6#8#{j~i4rpgUqXM*EA;2!7mBX6 z?*Losx_bwNXOG{eCd^%KQUL8Uth1a zbWz<~#dQ_Nt!$IV%e4_k6>xfgaanh4*7G&IF?ZR89ewKkj+ZUQopAROd68S-`FY`Y zan`jUY)Z1U`jX-}6^KX_@K0_%D1*a(D_kJlT5Noy-n@?M_K~#Ln(k}8S~+d3;wB^m1k2C? z?TpvXKM=eX;vGjwx6@94r8KJGEV2hB*!pht?Ot=@{{Vxx-WAe*$u6YiNf4dmjFL#m z!v6rhpXFLp)!DK8H*2HauRKF-@fv5;uWsSAf#h})ebNvepnfD1TpqFTi$VC4rCnTz zVUghy8)<`WcfmMO>s}S&OFe5r(qfY4N#VAf0Fe@NfG|Iisk}RX;|~+~X3Z`a%-Lo? zbQqDjy65q()$XI8mEU90ejfZ6ia!uR1e#22z_R&{0acU^2j^c|crQVHGgi`c$*jeq z>{()--++V=8GSzRCcGQrU&kvwYRcbFzO`Fvr(}62Ck&+j01!U4^mm8ue#$LnhT$&l zE+KT4q-?T$=LJuHW48zEQ!1JZkovCH<5ALd)$*_8c-?lqZp5;mt$3fujZr*7;Jsr= zojk>w7E==va(d_Us+vE={{RyBUd4PrdXefjF{u{U@&mnwGP%#U7_UF^x5SA50I>Bt zy+$Rpg<@jzvB(+v*Dp2pE0;s_OIYxY_M77kNv&;`A0^m-F2@}~?ax}Hs%fzcTXK)( zPay_&4sz|>9&2C3-Y3(f@n*4eZ3mw;lNlqCq{0Im2ou%jS8q393Q1aZEzXbx8BcMnCQ}6N9I5`4cksB-7rRn z_T;*Kk{fjhv!>Sf6=T8+KBcWeV;`8#z?*+BsFVz^KU$@6rnS9{G0Y0GgN%0Oy=&oD z!tF{G(slTzk8Qr82$B=`h%TTU`e2H}PRO|KecSMV!s_qFKMb@DTIlb-Hu3rF!x?Y@ zC-kny^GJ@~*w1dkSa9N~_&ymtCkXJg(JPBcw#PJM3fS-LYpRpT^E|$*GV|?<@#VSQ zA2XbeBGIYZr;kc0*EV-H^So+)RX7w>T@_>PD~&ohZRLei34#TAAI3dGKMwe1FJT*J zj&R+7djMi_On@^w|~XevGDDJXj;5i5X|}W zE#wRe;QCizEzDM%pCtTr_?dt4KgQSEz1qgF<{09QGlfui?_O(psN5~Hs`ajA!fKv{yvP^SgK* zuckK%Bp^fTD`1RzAS{@#hW=IkDPFDqd2S<)_%=@ zd2p7{DYcQds=q`2KaF%;#ENT;F|K1>M$yad1s*oqrE$PKb5PA=40_GA&XnvDNfn}r zPFV@?H^TX z5Sg^QHlK-vVO#R9kHWW6*y?H-{It)1%-zi^Y0&=S>dl0<_Y0G^kPm8+v{Ysck^I=n zuMNAi(MPXZSXpZmLoK$~UJl&k{xt6vcn3|^5S!?eqN!n#$364ZWA(2(@Xo1qpvoqS zLXV%iPZ+O3kHyy7y{KE8osucY9gnR~H!Do&OQGQZ02r)v%NtwIF5F$4H%n`OMQk$d z*}0A}*C+I>%`f4tr-?i};orW6R84c!ByKz&I}CnR?RTCVwAChkk;gP;i*tj?>rz?x zUK=>rLmtH^4ixc@^`vUH5`tSFENb5l-$T?cVundm%W#|H;~*X@*uEI}!uHn9?Bwf>Nd= z8<4Q%el_zi?QQWG$)(!(S5CQBp5{RJ1`;yE`(6Sv`z(ADzr6Un;ayk6nsZ;TPJHP=et6F6sgq)(iGd|mL?^Wo3- z70u+E=hR~pn^!^#DFBS~?^)A$ayT@5ooWkFabV10CD{Codx8ypjr(KxO|*D?W8zI~ zO^;2~?zJVe(&1hL+bXaHRCa7Gc&~<%`sUi+Yetem1D*a>PTcX$dUWM@oU+lH+P8#l z?X-Ar?XFdtR@kum@sewQ3dYq%(nWSmDBKg$pqlDgVJo}toPeW&{uN@&b<}6hMv)On z4UFK`2yUlq{gtRm3~jwZ8yNRfT@H_^{fPLqeHGhUWZOmYAmcbw)c9@ZL!s&Q?wF^-^n6JE7ta2HDJjN=m1gGt;rGtwPo- zYhT@+s;$7UKH6A+>P+Yb%SrWwYa zeYVkXWO$_9Iq!`B06O7ytzESn7fB{(h<~e0laIo_zx}Yj5o$jR)=f$Ye$ityh>U(; z%gXgN@m;(tB+V&o!p8+YE7DduwJF~0xQ^9Be+T7U?ZZufEyCNz`3!Ivo|U_ByRM{* z?uA?}b1iEux5$X*GFz=&cM~`4Y|_=dr5|XxBP4;4E9{?zpR)Y_03APP$)nPeJ8uw1 z%=YuRzy~8EcWibN`uU#1>1w3%98HFr5BfIRK`SB?0|cU{{J z_zI^r%PP$~7NU=YTlRhNE~nzA)wNwV-q0&6NpA~(ub03Bp!!#+w}#@-v>PilOh+0j z?#DSb?3$E7MDl-l5^Iaptp)XiWD?52=kctiQc_x-5rSHynD}FU+HR;VZolK)ouoE! 
zI|}+w!xp+Zvz`g zzaVLUn;N!*9hLNp94r(bH;h&Wtqi_USXXp|`=fzYkBhIbnWUQ6%wCvRB-Pt8Zj{c0 z$sOZv^_z{&VYa3OdX2@Un2CdV#QM}0x0ja;@Oh83sl!#he^0!Y;a&m9b65O5rli)c zP^pqO!?&-kWy;M;*BTf0t)m2dOh-oVSS{gy`)@D{}X0BDasO^t64 z+9O5oafA9-gvH?K+r-I#BdY*+ZOQK<abF~Bwi7Evyh_47IUud)9C;H6&=VA8x@ zplguaE5)cl`(MAwv;`;DzAm@aWU`I^@Mc1TcYKU~Rq4_A5yw;FDZ?h%$&ZIQ1UG!s z-%YuO6`8h(-2VW4RcOFvell^zZg_KlMLNcMLJZ@S6BP#8)eq-7L;b>g|?tj(jfj$iv3n~-|ZTF@k(QtsLhdMhiHW9cuC z9vRSe{{VviBZtHV5g@aYRgyu-br}`-vmAD?{h{(Bg`_H>xbAUZw%;8zbn&;tonJy% zMY+4TXzk?~u@_x=;qr_yeeM{{pB4%5aCd9H}-p^{xrQ&pUQTIvnEtj8-^ z8i$z;)O!YGW1QC3tgEOsi!K^Slx{0kBlC3)NH)R|JCR$h1a!U?@m8nc-8Oc$)MvFz zt|oM1GCK|{^keoDzLQ$`d2guPO2#8_<~5PImz)+k>&<>?O%1-MeQ#-VBQ3XAC^LX7 z^uzWlFXB%QS;eKm);bk`fLH-x{^AW*~ux3bkc*{?yY>&;|Yv% z4)PISUwGQg-D#yHK#{8ai`1S!O8IZb`V`P!G?GKGc3!o^C8@l2Jo4#$-6Rq|bHE>s zWn03RO6aS|IX;HA<&C6}D)0f}4_eN%h}!BBWMLGL!|PJj(Y!W)FmE)|4BJsQvkdOV zV0elzFUpobnV0Wky)#g^S(Z5y??l*f>}!eEw0SM>n*;YUHtLf>G#x@;?DI1~s#F%- zy=v8-v2%F)jkzA1^Nf1^Y2gG~CE_6pF@VSLs{S9mc=7hP1yOp6mIjUHv2;{Lff&XK z0-@6{lSb0qhRd^m5(jd9O-|PlYJrSy6?2Wv)YetitF6IyAG=?bf5wqlAN{1a@3P~P z++?0Aoy0b_cTq-mv0@l!sH)Ek+DwYr*yncR+LGSQ^$X^o8Dy}#kTv5xQ!nG7q&{4b`K)Zi*I#PqMHG+z#`lWS{#5`ZqHT#`TC=}E}S*Jqf1 z)n5YV`0?=GJugm?W7Ra*dz)N=`_2wiC)DD;I`Dt&!{gmoRh|zOcqrOh>H1~H*%mMi z&LUukKg4%n*Xd$e&n1jf$GI9E%K78JHKfk>+6*%?;bI#Pe{9y2>&hy~a-$hLpPD`k z{e-oM{5zw6_>TRiOUUmtbEY&tVe&>f1F7WnuU`G2zh-gqJHy@vgTuD5>9-azwY>gJ z0u!@vIUwVln)=gGk)elAdqxgK+ZzWQ5sXyYoDHPM3tTj7D^3-@Za`|++fBQi@}{Ef zes^hKvi_mq&lN!h<+bLurr4>K;Z^?tUpG5fJn#m8Irgq=#9y< tkFH!ra_* zjC{86lg8ug$>Z9;M&`b28|#~9UpssGv9gS3uo>-3qTUU!6~bcTNlbf}1eQMaUfM+a zG;sd_2mT$w@XO);o8h3}JW&>&OmgxzOB0XF=qde(tz&6$-f4AG z{{S%=#xq4GrQsQx`estWOJr^7MP*4&+_?q8RH}L8*Y?WkRo;CxD{d3gs)_Vqt*m)%51K8J%{9gET;r{@O9u3qj^-Ve} zmD`9>huvY{>sv-iGGl#@!_N%r4|r8_s?GbdTl!Yb^2cDN7`GW8sjooMegNI+K04KZ zblc7LR?uEDqpV>N=IVI+P=h%OqYx1+fu<7^O!pnLU;6>is`P_O1uhzfVlEzEV zh8A*L+sAKXy|&nf8 zOvQ*EyjQ8}i>Iu??l9#!PIy}3yi@T8${1vZ$7ujy$+VOCn&*0(#QBF`nmrF%xrw1N zNRCGB-j&W=YSFVvAcUzWB!gStD2??ER_0PmGqG&`HO_f4`A-Q=t%43~6QxIgsiF4i zJioLmtH+J5qp1d^xYOd-EyS@Cm0hJzQCP4u*~J-oPT2=Btz5O#9?nH8#7D3H0A7m? 
zoVC=_LN4^uGqi>*YOS0maUmqEU}Cp*+i2$2WRf5@%)H{8X{W4gmdAiWcKj&{H#2mW z5={2T8G&mh`QrfLc~+o-g=>zt*;NS*0@U&ft=K zq?SN`GgKhF+oj87gyv6~Kf zxGsAd**h3ZR#DS@KG&LmwDH~UGTY7wQH*(PqaTqq(#_zgA-Z*lu$3G(?cEP^Tvv#- z+vvV0Exo?zZDd7P+B)ML=kTv!*P?Z6d2LXanjq4!?x*j6O0CGT=9;#G{{U>C?NcA}&`vj@(yytVXflLT7vlV;Bw7sQ^@eTW$3nBI4F>`1p%= zmyX!xrtNH6iOF1eLreQS<^wt~iw7iOm(!(9ZQ<)CvII?Wv=Noa>)W+zX}1tv>AO-a z&l=&MJdvK5s+xVv!FeUT$@{EianH3wl(shIjgGDhc-9!f6R`>gcMngc6`LNu>lj_X zcZw@J&$#`ZdmfX258CxDVJ$?gmhHIbw?ka*^J;TP_IOKMs=3P%UWsR+#ird4v&zyK z&jGz_2U@jzdyB2$5LY9L>eS;$$)6d#I|P?sY!>JzRVAbg)Q;x4jc3K49r2gLOU)x& zwX}Cg`3|eN9(e;CSEzXE%1u^VxF4p@G5FVk!|=}c#G1r^x^1GCo>gtUb-?LPGj~YU z*`wzTFT?sSo#ROab}e_Lt8)}9mM7O0`bF>uT#DjPw(2s!Ae=qNoHY>0BnJ!d8)1A1#5- zO?no+u3R+o7YtdsB$56V!Rm4=d4uL=&r?}D?sUa}=yTTWe%!Kc0Cd4TR_RurGBLe< z7POAgd3rdf4!-(v^}DGw+_Xu4yHq16r(CLD>!z=95;m8)SIj8_*GswbD;# z8(hW+$dqiV?WV^S(}zF1jB!YlnS#u!#{R3$YIyYaxCG-KtVdH@vfIGSpd2ks%89ll z{5|P^*oPuJ9X1|Qerv-#58}BaxrPTh z45vK$jw{R(kz(^>V?%W;)(d*`91c!5XueZHd;e)Axjmf~}%+S}ZH<1SR5!xfXc zx+_e{<~PpAo}f`#@?Kn9NYM}pLynYEvl;bPwG-P+Cyp?x8=bxD3tpZE@|}QxZOFlg z;wx`jW3oY>D}vOWzqJrS*;K5?4S}i1hye z>^r8ous>#b08&8X@~puyhfI5anQrE5x5)^nYM;uwKM>0KKRb5sT!r`&YEW()zFz$L z*Ic(SiRL~g*PdwP6EgKAs+@itSDwkN!!5$4!$%Z8-D|V)@-O;H+B4=iZ*yJ=t2=$B zL%8G)Ij%e1vv`}HEb$(dX*ZJuFgxwRE%Oj-j=PZ0^I?we=eOZmb_yham6?EM-hN^- zYQ4nLZ6kYQKBlJUL`GdhG?FTP^(UMyXy_IK&hg6v0}cVDvp?w!J8d<5bsoox&FCD@ zV_N}Z2OKM9397R)D>Sm4s`G=;nz4AqMx6RqCy5w~WEnW!RUOJBnpX22-M5Bps2zn$ z(|L^(#B+i(Skp5sFyN7Y$E|5;KkE>7$f|t|nU(P(c$-$VWFv8l4_qDOj2FvR#%rPR zqZ8seGIR2u>s-5YdI9HwTO$)wOw}ib>JqQDqsKng==vXtCDU##EU_2wF&*pDET)tDQ%yVDK3L}`(AHKu6tq1FnKcbf1*D>MQbS

RXaLxyllLlvbL%Du5V)k6J1*wgAKm? z-`zd?b#Hf{bL*bEU46Q%Zq@VD?epbVAsfi?gzA*E(mu9UAdr$0hy?@!p@UFBSfE!g z8sker8wnqT{Gubh93Ui~|6_muVh;$n1-<@1b`W~}0XYcm#fJS-RsezCw%2oDV528} zLuX7HL4QNdB`Ctr0|LFuL;C;j^#6IFSJks%yhTs?ibuwciA46&`2QLM;o&1O|3B4V z9_1C%|JxWI>8n@&ua5nkRE75&*$1RmuY*5`1ibS1xGt0|lIu^oeqdpg*gMZdO<{Tl z%_^;4j)Fk<(-7b>FPI4Ue0>adnEsuy+%G5S_bH1J!1W>&fli3HK+w- z6nLQD=5U|!94FY*x9&TAH_pgNSV^*<*6M!sJ9&Z0R8iah!QkRV|I&eFDH+G>& zKgGLK6u+J5pQPnrwPyBhU}Ip@BEEzrU1)E@-C%L~vqa z;k+YX!+QofzP0#ti**?b=e;-&4a#n?yUl%d8x&mg+1DsXoWQP;Uu z5kX^?unjR^o#4=SZ#P|7(%qxIEGvHUcmba{vTO|`>Q6PLeY9I1bT8(W(1xGRSQp-< zqhgQ9QL6)l;h2F@S5Jklr5oq8&kWPmO^4q-IXjM$UuMm)>2wo$t_=tSHda*pk<8(F zNg>NZk|Wx)67Pgg{Fttth^>R>$nj#du-bHD5`R3=*2q*wzxyHHlflv=?Os2_ zwl%|UJmw>{JR~5JbqT$w&VEo{@|imU>R>j<-gWuZINL6A#>4S0lBu6OZfK=NQQ^*; z89sAL6@~O6EDx6w(2P6a{+Lk2JAB)K@0%#KHr@ul>qMZany4YcQ9xIfY*lr{i9le{ zc7`{1Ga1Ap=M%-_VFMyN??A(c=+o>e2TI=zUc^bHa6k?D`HY3AQ4eCQ~ia%Po`Fr55H@TFXxcaVPq&Kbf;uwwGK{t_jJ zqa89#olg54(Tt8L^`$zl?YFXO(zb36Mu^xgMKVWBEG3q`k$W3q*Z9xKVFx@IVmcA4 z1|(PtWf-SV<6Xs{{^LWHHABzUZiv;a;bGJ%5sS&1FE=n?KkiC!_jV^y71+|obm0=A zlvMs%CNBHhy07Q}@Uz(6B1^Xn6PD23`-9>lOc@R#aPM83-EAyrN-|HeF~6`}kVo`5q3r0#@b{#+qfgbZif%z{vMT? z6N8lSU(ES(w5l{(uc&TZyB}&;3TLNCxSy?&mGxtf#ipAVT?PZw#0wHnA_C%$>NcHLQU-3Xg zaVIqIj1!#!k5M~Om)DRKFrgn#p9`vrGEm9<565sLA7Qp7sK?mcfET(gyUT1XZ1~L= zIaoQcJnyXfdo`PW2__W70QN_7qR&_6-!aFmdXW>= zpdT=*Fcyof9!6Xt7+Uk?ljlze2VD-jg|KME0s19y`M~@eRi%##9a>?hz0{bsW?((k zg8%B1@#x^u%SgP^QVF?Ew7n5I@@7ystK-TG6SEfjvp@ZCS^kfe)R-nlwuCTJfClui zAPN>c*G-bKWa)NfN;=)wOKBozA1Ntw|0Zm{pktQ*F=g&>C;rdau0(UG+01l0PuWPH zmTMaT8SZ`xPB_lD7fO%+ebvMK)x|aBYX6Iho2Z;w?CocQIF&J3@%wu%apN(e?F%BW zk!?4hd_mWS1eA^$uZ_s~PG2<1Y=Zx?1*1Z9Qyi@~{5bQe9{n0Qz~;Yf>!i)?-`hjz z6M~`SNe4j<9KWoy4g~)md=}tBrdu#yt|_4f)CfA!*;icE^;wO-NdNp1%(EhhLBEQotBWBkgz7mO8yw31aAp!*#Y9DRCi@T5sCQi%y2|9+y%y;Roc zeiQy#a=ggH#ggPB6KlVU>%udLq#e5Aj`x3im$Rc0TD!Ea^VK4_+j|x0Cby5B%Z9*4J*v{9zsh%#MMLjY*^M#JCZhQ?BN}0fj=OOHdQCmf2rr05jQJWKY_vgQ9X^3|KMY`vf#Y-M! 
z1NqXtzHF-8-D9}=ZMN=Y2fiHN9jJq+kQyaj(xIQ1eVux@FHQ|oqTBhxQI!4S{9eX$ zY3(n^HTmHtZ%*e=A&)FT7Iz_Jm_(z|5}9O7!c5Lu-rn|=ZP5&JGpqZ|o34qufWFEq z^4tubPfom0sXNVI5!E#Ae39=S@=mnXW<05%8Vgty)XB6NMtoyIR?$YrR3%yY~ai6ux@wMXCLkO+?i)kEEhX^v;o>U z_sI(Pa%kBc29Kd(`bT=D=8G8^^)pE1i%l2VQ4?HlX*0D;Xpin0L{YMvAjMmItNWHW zr&sW@l-a{H(_Wa*=o{M?gYWl+wfb2@HRy2c&-k?i9iJ0|dHlD@b1Sjm{ypsYrh}37 zdkSrMx#Std#HU^}Uhgi-iG;*i&nHg8m1HVTle8h>lBDEF47@@@`rrBg2r>%t{{;2* zYh)BOR5Y~zUFaBC80hGj=xAt|IGC7N*e`;H@dg(M`_2FK=Vg%8|J?uQ^>SjPqoMz= z#{XZz|Gz6fcZ2ZIk@At4k&$TY_3`_VfJbQ5WX9_YmgtMPX2+^oaiI{zPUip^IqAfIahU#P2O_V0Gm?S zxdYQ?!E>|WsZLt&H}woI#`%qMEeXq1QEKWe;Tj{VET$ z-bAiQOItkc7Q#8A(#V#d==&oRFpEvSk$wh!yPF;vbmlx26dyXPgzSt89uwg&^VyX% z-8=T^>IY=d(sMKSGs4F+1d^>{2Kh$2W4|wShQBxdvQ%Ed3simlXz`CF z=DK!&?p{iy>Zl&@U;4(BVI>Ie{k#nFBvFVO_Y55#6=j=yBywJRC=>Mr;<&7CQv}Hq zMg6r5WbO^CuK^L0OtRI_p`|NWlVcEfMF!~V_=y%`Ng~;(Dud99J6O zcz(c{{^6L0j%mVGb8Dv9Kb@mH^y$2;c7FsqOaw&pJ%iwrHxw2A*fSZ0fMcq| z^ka5ylhCBME5dHS>x481}YzSWh=2|vd7e8W*k&n4t74W^)Kv zrd6Um_+$b8dGZUSNjpmBUeh?p#Q+WKh&k`g%n#pftE8K>ToSAP*x^^lPTJANJV;pY3eE3Bcivhw@pw(S{5ddt*dGe#9^H@Q+9^xtt>UN$iQ zSZHzHRUy++HGF7FrYHUs-1B~~T^EZjYNE+acdyt?73!0uNGGsJ9NTujl+yMMxSpk# zwt)Xx9g4XugveJD^FUG|r%v}R`BnKX%b2C63utjaO-%S$cUY?E$cS>=PHf!;u8Cou zh#3Ic1C|n!_(TVRMX4wT~6MJ#I)8q8YLAtP4X%@}4I^5Db zC1l>@;Qm}^uk%mz-(UYS4|8}5&JSY+T6R*hRoDzbYV|D)dyNLKI0AN>)4O)kOjaue zM3+)Fc;jAYkBoL}-IsOgizRrr3TaD&>jl54nbIy*zbaWN z>o97|pzNy(vPzUYp1^Jv!Vdbm=}=P=qm*j=IpJoYExIL|UX$X$KK zPiN=oFibx!R~=LC^0Ab|nNDyVi({)`S0Y!zAljFML`%9Wv4CVbQBogN=@>ArJpCyw zsOoqk=zaZ)Qc$NyR3)VgIc~fEOx?$`!R`JD7?t2~%rpn!L`hxywoT`x0`SP${f4jP znXy=)*Yx8Vlwu}f8aQSb?d#=@8NI9M$uzQ^lk@W`N7#wfq#l(Si!X%E_0FSoGfcE3 z=Uumu!%03&&J7zbJ!DIKEQ4hq$0q*wzwswt5)#sXl{< zg!5SCd1aB48n#0QG>#QZ2u@8Ce!)o!w9Re|PLY(F8tnz*gy>`BR$lddKU6pR+*HRm zy;JR)UHG_pRrYV|V12ClalkmLT_C8z5cMDfvX~B}z$_}dm_Izoln>L&lB?Cg7vp?R zl-Q>J#+TDO?8yHSVt*(rnPbS<+y3s&i9;8=Y-h1NVJ~A8ymw@oYIi!evGz@qlPgL^ zL}}APG(s%oRLMS{O=?)eOWvfv&;X@5d@o(w(A@TQN$I|o?jgkY_bPq=XXx+MJWTdI zd-Gj@&v7RNPC#ADZ*0&OLGrooOZkJpoqc&4Z-&01rVr>Ab!I)as`&FV|p833rI9&9V&W+IqjW+6YQ0 zn5@!fwDL{e)uP)$tzfSkXiF>S70``C;@7%DjYT!rPH4uZJ+VvP(EXv-Fv6I$D-i;m zm92y^>}-3TJsi*0!m%@A#xcM*Jr2O%5f}O5KQ^7#X;Jy5(Y}TJQW^(T$k)!I2FR($%EupOsGDLN|5bT>3jCTr=1kZ*5zO6uE&=GWVj)YMU>S?}@<;uS?WM((Cn?e39@ms%ymBK6HK zVANj4Ua$UAlW6O`%*)s=O2Eg9K(NFYM-rx!(6IxDdYPyxBe#X2Uuw=ili&u~chYaT zbK9l)T`1wJa-+LEP)4DcH#z;K2VN8P^xitG*rSWS<8xW&iPCxn_b1OF)^{_0Q+z3P zL;Z4kSusRn@qsMgI4$qRj5S2-`qMOsZ)NAoVzit)QpUkaKIBI{_5TUj;u3wQkklhltt@aD~u?MToK>1MwvBY9XZdNbKa z<)p%&>kJ#k{Lh(A6@XlTZVOhMsfu)xdhZxZo#_k>9piW;w46IW=}4#F7*(k@00L8> z`-M|A6;Z~W$S9WV7PV&5-gDeuB?*F9x@PcE30Jt-_kVV?xsmLi<-~JqT0Z^`hq-<^ z9-%D-B94jT!`q3%tbh729Crs!`kfiMyUlOLD~+exnfxmZ0hT;=tn=opQn61PipP}a zUv-zH7n`}o#mme^kBnmcrKle2Q-|4Cayt+j(T!z%BWvcUOLP=UN@50^dJNizIXoho z?s!?JdwC$uHCW9~ihQ1}SZtYp%iym``jh~wHCBOF+yPoxC5(!Wm_G+ z)T|Eb(eQhXxw((RPTS2UJaQb|({ZkHZ51nWK>*(lKxeG$hpbiH(%Kx)(hEnLIay+H zhWrMTHhA5qw`Q+XY0JZu@*vMBx`Om zpw0h|yPGMGAPsMS9v|IMZuEVf1hR~K!l^cgF_zFmX`&wJR%$Qlvv_N3Rd19u3H2MG zOPvMpeIiZzPcdrpstv`U1|*8M#=bT=t^d;h_7|K6iQC5TqYTmhaIhXigEW_FGJnVH zHn*bR6WRhdu*oQ`w>g;NFFMS+BkAQQqK3OQt$3>4;ING}rP2}MP_$vE{L_8>kfi$o zzQh{L@R|e_mZa(vu2(mGG&+}eh|?YjC}vUR6_t>KdzGBVl^_|4hldfzu-Ht0IW0Bc z1ABPa6DGwo2<08*OggRwEtvl>b|RPEn$18w^7Z8d>x|?^0noUXTVVf0-RS5iXDl?d ztXC0!V;_g4_F#0o+lBqIngqyJ4*h-?G0YY5^~M4myx z5do^_pX(H1O2#f2)Z)|RL5)NDK}!H$QD+SBSFgNUQt(cZ;GsV1OxG&W%JH{+ZqqDd zD;N#&=0?vd@y*N>#@tihI{2b+y%@vS|`}Qc=Bh zFn3(#vHQ6y7-(IM`0Dd;DEtXG?H;`O3_8Z!5(iXltN9;g41n8zj1C3f>OgZ|8 z0e?g}**w?LY@6!ESUo90(V;dwu&Y6cqg?Fj)TT% z9mK6=Wm($KVu}M^2+Wy8w12Y9-=9+PrImIH*|olJ95)EEz_Si(gf5&=C~V2x=cfw& 
z8vCy`+xzM0!$J9X-DeO%5SI=2flQ#pwWOf$%0Y`^pr2;v8$@FfqIG#F?x25bzxfq= z7np!M(Pr7&-Lppq-4=^*BVe)nm$YM4JA~xV=~XGmTWX>3L12ZeEcymq{r+Pmxh>V% zReVCo%PMvf>g?!z9XsMA;=sfz#X3q`vjmriXG248im}o(B=m1PQf>DU>QZ#K0GoeW z7WZMo<>6-swDx&Cfras`mgVN0_xGAfirXwuN(M}sEt#MOM3Qp7&C^u%g?pTPZqa|< z(%3OHT3?d>03^tVnpH3OWW-_3&@~o zSs(EmML zfgi>u%Oyw(W5BRy(Ba&VxE7y+RIZeSRzkX44Ra!8VEYzm)40*{y~_v_{$YL7XCB+R z>r({Z=H`xaY>V?wIsAyP2W6OnH_b-s#P*stG8~qu&TYv3sck5cjQu#pnNofsPYXws z8mPz0zU@DZi<5CjOrZEjZ|BJM-&&-d9fFf>%9~naTo(t)q;>~sRzp?V5HCS3d&Evp zue}L~RnZz`RhMPxwa(>+(Z#N^Mox9vjfd55nxD(0l=Zw8D;+=7(SY52t1{z)!YJ5% z$Mik-Z|5kdD28@Y&C8pU|NF4bvK%s;d;5YFZQd@8;id+@LHRS^H9yic0w+p zY>-@3#_dSLFYExoU9C@a`L=C&ma<=4L6ld6jzNz$v$SX;p-9^pdE=ev!+MLU?+ zZVk-0kKBTsc8Qs<%>Q`?ovGs@SS5Sbzxj>m2N10r3}*xbGYM2 zl##y5_t_J>+sUsZ_O{sPq9hz|X^iMhJXlB4|ocCDvEo@ z@5gqN>^p0SPP<7uP1qYbRb6nE(4&0`SSRkU5Nx_pZrh63H3_<7>P9>Zy4NymL2yl(5u)t{63@AY26aqLh`q3>4E=uBy=y_8P5;qJ^hz1qxhRMfZR?pjjMvZo_eO*eTbI+To zMo&mrm}oRs7%LYY3a)tutanCpb8NI^+LQhl*aKm3Z*MC>uEGdklhI3;OGU_E-OV59 zJiII43ayT7>_F3>IzjFq0cwW6*XGFb-^!oCuK$|~&@&ee{AF{zl`u@nex90u1@G;T4lr5(ei)z zd~PH)LiOUteoTzNl0veFurBa;ByVq-plXd@k2Wp4teL6OrZ!GuANX@bHq!``jA#YE zU5aMwsHdAXLevolP|A?CE*`+q8Nr$pemz{GX)NW49jYx5L>#j4=4o~n_@gShk(Xie zS2CRR*X@LoZ?iN`-2evrvda;g#u4k8)K+odqxqIVhi9d+_(+P`Lg9bO7Z{R0xQ9lU zwxz7Y<`{~}{{jck0lFtlFje#{tp47@egLz2Mkj>xc)!Ajs4u#7$u1){CSiCNyw-#( zF2sKhi4L%LE$p?5yVD&`{Lddgd~E3cpgY6yi?w;>-Ta7i+Ps4eeFG7%^$FxC{27FX ziqJk#IqHfqS<<#!{%rS_s(~4*+H>k|l1Ej4dVF|Rew{c8jO1XS^T*}0JXB-f23trJ z=_23izzu!{NIPvT?3j z!OW|vx8IHch1|W2M=b=|z4SlH2HEiZIX(&lCHStE^VR0Jfia=eQMaK?gA7J*HCbB1 zB_2-{LCq>LU5&;aXRyCh zfo)nIu`|gs?vdz+S`wTO9~%);n{(~$t(LKA%uI8g$d9#>c99aScBAIz&C9*N#JhnF z<}shUd8c={A4D&O4>flt<`Ki^wv8PU_r~p3DUP3p>AGaUx`C?M5YgunWJWkvjg|)G zW9us*B*Ws^P8vhI9NgMzmDMIM_nejaTsvdt3u=5hLE=Hoj@UpP7J*o>fV!v(s!tut zYUpm< zbxzf@CR5}Y7@Nx0FnRrlR^m*KnI3U$H|I3Wt$}-MWB;Qo-Nvm%S5$32s76?epe!i$N zkL|s?o6F4i5GRA05c92mJZYTC*tMAfRT93pj}jF9d}v(A-@Z)%C9ugU)MK?{1vN~?xeeMq;VLM6Ue$GxsgsOmbpRHcYo{# z@2eJ*_~UV`Xzyr~4j<^IYzOkIB}InIRM3QF{V9B9cryB=7437K1Go|79=SYYb90Vm zUO&iTR8OsU{?}#*0)JUNi}MiQJ|4D%^ zbmD4a89;!rwcdo*w>!NEBAi6!8j0hc#X;20o;Wu5*7295R;dzOhmQ& zW1~c_*L68&Jr4?#JMzl;`c$XOx#9U>ARV34inmLf{Uvr;heAWO;KZ%i-xymh*NBBi zCib3JB6x=et#29}kKw|%oerK?|z@2m3xUqHl@13S1;B&IHks9e;e8*3B*-8 zQ!g#t5^rSO1DQ`4lJ|iJUG;>&=KMnBfzA8dWT$-R%%evM&uw>;98#62plLE`o)V3T z8x7;hdv>2P3Yi=AY3tR!b{*U;-Ngl);MT!cJr7`~gF#`wNTR99UW5RpF{KpNVzw9gCSsUwU441{p%VKH z!mXMFw(2Hy%wUi7n6`+Hji%OBdzd4#M5R|q#)EgutfKVAiFz?I&fKr47tW^z>G#!u zQ?hISs0Ytob^1M>^SS+B=agsAkIbHFLm&V!DmEFb)b)7=$+kq>oKuFg47fp$ZE!ED z`!1@6?FbT2b_K+P%ylZoRD5*Y4(5k1%~cK)52AGasp|0#TUk$5tM(;yH|V`x_iiUm z;#?3=O^D%3z8vQ8_+8HvpXQ_MHEwy;p6BL$rV+2&)2Pu{nSzt~Tx({yffDg^`Ndh1`QAtA)PoHLlK$bx4T~xL5!34{Me=iy2 zFx;CyP2Yt_dbSxT>8tU#`_htW)M5o$Jb7+SA#!^lL;_DD-%gS}n?)^pD;A{tf~n@rQsPi;Ne=tT3cg%~iYFB#nLw;L*>?QdTR%vVDPr$$ zWWh=gIhnD2TLgZ;l)_K z50aaYN&nTB@UWE!AN9I@Vrada!!MahWKGuJ8xnZp%vEe}p8k#FSgH)J0Y*SK(ih{a1B5qNSpfa@xn%4`Q>gmCs`bp(^{v=V#uV59d$4 zCU~AZdK^{Njs94Q-mzXh$ro67;F2oq-Y4?OD@O&!r?| z$Joj%lZaqwkA4b}P@;&_9_!U~EwIb1u)z{s0^*!68Dk~4l5hgk=Y*vhl8YCMX&09T z{WZbk44^sNT^c%AvX5+7ws+qdOf(77b)SJzRR)D1n6}nq`2$Hme9y zOxNksORe*=pXw>adz2w(!+livV$P=gG6_@u@!U1Xrf-x2Dqm1zb8~JtK*n~d?R=^0 zk(O3Myn$`1b8?&;%6AWnrO0l#;$I|2m-uvS^6nY*$3Su;SD*73Z zj|q@xEdvsXR{V989Ewvi6%4W(E;!ei%x~|fB>1JAw@#043$ScmUS(H?G{8a(!N*AV zwmn64h>oA3;VBGoT!se(D+7M%o#aVS*8bTbuHH_dqJ+`Qr=5^IYVpqQ~X$*&rnQVLB&M z!x(iUw_^$wca52iuB2c$1vlmr&O-k{xAo%gPUa zNn$Fk*szu26<95}D9^!}TsA%zudEUqJ)WNHpzAZit+-5S)Y5u~uD3_B>1PUvB&x7S z0sDE)M#Yuh>nQD7z6Cz5!j<&8CnB%>xK3C@X0^;Zg_axTyD7>}e^h5G&(S3c3)FF$ 
z1|Ew2=ZtD3%y$x07Btk!WZ)b+rxBPs(wt)LA53)0&3|CsC*XY}sy}V-?n3|gQw zc$z~X7aYXLR@L!0q^m`8dE*Ddp=Z^B0JukO&9Jdo``mLiz98e4bh4pQyv&m#VJ{56 zBrJ%&t;HBNcf6I})G!)#d00s*Nt1h2+rTD_f;OyqxsLJ2vAlQG7IpD`7#Z5ghtFao z(@Tvld(WVyGTCABAA)|Uh4o6?Wyh;48Q0=;GLZTUv-|h#yKaTO68fVp6Kv*sK#C*B z%mAut1eCwR)S6{VEaS<{n)!l+#P#vO!aL#C+Bqff;^wgEoHE;)FOkl+2CT9=pM%Kk zv3*}LY$pK8qa$drH6po}aWWiAl)j-}joB6#SwqyI6TV%5?N$lOMqakfI!4o$j?I&W zu}-xMZd8(OVN-tn+mxDc-s7kZJg`_whc{RSh=ivs)5i(yk>E%hW_CN~*)6ZlsqhfJ z#+R3Ymh4Nb)LUgimr`2EC9uz;%KGEi=>6Tge6loi(9UKua&p<1<;we~M&~oK_K&y3 z^%=+tZF8ypw1*H@N=%Mf4g;eXa=d+bV0Q{I-ea9-iNxCg$s2&ej8m{u=7sGC z*G9ZT&KVu?g*GE1W~6|5Ne?T?Wu*wTpQy>NuQy>RzO4XX%D1usV1u1iGv%zo)LRl9 zpALRwn6#F^P{DXX}v}0q!qy&)9@b{93$2}YZpDFMCSDe;k~yY!Kkw=vdY zcsh;FZ)@fP4ogYT$xuRsZ^6d1Zf)Mk`7`aGDs6+rnrj4EfVEs5=b=FF8th3 z?NY;o*i7u#*k(wo>6c5#yk9|W)mK>)&b#_PTw&eP)R@#Q4CIr@j*W1KT#)C^{W@bP z@SR$rZCd!m%neLW)o-tT!E|4=++J-}VHh-foXl7{6O8nv;Mhr0Ve9mNeQSVoNd+^} z1^$RjZlvn(9fy|~%9rXNkOn2teAdedaX3~C3MO~YY-SShU3z^0l?xj=yl0swwUa_@`K}~hp0cd zIW|a%*$ZEYI^=@$Fykpi(wq~)6dwALafW3uywRgQ%g=bhkm95BHfi3P;Jg2~Qw@oq%p!|*SE|-BMyzpz^zbE%gY8ckp7Kse& z)bagH1eCLJQjWk|T@h@2U2j=lSTpwO#AcRu5KD2-Gbm0(sPkn*R-4e*1bJ`YB-!Ll zYaPJ?eqgPR87=%L5}y7PNB?t~>np}T-2CbY4=Mz6b?@MI>uh+W_V|#M&1CMb@kWA{ zEQYXzP1;Z$@>1<L#D92JG7NzG|9G-R2IZ50yuQd2SMLfxV&v-hTI ztQQ?6{w*|}bZu&LUZLi?#g?USAVmSy)@BULfl~zb^Lvi>BNq(@6imTaV1$%M+oRMl zmM;%F``|&#cFjYVeywHYib{bze_u~+v)5Pmn+RmklU2+8cH*(NSUmw1OVkm+Ginw1 zOdqPo%LYkJ#r{p;WF~zg+i9=F?jj_b?A%l@*QWcha5r;jGVCI*LU*VwrU5wCtAiyu z{?VC%Z=_Y_hj{AFhBXQ%BOc>kdNRkqi-7+glRR04Oyl+&R+Sc-e8J3JnakM`~N7$E!-=gSXA3xZb!9bQ+nV={BVkl1(pyFp?Pc;O1&%` z_hwIhCOwl-!qk|Ua&`m}OtQ;P1F@MeMXo(HPOn@MF)~Gw_Y**S!}{}FOuhTbdJT-2 zCfE5QFV>?rmFyHS+QmwKS&4(QUEkkl8E_`;nVLfi>8izjt6stTXUCp`Ev5f(IMcfx z^I%82c9?FW9pjt>n0ncn#QDl#_z>(x)_k17I{stb{Ad4=1z32L09-#WrKJx2=Nw+p zG+9yDj77S_W^Mb#P~}N~?^M34Jk2xk231*ix)I$fn6Y)xx#1x}M&x3xd|#m6w$4Zn zQ$;ehfu+d{+31aBgtTcedV>3(kNG<;Zg?ryGD<$Ko8qmUC z7|GnwHJbCim>khjUik*P0c6*Xgc^LB6P0bGO4{v?h4itxnFrm+E=ltiN$cuZoGm3H zC4=!$zY#m2%Vs*$L+;_>#~aNZ_b9}CpFPzBJ<*4vid<`JM&dDft#Qy3!6nthd(mrX z>tnf?=gZHI9MR6cFW?K?NKU#lh3ba88T1ZH2A=8+bX2Q+H)3-=Nc#qe&}os);BJV+ zOzD_Rce}w3*u+qeojbSgHjiOy z_l;UBQ>6dgsH3J(8qzJ2DgtUG8rN*`zgs%*`Fb_o|EMSbP9L2GL(U0G?q0T)bZQ=7 zBKf*HoGe6eK;%7_d-g+r2}`%4a`Sd)hLKvyMO@%bP;#~+&-lOSCk(T5(Z5u9L0CRc zJw04NkE^S#IexnH&e;<#vMod>RSimUwTUgilN=~se$MoS<8U?` z)2s>VWgp)Svc_UK4=R7$R3E>8*68hDL-Nd8MYZ>Uj(x*K3H$* z@pEDxXh*aLW(*Yip3<1G-Q`GD29VurJjgdq@GW|%(b;pa2wm`tIt*W1qn$95(BIwZ z$nyTYjkD=v$Va%pAOzIeMwPI2VotM$)}LQ6-i;kttgX7pqs6oCC`qc$^0VvPm$v}b zuRq0PX2lnG1VUe21QDJk2q{?nyyMr_8Ma1ytyS!V_*YYRj+U0@5qz&F9276Ne>i(V zJ2HHz4Tv6B+Wq6=da)ST@DlwknE%l#fVN>W!1ByM`s^wv8K?1JzgCtFsPCuqa0zk! zLn1c6S^PkB_=M2{a}K);y@|`QacCbML|WR3szsV7bh&mD=Sl@OPne;Rs&&OH26VLV}bB(VI7rm^F-^LIK2k`MUh z*Ygt(G=;hke4X$GpRmu4yVl*`BW`ujX5!WwcX!|WCfe-xXx7(WKQ=2#94(KA!gidX z6CuXgXKlKwUV~h~N=OXE4sLL4)FA5Xkr^F0{5nk0_abp+&q~x3U}^c~b50|j24$l7 zJpWxidt5nd#<*DNJgHl4>EN(Tf2n^CZn?|Sg1k><6wNLsJt&PK`_g%{c75i2^!6{H(4D1 zzZkm8fTp@IjERMaO2?$7bL3zmA0+}J(lL={lF|&WiiD(qfV8y4hQw%T7>(5E9Gx3E zVBoi(_vg9yp7Wmfd7t1eNPZl#-+2cmtMejN2_H2d!XC@T$33>sy4!o1>k0abY@#N< zKF}ki@S({7Sa+NXM&IHYiC}u^A$3-J{<@n&udMg}T0Zp7fo$r;1|+6A1$bk@GK_Y5 zP6T&g(w{t6G!rQ-4FeWxGMPvsrGY)S7tDp%VeqJ5c^7{}UbX1bboSl%tAPdw$dV5W z!HaxBs0|QGPnf6^{(~|VH*O`r6Py>6iDWf~$|bBe9>7h43o^Y{CLlGheHT(Ta+rJ5 zl2r*15i;_UxmbFueGYh17Prsf%!m|JcCvkU)3{~>ljqhbLooAO&GF>e=zhojq%-K! 
z2qi;1PtrlBDiO?(E-d1L(E6`QYuB1+Td8ks&WVrZ+I%+7wTdhHQlvj{06=l(b|S zss6_`bkU(yl7-;#H7)A6D(iT~Teg%1Ad{(aj`l=@yDkB)i8!waXN(6*(H8Umu)8Dx zFsvB|s|ucH+g{W?ex+^c*Gdz%6 zw*HeP{`FWw8xn|_1c=fVOQX2FArCluowC&5i{8A+s{ z>8T(`oJRD}BaQunbLFX~TY#O@O_yLr(TdUd!4TEEt1tT8ds{HWjX}hX^A{xTGN=0| zi`%YSOoCrjiXNusmAjaEPO18?l#!-H1pmAeuKcPF=yt@Y3C;e2t)w^({m*%l& zYPli{_#;Eh9Lzblq0z@oz>WZ}P~+<(Q%jUiynldisdTR01DsvzZH|Vn4wA$G8e!?efl* zVxq|Nkhx3gxXym#qbn=B^vuz#uK}jR!UI5wC0cGXnsT9ZTO^-Y+JW_&`aV9a@q6YL z{fan0RYzch;ilboFn&*mXigc5kY2pNx|`lwGd0aS73Oqjhzt12uPSdCd)W`8B~jw0 znVpn1GZ;uJz8=+dr?3P@!anF2zFB(1QQVZ6s#!PYGz#ms304;P zi;mzrN4m?GBaYj=u`eN}*ScE<9gnEP=R|^Zh)qj{`B*D^gQMok@OwRP;xk_0Kv6yc zd+o&`^+tQ3fv+&vCzd%1Wzhn2+4CHBj~h7x4BN!I^SPtrIcFsIedFW-)a=eWCHjL2 zCJ0S+)hl{wcC2a_@@8r+ob3{a5Lbl$IWY=+vjwA3eXCznJyJx>eR04)*uvVL{!7c@5p!7Idx`=Pdlc7&TJ|>s`mCT12Xj@ z4f)*iG~E_^!ItQ3gJ$R3jNmO;`Cx0h2{p41tlf}F@`TCdxKirC>~YT%hpSGU^H8LF z|D>bCjly$HJf>5KFu1T60~^XElUBA{CQ0gSPWTL2wKP6rPxIciaO! z>?s61`(dzReF_Z4Ge5_r7V1+SL>is6Wdaio+kLG}$vmyVXR^OI$c+h6zcUE4S*w06T@|YRdm9CrxH^?teOdvZhx~p*i?X;Y`>Q6 zJ0HC1(D-zWPN}G^pg-7{GhM^MRdIj8D$}pnCC3~gP7JkkbK&w%#1A0vHCx&#@9YZj z8rQ;S|KksK;Jm__%53y&BhAfR8fanUY$E3ToQB`!$+4uJvD`Va>U-s9_xo3&fJ)UH zD_;Rnwgq$5<@{fd>A#Jp3N<%$-aP}a&tF5|mUIe0S|Nn65A}I($M|7fZc2ybhs+1T z#rLWOI{#AYbd7c|79LLLWiWrGcOC2zRYBzDdHF}^t(&_nnpcN;GTX-6i~>ptLBU-l zfxEbq?IJ~9;n%+Gme;*n%$4-b;%-WuXj8MbmN(uby=^4JxlRa^{W0w^RY;8Aak^fo zCeEC*4=skNBu^fsm|2?yM>}&OzeoSVXzMLNr%sOj{(8hvYq%hWOs9y}Dh`5PIQUFo zkM-b7CyK}m_*dBEYZqD)AdjziAWZ$EXutT9Ft>%}4{6}gW%$Q_IykSSU<6j)0 zkzZqsy0_~MN=vvSvGtLn`0_T9jKJM;j-^TyX1k@gjlXH})?(t!@Qqt}Tp3uD5K)U) zj936YL@*Mrf9_7a8$lQS;O+y?rIwRz@v!Un(6^+?3q^}kah~#^qb*_mb9GRp5|}=%?a=M@sNPCZC8G#@i%b{M@C; zy>)|!Je{#|QV-SDJ6(qeEd#!#gS|T8-k8PNwGdlV?}|9w&Q;Y&#vp6AdX&Iij(q#U za!Fs)0|f6FL?TBxn{b*a{QUNnOU?l?V@1#i*SHAXm{hUW{^v_HvKaAlv=kvv^)*Aa zsv_Q)a=&*yHn)jqZ8UW zAK}eTY%{XwqWt!)cpnBI^{tjI>~G0l^-Hd;SIRu=NB5lKp1E^R$;ccn#kVJ(SkTh# zs|XX*zI#qq^07wg#~D67)KYFuaf#rr4w=K*r75ZblflX@YX`I^cH0LF>PX0K{GL)p zR0^)+z!;{*2^3fIZ`Ia$%8M(aPm^Bw?LoM^e+X@Cze zw!}xcqm(CJC)1@%hNEo7Z779%jBRKE2GC9tH>WgBYoLGkC=(|y=~%`;k$mu4nASnI z8Kd0r_s&q@N~e1$+K>L@QoK6Wy8IwT<}7Pa7L|hm?(GE1tXo)@OPXY)G6r)~%75I5 zHPMULc~;1bq)u!&PGnE7*XWwQq-`RjWo$D(?`}jtHL$l{vy|z0+rMyrivH>ZymR43 zu3AG_T-~(k?(J}i&oDT%zjswMpdEGV6p}!|V=MQyTuYTG`>>G;wg6b zDD1H8{HybQqnMNW@lg<7&n0LIAait|Z-O2>NND5_hP|u#7GmR6%N&?+dVcQ_Yz|y6 zea2~L6+g0NJ`}$G;Xt}JbBQ5++v6yzaa)G{u=aK%x|oZG5?yS%k_!a?{F9P+S(KAYVLT$*%SUX#g;+67crjz1b8pNCZI54 z&^e8CyU5jvIY+i9Llzo}n1{akOXAODU$Wp>Qd4_szlS9Zlgh zRSe^dkn@zwNa#WeP!SSw&K}r@^%GiImaf}USv9{(xtq)RU3Z?Xvgk$6dZBSKsQ)-< z;S>^3wC-cN3xKFUZO&ub^Y%FC2s@*b|#S1CStD4v!gH+|Y@zlpPdlokcF|Alm zUDgD2sYa74joVk2|Pe*lG9>woJt%)i~X(hSEe z9$utY!a^@^4C_|*iWLjqv96(^TK>H|K|X|jBh<2<(QWGRuvUE5esJf2nsHsAX#Gmg zoyIp|Qqj$#D8GsBexvQYkVsFHADIZrRh zcN4L=sphxfrui<3!Pw95y8mbCh|B!e#u3Jp9 z93pA`Qr}b@%(#2&PUM)8)6damhu31h&T7JHvn6(P$8vyu_p}~Vm6-FvCfj#UipT0B zgY_kvsikBETN+t#z zoLeP*MBfp&mC_i+-7ZbVbJO*s+nHf{TfQb2Qm7@_EUi3aZNp+7aL#xXWovLs&*I)OKh0o$Or`(phLn)0C#*RN{1jsco<~K+V8#((nI6{`|3TO|V1d z2LQVyS>?b5lQreMM$~b1$~)>v6D(v3TUIc60&u7(JKn)?3OEULovhaB+?$4j9=Hg4 z$b%hFsLbA~uax})V$HNeg|SvjzvEwqYGQZhdlX#~_>{Yx{R0H@2`?iYxss)3C(J(| zd=0dj-Be&sL1a>cW4>5$rnUvLm%+_n0G;}Yt2a*@NuQob5s_n-g$ zTo60)Nu|?$&^jrl!L75TSn{VTrP(7`1L(@j3}zy07MVO2o4Aj85q>imBM2#r8EyhM zEhLf0k~;l*$G-_!kd09a5kVEK`WZ z4A;2n3lFQQP%+#5qnIq{)G9_OG!E^8JQBqM_q_4JZX@_#XxL*-q>3aTb5UWJ3?yX{ zu=(Tm{t^Onkf5HOReM5_u@>9mPx%$WE|rcl4@ePP?z|Qh4Z_wGJs*}UY$AKJd7MJa zcH7LXJ=_ja2dYg`=vl@60efJWue|)BpPNxR(aa~lP0RI~;BWlRx$G$VmIw0=52w&L z&aPl`_3-%+InJ#qVI-9vJ>Mx%=d)}qEgl)7Jd8%%#))ie=p(wXfBD>;fo}16y{ahy 
zE1S{ygL{`7pLS>Lb9d=3gMbq}P&bBfXDz&JoGwMqC0y=08_Os=;Rb-LYw(NDtfZ)Y zr2{x0wQSbik6)7ClTl(VnZ51uUBRD{Jcg#3AQrUkJNN~u)x8mWDDTzps}qjk6A6Xr zOU1}n2-VI%YBGLsW~e3WAy)V}xfeF*wwOG}6B`9WdPdO=9Wks7%rK0u9rgp#OfDfR z6?4j~jGW=N`tJ{)#KJ}mNsrZ@{~>_3LvhR?>TZZ!NM3#~%k!JE?r&BAwggfI2^M$I z{0f|z1V;8rtJr+~WWdWqo-q!wl+CRPptP$3*=uc2y{kBZ6WU%!JycAwvebMg(34#U z15F4W?Z$jyTd`FU=Q}Ft9jnG7bndxzB3F)D`aRXO+K&`Yj>RrY8&u8(H%&tRUrbNvR#WDA`$bS^>Vt=m; zH(f`Yv*CbtA-n8}O0Ru0={eiGk)E>Pb28K5x_S6`8}#-e^d`|J^|h9Q;ra7baj)EM z=|&ypZp?5663uwqA$_@OPx(!C!_l+x4zfoA`eD*t3Af4z*-c7;@IZ|msZ@{r~A>VD{J;KF}xs=c!k$*UssEPx3hbEuseU2^A zBOaIhkynpV$17%+Vd~U#*q^N_P)RKIcQob>WmRe#yy>;J^3;{s z+FWpT$fWk1L?|rw*6!y0qcG-_SUbHF3K)&B|4F{=XX@r~eV##Ao36qYx8tO}QXLxU zWm_mD;crUNBMvb^D}DV#u14qAQsS)*nA+WYriB?pu&^bzBie>?Qy72aQF7)}wLCRn zq0T=`VO@5U&OdPZN=UwLp)Nbi0v(o=G`H#><$D;)6^_P~i&V)P9JbL_S^UOp0D9*% zb8#-78xt(i|%lnQ{z43U-qz-Gt) zIP@Z!1V#A-4&Yv6A&&$*7X&k*Iz~ zK{|}LxI-ur_a#2}P%5G?c)5TBm3-4d7Pz<;bB7G4>+&JT&!mO<&-2eO|BjG^eooCe z()Gjr1?j{(?F_GrOHn>KL6s=JUA(UeBR87-AN~xkJz6*+J+fP(k0!H3=O1fkcg;N- zJJQ+S$k({4!A8OLZB_=tMCR`Allcs$YUfZN(?vxzK0;?ywJhNMkR$L=Vc_&ebG!hL zQu6{X=EX(WSOaS|5!za3k?7%1H!RF!Z(Sj^(AIUGwWibkhB|^KpS&J_GL)lsPi%(4 zX=%TV#s9Y|rE|j2MV9P~4Hoy-U-A-o?>1QZS|mw{;4BXciw}s~JO)*5`IQU|ceC7b z{pA;DRCKtOvJj}?1K6G{(RbRf{w2vdZ66#{$uTT(4)V{)u%$(71ri;}Lv9G`FJjP} z04t@D=~u?R6)!(kciz0IhDd>2!zZp>gmZ-?x1fLF zd(68z8$&~0w`E{o`DK4oIXWZL&AIB5lknInpzzy=-nDmuz};vC4OE+YO#cXqeZJ~I zC)HW3`<3s7ghg3#3plPJ>QQdEI>mj8Qh9>nSGG(XNbaSeAp=!V zA6!&c^0@1J&pk7JPAeNO*Ff%Q5VcPRUs_k9p}HnO@!{akdp5or^Zy$B1AB+a(3TeV z{jMjBit9a+>$p$tWvuaUbFCtyDGYb3Ndy8T+N{^IR1grwvAr8zp^eQMae2KoWUU?Y zT*qV^Jj;7YXZua~N9i##<#P3{tGVC8_>YbK{s31t=0qEVw$iyS+F>6Utfs8qT7wCa zq3Q?d(%YKl=6{6<;Ev%psvFphUt76V|6K|n9`#ta9Ak2+z;H(`1qt9TOk+O-iuR}& zdrN@=tL?pL6KGiKidp<$MMgygTG$Xy9k!QyYy> zzB9`r2i7&8F56+73IO^ULKA{A73;Pk|0(j#!`9BG?pRd|$PVFVnKT>tG zGApO*K2GPhT(=rQdg{f!miU4y0i*)&jdK!iVkZtHcsg_kuoyqA6CZ^4dYtrkVJ-&U z4U^2OYm}H~xrlL@`hN;}Cu6Yfm=pzJ@%+J2 zQ+;cblfmYwq@VM3y2l=m{rL6)N?Ep3%v6zqz_cD0@SQZ&I|zwsP2zKJQXbetTe|$5 zci5?Qx%PQtlae+euN#n@^Ki|OdRqn0A*_XB2Qh|7nwe^KsWgwJtiKHq{4}zfR8=GY z^-x#i=a_gT@(_b@i5tZSau}kOqcJf9ya7`~Z zjtr}Z54-8i=RCSzi*4$>=yS$UE!_on7&v`=VsCbEA{~9MJqPwH&toL7?t8qGA32QT z-rKX%$8;rg&eg+i`Ox7EYoi$q?-axn$3ECAC&X6Ztrk0$r@qtoQ=cg`4YZ%#IalmC zayjZ}O)$1HG>Tq~&Y@i)OH?3&iA;it-$=tI`8HjfG8^WDU%>OwSJA4kdtJP%uT}BC z+?d;``0LRtqYXrP-VtlU&JV{cP%JvxrGLnSl?Ts`uec4zn{G>mM4~t2L>w1=NwfmZ!|Px?FXhEXVYe zd5SO&5){>~Q#Rf^(f#*bN@GSZl7iB2M+l<8mbDm>Dh?o5y85WA7M`J`pW{D`fc@b& z|4|si-Ox2Vqk!B-gyx6DD%7fNdX|OHV26-&w=*^gunm~^O>$m&ZbiBbrz@YGk`y;BGNpdC$cl>JfQey0JMf{Q@ zYH^!Q*Y*~NT@p&SoflK)dlMDfcLoVKHgr=CKvXkB|6MS0LDI<_@3`@ux=k1OZ5(d- z`_EHdta4jO%KrNJo}rbCJJ3k`)Z`Y!f+F-LTSXs{2gDfeC#i^0~yXzcM5uzCLJ684)% z@@jsFK`$$2oJ!@^a2q(+m;gOSUNpGIh%-bF2OPj?9W&?&%z%SKokvn;Q`D!= z((>xpWUrz_I9|Nq87ZJ=&RN>K5;!Q8K}W3S4RuCWp{pt37G-ye# zxuB6NjnJdEGb(KQinvWF+agcgaz=?*^O@lqW;=%5;^Y$mOy(nC#&P@s|ArXhN&U-; z?rmyxVD@ESQ$WBKnLovr@CRYysxWRm0ZCe7FrH>@kDmTvD408h9!w47BSMJY?G>t#Hb%{Y zeX>u+(|kg(!&rx0bZm;*@<(~^QZHgW&Xm6N$7 zV@X0&bwFrRZ}GGiYkP)u@5RbF8iJKf@e;Eh4je+Nj%d`rS%}}zTw;}S;#q}<8Lk?A z?)0b70grF8(P@aZBd8TbGxzjm5h6l6dy}^BOZd?HexD-W%$DnR7){9cvkcTnM$~hT zgKf98fh2>>{Y7>(#kd_QBHW57S3 z{4f9IMK&%Ds1wM-&&RxnDEJ@yOKBuCjfyOe?5G}Umq0(~E+2%YHn&G@<` zL4|A|6!5)t`O)r;R7mkl8!(wUA6(4ATx5@10%b0~lw9XA1Y}dl$%DZn(-wiJ5@b82 z+J3|VUJa)=8{sdT?~)--q3~O#G^^qMab0NG;dF<+Uhj{GIfZ0daQ8;m9|hxe4ylpK zyY^PE2p5f((aU=>MN^RRq=#X)dJom>P45mu43eUpvm$zQvHg5z1gS%btzBhgx zA?IwC^R171pLT-uthRbA?*KWfTqkAF-1!Vc9DlSF(JL9$CLQP0Q2p8NcPi&Mh7eE^ zxDfMNc-1?sK_s^qW#m(Y>#1w{b}C*1Xw_RSq!BDeEODTn(z^~x^iuq`iJ%G|+@MmB 
zg%y%$FtWJYR5xW4*QrWR^+@=c(DXidbW@Qhe%tTm-?lQLy{p$Ty3-&eyiAM_q4~8` zvCWMoL(^o%$U}y*U@e}A1xNpMItt{x=c*QuYVx!zHfnQF_Ez)ZPsbPgIBjb+&xbifE)Lyn@;gU;D9Ym8BE$XE!< z*W!#-OvLAzl|J0oh1>Y#V*$X$v{5iV-k##|dBBEEr|*G?e6a6ci09XwKjJFPLhW|< zG1$7&8-2gwTH+m-F^Gb!OZH2xH!klSsR)TlzDT0`oblssU*?1;O2P%ao8+6L&XFkf z#kY?lP73Ex>J!eJ%oEu@RMA~)J%A{Sxhx|$QqiHbc`N-L?IF0eRzTWUo{c|eX6?Ig zwhvwE2ll!ojo2plU1hcfJM{Z0#v?I7J+(D-BBz($n9i)r22sB>Y>w7RMAyNRruf5? z$&0yMre3m|HhW%kY)2&fTbqO1?av;U1V+pzyf}sMcXeM}xbn1t4`;L+XUv%bkxrO; zb42=u>hS1-ST1(w1NNcQm3XW0*S(+4=R}lfE;AI*4Sk zJ2R9l=FLibx0AI5dw6+Q<9@ZL82?cD0&XOw>mstrT3&<}xEyM7teL<{z0%+;7WR%4 zyKo@;CissYLTS`&;F9jod%8N3ygFfv&;pZ_JtD7nq!Z$WuyBUV+lCZqisZr?G@YMD zJG)ljdQ@`eP-(#*_$2pze)70%&`y-i4C)Q{CbMPF>wi_&BkiX*j`3X`;oZsBxRIZz z>ZRvWCH(+P;-(42$wJAcYn-{rM^)qf*lLaDU72@+L;ooJOF#7ci90|&aM49;;RsbC z5u^{W?y1H7ptM5Ox?b{guaK=DK51 z?cttLn1UG%>PxTG4$qhBr$+tGuEVafx zS4NPuuNAsPHE8?Dgtbm1&d=i))|l0n?yD&IOAC|(x{1+k!6p=Q6O@5S6(;LqHR?Yh+8pE3=I&^Iq|CX9@P1Do6g zaI1YiXX^XJ<(FUB0-#8%r<-paWhxGPTQVSM0>oR~tk^fnU`YK3M7X2n(v$iN`nFJm z6L(F#yW^ESMv!M+(3v5byMC|9K1%Z$elA{m>Y)Qy6q3XW?Tq+MH;zIo4SM{AL!9E&fAvSTyvQS>euKu1 z%D??}tS<)m^keX%y0K(dzVIjTKr9PYnIh)8;b)aiR(B}Um$wBLHcYNRELe~HI{k*> zl@GRgwXHSGZu61je>VJPOT%u|i^Tn&oAPzfX1h(MZ81$}_4`XjvN|%GKj+IzoGraB znl~z82YzrQBUl94k-0)@e+02#WFjuM4Cmxs$-B~zq+a>qqK*)m7pB|dZh27Jz z>C6LcC4^sS318`&JKvBDDDyj?H#TAOwjI<0bfjynN{Disolev5N9))~Abe5d_0il> z2vn$;cQF5VEW;7M544arn}Eq9E^SI)995QmXN1_1B}OLQL)`vQ2Ceg0q7gy70biO#?s$LbE`qBcV<0$FAUv z$2m%AnY`29YhzQYL)d+eku6$%%GQKYmQnu>)}&xCL5SoVvs`CpO`7O%2N)!Ocmz10 z6s^jXud~X6{hP;&b0;c?URc}LafP~?of(=gHC^dgg5BczY30lHEwW(QhqQU*xZbNi zsMiKDU-SVB01n}#Ve!50Gs6h7oFDtGA+h?kde!#g(&Q*x!XbTRgS1<=hNN2?r(pZf zcef|xQx2dEL>}ma)|PK=D+RqA_cf600`^u0a69x}iP^VZR70#b$DPi?ek{FXk1T<6NKryg$+Y01oK|67FZNs8?i4~@T*9l49#cI5J zNADPu9EIp6*iIV|lW{s?R-XrTUMaTS+I&oJHc;# z{!b_|QZ*|KcZ~%%)ks6Ew_1i{Phz{qY51LOS$4WP8A=NqW#Pj<2c5@f|G6^)BU&OX zk;FHzYFG?IeGV3v<+M`F1iJ;jUqc-0YkNNG9H4F&!t~0;)ep9Q83f1~S;#)*Ij?wn z%Bb^l3FYJ8)@#LJ8)cwXGrMKMT@W~YGDRN+*5Y%*Ix+8vxQJRFTy5gRteDRS2g*2j z2eMQ*H2g6#{$BefU?HfM#OE&yJTBt#NC85Y@#A5(kP>f-a<<8vb>|vcl_MtbkgtIj zcWF+?yED;bu@Fc#w7EZzsJK#q&2wck&`AWd!h1car$tN%t3mlbWW(wHnp0t$Yw`8h zt`<*d6qg5-`*H3$3AFoJl)7-F)_c9-N)rWZgrwMs8!fJWh{XLwfm#4X>Yz47P&qoZ z&mJq<@nbp87geemzkO#P@UXgCyP_DXe=%LIps8huWw#YbZ^+b!`~P>>$)BkjUv=fU ztXAtuahk6oY*GJ26@vR`wD~x-%(kTUPjA0 zc^uuXjGF%8!764Joglw4o}~Ep-gFDuj$kP#A;HPnPdI|$jB6zmm3eDIc5;G$=zI*s z^Iw5xz{$-r@M)gOvZ}_ItC8%$68+2-R(4n#g3w0{ZDFh3NcA|WO&ye`q1)nK7&6B} zsw{DulVZjjSsfLIj;U)fBq(dJfvX+Z=&I~A!x*C#Z7HOnH0B;D54RrZ6guoS+J4_} zK4)IN@?!^gfu$u#WfEk3ivNqAy7=OqI5P$PrHBSL8*dTki`GGt)d&Oq=iVP$W3~CRHe+f5#K9ssWykb;1K>dD< z`NCohq37+!EH(MPxAJ0``pKPj;RdOsImS_nnO=?({PLM62x(WU_cK%ryEy8RYFu^G zI0lH9$aG%51hXYO*Qu8J7BJFg%#?f$>8bO^pd^eh8Df((1GD~gf2g*a41tlhdt4m^#!^>QSbod&oQa4UKTxBV`bp>CiK4j#OtV@ zKn_B{PQ%=rShA4hErvxc-i$u-WI|iyfmmcziCBl{rEFEh-+Uu)t|8Hzs+5@s1L4Lb zru5>#9gCq*K{@i#C{q-+!b<&!;F%`s^A)Ym-gzo*Xm*lJ}?ct}$FUgy9sf4tB`3FKrHtzY2eV_{6{WpjvB!Gdvnf zW$Z{V+J8SjA2}Qq+snkW@aNoZ7^t=RGtfU4yVl4##CuT)wWnfrIXwSk+I(zSq2=#R z_#LkP@!l`fI>OobI`4LZrJV!5yi-B3mD%*Jd+!}pscZ^uQGu5CU~D=5KP+HpZTe-O zU1my}?KUVqfD$jz^1(-QF(5edLmSt;?=h577INvC&v?GQ*5Sa+bO~1a1#CKxPwrb@ za}*(f5QcY)hymcIC={JSB7syCu6X1}m$&Fu6EL3SwIu2rW$Fc7F`2p_1Db5qbmi3j zGFQXpw^V1NeU_#;1WJXXV}kSXdASEg4b)tbO1hQRNV0@w5GR>&gJ>b#x zp>V6Yj`j`iiL8 zj#j&;yzB}Y_*4K^{r;t9H8Sx+XGzaOXQ93jDQzop-rqF4XYfe>fV!uo_YfH-IbY=< zEikypZduJ5_#MX`aNjX@#Xu28?3bG)Ry<3Q_~JmzaG{R+A@j80{awENI zoTp<@Rkr_Kuh)1r)K06Jk9i<~?^x8=pxbr&1rozH_Wfs+pbJrZShLXDXZj?u+<9_S z0EOSpuZxZ;=#qQZ%6ZvM8@l-At~cw{j?waU^$)|94ZQFuX5G*nWyAz)*_M%3iV|Be 
zPClo|=N;htSeSisumJLp;sx$Mw|c=yO&bcCy>8TCYht;h-;LE|j-3pn{nR?pLHM!_ zeg~Trh}|#kzx$|K!BOkJ|I+8KMk)v7Sa|D_QgqRWm=yyE$2Vf?D=IN@lj;AEZ3}iy z;E|5pywy}LJ$_`vdXKcnd{Bq4bgM=_-nkS}{sJKDq6AFk?uWNl6`(>scqw?l3Ln&d z@arJk_$JZR?MVQYM_qsZ&lG+~?1B1w>$_{6O&<4SF5sdhmEj|R%7IDV1jsdQHSBYh zy%8<;vKou3bg&|4$Caz?>(DsgI>Pv6nC^1T#=Ir!5_(2D8X3?Pb#+lktWu(k-iWZi>y4!;`BOa4a2xh~QJ=l#8YaBtH!@d}_*V$g%tm5FSQO*kc zsX6<$sVW~?kQhnA!n|O!x5!5c^~81-aEa>h>-;lV`eK)Kt+b6k6ls;*IL?#$B%$i_ zG>$hd@Lff@X*$hJ*`gL-<@6WL&YR52_1AHIlq4!Yx-gG<8skbQH|k_zuU(eNfT6QH zOQaWAzr%kN@alcsxzZ$4W>W95V)~ZpBIc`50~qJ8S{LI~qZOBCLreIS`S(&1FlM@q z)BNisNr2pQ{$!s|NmO58>Z+cG7{2=Tc1HZ^fWxd~ z1nCnwx#PyG+nkS8jBl>}Kq%p7Yki%qPF=VnH5qkJZb}5Hcn1nsGbL2QX&W3{M~lfm zW-d?vsE7ZxMvv1;y@t!8`jrU!6UNZw*$_-DRdYv99~EpsB~KF z^t(JzZ{#3c1{ZX?+5Ifys+a3h`$tjK_#3~Pn(rugot-NMd`EFl7X~H)-1zGB63j1{ zfExE`@_!id@gXMKG(R&7c&Cu6*s5f@;^(Ti4i)nw8N$QYa$3~?Rg|xa_cI;&`lqas zdeS^1#S;xzGj~1|Xk`=K4Q#@f=(I(r z)mIzOF7nY|J?v4Owg*`anZ57)ey+vB;+v|;CMjqeg8MpQRx-uf;g9;lzJJPK-^MJ6 zYYV>Wc=EEilHM6A2cuT4`mMH~zB|GDd$q)3vMhbVd3qd74vfsXg$!eebcZ`T30g4) z7{gV5bQgwjHVPnzvYn)$KS;y1sq2*F(9qiX+fzvFuV1|<(bRuDyyH&gl6P31a-&&d~)ESa58CBH59;GZK;PyDm*xEGxCRBmiQ($nV1JS>* z8lbb7P%=or%kxEwuoZsH*Il#ewm;>~Kj$c#8^r|>2%N0F69ygWWiii<7;FPtUpLMQ zB!8UVpm(dR5O^>vkUg}PvWpu`t|E-E+}QdLWhfy9;U6|L(Q;OuMW)4@+l78tF4&?P z)1OQQrW%e%Op=8^u+0mlk+Ufen{KvNH9j2WZ4jvP{UHWwM@Olr==se|;6ahX4N8vH z$N1gEK8D$6n*~{gyQp1=vZq8ZLx}q6?8KQ~BQ3mOu^^bFRA^$_=K4+biwZA$xtp0z zyGW!T@(^3C=YZchuKu;3txB!?so(bJG)My2bm5qVA3vH5BcbP4J9vD!JV~de@^JY8 zkjdE2Y+BdgaihF5FXtg@tHnanh1SP!U_))Pqxc7kLoX&NGd07v-yO!f^By9-l&Xn@ z_K9vL_C=c3zP?57Xmsl`pXQ-4b5fHCX}7yJs4E7Pzp2evdND;ak`W!PsM*?67!ZCC zbw?KbkK*ohAN62v8T+%XxzcH3_)@Qa=yplDve?X|Y-fOgg{fJ;*|vxvy%B!Pq^PTW zDl3)@y7WG4h&}4?1nxnMd9u5I2`XCZ9((JNgd&4Zuf6htJ*Tpv+JDKzKbc6qd#&YU ztpH~}-clMA2>ZH+L^@MXfYlLOfLTmEe{AgOBjeG86RAjNFS2hlNrSaxw&P1r@9|-4 z?1I9SkEmp7wrxPWK`dG`SlM6_!|un{RC^*s>@k zJZdwjdOZPuRpC4uns_clQ_TR)v*Ah}c|2V9*D>RcLvCYrw4I{qZRJlbz@VP?Ff#S^ zh%h^%^t&$4H$T)ZnG=ShOGYee1z)eG&?b`D;r;1Al;KIBEHpv>S(CYZySRy1oVw?Y zd!(H!W#mAhlF{@dY2D$qEE99`n`4de16?$GjXLPir-FO|AYNZRz{- zCgga_?!QMGPu^LD5Pco{0jz}i<8jxCP0)q`xoUiR!}(j@cY7?0=UyYDxfJ5%Ft|ou zNw=Gd?7Wrz=g!v4h&T8@YZJ>M4oLjf;Pb7I-Z9OW-wk5J?I78yqX(wxQ^E-z<*HV8 zp?x#jUWw#T*>O#Ls9wBg)9rj;_YMxH1Z&3YM|)nntqY5mWfLllEM^`*OM22+)gX0dIX@Lpd3pf2hY0}GKgjinS{(Iva+!Lk! zu4{iiQPjtQ1S89sa1`eDfl^yL~z0Kek@VMrM zkx`=N@hCwuv5}=|bS$^p)B_$oPvW{azATLq38#lWY*69`mYaqDQG6Fck*Vxtw%zA4 znCd?YDo-bDoWovO#fgPVx(Ap7oNAo-}>4xf?k}^qnt0ItSKosF5b4xjkj9vntP_w&sS6ieTA* zi_A7h51x9(B*l4>5zPv)G^TH><-EdC`wAc*UWpyX(a! 
z;#(Dmg|LyG47W4x0nT}^dju+kCC+rF<+>U}9P!!N%28zZF-n0cjmO|2rMk}|(S6OB z6BxGHRD)8TgpQH7R({OvKTV@X|72!e8goPVlD~#7U{h{Be3|_<3;!)d;GBap5q1W+Gka)ZbfbBxJYL zg41fnt%WtRms-v}#mWU$NBtanhq~R`1uWn(7N>;^W8UEKXD-vtmZkxHcClBuO{8DkWc|tfY z8nx9d-&;w%dluPMXs7Eq5pls8$(7JPsAj#sKXa#by?fcqic-FiQH)?J>|x)&f$JfZ z>XAnWTy1T!5hVTmuHmp~?S5dMxAr!=#iBBHSHrWfg1P=XaD^7LX*OG!^E_sdvrkW7 z=#iyaf?RDq%`z1gC)QoKZVs)=uL{D3%ASAbdf@V0D}|P*tw>Qvn_H7pI*=CFo!pC( zO)}<@U>2h3vvTDRHaD&wwE4E8*gMj+=%~}+EmIX*Ol>`{YTxMa@BsQQmw|=bmDPa@ zDO*@t>A}Lgfp?JAiFwWnBgu0Dvx3>8j}q}Nn>G>7dvgIgVmnE<;a`<#NDoQ)@%ok0 zdPOqxGvXb-iALdo$OaF-sySOq5ZxWhH{j~A?Nskb@Ia6rn|RHvMrT>-MicnxUNb;W ztu~h(;y|GX{!@U&lVNGF%6wpJT*J#IL-o!um;BhNir>qT<=!OOp6n!1O{#y;amd&@JVg5*OV9<6M@VW*nO;JL;*v z76hH({(sF$B41{8+@dM0%X4hlN6#bcrQ-h0UfxcHuiHGU>uZ=oiioLgjN zek#uY-l#@M`DHDS%%NaSVb* zxZ}ZV-0T?8exDXB^%t<{|Baq*PnmdDgTJQ(dcyrB1mK)Jzv_CU48iXYm18EvYeBGF zaw;^7ld_wJ<+IX}0ZRe+l^*E>{JMtEvD~Qh$u0b^^UjZ{f){OL%D zSjCU)AgR@{0@bG{G<`wLV%}|!~sVBt^iZ$eIe|4NTPGem5;PV=F29_U1@uDD{+h7VhIsk>@s&d8Mc{kTVYXbsb``x z@DHTV5tO*OZr-io0|OqYNmJMQckP^JfYl?hEWhbMmBsF2(LorM8TsMY9z$2*vWc6| zu5E5MH9;lv#O1r|1lycF-Z?(kkG^2u8+PEXao~Xj8XGgyN3yUo$?f^4D>i{k92hHd zCh|)Ao>1qp)Jz_VaW@<%+K-S~XxL?gJ+qj!)KN)PFsd?Lr{5|iydU~G$2CHof117Z zELM7zr`ACblcM+0!Qbu&QSqE7z^DD86(mYNC4x&t`6_-C$>O3WI4+qU%xd;}u6!k^ zs7tytNHZHcIc%6#FZwy!iC)zu{ugg{kv9J{rS&dG^^zc2ICF@&O=NyW;#wzzJ<6}6lQmO*|XKMs4K{{r(R%WjzPik z@k)wQg2wV&1S)$>*GrWxS$dOMoeQrCA#wejI zr#?gn<26>cNZ9Svu>-<@;Ug~Nd9hnme#L=hO?NF|j+DaF`$bEdjdhUt>yp}AI zqugxd9AFz$WhkWAlrS&f+wgh%1lHv)yAt&*R?nQ_!c5%GYV%iOF5-Y#q8Z~=*njR| z-DuacX(PYraaG0UUiG<{GGa~aZ!R%V5p~&lm2`1h#mARF7wCrt`@J$e31yu^sFDAC z`!foz4@=AV{;NzfyoQk?xc8zFkoDTS&7*+g60QEwCc`nk>NJH_^|3ngmy6VuSHg$a z;N6=Vn>^CsiT8u;Gfhp4dnFDPaukI5WC+OjMMOITm?W*fno3Q))Qf^3V{zTvu^-;F zrpoO8Vyl*O9dN@UPU3-t< z$5ju!pV1Qf`m+>@h3o)V5I5is{)?i@n*q2b^N}1jpky;)pJ^!$X_B&lZQJY`)veCr zWOIDhzIw-7Q?w!z9c!*pY}Pxf#=1_qk=%A*SX&3fNC9jw*J?!ndW5kbz*eZ{j~mon z_!TrqEFLY|ug2Q#<3$zM_$^V}X=&nCo|>K@o>F}CE%WKW6iE1@sIi{4yW9<}QJ9d$ zU=7d}|NHRo);K@5CP7CK+ZcgpGkn0id>*-glyQ0CV!^WvyqWgRjhFT5%9AP`#cqOA3iqy&jDawl`_`h1 z^i%FFK6WU}8_Mmce(oc^P%2ukQ3by=f zQam^Py`PNlb<4>`u!OWBchufvoRhZ$zMw|wcI%=sp{Uo5_SbQa&j?53CUvo*HBnXozPa3n0M;^h_n4(OxaHsqd%WdXpc(q zx7nDhJ*qTlHCygklJ?$}ME9w*=D|n< z7O07#TRHv?VA-Gbd|7g1 zD`(DnJhL>?Sh$F9KOW=UIe0EOID0*j%hzZ+&f+XawumI}INBe+x=k^Qxb48RXJfRt zFPopjSFE6cv4lyMNo?CL*mS1X5 z?whqZy;4H@wgrf``}2Zhc{s|_H1o!bqXWrDZ1uZGkOtyreiFve07XWiytG~}5ZWM_t(F6}*4|5Y2i z#7D`9Au2+n9c!l?VL;T0KXpm=EYm8y(9pu&YKHk-TZc9ecZ=@in+lxaK+iW1Bwa$ffK~`Ej?Hd%rGJ^y7Ui zXwc86sj*N2KWxiC*HpUF^&D|PUgEtAoyrUai01P;RH&nQwSOh+9lQ9}GE<^gfNZJM zZ#l<$>vzEQf#8#$)S3^H)wI-SeqkrlIf)}S%>>nl`6TyCE|$H|Ln5H&9MKMe2oygn z4A}4tYI*N2Il^W3s;lRn2>o+UQQgf2eM*u;F3>ezw|~OcbDn!=wK<$1)Yfy9aSSV3 ziis|v6*;06j~l7a{@lX6FzTz%X?R!HM(5bOA;in9W4K%2Fm;LOIhv8;Yo*#u!`rZ_ z)vm@|`)BKj3lazKtlI1P9($EEyNTx`!d~aQR!rlyR`UB}u6l(nWv&rAyJ7F3 zZt3-wt~rN&`bN_w;Ot1_{$W0KKAsxNS*r}?!xYTji!@O-}E6(y;(bjrpRJQKoL++tNDChzOd9o2_RY8|0A5x(eYD@^##7LMhd zvBCqiM60UrX(K|_c|ZCr)})nchn5w5ld>dR@?N1-7cNbktFWVNbk;2Abc(y^@~dkv zC|r6hBVVXn=js?NACp^4to|`$NhR!bm!Q{8cv{UEoz6H{DIoKJcc`^bzNLVRQ26|3 zMALNE%o^XFV})%StrA3FZCzF5KaifwtLExQEENy&G7rw;9#d;DLi>_HS9VDLtgUv- zH2ZID2lA?XZL6kGjdWaBod8=_!B}3681gqzS<~c!IIcG?OUC{bJAT4b(*J;YGs{oipn1iV#VU`fO* zZYO7OwxnDS{ARWf%=?iBVKHOXsj`0cqYDpi`JShtneSW(>ZsBa=njg-Wy1G8hFz`gN-(G_qk(b^m!WLGJFqu=m%<# zs4K0qErXSd#i3F%)f`#2{pL<$cXD74RQGTw4B&ih5jYf<;Vt9-T&m-!(?xzK>E=;H z7Dd6aJ{A^GHn^J|v!zLAPZU`|`wlo{_q(JWYxV}k2B4=Gv%ce+;|g&xkt5pIK~e#? 
z6-hK`=zPe}J%{8be(7yE^cn0K@NtVf0pQe?ds3UxOtZ%_3`4%?>gS&+o1FkYf?bHF>|6Lm4!QFs}6b#D^5Z%FDITVxjoioJ=-rWCeyzG`|}qn zkiu$A3>NO7mAQ4%j&ICV9xnEKbHrUb*G4M#adwhHAjNphIXz;$(X>XuscK~Lsv~W! zLMe)!D>4h+or}0cXbdW-TZ~Kc{@EbCqv>TqRkAPcxRdxNj9#x+)iR$n2-CA>oJ;ia z+eq#GJZRkXjSyg{dSCY(HrfI>ko~})DI9-f*ZI#?CfI!*@(_*rUmMUVT;(G_%LA3E?7b+FUJRB}$SYPg(u_a$#FAMmNUCXFR1WdQ^WVcx&WA5OQy3KxO#jbc=x))atLr1Z z!Pbp8${$y~X@12R7VxSYbA*8q3xBl21saEE0<#QK%ikCB>&%#<&G@tK6PO|~1N_OG zRWv85lD0jj$;6ZPn{uI!w|Vb+`7tO`Sa&U?Mf3fPoAw}`n2+~-tqF4$0tL326=-eh z4NHGF)4*DAcqVH8&h4z*PL)aHW;5=SvQ92`tmSdsN=sZ2vXJfvZpf*Eiy>xM^OE$K z7tw@fkBeB-HQu@O8B#aX!+uk5`r-*KmBjN95fCD@NDs7Mz9^PGoh*GZvy?eA3}+xs zhOL@cMPcY$xoXzScCN9GCTsG2EfZ_&-pk z8n=u8a2LSe6!omD-xJNIUI-2IF)qTD4;!Rc4`4pw;1liomu3+d`WmT_bbE{~0%@6ZduZql^wtn}&DE7K%P_Apmybkm^VhO*9NZn6)13F(@`imUrYuy~5zSphy86gjbBK}= zx*6M55mo9>^O8ie?adM@%t(Gb%z9{+QRb)Wrm$<7%gqj@rzvN1*~Ja*b*PG>OIglkf-s1-OIo~ljcyoI~txArX5`0OuIdaL34kMIW7ePO55Dj zrnEmN{zKKKvPpEQ>H(K`1LFQ7l~_0?0< zyGtRWn|PwAQ};Kh9)UTUFBaGlMx#+#gTwOndJ@v18?a^3;214!qJM$&88YPxwmOtC zcju#+|E-C+`gP5-M2_aXbRcqd*mregiv@+QDqC<=L3=ZAFa-;>Z(m(%jYm%<@Av02 zimX2L>Os@D;&{Fdp!%#W%5a(9@NoCi%XfI0YhZK8+e8ax@Ky6uAaX$Vlf=}Hp={|( z9nQxlp2&N9K`{0a>U(!~dmCkro`m)~ys8Bu9g9fp&Lr{q;HXlFoyX#pyfJONUKzXS z3A=MmqV3i{&^3gPQ$aiT^N@H_jSvsgcGIdQ^sr_G$wl-r^scp*zSx{UWZM-c;XDIr zF3b&htRAOMP(=g3?HUhcqSru~`@;43&zuJj5Aux7@77LEss@yD3Y@jH1uS3N4dL!c z1OH~EA(dVt3p5b&+f~4g*95$oUs&xZWgLid;$DtIehjofmR7#JJqq#Y<^`YIw#~kH zcEC3Sw_^0ALmH7vupcE61J{_PM8cmN`;1n@VbK+&D`Nn>EjjJ`w@=(5tDSHjxd%?S z2q*0_8!Z=)dbXEe3uJzo`M#y&{t?LvYqem(#l3?FqgkOo!dh3lMob0KB${mKRfuJA zD&LbkvGzxxgLpFnNv}G5sMACWo2fe+MJl*Kir6YGq(-+ zeB1No+nd!D*;2AvM{;lMyRHo0v{Hgd*0UGmWAmkc0XU@__i$AsI-i&+u z6?)`15!f$w1A=dd9v2r(VKGwHZhy)3khICyneyB0uiQ?>Q^^X?$@wtaxNe9yTXa4Z z>xgYC|C5_Yl}0qetlPO+yf0JU*Ukd+t)lQnCV5L;)zZj)AsSt+t!y~a^TFa`)rQKR6qXe9y`>g zbY(9rW-#YCO?)|7s*L`w!9x|(+lx|j7bcp!eOS>sukX(5eKD>lXuyzz=_ugOZe+FS z3mHgRd}Ao{?2F937YNM}I5lO2eR^_M6w(+Mv)*1B#&2>^@240al{?p78UX8ndvM`U zjbHv#pDqs{6#Y*00lXF%$UYiPwX`8Jxt}P!_SjP3yF5-fmi(|cZ zZVo++rb6fd6m#eeFdOL+0cgZ63x9JQU(+0;KS^qKpjTzy>3r?Jx;MpFKx}2r!zS0) z#PNGg&9bBma}chEgU2So=O#qQ^F?rL>W=uT9!hs{?!r^MZ{_`lkG4dsh*R^DM9o7T z*#J{FC12`+l7kRmzhgxaIr@c~-3gnYl3Y!LF1~DWyk01N$6e_yu%uWv5dW96F#Po% zm9}nt@XE|$$uKxIH9@XEv?_bBF{;%fHpeb+y+#hk)0iz2aplxBfeZXhbdthm+r8Kl z#ug;-bzjE(Lt!7Eo#%@$-iD*7z*y8ZD*3q7#nzz`o7MJeuPR$Cf(xW(c_4j%lD>c&`O;K109VC+=9lAW&*}DAeC>-j-#3E} zwcK}ACW76x4i5AYDjtFEcjF5x9S;iZvK12F)t<%MOf41_65%;6HPjyAh?egSl_k2+ z`M3huN8;<8X-wp{eFAiD!!)DdO;8aWKY~GyAQkKP)SA2i*2pbvb;En-ytrMpOYI3t zOTIxyNh|K3zJ`1toGdk}@x0ge5TD-q(vryCPPSoTT&!(!T9o^?1jR8tsvq&C31QFL zSIRkkW2|lfh?&Mxsktp^gu<}c{OC4(=Wy~*Uak2HpZ|<6ehJxJ8lXMy*=uP9;*M_+ zy?LlRbx0XdG2jJX216F__rLs4g?lga<YY;0&K-$- zP(sHs@4Hu5i)y|tUrUdxkCWUJ+M4#IV%#k@vIHCL*u$5lCvkipu`Veq2OQw%c@#)C z)m+B7Agde&;SQrV2=eCXd?2y=oTw=n8E zU+?M@Hs$*wCDZwcCvvh|Fnd)?%*XOH?BGE7q9;suu516Ypb4(L>>V|#@hvM5Rfx;a z{>(2UBRBtRqJYZfFB;x@pn7RQR`@-sA}~_h_MyK2t?4}cmAers9@9>8+aKP4PaN<% z8K-H~vg3E>BaaH7&{UFP6%&PKD#8WRqAy;Xs*?eOpBy}1whvv@ z@ft)(LD>tj6l{I$$w%^vhI;p@Np;h@rp=QPzo;nT4P3?gS^gN?mOcEv+-VS_iCTG} zYF1J%Wji;P3Vtd zK=4}_ociF1D(dQbV&$3lUgVhd8-;C>MzF+`ge+WL5p_%9zMPZdS!C+e0|#*iihTV6 zNin2gOM6FL($e+*>O)kl{%dQ^VT&F#-Zl2`8&Khn^twqS1n{2Y9~a-j2;wH9_!~Uc zUiDfEO{8}dc5#=g7dBsil82q#_qOkmzGR$_uqrb$5XF+q8ZBY|>f!Awn9vj&Zi z@wwyJY=1DA>+!jc3#V^Ke~mZnznX8-P^tIMgNJszSpPK}p!ly;fTVfD=q`q5eDDQ~ z?>}InH#EFEr>BI%IwEl>`I(|Zu*QA^Ctl>7OXG)%KIwnz#!djEbQXhQC1$9I>ySp}JtZFX zjd8Hx*_6rsqzd$-JMV+%Ur`{x0{oYlGXO$CmySiZ;h01|ELb6vt^fO%G8(nnCfJYh zMjH7}{HJlxjg%pw&yC5oRrlqho=VSMJ(>CLTM~0DXrbn4xM0gM 
z`}FEUCUpzFyAWfcx66P+pL^bYEFyinZRxh;0hd~cMZy@DYJ0J`%erhlM&4%z3!)cLo+7a~QIZLidT19|n% zHA03iP~&o+Ls$Q7*ibKuPo(y|Fy_!hS`F5`=AoPv6N_xh<9dMn7J7Cag~2gXbPcF1 z8SA^0w?<|!C;uSamV9kb6{r%=7Bv{;3?lltBYSUm5qiS+5A<7~#faxOWvCzb-+8?x z`MuUYBqQV>00N&$l7l#g-Dn$*XLraqMKSML|g zNh4{1Uvn_<(HfNU!S-PQA4NTzK>dTVnDpfgjGdY;4v~_SPTvPfH_zx7?vNX{S v91&Xvo0AB;q-1C477vsX^&=8pG>B;%;v&$KJ|_YbIAE*FNW Date: Fri, 7 Apr 2023 15:14:58 +0100 Subject: [PATCH 454/628] [Tests] Run pre-commit on changed files --- tests/brevitas/test_brevitas_mobilenet.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index 25276eac76..c7f0f4ebf9 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -120,4 +120,6 @@ def test_brevitas_mobilenet(): produced = odict[model.graph.output[0].name] produced_prob = odict["TopK_0_out0"] * a0 assert (produced.flatten() == expected_top5).all() - assert np.isclose(produced_prob.flatten(), expected_top5_prob, atol=2.2*1e-1).all() + assert np.isclose( + produced_prob.flatten(), expected_top5_prob, atol=2.2 * 1e-1 + ).all() From 11b046cd4e654e762dfa6e618016d26ae1d1143b Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Apr 2023 16:20:57 +0100 Subject: [PATCH 455/628] [Tests] Mark mobilenet export test as xfail --- tests/brevitas/test_brevitas_mobilenet.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index c7f0f4ebf9..b469b197fa 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -54,6 +54,7 @@ @pytest.mark.brevitas_export +@pytest.mark.xfail def test_brevitas_mobilenet(): # get single image as input and prepare image img = Image.open(get_finn_root() + "/tests/brevitas/king_charles.jpg") From 30501464601528ea2ff14c4b94b6d55a44a3ae2f Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 11 Apr 2023 14:46:51 +0100 Subject: [PATCH 456/628] [notebooks] add pytests for testing all jupyter notebooks Signed-off-by: Fionn O'Donohoe --- .../advanced/0_custom_analysis_pass.ipynb | 6 +- .../1_custom_transformation_pass.ipynb | 5 +- .../1-train-mlp-with-brevitas.ipynb | 5 +- .../2-import-into-finn-and-verify.ipynb | 7 +- .../3-build-accelerator-with-finn.ipynb | 6 +- tests/notebooks/test_jupyter_notebooks.py | 83 +++++++++++++++++++ 6 files changed, 100 insertions(+), 12 deletions(-) create mode 100644 tests/notebooks/test_jupyter_notebooks.py diff --git a/notebooks/advanced/0_custom_analysis_pass.ipynb b/notebooks/advanced/0_custom_analysis_pass.ipynb index f8444520c3..0454010284 100644 --- a/notebooks/advanced/0_custom_analysis_pass.ipynb +++ b/notebooks/advanced/0_custom_analysis_pass.ipynb @@ -52,7 +52,9 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(\"../LFCW1A1.onnx\")" + "import os\n", + "print(os.getcwd())\n", + "showInNetron(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")" ] }, { @@ -69,7 +71,7 @@ "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper('../LFCW1A1.onnx')" + "model = ModelWrapper(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")" ] }, { diff --git a/notebooks/advanced/1_custom_transformation_pass.ipynb b/notebooks/advanced/1_custom_transformation_pass.ipynb index 391e852a71..8cdbabc34d 100644 --- a/notebooks/advanced/1_custom_transformation_pass.ipynb +++ b/notebooks/advanced/1_custom_transformation_pass.ipynb @@ -110,8 
diff --git a/notebooks/advanced/1_custom_transformation_pass.ipynb b/notebooks/advanced/1_custom_transformation_pass.ipynb
index 391e852a71..8cdbabc34d 100644
--- a/notebooks/advanced/1_custom_transformation_pass.ipynb
+++ b/notebooks/advanced/1_custom_transformation_pass.ipynb
@@ -110,8 +110,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
     "import onnx\n",
-    "onnx_model = onnx.load('../LFCW1A1.onnx')\n",
+    "onnx_model = onnx.load(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")\n",
     "from qonnx.core.modelwrapper import ModelWrapper\n",
     "onnx_model = ModelWrapper(onnx_model)"
    ]
   },
   {
@@ -122,7 +123,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "showInNetron('../LFCW1A1.onnx')"
+    "showInNetron(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")"
    ]
   },
   {
diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
index 9bb9e6761e..b99e9f16b2 100644
--- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
+++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb
@@ -483,13 +483,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
     "import torch\n",
     "\n",
     "# Make sure the model is on CPU before loading a pretrained state_dict\n",
     "model = model.cpu()\n",
     "\n",
     "# Load pretrained weights\n",
-    "trained_state_dict = torch.load(\"state_dict.pth\")[\"models_state_dict\"][0]\n",
+    "trained_state_dict = torch.load(os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/state_dict.pth\")[\"models_state_dict\"][0]\n",
     "\n",
     "model.load_state_dict(trained_state_dict, strict=False)"
    ]
@@ -680,7 +681,7 @@
     "from brevitas.export import export_finn_onnx\n",
     "from brevitas.quant_tensor import QuantTensor\n",
     "\n",
-    "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n",
+    "ready_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n",
     "input_shape = (1, 600)\n",
     "\n",
     "# create a QuantTensor instance to mark input as bipolar during export\n",
diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb
index e4848a1f40..0efaf62e7f 100644
--- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb
+++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb
@@ -62,9 +62,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import os\n",
     "from qonnx.core.modelwrapper import ModelWrapper\n",
     "\n",
-    "ready_model_filename = \"cybsec-mlp-ready.onnx\"\n",
+    "ready_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n",
     "model_for_sim = ModelWrapper(ready_model_filename)"
    ]
   },
@@ -151,7 +152,7 @@
     "model_for_sim = model_for_sim.transform(InferDataTypes())\n",
     "model_for_sim = model_for_sim.transform(RemoveStaticGraphInputs())\n",
     "\n",
-    "verif_model_filename = \"cybsec-mlp-verification.onnx\"\n",
+    "verif_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-verification.onnx\"\n",
     "model_for_sim.save(verif_model_filename)"
    ]
   },
@@ -258,7 +259,7 @@
     "\n",
     "# replace this with your trained network checkpoint if you're not\n",
     "# using the pretrained weights\n",
-    "trained_state_dict = torch.load(\"state_dict.pth\")[\"models_state_dict\"][0]\n",
+    "trained_state_dict = torch.load(os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/state_dict.pth\")[\"models_state_dict\"][0]\n",
     "# Uncomment the following line if you previously chose to train the network yourself\n",
     "#trained_state_dict = torch.load(\"state_dict_self-trained.pth\")\n",
     "\n",
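Both cybersecurity notebooks load the pretrained weights through the same checkpoint layout: state_dict.pth holds a dictionary whose "models_state_dict" entry is a list of state dicts, of which the first is used. A small sketch of that loading step, assuming the checkpoint layout shown in the diffs above:

import os
import torch

ckpt_path = os.environ["FINN_ROOT"] + "/notebooks/end2end_example/cybersecurity/state_dict.pth"
# the checkpoint is a dict; the list under "models_state_dict" holds the
# state dict of the trained MLP as its first element
trained_state_dict = torch.load(ckpt_path)["models_state_dict"][0]
# model.load_state_dict(trained_state_dict, strict=False) then restores the weights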
a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index a18cafd604..1c93e4f58b 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -115,7 +115,7 @@ "import os\n", "import shutil\n", "\n", - "model_file = \"cybsec-mlp-ready.onnx\"\n", + "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", "\n", "estimates_output_dir = \"output_estimates_only\"\n", "\n", @@ -272,7 +272,7 @@ "import os\n", "import shutil\n", "\n", - "model_file = \"cybsec-mlp-ready.onnx\"\n", + "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", "\n", "rtlsim_output_dir = \"output_ipstitch_ooc_rtlsim\"\n", "\n", @@ -412,7 +412,7 @@ "import os\n", "import shutil\n", "\n", - "model_file = \"cybsec-mlp-ready.onnx\"\n", + "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", "\n", "final_output_dir = \"output_final\"\n", "\n", diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py new file mode 100644 index 0000000000..3de586f1fd --- /dev/null +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -0,0 +1,83 @@ +import pytest + +import nbformat +from nbconvert.preprocessors import ExecutePreprocessor + +from finn.util.basic import get_finn_root + +notebook_basic_dir = get_finn_root() + "/notebooks/basics/" +notebook_advanced_dir = get_finn_root() + "/notebooks/advanced/" +notebook_cyber_dir = get_finn_root() + "/notebooks/end2end_example/cybersecurity/" +notebook_bnn_dir = get_finn_root() + "/notebooks/end2end_example/bnn-pynq/" + +basics_notebooks = [ + pytest.param( + notebook_basic_dir + "0_how_to_work_with_onnx.ipynb", + marks=pytest.mark.notebooks_basic, + ), + pytest.param( + notebook_basic_dir + "1a_brevitas_network_import_via_FINN-ONNX.ipynb", + marks=pytest.mark.notebooks_basic, + ), + pytest.param( + notebook_basic_dir + "1b_brevitas_network_import_via_QONNX.ipynb", + marks=pytest.mark.notebooks_basic, + ), +] + +advanced_notebooks = [ + pytest.param( + notebook_advanced_dir + "0_custom_analysis_pass.ipynb", + marks=pytest.mark.notebooks_advanced, + ), + pytest.param( + notebook_advanced_dir + "1_custom_transformation_pass.ipynb", + marks=pytest.mark.notebooks_advanced, + ), + pytest.param( + notebook_advanced_dir + "2_custom_op.ipynb", + marks=pytest.mark.notebooks_advanced, + ), +] + +cyber_notebooks = [ + pytest.param( + notebook_cyber_dir + "1-train-mlp-with-brevitas.ipynb", + marks=pytest.mark.notebooks_cyber, + ), + pytest.param( + notebook_cyber_dir + "2-import-into-finn-and-verify.ipynb", + marks=pytest.mark.notebooks_cyber, + ), + pytest.param( + notebook_cyber_dir + "3-build-accelerator-with-finn.ipynb", + marks=pytest.mark.notebooks_cyber, + ), +] + +bnn_notebooks = [ + pytest.param( + notebook_bnn_dir + "cnv_end2end_example.ipynb", marks=pytest.mark.notebooks_bnn + ), + pytest.param( + notebook_bnn_dir + "tfc_end2end_example.ipynb", marks=pytest.mark.notebooks_bnn + ), + pytest.param( + notebook_bnn_dir + "tfc_end2end_verification.ipynb", + marks=pytest.mark.notebooks_bnn, + ), +] + + +@pytest.mark.notebooks +@pytest.mark.parametrize( + "notebook", basics_notebooks + advanced_notebooks + cyber_notebooks + bnn_notebooks +) +def test_notebook_exec(notebook): + with 
open(notebook) as f: + nb = nbformat.read(f, as_version=4) + ep = ExecutePreprocessor(timeout=600, kernel_name="python3") + try: + assert ep.preprocess(nb) is not None, f"Got empty notebook for {notebook}" + except Exception: + assert False, f"Failed executing {notebook}" From 16d1e63e0ff0b016faa3c4aa0239740716de7180 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 12:23:09 +0100 Subject: [PATCH 457/628] [notebooks] increase notebook timeout to 1 hour Signed-off-by: Fionn O'Donohoe --- tests/notebooks/test_jupyter_notebooks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index 3de586f1fd..d8669d7e7e 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -5,6 +5,7 @@ from finn.util.basic import get_finn_root +notebook_timeout_seconds = 3600 notebook_basic_dir = get_finn_root() + "/notebooks/basics/" notebook_advanced_dir = get_finn_root() + "/notebooks/advanced/" notebook_cyber_dir = get_finn_root() + "/notebooks/end2end_example/cybersecurity/" @@ -76,7 +77,9 @@ def test_notebook_exec(notebook): with open(notebook) as f: nb = nbformat.read(f, as_version=4) - ep = ExecutePreprocessor(timeout=600, kernel_name="python3") + ep = ExecutePreprocessor( + timeout=notebook_timeout_seconds, kernel_name="python3" + ) try: assert ep.preprocess(nb) is not None, f"Got empty notebook for {notebook}" except Exception: From 10f07ab573bc3847917984c2ec871d9b0544e36c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 13:37:36 +0100 Subject: [PATCH 458/628] [notebooks] tidy up paths to ONNX files by reusing variables Signed-off-by: Fionn O'Donohoe --- .../cybersecurity/1-train-mlp-with-brevitas.ipynb | 8 +++++--- .../cybersecurity/2-import-into-finn-and-verify.ipynb | 8 +++++--- .../cybersecurity/3-build-accelerator-with-finn.ipynb | 7 ++++--- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index b99e9f16b2..7bfedf4bbb 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -63,7 +63,9 @@ "outputs": [], "source": [ "import onnx\n", - "import torch" + "import torch\n", + "\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity\"" ] }, { @@ -490,7 +492,7 @@ "model = model.cpu()\n", "\n", "# Load pretrained weights\n", - "trained_state_dict = torch.load(os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/state_dict.pth\")[\"models_state_dict\"][0]\n", + "trained_state_dict = torch.load(model_dir + \"/state_dict.pth\")[\"models_state_dict\"][0]\n", "\n", "model.load_state_dict(trained_state_dict, strict=False)" ] @@ -681,7 +683,7 @@ "from brevitas.export import export_finn_onnx\n", "from brevitas.quant_tensor import QuantTensor\n", "\n", - "ready_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", + "ready_model_filename = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "input_shape = (1, 600)\n", "\n", "# create a QuantTensor instance to mark input as bipolar during export\n", diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb 
index 0efaf62e7f..5546ea3d09 100644 --- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -65,7 +65,8 @@ "import os\n", "from qonnx.core.modelwrapper import ModelWrapper\n", "\n", - "ready_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity\"\n", + "ready_model_filename = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "model_for_sim = ModelWrapper(ready_model_filename)" ] }, @@ -152,7 +153,7 @@ "model_for_sim = model_for_sim.transform(InferDataTypes())\n", "model_for_sim = model_for_sim.transform(RemoveStaticGraphInputs())\n", "\n", - "verif_model_filename = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-verification.onnx\"\n", + "verif_model_filename = model_dir + \"/cybsec-mlp-verification.onnx\"\n", "model_for_sim.save(verif_model_filename)" ] }, @@ -259,7 +260,8 @@ "\n", "# replace this with your trained network checkpoint if you're not\n", "# using the pretrained weights\n", - "trained_state_dict = torch.load(os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/state_dict.pth\")[\"models_state_dict\"][0]\n", + "trained_state_dict = torch.load(model_dir + \"/state_dict.pth\")[\"models_state_dict\"][0]\n", + "\n", "# Uncomment the following line if you previously chose to train the network yourself\n", "#trained_state_dict = torch.load(\"state_dict_self-trained.pth\")\n", "\n", diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index 1c93e4f58b..8bd6993e53 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -115,7 +115,8 @@ "import os\n", "import shutil\n", "\n", - "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity\"\n", + "model_file = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "\n", "estimates_output_dir = \"output_estimates_only\"\n", "\n", @@ -272,7 +273,7 @@ "import os\n", "import shutil\n", "\n", - "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", + "model_file = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "\n", "rtlsim_output_dir = \"output_ipstitch_ooc_rtlsim\"\n", "\n", @@ -412,7 +413,7 @@ "import os\n", "import shutil\n", "\n", - "model_file = os.environ['FINN_ROOT'] + \"/notebooks/end2end_example/cybersecurity/cybsec-mlp-ready.onnx\"\n", + "model_file = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "\n", "final_output_dir = \"output_final\"\n", "\n", From e52e49e6909cf435536d79d7265e0b3fcb2a6b0e Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 13:39:11 +0100 Subject: [PATCH 459/628] [notebooks] add assertion checks to aid with CI Signed-off-by: Fionn O'Donohoe --- .../bnn-pynq/tfc_end2end_verification.ipynb | 30 ++++++++++--------- .../2-import-into-finn-and-verify.ipynb | 7 +++-- .../3-build-accelerator-with-finn.ipynb | 20 +++++++++++++ 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb 
b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb index 6c3b796509..2f6cde6e5b 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_verification.ipynb @@ -121,12 +121,11 @@ "output_dict = oxe.execute_onnx(model_for_sim, input_dict, return_full_exec_context=False)\n", "output_pysim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "\n", - "\n", - "if np.isclose(output_pysim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", + "try:\n", + " assert np.isclose(output_pysim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all()\n", " print(\"Results are the same!\")\n", - "else:\n", - " print(\"The results are not the same!\")" + "except AssertionError:\n", + " assert False, \"The results are not the same!\"" ] }, { @@ -268,10 +267,11 @@ "output_dict = oxe.execute_onnx(parent_model, input_dict)\n", "output_cppsim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "if np.isclose(output_cppsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", + "try:\n", + " assert np.isclose(output_cppsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all()\n", " print(\"Results are the same!\")\n", - "else:\n", - " print(\"The results are not the same!\")" + "except AssertionError:\n", + " assert False, \"The results are not the same!\"" ] }, { @@ -356,10 +356,11 @@ "output_dict = oxe.execute_onnx(model_for_rtlsim, input_dict)\n", "output_rtlsim = output_dict[list(output_dict.keys())[0]]\n", "\n", - "if np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", + "try:\n", + " assert np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all()\n", " print(\"Results are the same!\")\n", - "else:\n", - " print(\"The results are not the same!\")" + "except AssertionError:\n", + " assert False, \"The results are not the same!\"" ] }, { @@ -430,10 +431,11 @@ "metadata": {}, "outputs": [], "source": [ - "if np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all():\n", + "try:\n", + " assert np.isclose(output_rtlsim, np.where(output_golden[0]==np.amax(output_golden[0])), atol=1e-3).all()\n", " print(\"Results are the same!\")\n", - "else:\n", - " print(\"The results are not the same!\")" + "except AssertionError:\n", + " assert False, \"The results are not the same!\"" ] } ], diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb index 5546ea3d09..5f4924b309 100644 --- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -368,10 +368,11 @@ "metadata": {}, "outputs": [], "source": [ - "if ok == n_verification_inputs:\n", + "try:\n", + " assert ok == n_verification_inputs\n", " print(\"Verification succeeded. Brevitas and FINN-ONNX execution outputs are identical\")\n", - "else:\n", - " print(\"Verification failed. Brevitas and FINN-ONNX execution outputs are NOT identical\")" + "except AssertionError:\n", + " assert False, \"Verification failed. 
Brevitas and FINN-ONNX execution outputs are NOT identical\"" ] }, { diff --git a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb index 8bd6993e53..80f3cd3819 100644 --- a/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb +++ b/notebooks/end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb @@ -149,6 +149,15 @@ "build.build_dataflow_cfg(model_file, cfg_estimates)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert os.path.exists(estimates_output_dir + \"/report/estimate_network_performance.json\")" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -306,6 +315,17 @@ "build.build_dataflow_cfg(model_file, cfg_stitched_ip)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "assert os.path.exists(rtlsim_output_dir + \"/report/ooc_synth_and_timing.json\")\n", + "assert os.path.exists(rtlsim_output_dir + \"/report/rtlsim_performance.json\")\n", + "assert os.path.exists(rtlsim_output_dir + \"/final_hw_config.json\")" + ] + }, { "cell_type": "markdown", "metadata": {}, From 79368dd1f2072d4ac3b0b85dcb2853811b13cfc6 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 13:44:16 +0100 Subject: [PATCH 460/628] [notebooks] add test markers to setup.cfg for Jupyter notebooks to prevent warnings during pytest runs Signed-off-by: Fionn O'Donohoe --- setup.cfg | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setup.cfg b/setup.cfg index 1893aa4231..c0d893d5b6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -127,6 +127,11 @@ markers = transform: mark tests that test transformations (before hls layers) fpgadataflow: mark tests related to hls layers end2end: mark tests that run the end2end flow + notebooks_basic: mark tests that execute all 'basic' Jupyter notebooks + notebooks_advanced: mark tests that execute all 'advanced' Jupyter notebooks + notebooks_cyber: mark tests that execute all 'cyber' Jupyter notebooks + notebooks_bnn: mark tests that execute all 'bnn' Jupyter notebooks + notebooks: mark tests that execute all Jupyter notebooks norecursedirs = dist build From 77dc1d4817d7e1b2bef36adfcc78b4ce8aa7dabe Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 13:55:43 +0100 Subject: [PATCH 461/628] [notebooks] tidy up paths to ONNX files by reusing variables - advanced notebooks Signed-off-by: Fionn O'Donohoe --- notebooks/advanced/0_custom_analysis_pass.ipynb | 6 +++--- notebooks/advanced/1_custom_transformation_pass.ipynb | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/notebooks/advanced/0_custom_analysis_pass.ipynb b/notebooks/advanced/0_custom_analysis_pass.ipynb index 0454010284..f915b11fa0 100644 --- a/notebooks/advanced/0_custom_analysis_pass.ipynb +++ b/notebooks/advanced/0_custom_analysis_pass.ipynb @@ -53,8 +53,8 @@ "outputs": [], "source": [ "import os\n", - "print(os.getcwd())\n", - "showInNetron(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")" + "notebook_dir = os.environ['FINN_ROOT'] + \"/notebooks\"\n", + "showInNetron(notebook_dir + \"/LFCW1A1.onnx\")" ] }, { @@ -71,7 +71,7 @@ "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")" + "model = ModelWrapper(notebook_dir + \"/LFCW1A1.onnx\")" ] }, { diff --git 
a/notebooks/advanced/1_custom_transformation_pass.ipynb b/notebooks/advanced/1_custom_transformation_pass.ipynb index 8cdbabc34d..7e4989c902 100644 --- a/notebooks/advanced/1_custom_transformation_pass.ipynb +++ b/notebooks/advanced/1_custom_transformation_pass.ipynb @@ -111,8 +111,10 @@ "outputs": [], "source": [ "import os\n", + "notebook_dir = os.environ['FINN_ROOT'] + \"/notebooks\"\n", + "\n", "import onnx\n", - "onnx_model = onnx.load(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")\n", + "onnx_model = onnx.load(notebook_dir + \"/LFCW1A1.onnx\")\n", "from qonnx.core.modelwrapper import ModelWrapper\n", "onnx_model = ModelWrapper(onnx_model)" ] @@ -123,7 +125,7 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(os.environ['FINN_ROOT'] + \"/notebooks/LFCW1A1.onnx\")" + "showInNetron(notebook_dir + \"/LFCW1A1.onnx\")" ] }, { From e9c89ffb712cea51b5bfa94aba0bf0a03c000b40 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 15:07:11 +0100 Subject: [PATCH 462/628] [notebooks] reduce notebooks markers to only 1 Signed-off-by: Fionn O'Donohoe --- setup.cfg | 4 -- tests/notebooks/test_jupyter_notebooks.py | 58 +++++------------------ 2 files changed, 12 insertions(+), 50 deletions(-) diff --git a/setup.cfg b/setup.cfg index c0d893d5b6..63dec2b8b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -127,10 +127,6 @@ markers = transform: mark tests that test transformations (before hls layers) fpgadataflow: mark tests related to hls layers end2end: mark tests that run the end2end flow - notebooks_basic: mark tests that execute all 'basic' Jupyter notebooks - notebooks_advanced: mark tests that execute all 'advanced' Jupyter notebooks - notebooks_cyber: mark tests that execute all 'cyber' Jupyter notebooks - notebooks_bnn: mark tests that execute all 'bnn' Jupyter notebooks notebooks: mark tests that execute all Jupyter notebooks norecursedirs = dist diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index d8669d7e7e..819b4ccde0 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -12,61 +12,27 @@ notebook_bnn_dir = get_finn_root() + "/notebooks/end2end_example/bnn-pynq/" basics_notebooks = [ - pytest.param( - notebook_basic_dir + "0_how_to_work_with_onnx.ipynb", - marks=pytest.mark.notebooks_basic, - ), - pytest.param( - notebook_basic_dir + "1a_brevitas_network_import_via_FINN-ONNX.ipynb", - marks=pytest.mark.notebooks_basic, - ), - pytest.param( - notebook_basic_dir + "1b_brevitas_network_import_via_QONNX.ipynb", - marks=pytest.mark.notebooks_basic, - ), + pytest.param(notebook_basic_dir + "0_how_to_work_with_onnx.ipynb"), + pytest.param(notebook_basic_dir + "1a_brevitas_network_import_via_FINN-ONNX.ipynb"), + pytest.param(notebook_basic_dir + "1b_brevitas_network_import_via_QONNX.ipynb"), ] advanced_notebooks = [ - pytest.param( - notebook_advanced_dir + "0_custom_analysis_pass.ipynb", - marks=pytest.mark.notebooks_advanced, - ), - pytest.param( - notebook_advanced_dir + "1_custom_transformation_pass.ipynb", - marks=pytest.mark.notebooks_advanced, - ), - pytest.param( - notebook_advanced_dir + "2_custom_op.ipynb", - marks=pytest.mark.notebooks_advanced, - ), + pytest.param(notebook_advanced_dir + "0_custom_analysis_pass.ipynb"), + pytest.param(notebook_advanced_dir + "1_custom_transformation_pass.ipynb"), + pytest.param(notebook_advanced_dir + "2_custom_op.ipynb"), ] cyber_notebooks = [ - pytest.param( - notebook_cyber_dir + "1-train-mlp-with-brevitas.ipynb", - 
marks=pytest.mark.notebooks_cyber, - ), - pytest.param( - notebook_cyber_dir + "2-import-into-finn-and-verify.ipynb", - marks=pytest.mark.notebooks_cyber, - ), - pytest.param( - notebook_cyber_dir + "3-build-accelerator-with-finn.ipynb", - marks=pytest.mark.notebooks_cyber, - ), + pytest.param(notebook_cyber_dir + "1-train-mlp-with-brevitas.ipynb"), + pytest.param(notebook_cyber_dir + "2-import-into-finn-and-verify.ipynb"), + pytest.param(notebook_cyber_dir + "3-build-accelerator-with-finn.ipynb"), ] bnn_notebooks = [ - pytest.param( - notebook_bnn_dir + "cnv_end2end_example.ipynb", marks=pytest.mark.notebooks_bnn - ), - pytest.param( - notebook_bnn_dir + "tfc_end2end_example.ipynb", marks=pytest.mark.notebooks_bnn - ), - pytest.param( - notebook_bnn_dir + "tfc_end2end_verification.ipynb", - marks=pytest.mark.notebooks_bnn, - ), + pytest.param(notebook_bnn_dir + "cnv_end2end_example.ipynb"), + pytest.param(notebook_bnn_dir + "tfc_end2end_example.ipynb"), + pytest.param(notebook_bnn_dir + "tfc_end2end_verification.ipynb"), ] From 63918e7165c8230411564425e6a918fc7a42bdcc Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 18 Apr 2023 15:09:25 +0100 Subject: [PATCH 463/628] [notebooks] remove notebooks from being tested during quicktest test suite Signed-off-by: Fionn O'Donohoe --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index b4ad37232f..466fcfb09d 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,7 +6,7 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - python setup.py test --addopts "-m 'not (vivado or slow or vitis or board)' --dist=loadfile -n $PYTEST_PARALLEL" + python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL" elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL" From 9ee018ff47b25e153e54c20a0c46d5961910434e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 18 Apr 2023 20:42:46 +0100 Subject: [PATCH 464/628] Streamlined memstream module tested with random interleaved parameter readback. 
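
Note: the following is an illustrative sketch of the kind of randomized, interleaved readback test referred to in the subject line above, not the memstream_tb.sv that this patch adds. The testbench module name memstream_sketch_tb, the DEPTH/WIDTH values, the throttling probabilities and the checking scheme are assumptions; only the DUT port names are taken from hdl/memstream.sv below. The idea is to randomly throttle the continuous output stream while randomly injecting read-only accesses on the ap_memory-style config port, and to check that the streamed words still appear in cyclic address order.

module memstream_sketch_tb;
	localparam int unsigned DEPTH = 32;
	localparam int unsigned WIDTH = 8;

	logic clk = 0;
	always #5 clk = !clk;
	logic rst = 1;

	// DUT hookup: port names as defined in hdl/memstream.sv
	logic             config_ce = 0;
	logic             config_we = 0;
	logic [31:0]      config_address = '0;
	logic [WIDTH-1:0] config_d0 = '0;
	uwire             config_rack;
	uwire [WIDTH-1:0] config_q0;
	logic             ordy = 0;
	uwire             ovld;
	uwire [WIDTH-1:0] odat;

	memstream #(.DEPTH(DEPTH), .WIDTH(WIDTH)) dut (
		.clk(clk), .rst(rst),
		.config_ce(config_ce), .config_we(config_we),
		.config_address(config_address), .config_d0(config_d0),
		.config_rack(config_rack), .config_q0(config_q0),
		.ordy(ordy), .ovld(ovld), .odat(odat)
	);

	initial begin
		// A real test would first load the memory via INIT_FILE or
		// config-port writes; here only the access pattern is sketched.
		repeat(4) @(posedge clk);
		rst <= 0;

		// Randomly throttle the stream consumer while randomly
		// interleaving read-only requests on the config port.
		repeat(20*DEPTH) @(posedge clk) begin
			ordy           <= $urandom_range(0, 1);
			config_ce      <= $urandom_range(0, 3) == 0;
			config_we      <= 0;
			config_address <= $urandom_range(0, DEPTH-1);
		end
		$finish;
	end

	// Streamed words must keep their cyclic address order even while
	// readbacks are interleaved; a reference model would be checked here.
	int unsigned exp_addr = 0;
	always @(posedge clk) begin
		if(!rst && ovld && ordy) begin
			// e.g. assert(odat == ref_mem[exp_addr]);  // ref_mem: assumed reference copy
			exp_addr <= (exp_addr + 1) % DEPTH;
		end
	end

endmodule : memstream_sketch_tb

Because the core stalls the stream pipeline (and rolls it back) whenever a config access arrives, interleaving the two access types at random is what stresses the rollback path; the actual testbench added below exercises this scenario.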
--- finn-rtllib/memstream/component.xml | 1707 --- finn-rtllib/memstream/gui/memstream_v1_0.gtcl | 2 - finn-rtllib/memstream/hdl/Q_srl.v | 308 - finn-rtllib/memstream/hdl/memstream.sv | 176 + finn-rtllib/memstream/hdl/memstream.v | 327 - finn-rtllib/memstream/hdl/memstream_axi.sv | 136 + .../memstream/hdl/memstream_multiblock.v | 474 - .../memstream/hdl/memstream_singleblock.v | 246 - finn-rtllib/memstream/hdl/mux.v | 44 - finn-rtllib/memstream/hdl/ramb18_sdp.v | 96 - .../memstream/hdl/ramb18_wf_dualport.v | 111 - finn-rtllib/memstream/sim/gen_memblocks.sh | 39 - finn-rtllib/memstream/sim/golden.dat | 9216 ----------------- finn-rtllib/memstream/sim/memstream_tb.sv | 212 + finn-rtllib/memstream/sim/tb_memstream.v | 369 - .../memstream/sim/tb_memstream_writes.v | 486 - finn-rtllib/memstream/sim/test.sh | 32 - finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 394 - 18 files changed, 524 insertions(+), 13851 deletions(-) delete mode 100644 finn-rtllib/memstream/component.xml delete mode 100644 finn-rtllib/memstream/gui/memstream_v1_0.gtcl delete mode 100644 finn-rtllib/memstream/hdl/Q_srl.v create mode 100644 finn-rtllib/memstream/hdl/memstream.sv delete mode 100644 finn-rtllib/memstream/hdl/memstream.v create mode 100644 finn-rtllib/memstream/hdl/memstream_axi.sv delete mode 100644 finn-rtllib/memstream/hdl/memstream_multiblock.v delete mode 100644 finn-rtllib/memstream/hdl/memstream_singleblock.v delete mode 100644 finn-rtllib/memstream/hdl/mux.v delete mode 100644 finn-rtllib/memstream/hdl/ramb18_sdp.v delete mode 100644 finn-rtllib/memstream/hdl/ramb18_wf_dualport.v delete mode 100644 finn-rtllib/memstream/sim/gen_memblocks.sh delete mode 100644 finn-rtllib/memstream/sim/golden.dat create mode 100644 finn-rtllib/memstream/sim/memstream_tb.sv delete mode 100644 finn-rtllib/memstream/sim/tb_memstream.v delete mode 100644 finn-rtllib/memstream/sim/tb_memstream_writes.v delete mode 100755 finn-rtllib/memstream/sim/test.sh delete mode 100644 finn-rtllib/memstream/xgui/memstream_v1_0.tcl diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml deleted file mode 100644 index 63a8540a76..0000000000 --- a/finn-rtllib/memstream/component.xml +++ /dev/null @@ -1,1707 +0,0 @@ - - - xilinx.com - user - memstream - 1.0 - - - m_axis_0 - - - - - - - TDATA - - - m_axis_0_tdata - - - - - TVALID - - - m_axis_0_tvalid - - - - - TREADY - - - m_axis_0_tready - - - - - - m_axis_1 - - - - - - - TDATA - - - m_axis_1_tdata - - - - - TVALID - - - m_axis_1_tvalid - - - - - TREADY - - - m_axis_1_tready - - - - - - - true - - - - - - m_axis_2 - - - - - - - TDATA - - - m_axis_2_tdata - - - - - TVALID - - - m_axis_2_tvalid - - - - - TREADY - - - m_axis_2_tready - - - - - - - true - - - - - - m_axis_3 - - - - - - - TDATA - - - m_axis_3_tdata - - - - - TVALID - - - m_axis_3_tvalid - - - - - TREADY - - - m_axis_3_tready - - - - - - - true - - - - - - m_axis_4 - - - - - - - TDATA - - - m_axis_4_tdata - - - - - TVALID - - - m_axis_4_tvalid - - - - - TREADY - - - m_axis_4_tready - - - - - - - true - - - - - - m_axis_5 - - - - - - - TDATA - - - m_axis_5_tdata - - - - - TVALID - - - m_axis_5_tvalid - - - - - TREADY - - - m_axis_5_tready - - - - - - - true - - - - - - s_axilite - - - - - - - - - AWADDR - - - awaddr - - - - - AWPROT - - - awprot - - - - - AWVALID - - - awvalid - - - - - AWREADY - - - awready - - - - - WDATA - - - wdata - - - - - WSTRB - - - wstrb - - - - - WVALID - - - wvalid - - - - - WREADY - - - wready - - - - - BRESP - - - bresp - - - - - BVALID - - - bvalid - - - - - BREADY - - - 
bready - - - - - ARADDR - - - araddr - - - - - ARPROT - - - arprot - - - - - ARVALID - - - arvalid - - - - - ARREADY - - - arready - - - - - RDATA - - - rdata - - - - - RRESP - - - rresp - - - - - RVALID - - - rvalid - - - - - RREADY - - - rready - - - - - - - true - - - - - - aresetn - - - - - - - RST - - - aresetn - - - - - - POLARITY - ACTIVE_LOW - - - - - aclk - - - - - - - CLK - - - aclk - - - - - - ASSOCIATED_BUSIF - m_axis_0:m_axis_1:m_axis_2:m_axis_3:m_axis_4:m_axis_5:s_axilite - - - ASSOCIATED_RESET - aresetn - - - - - - - interface_aximm - - reg0 - 0 - 65536 - 32 - register - - - - - - - xilinx_anylanguagesynthesis - Synthesis - :vivado.xilinx.com:synthesis - Verilog - memstream - - xilinx_anylanguagesynthesis_view_fileset - - - - viewChecksum - 1fc5a310 - - - - - xilinx_anylanguagebehavioralsimulation - Simulation - :vivado.xilinx.com:simulation - Verilog - memstream - - xilinx_anylanguagebehavioralsimulation_view_fileset - - - - viewChecksum - d02d9990 - - - - - xilinx_xpgui - UI Layout - :vivado.xilinx.com:xgui.ui - - xilinx_xpgui_view_fileset - - - - viewChecksum - f960907f - - - - - xilinx_utilityxitfiles - Utility XIT/TTCL - :vivado.xilinx.com:xit.util - - xilinx_utilityxitfiles_view_fileset - - - - viewChecksum - d2aad2c5 - - - - - - - aclk - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - aresetn - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - awready - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - awvalid - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - awaddr - - in - - 15 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - awprot - - in - - 2 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - wready - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - wvalid - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - wdata - - in - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - wstrb - - in - - 3 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - bready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - bvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - bresp - - out - - 1 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - arready - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - arvalid - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - araddr - - in - - 15 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - arprot - - in - - 2 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - rready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - rvalid - - out - - 
- std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - rresp - - out - - 1 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - rdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_0_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_0_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_0_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_0_tdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_1_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_1_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_1_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_1_tdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_2_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_2_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_2_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_2_tdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_3_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_3_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_3_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_3_tdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_4_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_4_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_4_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_4_tdata - - out - - 31 - 0 - - - - std_logic_vector - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_5_afull - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 0 - - - - - - true - - - - - - m_axis_5_tready - - in - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - 1 - - - - - m_axis_5_tvalid - - out - - - std_logic - xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - m_axis_5_tdata - - out - - 31 - 0 - - - - std_logic_vector - 
xilinx_anylanguagesynthesis - xilinx_anylanguagebehavioralsimulation - - - - - - - - CONFIG_EN - Config En - true - - - NSTREAMS - Nstreams - 6 - - - MEM_DEPTH - Mem Depth - 13824 - - - MEM_WIDTH - Mem Width - 32 - - - MEM_INIT - Mem Init - ./ - - - RAM_STYLE - Ram Style - auto - - - STRM0_WIDTH - Strm0 Width - 32 - - - STRM1_WIDTH - Strm1 Width - 32 - - - STRM2_WIDTH - Strm2 Width - 32 - - - STRM3_WIDTH - Strm3 Width - 32 - - - STRM4_WIDTH - Strm4 Width - 32 - - - STRM5_WIDTH - Strm5 Width - 32 - - - STRM0_DEPTH - Strm0 Depth - 2304 - - - STRM1_DEPTH - Strm1 Depth - 2304 - - - STRM2_DEPTH - Strm2 Depth - 2304 - - - STRM3_DEPTH - Strm3 Depth - 2304 - - - STRM4_DEPTH - Strm4 Depth - 2304 - - - STRM5_DEPTH - Strm5 Depth - 2304 - - - STRM0_OFFSET - Strm0 Offset - 0 - - - STRM1_OFFSET - Strm1 Offset - 2304 - - - STRM2_OFFSET - Strm2 Offset - 4608 - - - STRM3_OFFSET - Strm3 Offset - 6912 - - - STRM4_OFFSET - Strm4 Offset - 9216 - - - STRM5_OFFSET - Strm5 Offset - 11520 - - - AXILITE_ADDR_WIDTH - Axilite Addr Width - 16 - - - - - - choice_list_9d8b0d81 - ACTIVE_HIGH - ACTIVE_LOW - - - choice_list_e2bd1cd0 - auto - distributed - block - ultra - - - - - xilinx_anylanguagesynthesis_view_fileset - - hdl/axilite_if.v - verilogSource - - - hdl/memstream.v - verilogSource - - - hdl/memstream_multiblock.v - verilogSource - - - hdl/memstream_singleblock.v - verilogSource - - - hdl/mux.v - verilogSource - - - hdl/ramb18_sdp.v - verilogSource - - - hdl/ramb18_wf_dualport.v - verilogSource - CHECKSUM_9425c051 - - - - xilinx_anylanguagebehavioralsimulation_view_fileset - - hdl/memstream.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/axilite_if.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/memstream_singleblock.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/mux.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/ramb18_wf_dualport.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/memstream_multiblock.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - hdl/ramb18_sdp.v - verilogSource - USED_IN_ipstatic - xil_defaultlib - - - - xilinx_xpgui_view_fileset - - xgui/memstream_v1_0.tcl - tclSource - CHECKSUM_f960907f - XGUI_VERSION_2 - - - - xilinx_utilityxitfiles_view_fileset - - gui/memstream_v1_0.gtcl - GTCL - - - - memstream_v1_0 - - - CONFIG_EN - Config En - true - - - NSTREAMS - Nstreams - 6 - - - MEM_DEPTH - Mem Depth - 13824 - - - MEM_WIDTH - Mem Width - 32 - - - MEM_INIT - Mem Init - ./ - - - RAM_STYLE - Ram Style - auto - - - STRM0_WIDTH - Strm0 Width - 32 - - - STRM1_WIDTH - Strm1 Width - 32 - - - STRM2_WIDTH - Strm2 Width - 32 - - - STRM3_WIDTH - Strm3 Width - 32 - - - STRM4_WIDTH - Strm4 Width - 32 - - - STRM5_WIDTH - Strm5 Width - 32 - - - STRM0_DEPTH - Strm0 Depth - 2304 - - - STRM1_DEPTH - Strm1 Depth - 2304 - - - STRM2_DEPTH - Strm2 Depth - 2304 - - - STRM3_DEPTH - Strm3 Depth - 2304 - - - STRM4_DEPTH - Strm4 Depth - 2304 - - - STRM5_DEPTH - Strm5 Depth - 2304 - - - STRM0_OFFSET - Strm0 Offset - 0 - - - STRM1_OFFSET - Strm1 Offset - 2304 - - - STRM2_OFFSET - Strm2 Offset - 4608 - - - STRM3_OFFSET - Strm3 Offset - 6912 - - - STRM4_OFFSET - Strm4 Offset - 9216 - - - STRM5_OFFSET - Strm5 Offset - 11520 - - - AXILITE_ADDR_WIDTH - Axilite Addr Width - 16 - - - - false - - - - - - Component_Name - memstream_v1_0 - - - - - - aartix7 - akintex7 - artix7 - artix7l - azynq - kintex7 - kintex7l - kintexu - kintexuplus - qkintex7 - qkintex7l - qvirtex7 - qzynq - qzynqplus - versal - versalprime - virtex7 - virtexu - 
virtexuplus - virtexuplusHBM - virtexupluse58g - zynq - zynquplus - - - /UserIP - - memstream_v1_0 - package_project - 5 - 2020-10-09T15:31:57Z - - - 2020.1 - - - - - - - - - diff --git a/finn-rtllib/memstream/gui/memstream_v1_0.gtcl b/finn-rtllib/memstream/gui/memstream_v1_0.gtcl deleted file mode 100644 index a68b85e1f5..0000000000 --- a/finn-rtllib/memstream/gui/memstream_v1_0.gtcl +++ /dev/null @@ -1,2 +0,0 @@ -# This file is automatically written. Do not modify. -proc gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE {MEM_DEPTH MEM_WIDTH } {expr 2+ceil(log($MEM_DEPTH*pow(2,ceil(log(($MEM_WIDTH+31)/32)/log(2))))/log(2))} diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v deleted file mode 100644 index 11cef604e0..0000000000 --- a/finn-rtllib/memstream/hdl/Q_srl.v +++ /dev/null @@ -1,308 +0,0 @@ -// original source: -// https://github.com/nachiket/tdfc/blob/master/verilog/queues/Q_srl_oreg3_prefull_SIMPLE.v - - -// Copyright (c) 1999 The Regents of the University of California -// Copyright (c) 2010 The Regents of the University of Pennsylvania -// Copyright (c) 2011 Department of Electrical and Electronic Engineering, Imperial College London -// Copyright (c) 2020 Xilinx -// -// Permission to use, copy, modify, and distribute this software and -// its documentation for any purpose, without fee, and without a -// written agreement is hereby granted, provided that the above copyright -// notice and this paragraph and the following two paragraphs appear in -// all copies. -// -// IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR -// DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING -// LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, -// EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF -// SUCH DAMAGE. -// -// THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -// AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON -// AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO -// PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. -// - -// Q_srl_oreg3_prefull_SIMPLE.v -// -// - In-page queue with parameterizable depth, bit width -// - Stream I/O is triple (data, valid, back-pressure), -// with EOS concatenated into the data -// - Flow control for input & output is combinationally decoupled -// - 2 <= depth <= 256 -// * (depth >= 2) is required to decouple I/O flow control, -// where empty => no produce, full => no consume, -// and depth 1 would ping-pong between the two at half rate -// * (depth <= 256) can be modified -// by changing ''synthesis loop_limit X'' below -// and changing ''addrwidth'' or its log computation -// - 1 <= width -// - Queue storage is in SRL16E, up to depth 16 per LUT per bit-slice, -// plus output register (for fast output) -// - Queue addressing is done by ''addr'' up-down counter -// - Queue fullness is checked by comparator (addr==depth) -// - Queue fullness is pre-computed for next cycle -// - Queue input back-pressure is pre-computed for next cycle -// - Queue output valid (state!=state__empty) is pre-computed for next cycle -// (necessary since SRL data output reg requires non-boolean state) -// - FSM has 3 states (empty, one, more) -// - When empty, continue to emit most recently emitted value (for debugging) -// -// - Queue slots used = / (state==state_empty) ? 
0 -// | (state==state_one) ? 1 -// \ (state==state_more) ? addr+2 -// - Queue slots used <= depth -// - Queue slots remaining = depth - used -// = / (state==state_empty) ? depth -// | (state==state_one) ? depth-1 -// \ (state==state_more) ? depth-2-addr -// -// - Synplify 7.1 / 8.0 -// - Eylon Caspi, 9/11/03, 8/18/04, 3/29/05 - - -`ifdef Q_srl -`else -`define Q_srl - - -module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); - - parameter depth = 16; // - greatest #items in queue (2 <= depth <= 256) - parameter width = 16; // - width of data (i_d, o_d) - - parameter addrwidth = $clog2(depth); - - input clock; - input reset; - - input [width-1:0] i_d; // - input stream data (concat data + eos) - input i_v; // - input stream valid - output i_r; // - input stream ready - wire i_b; // - input stream back-pressure - - output [width-1:0] o_d; // - output stream data (concat data + eos) - output o_v; // - output stream valid - input o_r; // - output stream ready - wire o_b; // - output stream back-pressure - - output [addrwidth:0] count; // - output number of elems in queue - output [addrwidth:0] maxcount; // - maximum observed count since reset - - reg [addrwidth:0] maxcount_reg; // - maximum count seen until now - reg [addrwidth-1:0] addr, addr_, a_; // - SRL16 address - // for data output - reg shift_en_; // - SRL16 shift enable - reg [width-1:0] srl [depth-2:0]; // - SRL16 memory - reg shift_en_o_; // - SRLO shift enable - reg [width-1:0] srlo_, srlo // - SRLO output reg - /* synthesis syn_allow_retiming=0 */ ; - - parameter state_empty = 2'd0; // - state empty : o_v=0 o_d=UNDEFINED - parameter state_one = 2'd1; // - state one : o_v=1 o_d=srlo - parameter state_more = 2'd2; // - state more : o_v=1 o_d=srlo - // #items in srl = addr+2 - - reg [1:0] state, state_; // - state register - - wire addr_full_; // - true iff addr==depth-2 on NEXT cycle - reg addr_full; // - true iff addr==depth-2 - wire addr_zero_; // - true iff addr==0 - wire o_v_reg_; // - true iff state_empty on NEXT cycle - reg o_v_reg // - true iff state_empty - /* synthesis syn_allow_retiming=0 */ ; - wire i_b_reg_; // - true iff !full on NEXT cycle - reg i_b_reg // - true iff !full - /* synthesis syn_allow_retiming=0 */ ; - - assign addr_full_ = (state_==state_more) && (addr_==depth-2); - // - queue full - assign addr_zero_ = (addr==0); // - queue contains 2 (or 1,0) - assign o_v_reg_ = (state_!=state_empty); // - output valid if non-empty - assign i_b_reg_ = addr_full_; // - input bp if full - assign o_d = srlo; // - output data from queue - assign o_v = o_v_reg; // - output valid if non-empty - assign i_b = i_b_reg; // - input bp if full - assign maxcount = maxcount_reg; - - assign i_r = !i_b; - assign o_b = !o_r; - - assign count = (state==state_more ? addr+2 : (state==state_one ? 1 : 0)); - - // - ''always'' block with both FFs and SRL16 does not work, - // since FFs need reset but SRL16 does not - - always @(posedge clock) begin // - seq always: FFs - if (reset) begin - state <= state_empty; - addr <= 0; - addr_full <= 0; - o_v_reg <= 0; - - i_b_reg <= 0; - maxcount_reg <= 0; - - end - else begin - state <= state_; - addr <= addr_; - addr_full <= addr_full_; - o_v_reg <= o_v_reg_; - i_b_reg <= i_b_reg_; - maxcount_reg <= (count > maxcount_reg ? 
count : maxcount_reg); - end - end // always @ (posedge clock) - - always @(posedge clock) begin // - seq always: srlo - // - infer enabled output reg at end of shift chain - // - input first element from i_d, all subsequent elements from SRL16 - if (reset) begin - srlo <= 0; - end - else begin - if (shift_en_o_) begin - srlo <= srlo_; - end - end - end // always @ (posedge clock) - - always @(posedge clock) begin // - seq always: srl - // - infer enabled SRL16E from shifting srl array - // - no reset capability; srl[] contents undefined on reset - if (shift_en_) begin - // synthesis loop_limit 256 - for (a_=depth-2; a_>0; a_=a_-1) begin - srl[a_] = srl[a_-1]; - end - srl[0] <= i_d; - end - end // always @ (posedge clock or negedge reset) - - always @* begin // - combi always - srlo_ <= 'bx; - shift_en_o_ <= 1'bx; - shift_en_ <= 1'bx; - addr_ <= 'bx; - state_ <= 2'bx; - case (state) - - state_empty: begin // - (empty, will not produce) - if (i_v) begin // - empty & i_v => consume - srlo_ <= i_d; - shift_en_o_ <= 1; - shift_en_ <= 1'bx; - addr_ <= 0; - state_ <= state_one; - end - else begin // - empty & !i_v => idle - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 1'bx; - addr_ <= 0; - state_ <= state_empty; - end - end - - state_one: begin // - (contains one) - if (i_v && o_b) begin // - one & i_v & o_b => consume - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 1; - addr_ <= 0; - state_ <= state_more; - end - else if (i_v && !o_b) begin // - one & i_v & !o_b => cons+prod - srlo_ <= i_d; - shift_en_o_ <= 1; - shift_en_ <= 1; - addr_ <= 0; - state_ <= state_one; - end - else if (!i_v && o_b) begin // - one & !i_v & o_b => idle - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 1'bx; - addr_ <= 0; - state_ <= state_one; - end - else if (!i_v && !o_b) begin // - one & !i_v & !o_b => produce - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 1'bx; - addr_ <= 0; - state_ <= state_empty; - end - end // case: state_one - - state_more: begin // - (contains more than one) - if (addr_full || (depth==2)) begin - // - (full, will not consume) - // - (full here if depth==2) - if (o_b) begin // - full & o_b => idle - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 0; - addr_ <= addr; - state_ <= state_more; - end - else begin // - full & !o_b => produce - srlo_ <= srl[addr]; - shift_en_o_ <= 1; - shift_en_ <= 0; -// addr_ <= addr-1; -// state_ <= state_more; - addr_ <= addr_zero_ ? 0 : addr-1; - state_ <= addr_zero_ ? state_one : state_more; - end - end - else begin // - (mid: neither empty nor full) - if (i_v && o_b) begin // - mid & i_v & o_b => consume - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 1; - addr_ <= addr+1; - state_ <= state_more; - end - else if (i_v && !o_b) begin // - mid & i_v & !o_b => cons+prod - srlo_ <= srl[addr]; - shift_en_o_ <= 1; - shift_en_ <= 1; - addr_ <= addr; - state_ <= state_more; - end - else if (!i_v && o_b) begin // - mid & !i_v & o_b => idle - srlo_ <= 'bx; - shift_en_o_ <= 0; - shift_en_ <= 0; - addr_ <= addr; - state_ <= state_more; - end - else if (!i_v && !o_b) begin // - mid & !i_v & !o_b => produce - srlo_ <= srl[addr]; - shift_en_o_ <= 1; - shift_en_ <= 0; - addr_ <= addr_zero_ ? 0 : addr-1; - state_ <= addr_zero_ ? 
state_one : state_more; - end - end // else: !if(addr_full) - end // case: state_more - - default: begin - srlo_ <= 'bx; - shift_en_o_ <= 1'bx; - shift_en_ <= 1'bx; - addr_ <= 'bx; - state_ <= 2'bx; - end // case: default - - endcase // case(state) - end // always @ * - -endmodule // Q_srl - - -`endif // `ifdef Q_srl diff --git a/finn-rtllib/memstream/hdl/memstream.sv b/finn-rtllib/memstream/hdl/memstream.sv new file mode 100644 index 0000000000..9cbef493a3 --- /dev/null +++ b/finn-rtllib/memstream/hdl/memstream.sv @@ -0,0 +1,176 @@ +/** + * Copyright (c) 2023, Xilinx + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * * Neither the name of FINN nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @author Thomas B. Preußer + */ + +module memstream #( + int unsigned DEPTH, + int unsigned WIDTH, + + parameter INIT_FILE = "", + parameter RAM_STYLE = "auto" +)( + input logic clk, + input logic rst, + + // Configuration and readback interface - compatible with ap_memory + input logic config_ce, + input logic config_we, + input logic [31 :0] config_address, + input logic [WIDTH-1:0] config_d0, + + output logic config_rack, + output logic [WIDTH-1:0] config_q0, + + // Continuous output stream + input logic ordy, + output logic ovld, + output logic [WIDTH-1:0] odat +); + + typedef logic [$clog2(DEPTH)-1:0] addr_t; + typedef logic [WIDTH -1:0] data_t; + + uwire en; // Pipeline enable + uwire rollback; // Rollback stream reads if backpressure would block read back + + // Counter with pre-computed last indication for val == DEPTH-1 + typedef struct { + addr_t val; + logic lst; + } ptr_t; + + // Counter history to facilitate pipeline rollback + ptr_t Ptr[3] = '{ + 0: '{ val: 0, lst: DEPTH<2 }, + default: '{ default: 'x } + }; + + //----------------------------------------------------------------------- + // Stage #0: Address & Op + logic Wr1 = 0; // Write + logic Rb1 = 0; // Read back + logic Rs1 = 0; // Read stream + data_t Data1 = 'x; + if(1) begin : blkStage1 + // Increment for wrapping DEPTH-1 back to zero + localparam int unsigned WRAP_INC = 2**$bits(addr_t) - DEPTH + 1; + + uwire ptr_t ptr_eff = rollback? 
Ptr[2] : Ptr[0]; + uwire ptr_t ptr_nxt; + assign ptr_nxt.val = ptr_eff.val + (config_ce? 0 : !ptr_eff.lst? 1 : WRAP_INC); + assign ptr_nxt.lst = + DEPTH < 2? 1 : + config_ce? ptr_eff.lst : + ptr_eff.lst? 0 : + /* else */ ptr_eff.val == DEPTH-2; + + always_ff @(posedge clk) begin + if(rst) Ptr[0] <= '{ val: 0, lst: DEPTH<2 }; + else if(en) Ptr[0] <= ptr_nxt; + end + + // Issue next Memory Operation + always_ff @(posedge clk) begin + if(rst) begin + Wr1 <= 0; + Rb1 <= 0; + Rs1 <= 0; + Ptr[1] <= '{ default : 'x }; + Data1 <= 'x; + end + else if(en) begin + Wr1 <= 0; + Rb1 <= 0; + Rs1 <= 0; + if(config_ce) begin + if(config_we) Wr1 <= 1; + else Rb1 <= 1; + Ptr[1] <= '{ val: config_address, lst: 'x }; + Data1 <= config_d0; + end + else begin + Rs1 <= 1; + Ptr[1] <= ptr_eff; + Data1 <= 'x; + end + end + end + end : blkStage1 + + //----------------------------------------------------------------------- + // Stage #2: Memory Access + logic Rb2 = 0; + logic Rs2 = 0; + data_t Data2 = 'x; + if(1) begin : blkStage2 + (* RAM_STYLE = RAM_STYLE *) + data_t Mem[DEPTH]; + + // Optional Memory Initialization + if(INIT_FILE != "") initial $readmemh(INIT_FILE, Mem); + + // Execute Memory Operation + uwire addr_t addr = Ptr[1].val; + always_ff @(posedge clk) begin + if(en) begin + if(Wr1) Mem[addr] <= Data1; + Data2 <= Mem[addr]; + end + end + + // Copy Output Designation + always_ff @(posedge clk) begin + if(rst) begin + Rb2 <= 0; + Rs2 <= 0; + Ptr[2] <= '{ default: 'x }; + end + else if(en) begin + Rb2 <= Rb1; + Rs2 <= Rs1 && !rollback; + Ptr[2] <= Ptr[1]; + end + end + end : blkStage2 + + //----------------------------------------------------------------------- + // Output Interfaces + assign config_rack = Rb2; + assign config_q0 = Data2; + + assign ovld = Rs2; + assign odat = Data2; + + uwire backpressure = Rs2 && !ordy; + assign rollback = backpressure && (Rb1 || config_ce); + assign en = !backpressure || Rb1 || config_ce; + +endmodule : memstream diff --git a/finn-rtllib/memstream/hdl/memstream.v b/finn-rtllib/memstream/hdl/memstream.v deleted file mode 100644 index 2cd955f8d1..0000000000 --- a/finn-rtllib/memstream/hdl/memstream.v +++ /dev/null @@ -1,327 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -module memstream -#( -//parameters to enable/disable axi-mm, set number of streams, set readmemh for memory, set per-stream offsets in memory, set per-stream widths - parameter CONFIG_EN = 1, - parameter NSTREAMS = 6,//1 up to 6 - - parameter MEM_DEPTH = 13824, - parameter MEM_WIDTH = 32, - parameter MEM_INIT = "./", - parameter RAM_STYLE = "auto", - - //widths per stream - parameter STRM0_WIDTH = 32, - parameter STRM1_WIDTH = 32, - parameter STRM2_WIDTH = 32, - parameter STRM3_WIDTH = 32, - parameter STRM4_WIDTH = 32, - parameter STRM5_WIDTH = 32, - - //depths per stream - parameter STRM0_DEPTH = 2304, - parameter STRM1_DEPTH = 2304, - parameter STRM2_DEPTH = 2304, - parameter STRM3_DEPTH = 2304, - parameter STRM4_DEPTH = 2304, - parameter STRM5_DEPTH = 2304, - - //offsets for each stream - parameter STRM0_OFFSET = 0, - parameter STRM1_OFFSET = 2304, - parameter STRM2_OFFSET = 4608, - parameter STRM3_OFFSET = 6912, - parameter STRM4_OFFSET = 9216, - parameter STRM5_OFFSET = 11520, - - parameter AXILITE_ADDR_WIDTH = 2+$clog2(MEM_DEPTH*(1<<$clog2((MEM_WIDTH+31)/32))) -) - -( - input aclk, - input aresetn, - - output awready, - input awvalid, - input [AXILITE_ADDR_WIDTH-1:0] awaddr, - input [2:0] awprot, - //write data - output wready, - input wvalid, - input [31:0] wdata, - input [3:0] wstrb, - //burst response - input bready, - output bvalid, - output [1:0] bresp, - - //Read channels - //read address - output arready, - input arvalid, - input [AXILITE_ADDR_WIDTH-1:0] araddr, - input [2:0] arprot, - //read data - input rready, - output rvalid, - output [1:0] rresp, - output [31:0] rdata, - - //multiple output AXI Streams, TDATA width rounded to multiple of 8 bits - input m_axis_0_afull, - input m_axis_0_tready, - output m_axis_0_tvalid, - output [((STRM0_WIDTH+7)/8)*8-1:0] m_axis_0_tdata, - - input m_axis_1_afull, - input m_axis_1_tready, - output m_axis_1_tvalid, - output [((STRM1_WIDTH+7)/8)*8-1:0] m_axis_1_tdata, - - input m_axis_2_afull, - input m_axis_2_tready, - output m_axis_2_tvalid, - output [((STRM2_WIDTH+7)/8)*8-1:0] m_axis_2_tdata, - - input m_axis_3_afull, - input m_axis_3_tready, - output m_axis_3_tvalid, - output [((STRM3_WIDTH+7)/8)*8-1:0] m_axis_3_tdata, - - input m_axis_4_afull, - input m_axis_4_tready, - output m_axis_4_tvalid, - output [((STRM4_WIDTH+7)/8)*8-1:0] m_axis_4_tdata, - - input m_axis_5_afull, - input m_axis_5_tready, - output m_axis_5_tvalid, - output [((STRM5_WIDTH+7)/8)*8-1:0] m_axis_5_tdata - - -); - -wire [31:0] config_address; -wire config_ce; -wire config_we; -wire config_rack; -wire [MEM_WIDTH-1:0] config_d0; -wire [MEM_WIDTH-1:0] config_q0; - -generate -if(NSTREAMS <= 2) begin: singleblock - - -memstream_singleblock -#( - .CONFIG_EN(CONFIG_EN), - .NSTREAMS(NSTREAMS), - .MEM_DEPTH(MEM_DEPTH), - .MEM_WIDTH(MEM_WIDTH), - .MEM_INIT(MEM_INIT), - .RAM_STYLE(RAM_STYLE), - - //widths per stream - .STRM0_WIDTH(STRM0_WIDTH), - .STRM1_WIDTH(STRM1_WIDTH), - - //depths per stream - .STRM0_DEPTH(STRM0_DEPTH), - .STRM1_DEPTH(STRM1_DEPTH), - - 
//offsets for each stream - .STRM0_OFFSET(STRM0_OFFSET), - .STRM1_OFFSET(STRM1_OFFSET) -) -mem -( - .aclk(aclk), - .aresetn(aresetn), - - .config_address(config_address), - .config_ce(config_ce), - .config_we(config_we), - .config_d0(config_d0), - .config_q0(config_q0), - .config_rack(config_rack), - - .m_axis_0_tready(m_axis_0_tready), - .m_axis_0_tvalid(m_axis_0_tvalid), - .m_axis_0_tdata(m_axis_0_tdata), - - .m_axis_1_tready(m_axis_1_tready), - .m_axis_1_tvalid(m_axis_1_tvalid), - .m_axis_1_tdata(m_axis_1_tdata) -); - -assign m_axis_2_tvalid = 0; -assign m_axis_2_tdata = 0; -assign m_axis_3_tvalid = 0; -assign m_axis_3_tdata = 0; -assign m_axis_4_tvalid = 0; -assign m_axis_4_tdata = 0; -assign m_axis_5_tvalid = 0; -assign m_axis_5_tdata = 0; - -end else begin: multiblock - - -memstream_multiblock -#( - .CONFIG_EN(CONFIG_EN), - .NSTREAMS(NSTREAMS), - .MEM_DEPTH(MEM_DEPTH), - .MEM_WIDTH(MEM_WIDTH), - .MEM_INIT(MEM_INIT), - .RAM_STYLE(RAM_STYLE), - - //widths per stream - .STRM0_WIDTH(STRM0_WIDTH), - .STRM1_WIDTH(STRM1_WIDTH), - .STRM2_WIDTH(STRM2_WIDTH), - .STRM3_WIDTH(STRM3_WIDTH), - .STRM4_WIDTH(STRM4_WIDTH), - .STRM5_WIDTH(STRM5_WIDTH), - - //depths per stream - .STRM0_DEPTH(STRM0_DEPTH), - .STRM1_DEPTH(STRM1_DEPTH), - .STRM2_DEPTH(STRM2_DEPTH), - .STRM3_DEPTH(STRM3_DEPTH), - .STRM4_DEPTH(STRM4_DEPTH), - .STRM5_DEPTH(STRM5_DEPTH), - - //offsets for each stream - .STRM0_OFFSET(STRM0_OFFSET), - .STRM1_OFFSET(STRM1_OFFSET), - .STRM2_OFFSET(STRM2_OFFSET), - .STRM3_OFFSET(STRM3_OFFSET), - .STRM4_OFFSET(STRM4_OFFSET), - .STRM5_OFFSET(STRM5_OFFSET) -) -mem -( - .aclk(aclk), - .aresetn(aresetn), - - .config_address(config_address), - .config_ce(config_ce), - .config_we(config_we), - .config_d0(config_d0), - .config_q0(config_q0), - - .m_axis_0_afull(m_axis_0_afull), - .m_axis_0_tready(m_axis_0_tready), - .m_axis_0_tvalid(m_axis_0_tvalid), - .m_axis_0_tdata(m_axis_0_tdata), - - .m_axis_1_afull(m_axis_1_afull), - .m_axis_1_tready(m_axis_1_tready), - .m_axis_1_tvalid(m_axis_1_tvalid), - .m_axis_1_tdata(m_axis_1_tdata), - - .m_axis_2_afull(m_axis_2_afull), - .m_axis_2_tready(m_axis_2_tready), - .m_axis_2_tvalid(m_axis_2_tvalid), - .m_axis_2_tdata(m_axis_2_tdata), - - .m_axis_3_afull(m_axis_3_afull), - .m_axis_3_tready(m_axis_3_tready), - .m_axis_3_tvalid(m_axis_3_tvalid), - .m_axis_3_tdata(m_axis_3_tdata), - - .m_axis_4_afull(m_axis_4_afull), - .m_axis_4_tready(m_axis_4_tready), - .m_axis_4_tvalid(m_axis_4_tvalid), - .m_axis_4_tdata(m_axis_4_tdata), - - .m_axis_5_afull(m_axis_5_afull), - .m_axis_5_tready(m_axis_5_tready), - .m_axis_5_tvalid(m_axis_5_tvalid), - .m_axis_5_tdata(m_axis_5_tdata) - -); - - -end -endgenerate - -axi4lite_if -#( - .ADDR_WIDTH(AXILITE_ADDR_WIDTH), - .DATA_WIDTH(32), - .IP_DATA_WIDTH(MEM_WIDTH) -) -config_if -( - //system signals - .aclk(aclk), - .aresetn(aresetn), - - //Write channels - //write address - .awready(awready), - .awvalid(awvalid), - .awaddr(awaddr), - .awprot(awprot), - //write data - .wready(wready), - .wvalid(wvalid), - .wdata(wdata), - .wstrb(wstrb), - //burst response - .bready(bready), - .bvalid(bvalid), - .bresp(bresp), - - //Read channels - //read address - .arready(arready), - .arvalid(arvalid), - .araddr(araddr), - .arprot(arprot), - //read data - .rready(rready), - .rvalid(rvalid), - .rresp(rresp), - .rdata(rdata), - - //IP-side interface - .ip_en(config_ce), - .ip_wen(config_we), - .ip_addr(config_address), - .ip_wdata(config_d0), - .ip_rack(config_rack), - .ip_rdata(config_q0) -); - -endmodule diff --git a/finn-rtllib/memstream/hdl/memstream_axi.sv 
b/finn-rtllib/memstream/hdl/memstream_axi.sv
new file mode 100644
index 0000000000..620d9ec1de
--- /dev/null
+++ b/finn-rtllib/memstream/hdl/memstream_axi.sv
@@ -0,0 +1,136 @@
+/**
+ * Copyright (c) 2023, Xilinx
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice, this
+ *   list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ *   this list of conditions and the following disclaimer in the documentation
+ *   and/or other materials provided with the distribution.
+ *
+ * * Neither the name of FINN nor the names of its
+ *   contributors may be used to endorse or promote products derived from
+ *   this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * @author Thomas B. Preußer
+ */
+
+module memstream_axi #(
+  int unsigned  DEPTH,
+  int unsigned  WIDTH,
+
+  parameter  INIT_FILE = "",
+  parameter  RAM_STYLE = "auto",
+
+  localparam int unsigned  AXILITE_ADDR_WIDTH = $clog2(DEPTH * (2**$clog2((WIDTH+31)/32))) + 2
+)(
+  // Global Control
+  input  logic  clk,
+  input  logic  rst,
+
+  // AXI-lite Write
+  output logic  awready,
+  input  logic  awvalid,
+  input  logic [2:0]  awprot,
+  input  logic [AXILITE_ADDR_WIDTH-1:0]  awaddr,
+
+  output logic  wready,
+  input  logic  wvalid,
+  input  logic [31:0]  wdata,
+  input  logic [ 3:0]  wstrb,
+
+  input  logic  bready,
+  output logic  bvalid,
+  output logic [1:0]  bresp,
+
+  // AXI-lite Read
+  output logic  arready,
+  input  logic  arvalid,
+  input  logic [2:0]  arprot,
+  input  logic [AXILITE_ADDR_WIDTH-1:0]  araddr,
+
+  input  logic  rready,
+  output logic  rvalid,
+  output logic [ 1:0]  rresp,
+  output logic [31:0]  rdata,
+
+  // Continuous output stream
+  input  logic  m_axis_0_tready,
+  output logic  m_axis_0_tvalid,
+  output logic [((WIDTH+7)/8)*8-1:0]  m_axis_0_tdata
+);
+
+  //-----------------------------------------------------------------------
+  // AXI-lite to ap_memory Adapter
+  uwire [31:0]  config_address;
+  uwire  config_ce;
+  uwire  config_we;
+  uwire  config_rack;
+  uwire [WIDTH-1:0]  config_d0;
+  uwire [WIDTH-1:0]  config_q0;
+  axi4lite_if #(
+    .ADDR_WIDTH(AXILITE_ADDR_WIDTH),
+    .DATA_WIDTH(32),
+    .IP_DATA_WIDTH(WIDTH)
+  ) config_if (
+    .aclk(clk), .aresetn(!rst),
+
+    // Write Channels
+    .awready, .awvalid, .awaddr, .awprot,
+    .wready,  .wvalid,  .wdata,  .wstrb,
+    .bready,  .bvalid,  .bresp,
+
+    // Read Channels
+    .arready, .arvalid, .araddr, .arprot,
+    .rready,  .rvalid,  .rresp,  .rdata,
+
+    // IP-side Interface
+    .ip_en(config_ce),
+    .ip_wen(config_we),
+    .ip_addr(config_address),
+    .ip_wdata(config_d0),
+    .ip_rack(config_rack),
+    .ip_rdata(config_q0)
+  );
+
+  //-----------------------------------------------------------------------
+  // Streaming Memory Backend
+  memstream #(
+    .DEPTH(DEPTH),
+    .WIDTH(WIDTH),
+    .INIT_FILE(INIT_FILE),
+    .RAM_STYLE(RAM_STYLE)
+  ) mem (
+    .clk, .rst,
+
+    .config_address,
+    .config_ce,
+    .config_we,
+    .config_d0,
+    .config_q0,
+    .config_rack,
+
+    .ordy(m_axis_0_tready),
+    .ovld(m_axis_0_tvalid),
+    .odat(m_axis_0_tdata[WIDTH-1:0])
+  );
+  if($bits(m_axis_0_tdata) > WIDTH) begin
+    assign m_axis_0_tdata[$left(m_axis_0_tdata):WIDTH] = '0;
+  end
+
+endmodule : memstream_axi
diff --git a/finn-rtllib/memstream/hdl/memstream_multiblock.v b/finn-rtllib/memstream/hdl/memstream_multiblock.v
deleted file mode 100644
index 4e6167132d..0000000000
--- a/finn-rtllib/memstream/hdl/memstream_multiblock.v
+++ /dev/null
@@ -1,474 +0,0 @@
-/*
- Copyright (c) 2020, Xilinx
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name of FINN nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/ - -module memstream_multiblock -#( -//parameters to enable/disable axi-mm, set number of streams, set readmemh for memory, set per-stream offsets in memory, set per-stream widths - parameter CONFIG_EN = 1, - parameter NSTREAMS = 6,//1 up to 6 - - parameter MEM_DEPTH = 13824, - parameter MEM_WIDTH = 32, - parameter MEM_INIT = "./", - parameter RAM_STYLE = "auto", - - //widths per stream - parameter STRM0_WIDTH = 32, - parameter STRM1_WIDTH = 32, - parameter STRM2_WIDTH = 32, - parameter STRM3_WIDTH = 32, - parameter STRM4_WIDTH = 32, - parameter STRM5_WIDTH = 32, - - //depths per stream - parameter STRM0_DEPTH = 2304, - parameter STRM1_DEPTH = 2304, - parameter STRM2_DEPTH = 2304, - parameter STRM3_DEPTH = 2304, - parameter STRM4_DEPTH = 2304, - parameter STRM5_DEPTH = 2304, - - //offsets for each stream - parameter STRM0_OFFSET = 0, - parameter STRM1_OFFSET = 2304, - parameter STRM2_OFFSET = 4608, - parameter STRM3_OFFSET = 6912, - parameter STRM4_OFFSET = 9216, - parameter STRM5_OFFSET = 11520 -) - -( - input aclk, - input aresetn, - - //optional configuration interface compatible with ap_memory - input [31:0] config_address, - input config_ce, - input config_we, - input [31:0] config_d0, - output [31:0] config_q0, - output config_rack, - - //multiple output AXI Streams, TDATA width rounded to multiple of 8 bits - input m_axis_0_afull, - input m_axis_0_tready, - output m_axis_0_tvalid, - output [((STRM0_WIDTH+7)/8)*8-1:0] m_axis_0_tdata, - - input m_axis_1_afull, - input m_axis_1_tready, - output m_axis_1_tvalid, - output [((STRM1_WIDTH+7)/8)*8-1:0] m_axis_1_tdata, - - input m_axis_2_afull, - input m_axis_2_tready, - output m_axis_2_tvalid, - output [((STRM2_WIDTH+7)/8)*8-1:0] m_axis_2_tdata, - - input m_axis_3_afull, - input m_axis_3_tready, - output m_axis_3_tvalid, - output [((STRM3_WIDTH+7)/8)*8-1:0] m_axis_3_tdata, - - input m_axis_4_afull, - input m_axis_4_tready, - output m_axis_4_tvalid, - output [((STRM4_WIDTH+7)/8)*8-1:0] m_axis_4_tdata, - - input m_axis_5_afull, - input m_axis_5_tready, - output m_axis_5_tvalid, - output [((STRM5_WIDTH+7)/8)*8-1:0] m_axis_5_tdata - - -); - -//calculate number of RAMB18 blocks we need depth-wise -localparam NMEMBLOCKS = (MEM_DEPTH+1023) / 1024; //ceil(MEM_DEPTH/1024) - -//calculate width of address for each block -localparam BLOCKADRWIDTH = NMEMBLOCKS > 1 ? 
10 : $clog2(MEM_DEPTH); - -//determine whether a stream needs to multiplex between memory blocks -localparam STRM0_MUX = ((STRM0_OFFSET/1024) != ((STRM0_OFFSET+STRM0_DEPTH)/1024)); -localparam STRM1_MUX = ((STRM1_OFFSET/1024) != ((STRM1_OFFSET+STRM1_DEPTH)/1024)); -localparam STRM2_MUX = ((STRM2_OFFSET/1024) != ((STRM2_OFFSET+STRM2_DEPTH)/1024)); -localparam STRM3_MUX = ((STRM3_OFFSET/1024) != ((STRM3_OFFSET+STRM3_DEPTH)/1024)); -localparam STRM4_MUX = ((STRM4_OFFSET/1024) != ((STRM4_OFFSET+STRM4_DEPTH)/1024)); -localparam STRM5_MUX = ((STRM5_OFFSET/1024) != ((STRM5_OFFSET+STRM5_DEPTH)/1024)); - -//determine what the base block of each stream is -localparam STRM0_BLOCK = (STRM0_OFFSET/1024); -localparam STRM1_BLOCK = (STRM1_OFFSET/1024); -localparam STRM2_BLOCK = (STRM2_OFFSET/1024); -localparam STRM3_BLOCK = (STRM3_OFFSET/1024); -localparam STRM4_BLOCK = (STRM4_OFFSET/1024); -localparam STRM5_BLOCK = (STRM5_OFFSET/1024); - -//determine what the end block of each stream is -localparam STRM0_END_BLOCK = ((STRM0_OFFSET+STRM0_DEPTH-1)/1024); -localparam STRM1_END_BLOCK = ((STRM1_OFFSET+STRM1_DEPTH-1)/1024); -localparam STRM2_END_BLOCK = ((STRM2_OFFSET+STRM2_DEPTH-1)/1024); -localparam STRM3_END_BLOCK = ((STRM3_OFFSET+STRM3_DEPTH-1)/1024); -localparam STRM4_END_BLOCK = ((STRM4_OFFSET+STRM4_DEPTH-1)/1024); -localparam STRM5_END_BLOCK = ((STRM5_OFFSET+STRM5_DEPTH-1)/1024); - -//determine the number of blocks spanned by each stream -localparam STRM0_NBLOCKS = STRM0_END_BLOCK - STRM0_BLOCK + 1; -localparam STRM1_NBLOCKS = STRM1_END_BLOCK - STRM1_BLOCK + 1; -localparam STRM2_NBLOCKS = STRM2_END_BLOCK - STRM2_BLOCK + 1; -localparam STRM3_NBLOCKS = STRM3_END_BLOCK - STRM3_BLOCK + 1; -localparam STRM4_NBLOCKS = STRM4_END_BLOCK - STRM4_BLOCK + 1; -localparam STRM5_NBLOCKS = STRM5_END_BLOCK - STRM5_BLOCK + 1; - -//TODO: check that memory width is equal to the widest stream -//TODO: check that the stream depths and offsets make sense, and that the memory depth is sufficient (or calculate depth here?) -initial begin - if((NSTREAMS < 1) | (NSTREAMS > 6)) begin - $display("Invalid setting for NSTREAMS, please set in range [1,6]"); - $finish(); - end -end - -//invert reset -wire rst; -assign rst = ~aresetn; - -//WARNING: pipeline depth is larger than the number of streams per port so we have in-flight writes that may see not-ready when they get executed -//solution: use prog-full to make sure we have an equal number of free slots in the stream to the read pipeline depth - -reg [$clog2(MEM_DEPTH)-1:0] strm0_addr = STRM0_OFFSET; -reg [$clog2(MEM_DEPTH)-1:0] strm1_addr = STRM1_OFFSET; -reg [$clog2(MEM_DEPTH)-1:0] strm2_addr = STRM2_OFFSET; -reg [$clog2(MEM_DEPTH)-1:0] strm3_addr = STRM3_OFFSET; -reg [$clog2(MEM_DEPTH)-1:0] strm4_addr = STRM4_OFFSET; -reg [$clog2(MEM_DEPTH)-1:0] strm5_addr = STRM5_OFFSET; - -reg strm0_incr_en; -reg strm1_incr_en; -reg strm2_incr_en; -reg strm3_incr_en; -reg strm4_incr_en; -reg strm5_incr_en; - -wire strm0_rst; -wire strm1_rst; -wire strm2_rst; -wire strm3_rst; -wire strm4_rst; -wire strm5_rst; - -reg strm0_ready; -reg strm1_ready; -reg strm2_ready; -reg strm3_ready; -reg strm4_ready; -reg strm5_ready; - -//arbiter: work on one stream at a time -//multiplex each port between (up to) half of the streams -reg [1:0] current_stream_porta = 0; -reg [1:0] current_stream_portb = 0; - -always @(posedge aclk) begin - if(rst) - current_stream_porta <= 0; - else case(current_stream_porta) - 0: current_stream_porta <= strm2_ready ? 1 : strm4_ready ? 
2 : 0; - 1: current_stream_porta <= strm4_ready ? 2 : strm0_ready ? 0 : 1; - 2: current_stream_porta <= strm0_ready ? 0 : strm2_ready ? 1 : 2; - endcase - if(rst) - current_stream_portb <= 0; - else case(current_stream_portb) - 0: current_stream_portb <= strm3_ready ? 1 : strm5_ready ? 2 : 0; - 1: current_stream_portb <= strm5_ready ? 2 : strm1_ready ? 0 : 1; - 2: current_stream_portb <= strm1_ready ? 0 : strm3_ready ? 1 : 2; - endcase -end - -always @(posedge aclk) begin - if(rst) begin - strm0_incr_en <= 0; - strm1_incr_en <= 0; - strm2_incr_en <= 0; - strm3_incr_en <= 0; - strm4_incr_en <= 0; - strm5_incr_en <= 0; - end else begin - strm0_incr_en <= (current_stream_porta == 0) & strm0_ready; - strm1_incr_en <= (current_stream_portb == 0) & strm1_ready; - strm2_incr_en <= (current_stream_porta == 1) & strm2_ready; - strm3_incr_en <= (current_stream_portb == 1) & strm3_ready; - strm4_incr_en <= (current_stream_porta == 2) & strm4_ready; - strm5_incr_en <= (current_stream_portb == 2) & strm5_ready; - end -end - -assign strm0_rst = strm0_incr_en & (strm0_addr == (STRM0_OFFSET + STRM0_DEPTH-1)); -assign strm1_rst = strm1_incr_en & (strm1_addr == (STRM1_OFFSET + STRM1_DEPTH-1)); -assign strm2_rst = strm2_incr_en & (strm2_addr == (STRM2_OFFSET + STRM2_DEPTH-1)); -assign strm3_rst = strm3_incr_en & (strm3_addr == (STRM3_OFFSET + STRM3_DEPTH-1)); -assign strm4_rst = strm4_incr_en & (strm4_addr == (STRM4_OFFSET + STRM4_DEPTH-1)); -assign strm5_rst = strm5_incr_en & (strm5_addr == (STRM5_OFFSET + STRM5_DEPTH-1)); - -always @(posedge aclk) begin - strm0_ready <= ~m_axis_0_afull; - strm1_ready <= ~m_axis_1_afull & (NSTREAMS >= 2); - strm2_ready <= ~m_axis_2_afull & (NSTREAMS >= 3); - strm3_ready <= ~m_axis_3_afull & (NSTREAMS >= 4); - strm4_ready <= ~m_axis_4_afull & (NSTREAMS >= 5); - strm5_ready <= ~m_axis_5_afull & (NSTREAMS >= 6); -end - -//one address counter per stream; more LUTs but keeps routing short and local -always @(posedge aclk) begin - if(strm0_rst | rst) - strm0_addr <= STRM0_OFFSET; - else if(strm0_incr_en) - strm0_addr <= strm0_addr + 1; - if(strm1_rst | rst) - strm1_addr <= STRM1_OFFSET; - else if(strm1_incr_en) - strm1_addr <= strm1_addr + 1; - if(strm2_rst | rst) - strm2_addr <= STRM2_OFFSET; - else if(strm2_incr_en) - strm2_addr <= strm2_addr + 1; - if(strm3_rst | rst) - strm3_addr <= STRM3_OFFSET; - else if(strm3_incr_en) - strm3_addr <= strm3_addr + 1; - if(strm4_rst | rst) - strm4_addr <= STRM4_OFFSET; - else if(strm4_incr_en) - strm4_addr <= strm4_addr + 1; - if(strm5_rst | rst) - strm5_addr <= STRM5_OFFSET; - else if(strm5_incr_en) - strm5_addr <= strm5_addr + 1; -end - -reg [$clog2(MEM_DEPTH)-1:0] addra; -wire [MEM_WIDTH*NMEMBLOCKS-1:0] rdqa; - -reg [$clog2(MEM_DEPTH)-1:0] addrb; -wire [MEM_WIDTH*NMEMBLOCKS-1:0] rdqb; - -wire [NMEMBLOCKS-1:0] we; - -reg [1:0] addr_select_porta; -reg [1:0] addr_select_portb; - -//multiplex addresses of various streams into address ports of memory -always @(posedge aclk) begin - addr_select_porta <= current_stream_porta; - case(addr_select_porta) - 0: addra <= strm0_addr; - 1: addra <= strm2_addr; - 2: addra <= strm4_addr; - endcase - addr_select_portb <= current_stream_portb; - case(addr_select_portb) - 0: addrb <= strm1_addr; - 1: addrb <= strm3_addr; - 2: addrb <= strm5_addr; - endcase -end - -genvar g; -generate for(g=0; g 1) begin: multiblock - -wire [MEM_WIDTH-1:0] rdqmux[5:0]; - -reg [$clog2(MEM_DEPTH)-BLOCKADRWIDTH-1:0] rdblocka[2:0]; -reg [$clog2(MEM_DEPTH)-BLOCKADRWIDTH-1:0] rdblockb[2:0]; - -always @(posedge aclk) begin - 
rdblocka[0] <= addra[$clog2(MEM_DEPTH)-1:BLOCKADRWIDTH]; - rdblockb[0] <= addrb[$clog2(MEM_DEPTH)-1:BLOCKADRWIDTH]; - for(i=0; i<2; i=i+1) begin - rdblocka[i+1] <= rdblocka[i]; - rdblockb[i+1] <= rdblockb[i]; - end -end - -if(NSTREAMS >= 1) begin: en_strm0 - if(STRM0_MUX == 1) begin: mux0 - mux #(STRM0_NBLOCKS, MEM_WIDTH) m(rdqa[(STRM0_BLOCK+STRM0_NBLOCKS)*MEM_WIDTH-1:STRM0_BLOCK*MEM_WIDTH],rdqmux[0],rdblocka[1] - STRM0_BLOCK); - end else begin: nomux0 - assign rdqmux[0] = rdqa[(STRM0_BLOCK+1)*MEM_WIDTH-1:STRM0_BLOCK*MEM_WIDTH]; - end - assign m_axis_0_tdata = rdqmux[0][STRM0_WIDTH-1:0]; -end - -if(NSTREAMS >= 2) begin: en_strm1 - if(STRM1_MUX == 1) begin: mux1 - mux #(STRM1_NBLOCKS, MEM_WIDTH) m(rdqb[(STRM1_BLOCK+STRM1_NBLOCKS)*MEM_WIDTH-1:STRM1_BLOCK*MEM_WIDTH],rdqmux[1],rdblockb[1] - STRM1_BLOCK); - end else begin: nomux1 - assign rdqmux[1] = rdqb[(STRM1_BLOCK+1)*MEM_WIDTH-1:STRM1_BLOCK*MEM_WIDTH]; - end - assign m_axis_1_tdata = rdqmux[1][STRM1_WIDTH-1:0]; -end - -if(NSTREAMS >= 3) begin: en_strm2 - if(STRM2_MUX == 1) begin: mux2 - mux #(STRM2_NBLOCKS, MEM_WIDTH) m(rdqa[(STRM2_BLOCK+STRM2_NBLOCKS)*MEM_WIDTH-1:STRM2_BLOCK*MEM_WIDTH],rdqmux[2],rdblocka[1] - STRM2_BLOCK); - end else begin: nomux2 - assign rdqmux[2] = rdqa[(STRM2_BLOCK+1)*MEM_WIDTH-1:STRM2_BLOCK*MEM_WIDTH]; - end - assign m_axis_2_tdata = rdqmux[2][STRM2_WIDTH-1:0]; -end - -if(NSTREAMS >= 4) begin: en_strm3 - if(STRM3_MUX == 1) begin: mux3 - mux #(STRM3_NBLOCKS, MEM_WIDTH) m(rdqb[(STRM3_BLOCK+STRM3_NBLOCKS)*MEM_WIDTH-1:STRM3_BLOCK*MEM_WIDTH],rdqmux[3],rdblockb[1] - STRM3_BLOCK); - end else begin: nomux3 - assign rdqmux[3] = rdqb[(STRM3_BLOCK+1)*MEM_WIDTH-1:STRM3_BLOCK*MEM_WIDTH]; - end - assign m_axis_3_tdata = rdqmux[3][STRM3_WIDTH-1:0]; -end - -if(NSTREAMS >= 5) begin: en_strm4 - if(STRM4_MUX == 1) begin: mux4 - mux #(STRM4_NBLOCKS, MEM_WIDTH) m(rdqa[(STRM4_BLOCK+STRM4_NBLOCKS)*MEM_WIDTH-1:STRM4_BLOCK*MEM_WIDTH],rdqmux[4],rdblocka[1] - STRM4_BLOCK); - end else begin: nomux4 - assign rdqmux[4] = rdqa[(STRM4_BLOCK+1)*MEM_WIDTH-1:STRM4_BLOCK*MEM_WIDTH]; - end - assign m_axis_4_tdata = rdqmux[4][STRM4_WIDTH-1:0]; -end - -if(NSTREAMS >= 6) begin: en_strm5 - if(STRM5_MUX == 1) begin: mux5 - mux #(STRM5_NBLOCKS, MEM_WIDTH) m(rdqb[(STRM5_BLOCK+STRM5_NBLOCKS)*MEM_WIDTH-1:STRM5_BLOCK*MEM_WIDTH],rdqmux[5],rdblockb[1] - STRM5_BLOCK); - end else begin: nomux5 - assign rdqmux[5] = rdqb[(STRM5_BLOCK+1)*MEM_WIDTH-1:STRM5_BLOCK*MEM_WIDTH]; - end - assign m_axis_5_tdata = rdqmux[5][STRM5_WIDTH-1:0]; -end - -end else begin: singleblock - -if(NSTREAMS >= 1) begin: en_strm0_direct - assign m_axis_0_tdata = rdqa[STRM0_WIDTH-1:0]; -end -if(NSTREAMS >= 2) begin: en_strm1_direct - assign m_axis_1_tdata = rdqb[STRM1_WIDTH-1:0]; -end -if(NSTREAMS >= 3) begin: en_strm2_direct - assign m_axis_2_tdata = rdqa[STRM2_WIDTH-1:0]; -end -if(NSTREAMS >= 4) begin: en_strm3_direct - assign m_axis_3_tdata = rdqb[STRM3_WIDTH-1:0]; -end -if(NSTREAMS >= 5) begin: en_strm4_direct - assign m_axis_4_tdata = rdqa[STRM4_WIDTH-1:0]; -end -if(NSTREAMS >= 6) begin: en_strm5_direct - assign m_axis_5_tdata = rdqb[STRM5_WIDTH-1:0]; -end - -end -endgenerate - -//output to AXI Streams -reg tvalid_pipe0[2:0]; -reg tvalid_pipe1[2:0]; -reg tvalid_pipe2[2:0]; -reg tvalid_pipe3[2:0]; -reg tvalid_pipe4[2:0]; -reg tvalid_pipe5[2:0]; - -assign m_axis_0_tvalid = tvalid_pipe0[2]; -assign m_axis_1_tvalid = tvalid_pipe1[2]; -assign m_axis_2_tvalid = tvalid_pipe2[2]; -assign m_axis_3_tvalid = tvalid_pipe3[2]; -assign m_axis_4_tvalid = tvalid_pipe4[2]; -assign m_axis_5_tvalid = 
tvalid_pipe5[2]; - - -always @(posedge aclk) begin - tvalid_pipe0[0] <= strm0_incr_en; - tvalid_pipe1[0] <= strm1_incr_en; - tvalid_pipe2[0] <= strm2_incr_en; - tvalid_pipe3[0] <= strm3_incr_en; - tvalid_pipe4[0] <= strm4_incr_en; - tvalid_pipe5[0] <= strm5_incr_en; - for(i=0; i<2; i=i+1) begin: srl - tvalid_pipe0[i+1] <= tvalid_pipe0[i]; - tvalid_pipe1[i+1] <= tvalid_pipe1[i]; - tvalid_pipe2[i+1] <= tvalid_pipe2[i]; - tvalid_pipe3[i+1] <= tvalid_pipe3[i]; - tvalid_pipe4[i+1] <= tvalid_pipe4[i]; - tvalid_pipe5[i+1] <= tvalid_pipe5[i]; - end -end - -//dummy read, for now -assign config_q0 = 0; -assign config_rack = config_ce & ~config_we; - -endmodule diff --git a/finn-rtllib/memstream/hdl/memstream_singleblock.v b/finn-rtllib/memstream/hdl/memstream_singleblock.v deleted file mode 100644 index c9b8770aaa..0000000000 --- a/finn-rtllib/memstream/hdl/memstream_singleblock.v +++ /dev/null @@ -1,246 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -/* - Implements a lightweight streamer for up to 2 streams in a single block of memory -*/ - -module memstream_singleblock -#( - parameter CONFIG_EN = 1, - parameter NSTREAMS = 2,//1 up to 2 - - parameter MEM_DEPTH = 512, - parameter MEM_WIDTH = 32, - parameter MEM_INIT = "./", - parameter RAM_STYLE = "auto", - - //widths per stream - parameter STRM0_WIDTH = 32, - parameter STRM1_WIDTH = 32, - - //depths per stream - parameter STRM0_DEPTH = 256, - parameter STRM1_DEPTH = 256, - - //offsets for each stream - parameter STRM0_OFFSET = 0, - parameter STRM1_OFFSET = 256 -) - -( - input aclk, - input aresetn, - - //optional configuration interface compatible with ap_memory - input [31:0] config_address, - input config_ce, - input config_we, - input [MEM_WIDTH-1:0] config_d0, - output [MEM_WIDTH-1:0] config_q0, - output config_rack, - - //multiple output AXI Streams, TDATA width rounded to multiple of 8 bits - input m_axis_0_tready, - output m_axis_0_tvalid, - output [((STRM0_WIDTH+7)/8)*8-1:0] m_axis_0_tdata, - - input m_axis_1_tready, - output m_axis_1_tvalid, - output [((STRM1_WIDTH+7)/8)*8-1:0] m_axis_1_tdata - -); - - -//TODO: check that memory width is equal to the widest stream -//TODO: check that the stream depths and offsets make sense, and that the memory depth is sufficient (or calculate depth here?) -initial begin - if((NSTREAMS < 1) | (NSTREAMS > 2)) begin - $display("Invalid setting for NSTREAMS, please set in range [1,2]"); - $finish(); - end -end - -//invert reset -wire rst; -assign rst = ~aresetn; - -wire strm0_incr_en; -wire strm1_incr_en; - -assign strm0_incr_en = m_axis_0_tready | ~m_axis_0_tvalid; -assign strm1_incr_en = m_axis_1_tready | ~m_axis_1_tvalid; - -reg rack_shift[1:0]; - -generate -if(MEM_DEPTH > 1) begin: use_ram - -//calculate width of memory address, with a minimum of 1 bit -localparam BLOCKADRWIDTH = $clog2(MEM_DEPTH); - -reg [BLOCKADRWIDTH-1:0] strm0_addr = STRM0_OFFSET; -wire strm0_rst; -assign strm0_rst = strm0_incr_en & (strm0_addr == (STRM0_OFFSET + STRM0_DEPTH-1)); - -//one address counter per stream; more LUTs but keeps routing short and local -always @(posedge aclk) begin - if(strm0_rst | rst) - strm0_addr <= STRM0_OFFSET; - else if(strm0_incr_en) - strm0_addr <= strm0_addr + 1; -end - -if(NSTREAMS == 1) begin: sdp - -ramb18_sdp -#( - .ID(0), - .DWIDTH(MEM_WIDTH), - .AWIDTH(BLOCKADRWIDTH), - .DEPTH(MEM_DEPTH), - .MEM_INIT(MEM_INIT), - .RAM_STYLE(RAM_STYLE) -) -ram -( - .clk(aclk), - - .ena(config_ce), - .wea(config_we), - .addra(config_address[BLOCKADRWIDTH-1:0]), - .wdataa(config_d0), - - .enb(strm0_incr_en | config_ce), - .enqb(strm0_incr_en | rack_shift[0]), - .addrb(config_ce ? config_address[BLOCKADRWIDTH-1:0] : strm0_addr), - .rdqb(m_axis_0_tdata) -); - - -end else begin: tdp - -reg [BLOCKADRWIDTH-1:0] strm1_addr = STRM1_OFFSET; -wire strm1_rst; -assign strm1_rst = strm1_incr_en & (strm1_addr == (STRM1_OFFSET + STRM1_DEPTH-1)); - -always @(posedge aclk) begin - if(strm1_rst | rst) - strm1_addr <= STRM1_OFFSET; - else if(strm1_incr_en) - strm1_addr <= strm1_addr + 1; -end - -ramb18_wf_dualport -#( - .ID(0), - .DWIDTH(MEM_WIDTH), - .AWIDTH(BLOCKADRWIDTH), - .DEPTH(MEM_DEPTH), - .MEM_INIT(MEM_INIT), - .RAM_STYLE(RAM_STYLE) -) -ram -( - .clk(aclk), - - .wea(config_we), - .ena(strm0_incr_en | config_ce), - .enqa(strm0_incr_en | config_ce_r), - .addra(config_we ? 
config_address[BLOCKADRWIDTH-1:0] : strm0_addr), - .wdataa(config_d0), - .rdqa(m_axis_0_tdata), - - .web(1'b0), - .enb(strm1_incr_en), - .enqb(strm1_incr_en), - .addrb(strm1_addr), - .wdatab('d0), - .rdqb(m_axis_1_tdata) -); - -end - -end else begin: bypass - -reg [MEM_WIDTH-1:0] singleval[0:0]; -initial begin - `ifdef SYNTHESIS - $readmemh({MEM_INIT,"memblock_synth_0.dat"}, singleval, 0, 0); - `else - $readmemh({MEM_INIT,"memblock_sim_0.dat"}, singleval, 0, 0); - `endif -end - -always @(posedge aclk) - if(config_ce & config_we) - singleval[0] <= config_d0; - -assign m_axis_0_tdata = singleval[0]; -assign m_axis_1_tdata = singleval[0]; - -end -endgenerate - -//signal valid after 2 tready cycles after initialization -//then stay valid -reg [1:0] tvalid_pipe0 = 2'd0; -reg [1:0] tvalid_pipe1 = 2'd0; - -assign m_axis_0_tvalid = tvalid_pipe0[1]; -assign m_axis_1_tvalid = tvalid_pipe1[1]; - -always @(posedge aclk) begin - if(rst) begin - tvalid_pipe0 <= 0; - end else if(strm0_incr_en) begin - tvalid_pipe0[0] <= 1; - tvalid_pipe0[1] <= tvalid_pipe0[0]; - end -end - -always @(posedge aclk) begin - if(rst) begin - tvalid_pipe1 <= 0; - end else if(strm1_incr_en) begin - tvalid_pipe1[0] <= 1; - tvalid_pipe1[1] <= tvalid_pipe1[0]; - end -end - -always @(posedge aclk) begin - rack_shift[0] <= config_ce & ~config_we; - rack_shift[1] <= rack_shift[0]; -end - -assign config_rack = rack_shift[1]; -assign config_q0 = m_axis_0_tdata; - -endmodule diff --git a/finn-rtllib/memstream/hdl/mux.v b/finn-rtllib/memstream/hdl/mux.v deleted file mode 100644 index f7087f9735..0000000000 --- a/finn-rtllib/memstream/hdl/mux.v +++ /dev/null @@ -1,44 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -module mux -#( - parameter NINPUTS = 1, - parameter WIDTH = 16 -) -( - input [NINPUTS*WIDTH-1:0] in, - output [WIDTH-1:0] out, - input [$clog2(NINPUTS)-1:0] sel -); - -assign out = in >> (sel*WIDTH); - -endmodule diff --git a/finn-rtllib/memstream/hdl/ramb18_sdp.v b/finn-rtllib/memstream/hdl/ramb18_sdp.v deleted file mode 100644 index 8d2fbf9a98..0000000000 --- a/finn-rtllib/memstream/hdl/ramb18_sdp.v +++ /dev/null @@ -1,96 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -module ramb18_sdp -#( - parameter ID = 0, - parameter DWIDTH = 18, - parameter AWIDTH = 10, - parameter DEPTH = 2**AWIDTH, - parameter MEM_INIT = "", - parameter RAM_STYLE = "auto" -) -( - input clk, - - input ena, - input wea, - input [AWIDTH-1:0] addra, - input [DWIDTH-1:0] wdataa, - - input enb, - input enqb, - input [AWIDTH-1:0] addrb, - output reg [DWIDTH-1:0] rdqb -); - -(* ram_style = RAM_STYLE *) reg [DWIDTH-1:0] mem[0:DEPTH-1]; -reg [DWIDTH-1:0] rdatab; - -`ifdef SYNTHESIS -reg [7:0] idx = ID; -`else -reg [15:0] idx; -`endif - -//initialize memory -initial begin - //note the hacky way of adding a filename memblock_ID.dat to the path provided in MEM_INIT - //ID can go up to 99 - if (ID < 0 && ID > 99) begin - $display("ID out of range [0-99]"); - $finish(); - end - //MEM_INIT path must be terminated by / - `ifdef SYNTHESIS - if (ID < 10) - $readmemh({MEM_INIT,"memblock_synth_",idx+8'd48,".dat"}, mem, 0, DEPTH-1); - else - $readmemh({MEM_INIT,"memblock_synth_",(idx/10)+8'd48,(idx%10)+8'd48,".dat"}, mem, 0, DEPTH-1); - `else - $sformat(idx,"%0d",ID); - if (ID < 10) - $readmemh({MEM_INIT,"memblock_sim_",idx[7:0],".dat"}, mem, 0, DEPTH-1); - else - $readmemh({MEM_INIT,"memblock_sim_",idx,".dat"}, mem, 0, DEPTH-1); - `endif -end - -//memory ports, with output pipeline register -always @(posedge clk) begin - if(wea) - mem[addra] <= wdataa; - if(enb) - rdatab <= mem[addrb]; - if(enqb) - rdqb <= rdatab; -end - -endmodule diff --git a/finn-rtllib/memstream/hdl/ramb18_wf_dualport.v b/finn-rtllib/memstream/hdl/ramb18_wf_dualport.v deleted file mode 100644 index c7850106ae..0000000000 --- a/finn-rtllib/memstream/hdl/ramb18_wf_dualport.v +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -module ramb18_wf_dualport -#( - parameter ID = 0, - parameter DWIDTH = 18, - parameter AWIDTH = 10, - parameter DEPTH = 2**AWIDTH, - parameter MEM_INIT = "", - parameter RAM_STYLE = "auto" -) -( - input clk, - - input wea, - input ena, - input enqa, - input [AWIDTH-1:0] addra, - input [DWIDTH-1:0] wdataa, - output reg [DWIDTH-1:0] rdqa, - - input web, - input enb, - input enqb, - input [AWIDTH-1:0] addrb, - input [DWIDTH-1:0] wdatab, - output reg [DWIDTH-1:0] rdqb -); - -(* ram_style = RAM_STYLE *) reg [DWIDTH-1:0] mem[0:DEPTH-1]; -reg [DWIDTH-1:0] rdataa; -reg [DWIDTH-1:0] rdatab; - -`ifdef SYNTHESIS -reg [7:0] idx = ID; -`else -reg [15:0] idx; -`endif - -//initialize memory -initial begin - //note the hacky way of adding a filename memblock_ID.dat to the path provided in MEM_INIT - //ID can go up to 99 - if (ID < 0 && ID > 99) begin - $display("ID out of range [0-99]"); - $finish(); - end - //MEM_INIT path must be terminated by / - `ifdef SYNTHESIS - if (ID < 10) - $readmemh({MEM_INIT,"memblock_",idx+8'd48,".dat"}, mem, 0, DEPTH-1); - else - $readmemh({MEM_INIT,"memblock_",(idx/10)+8'd48,(idx%10)+8'd48,".dat"}, mem, 0, DEPTH-1); - `else - $sformat(idx,"%0d",ID); - if (ID < 10) - $readmemh({MEM_INIT,"memblock_",idx[7:0],".dat"}, mem, 0, DEPTH-1); - else - $readmemh({MEM_INIT,"memblock_",idx,".dat"}, mem, 0, DEPTH-1); - `endif -end - -//memory ports, with output pipeline register -always @(posedge clk) begin - if(ena) begin - if(wea) - mem[addra] <= wdataa; - rdataa <= mem[addra]; - end - if(enqa) - rdqa <= rdataa; -end -always @(posedge clk) begin - if(enb) begin - if(web) - mem[addrb] <= wdatab; - rdatab <= mem[addrb]; - end - if(enqb) - rdqb <= rdatab; -end - -endmodule diff --git a/finn-rtllib/memstream/sim/gen_memblocks.sh b/finn-rtllib/memstream/sim/gen_memblocks.sh deleted file mode 100644 index b6e6b656ad..0000000000 --- a/finn-rtllib/memstream/sim/gen_memblocks.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2020, Xilinx -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -NLINES=`cat $1 | wc -l` -NBLOCKS=$(( ($NLINES + 1023) / 1024 )) -rm memblock_*.dat - -for (( i=0; i<$NBLOCKS; i++ )) -do - START=$(( 1 + $i * 1024 )) - tail -n +$START $1 | head -n 1024 >> memblock_$i.dat -done diff --git a/finn-rtllib/memstream/sim/golden.dat b/finn-rtllib/memstream/sim/golden.dat deleted file mode 100644 index 1466271bca..0000000000 --- a/finn-rtllib/memstream/sim/golden.dat +++ /dev/null @@ -1,9216 +0,0 @@ -AFB2B66A -BB100CFF -1ED93E9B -1B8E800D -DA9E0150 -38B1C916 -93BC4E64 -860F8373 -B31D708B -C2934023 -739C9593 -4C898A3D -CCC8F4C5 -8FA275E6 -47732CC7 -6857ABF0 -31671013 -6BC4AA43 -73D4F790 -2C6158B6 -FDC3B5D -6DC755F2 -E0E7E8C9 -7862E17 -3D4FFE1E -9AFFF447 -C862FD7D -A4C4D89A -D7D6EF51 -10E5A31D -79DA9C63 -A83060A8 -EA988813 -6B411BCF -85544B5A -5AC91DE6 -586E6779 -8FE8161B -4C57CC92 -74C918A6 -36B20D44 -5CB62FC0 -62FDB2E1 -4B1CB514 -526B7CEC -B3FA61D0 -C95DDBE -CC2BA600 -2466CD1D -3354A056 -CCED3EAC -6FFA09EE -F9648FAF -18CB5358 -EA506270 -66F385A6 -5B0246E5 -26218A76 -BC7CECFD -5969F6FF -3DAF5901 -C53D05BD -1EDA2D76 -5C0C0010 -7A6C0C8C -BF99E997 -C964C884 -4DE417F4 -8637312 -133B8C3A -D637DB88 -297288F6 -CF1D00B3 -426BD0F3 -4D258120 -8F7EC898 -E15482D9 -DFDFC442 -16A5C4AE -7A6A14DF -5E9C2807 -31BD3EA2 -BD6DCDBC -E47CD35E -FA4FE42 -CCDE0036 -345EBCB7 -64686255 -AE1D77EB -D2B42B84 -CD5E5824 -8DABAB1F -4E07FFCA -7F3B4C13 -1A62C962 -CE08835F -E8E05318 -DC25C7BF -132E4308 -5D0122D6 -B7451ACE -829D2507 -19329C7F -39FCA8F0 -DCD1A574 -17E2EEE -B2B6583A -2181E65 -7013A2A7 -46535CDE -C85BF5D3 -2FD5EFC2 -E05C5D2E -244F0F96 -F01D711F -F1CBB67E -6DAE6666 -84AD6F4A -B95BC84E -9DD54B95 -5A7CA1B -7B1447F4 -44A8EDA7 -20929E9 -40E62E02 -3D03CC3E -81EEF8C4 -1E686D13 -17C13B3D -A14967BE -D8693E0E -15A7FDD1 -19F51C6D -249D0C21 -51424939 -BA05F551 -C614827A -32841A0D -2F8B041 -11A2806 -DBF24199 -F246D9EB -52FFB23D -F3061A47 -B6D51EF3 -2DE434C3 -E1D3F874 -85270B0A -CC405B14 -DD3E9F23 -A0352F98 -67EE5731 -96892C65 -6D67A443 -16354414 -17959F75 -A554F236 -C585076 -2B665011 -7D503509 -77A4530 -6A13C8DC -31996F5 -916AD400 -E761D000 -D23CFD32 -CF3A5154 -C575A1CB -B91ACDBF -BEE7F338 -44C26212 -8124CD5B -245F7451 -DD6D18BA -6B838EC6 -5247AB98 -2F41FDAA -A780BD3B -1FD2F95 -6CDA39C -C31FA5A0 -AB56A5E1 -87F50441 -47093971 -BEBD81EC -2A7F6977 -8C83BD29 -FB067DAC -5FEBDCDC -8FB43F72 -EE45FC6D -4088691C -34F235D0 -43AB8E4D -67FA8BB5 -FC2D2C02 -DA77044C -22E6FC7 -6B6039A9 -BA6E3C45 -46DEC612 -8E7E0FF7 -438DE467 -F4525025 -7937973A -9ABE4BEF -8F8DF841 -F74C5087 -7EDE1CA4 -FF3C7F98 -A025FE0B -59E5EDF6 -6DD27411 -65C080E6 -C86D872D -628B6B26 -B9316D56 -E09EFA8B -A8CD3F21 -C0CD8745 -F4D62BA7 -D4D7FB99 -E9174232 -7F068FC4 -767480FC -275BBBF7 -3470FF88 -E632ACD1 -85677507 -AE0E2C69 -E2C74DA9 -C307B72B -5FB5A769 -99C18162 -FAFB7660 -6E984733 -E17FD97B -EC5E6CA7 -3D659815 -30826B60 -300BE8E8 -86D0B096 -856F2CB0 -2A61ADE4 -24EEB996 -2FCB729B -8190CE0D -E64F7E6A -4D0D42F -CE29765B -C77DE893 -9264C299 -A200E419 -868B5EC6 -8452AC39 -59F7BDED -422E75B2 -74E6329A -38F053E8 -16F8BD5A -363A2E43 -8018AB7B -44AE4CF5 -C8F7B14B -52658A45 -7B46C7D8 -CD319C38 -19AC8957 -5F42CFAA -5DB4DBF7 -DF66DDBA -4FBCB611 -266DFB86 -4F0EE64C -1765E724 -E30C89CA -4705FCE8 -BB7636B3 -789EFEFC -AAC0F37F -424B1661 -234F05AB -1BC0ADF8 -7F9EC67E -500448E5 -BF4D1C45 -C5B64E3B -914F44FE -EB17F041 -1752165C -F5B72E31 -6D68C060 -4EF27C55 -8CEDFDC5 -E3996A56 -25C5C632 -430D930F -EE04DE4D -576E4921 -E13A2A6E -CFE21675 -B1067912 -4C888068 -3C3A1A6D -FCE12E0 -FAD6AD8B -F7DE2E0F -E8DC0DE7 -CC8721DF -34411355 -2C664D07 -ED034324 -F57FDA56 -8C70BCDF 
-3A6FF2C8 -C6440537 -8113D976 -A40176A1 -46D1D0D9 -877A407C -3FBCD395 -3E74C1D8 -72E22A13 -BA46116D -CFB14406 -21400896 -7AD34367 -2905F60C -C1F9C16F -2E0E5FCF -2EEB00A0 -9C2D94A9 -8DE1CF01 -5912596C -CF2CA22A -774E7D4F -805657AE -1BA223EF -236FD53F -C1ABFD4A -6B8DD778 -6A6E40D2 -70CF4F79 -950E8D35 -5E4F9545 -86AA4166 -28D056E9 -9C550D75 -CB435A3 -B875667E -F54E6E97 -BB7ACD6B -F11637E9 -C220E1FA -C7CAD54B -32853439 -65BA20C9 -1838F8C0 -C3CCE57D -7D2B69F9 -137AD6E9 -6C041B9 -296497AA -98C5E853 -D37AB835 -376764A9 -2F714011 -D24BE867 -B2BA4E -9EA785F9 -726FCED6 -6B4C6950 -44C6D5C0 -85DEA727 -733F5A86 -41785CFF -BB395E8A -100F8117 -276A08D3 -9268A16E -FBF63C19 -AA497F25 -E92E1DC3 -185B4692 -FE6377D6 -C50771B -D98BCD04 -50FC7D74 -BE5BC294 -2C9C4482 -12FBF6CD -D1E04AE4 -5C9679EE -889D2695 -3699F061 -933D06A9 -930DC367 -496D7A37 -C4161D19 -3E08728B -66388C70 -B2363734 -5D12926F -39B4AEF8 -1948B925 -321E08BC -27559FC2 -A543B709 -4D28BC0 -46C64305 -F7B7D459 -97C4966B -A027A9C8 -43CABFA9 -F7C3643D -1128AB2A -AA4A1419 -AC6F2B46 -8F6FEFEF -34284D4D -D951EB81 -77AC6B7C -70F6E0B2 -FD7BE3CE -77BE497E -4883FBD6 -FCAB08D4 -9BC032A4 -67DA8A5C -82037EC1 -E3EC6CC9 -481B7623 -DA1F3873 -CE9E8787 -785CD654 -1661CF27 -42BD0C3C -990F261A -49F18930 -FA336094 -FFD6FC06 -B71077A6 -204B911E -BA1586D6 -8A2F6DBC -36B184AD -76017CAB -DA7E891E -88A51A1A -97AC49CB -2482BE28 -CE6BD009 -C7776DE0 -4E960944 -64081AF2 -56512D55 -D6D1C640 -EE78145B -54CC5EE0 -BE5D3E1F -8FC8816C -1D6AC407 -5D98F8F1 -18FECC5C -F3DE9A29 -93A19068 -AB623B35 -43FF1A02 -AA26434C -B071FDD5 -45AB6A2E -C1275AA7 -EADA5CDA -E427C95E -AE6E5B77 -89F3CA30 -9648C00A -330A03A7 -20DB35D6 -AA9946BF -A0E3050E -DEBB5819 -5047E2E -9C8FBEB9 -6B70D173 -8A99428D -230C88FE -3B26DBD4 -8DBED704 -EFF1C946 -C2381970 -71087497 -2268599D -FCE50AAE -460A49E5 -EC65BC4C -5A83C23C -DD44120F -D6E81BEB -D10235B7 -9362A387 -B3C9220C -46F21F0 -3D04FBC0 -63A2B38D -8F7DEF26 -F326457D -21933DC1 -775197FB -8D6C7C5F -B2D7D570 -147F9FF7 -78666356 -BAB7D249 -69B45EC6 -F56634ED -34738794 -26DF0163 -188DA00 -D2035A36 -FFBB8062 -62852DCF -55FC882A -849388E6 -43BE6E2C -D53EA2A2 -A228BC21 -9112A960 -5FCDE2F1 -79F42B27 -8AE37179 -1D722815 -5AE6DD26 -A8531C6F -EF386673 -AC761B14 -23C6BC3A -488D93B -AE6B0D63 -A4F1CEAC -43F80A43 -D9681EF6 -BA959674 -CCB852B8 -D9F4D79E -6403622F -75FAECC6 -7F43813F -51FC7BE6 -896A3A28 -CAF31C60 -76000EE7 -C1135AAB -6E83B2E6 -2AED1966 -C4F88A86 -21219EA -8AF14AD6 -14014BA2 -BC0BE2D5 -78757CE8 -C09D83DC -6B2021FE -D5AD900 -3685A49F -FD8B4BA0 -7B005539 -2F0C36EF -B41DBA0D -1DCF61B0 -CB3DA1A6 -24C0ADAA -BED01B2B -59C8C334 -11CCA76C -6F962508 -ABE672A6 -3C281A24 -A6C3DC39 -A72517B1 -FBA81175 -9906CEE4 -E8177FE1 -338D0184 -CC6650DF -840D8CA0 -4C55C42B -6B40F9CC -57B7E7B7 -B7C42442 -4500E9B -8C788183 -9B8F5FCE -49D0AEE1 -426B2271 -EC25BCE3 -7D63A976 -2EFFF592 -32A9E43C -AF5AFA52 -3ABE1133 -35B75ED7 -8F4271A9 -725A6EF -7ED7EB40 -37BD3B -7A0A5AF2 -F6492D7D -C2856688 -9595C241 -C07F646A -7D394FDC -7A991B05 -2CE3AF30 -9929E6E6 -4AE66BD4 -F0F3D1A3 -F76F72E9 -6C2051E2 -72431DE4 -B1796A93 -E04FD748 -D19522B1 -71396A78 -4202F058 -4F2CEB1E -A186853F -8B4474AA -C679B644 -98E10D42 -E7CEB08C -733CA225 -3478B95C -A706A842 -9510B8EB -F47E426E -9A0A17EE -2DA8832B -E73536CC -E6CA4B40 -11A2708F -753AC1E1 -8C304DED -5FC83F07 -4F9A04C9 -E0737708 -9091DFDD -8E1B322 -2552D768 -7C894296 -EABDC081 -E3B2A37 -DEC7EC87 -37FFB6DC -2B2A0CD6 -7E797B13 -64ABD0C5 -1FF12252 -F81AFB24 -C16F1ABC -F0B5AAFC -F80281BA -E51C04D -EEF8BD3E -450A49DB -AC985D7B -CBD4D077 -CAA6370A -FDA6530C -20B71F06 -ED5A891E 
-BA51A622 -E9F8E132 -63C23719 -2F59EE96 -14D77539 -1A98FC31 -12FCC937 -F39AD8FB -3750DBA9 -564E45B -F74C47FD -1010AD3A -8BE0AED3 -28B27F7B -D5E8EEFA -DC0EFEFB -959F5394 -A10ECCB8 -5C366706 -3B82A5EE -74E377DD -9881CEF3 -D1A4BD88 -69106661 -B209B42 -B56EE86B -63F37839 -C5AB7736 -4AD627C4 -8A4C7E1C -F7CC6334 -3D6CAEC4 -A86A18D5 -8FD910B1 -972371C8 -A423E9B6 -CE8C76C7 -DF930841 -C9D4A7B0 -18521955 -F6F167FC -889F1625 -432C606A -CA5EB4D0 -AFE77C91 -EAF55F16 -6F9A9777 -33726C1D -DC7B1D64 -8031DC00 -CF13144F -84BF2AB -45F5FD45 -6AF06D8C -C50FBE6C -11B8A4A2 -16B780E1 -98033979 -8EFAAEC0 -DD984A5A -D6A80AFC -15C793A3 -EF458063 -B784551F -552CC380 -D1E05EBA -4A795261 -F2B25418 -66066848 -D935B481 -136D2C8F -7A25AEFB -7000439A -E147CC62 -68976C6E -69447DAB -C72506F3 -C6E3FE3B -4FB0FD96 -DB465740 -A254195C -B11EA223 -FC3C44B5 -A9A86F1C -8EED03E3 -24CFF3A -A1B488CE -FD75D002 -9FEF0461 -75DC6637 -B3D38CD2 -57C8F65D -C62026D0 -D6320A18 -5E961798 -80FE0097 -6DA57E68 -D1E8A3C7 -96D49CFC -A8D2DFBC -520D2C1 -151C3F1D -8180DCC7 -4461E43E -C895BF5C -18EE374 -33EA06D4 -75B9D006 -23B934C1 -C2E89F39 -444BCB75 -78077AA5 -ECA64716 -3C1E3FFD -F7DB9CEE -6EC313DD -9CABEC47 -675FA281 -16B8304D -3E38FEC -A9663BDE -8EF647F2 -B646C61C -2228E400 -2B411566 -7A72EB44 -88BD9AE9 -4EF4EBA3 -BCC822D9 -4668160D -695667C1 -CE51A675 -40DE9687 -877561EF -416F5AE6 -EF9304FE -34C1C9D3 -5B63E1BB -C50E9899 -1831810D -25DE2CC1 -10539A77 -EE51D9B2 -462E5A70 -B0F8C3B7 -CA16E410 -1796F2E5 -573F6B28 -E157A965 -2640969A -153B4909 -7FC1290F -ABCAC2F -2A42D17 -BFFA3865 -7B12D8B9 -9321F9EF -E560B7A9 -36E18DD2 -57710FF9 -FAE1F933 -F717FEF8 -E86BAF7E -D0CE3E89 -C8755650 -704BB6ED -6309F650 -E21DDB4F -7CBF531C -7E0AFB8E -D6A1128B -60F16A1B -534186AF -72971F2E -428A867C -F571D32C -CD522E7B -13F6443 -38CDC9EC -D01C51E6 -2E575D3F -7E86B596 -C1460B28 -1403B019 -76D89A66 -4F2D9465 -9B87B1 -172A00A4 -4669559C -105C8A19 -3CD2DD63 -EF054D76 -8B9AB48 -64136500 -71C56349 -B7AEEDF5 -4145D7AC -D6A3E4C7 -2F9E0DF4 -31E418C8 -D2C839DE -63E919D9 -2F4D0353 -8812C572 -B88E671F -54D2BBE0 -E166998 -B7487741 -64312607 -5ADF6F3E -31A86BF1 -D8A96C85 -22AA3021 -AD4719B5 -49EB0670 -93B76AAF -B109648 -FBC7346C -2530A7B5 -C8525175 -15EC0A76 -315FACCE -D8C21A6F -9EDEF96D -6495575D -722A0577 -51EDE2ED -8109F168 -6CBA0929 -1ED88DCD -D79A67E2 -CE62A29C -6FE2A87F -D1E6E3B9 -601988A0 -6A045849 -A7E30F35 -E0EE4424 -AA89C628 -33D7A7A3 -FCD27B7A -80CAF9A4 -2E7F1302 -69F19C -80DBDC64 -392FBDC -E5981A33 -B4AF4210 -1DBFDB9F -31E5DF02 -5C571556 -EE256151 -9F573818 -200D540B -87743240 -1335188F -5A1E9D1F -FA267CB -688D2302 -80D32C1 -195719E -EF151174 -772EEC93 -DD2E2E4E -D8EA362D -3B24FC06 -FFFCF7FC -C571F2F4 -A8DAC7D -3BA7880C -16FC184D -7DBC453C -8F355780 -65C7ED3D -2202E50E -9EC765A9 -9D8F8CDA -CFA71D0B -7A463A33 -AA94D750 -359750D8 -B9A4BEFD -B153CD8C -93AFB5F4 -2676E0A0 -78C0805 -347133 -3B229F4D -4486A7BE -F3A0FAF3 -D29E9349 -A62C0FB4 -574D3763 -BCDAEE6E -BA27D40D -896903EB -8AE6171C -A911D78E -970FB490 -33B8A631 -893F7E3B -700EDF9D -EA7AC6E6 -6041F473 -FC6702EE -F225A258 -96A21B4 -CCA94D4D -FA6D00B7 -35580441 -F5E42BA -EE9AB535 -50874EBA -4454B2B -30653468 -9ABFE240 -29A13784 -EBF5F88F -B1769BB8 -EF22637D -A2FEEE4E -4B39E8F8 -38AD4316 -A3FCB454 -7D6F402 -18CEA9F0 -956B2CCE -6559ADC4 -F00F696E -C878E2A3 -3AB31BE4 -FF2E6E3A -3767BE32 -37CFBCBC -C307A74B -ED6A132B -8D5A1B70 -774C41D1 -A45F1CA9 -3FCF576A -C1BBAB8C -5B11B23A -620B6C8E -A6F5CB83 -450BFF8B -FBB9620D -BD936B56 -2FBF9A89 -2E000CD5 -E508C955 -2FB99422 -5043B664 -1C43CF3B -2D7E713F -FAD8A72B -7CF2FA33 -8FDD90A6 -8B5CDCDE 
[Large block of raw hexadecimal data lines (diff removals) elided; no prose or code content is recoverable from this span.]
-3D6E848B -29CD6C -68732656 -A6C6D52A -B50279FF -705B645 -6DF7F119 -34152606 -72948D92 -18BEE72 -36BE21E3 -C34FD53A -9765DFF -E5C9B4AF -4604B155 -DEAC2388 -7841FE0C -2E275885 -3EE65330 -EB66439B -FF4AB5DE -67EDA5EA -BB722F57 -6A645B7 -DE9DD302 -5AC7601D -371B5D5B -42BAC84D -21C7AA9E -F4ECBE94 -554C8B8A -B7C8BB88 -4C77DB1D -D4D8F3AC -DAB292E5 -85D906E8 -47785703 -9CEE88D4 -7DB86DB7 -694B5A34 -DE77B361 -E8DE3CB9 -315EC35A -A71943BC -C297B8CA -55EA528C -A11AF15D -1490835E -19DA117B -403B0CC3 -FF7DE389 -ED6C22E8 -6F8A8782 -7BF2BA9B -6C95F5DF -F8270769 -AB421268 -F06B05EB -8FF7DE5F -F2AB2FCD -A5EDD602 -31F05712 -3C269177 -67D92F11 -38D8D3C5 -2047013B -8E8BA724 -EB6A773 -5AF14AD1 -49910D46 -C9D6F784 -B44B09CF -1AEA48EF -2F12BD47 -10E3F7C9 -39EA8108 -B88ADC9 -19DAC1B4 -554908DC -587A0A7E -109D1E5B -1920E3CF -BC49C914 -C1EB74A7 -A5E9A494 -5FA5B8C9 -320673C2 -CE643004 -720E4075 -FDFED2FE -89C22F8E -40887408 -3235FF6B -A906F59D -F6F98F12 -7122ECA4 -4CDFCB42 -391F2365 -53AE3667 -6CCCE2E2 -44877A8A -92561CAB -DA5DE0E7 -73B898D6 -2E37229E -ABAAED3C -21087331 -58C85412 -8BB37690 -1256467F -6EE9FAF7 -DB0895D6 -954EF968 -1C7693BC -5786650F -7D441E12 -10AA9174 -492C6A3B -34374CC9 -98E59E7C -5B7BD4E0 -D1124C9F -B5B3362F -8ECC58C7 -8EB0E23E -72991400 -13DF853B -789E8DFE -D85E60DC -A168D4D -C3B6FA3A -11443EE2 -F63F9FDD -1A14A7A5 -5EEBFD5 -B24D582D -AEA8F125 -4AA038EE -5F6A1A16 -CBADD812 -340605AA -8BD8F6E9 -B85F3A6A -A585AE8C -6D12D2B3 -17C97329 -DBB835B9 -789C3DF4 -E048D462 -BECE080A -506DE5CA -63C4FA5C -7C2D8103 -689A3516 -B218BADF -8B7F0BDE -85B17891 -8888A9C6 -3DFC9FA8 -5F2859CD -FF72AE34 -9EA3FFCA -CF2194D2 -53B56E7F -C7009619 -B127FD51 -3A513DF0 -E9147D4B -2FDF3C37 -22FA1629 -61480015 -57EE267A -EE04DA43 -EB2D289C -2C102144 -B012EED -B1B339C8 -AC1EA89 -3A4420D0 -5623907B -B0613D35 -A70F1B2C -589E3EA7 -F998AB7D -9566E921 -B133DB2D -A3106F6A -EFB4518 -6AA3FB8F -C505C8DF -65032E33 -6D3942DF -333553CC -BF392E2 -6C77F980 -39211AFC -9E0B71C9 -A3BB7123 -7CE16B9A -F15BB634 -BD68DE3E -77BB27AB -BB72659C -BFA916CA -7022CF20 -EA64C93D -B61C32CC -20201879 -148DDADC -58977 -8D5CC2E6 -76E678BD -5655B362 -587EAB4A -599E3DCF -7B470038 -E87E82DB -9088EC5E -ED9F9E4C -3DD98E27 -5AFA5052 -3DF313C4 -BB22A60D -44D97BDA -601409F3 -CD1D3CFE -7EAE52D0 -41ABBAA0 -A1D7C883 -FFE2B4C9 -13717374 -9DD27EC8 -29301EF0 -87953D6C -9309161C -C91DFE7C -DD5EC452 -F6C27DF2 -43B433FD -6D16B93F -92F09DBA -ABB598EF -B49A721A -3A03EE56 -3177D3AF -5D24FD94 -FEF88FB2 -52B3170F -64264DCC -18B683B7 -6B21935F -901A396C -4601FB55 -51F2547E -DD37C23B -35E6B3DF -31ABC979 -C7223449 -ABCA9CFB -A8F57AFA -A097240 -78704130 -7F1D7661 -456C2409 -63E31F62 -FD0D4BB1 -97FCC39 -951A7C93 -893165C9 -E86163CC -25F5694C -8890910A -43F3AE36 -55D414A1 -1ADDD3BA -C7EDFDDF -5A8607BA -219D3208 -27BD79E2 -2E9EA4B8 -5D8F951A -F9E880D5 -B2C7612A -862CCCF3 -7EDC71AC -1B6EA644 -EC3AA9A0 -970224FD -6C0DD16A -C589D1B6 -71AC91EE -C75B0206 -50232786 -316AAD4D -F4D5A31B -E30CCF43 -BD72BEAD -26DE4F8F -56E97741 -9243E978 -F7E2363D -BAE2CF31 -6367CFB1 -B72ED4E6 -75216393 -4626E74F -61194364 -8D6726A8 -458611B8 -1B536E4D -837AAD1F -F5A226D8 -8BB37701 -31F19003 -8E48DEEE -9DA11E9 -3BBB5BB4 -C6F15B5D -1A53A4EB -69AADAB -4FAE6295 -F0943601 -A449516E -BF7EE395 -176B1370 -F55873EE -553FEEF0 -9F3AB09 -2539B92E -F6803BC -BAA192FB -DBB0AD5A -B9C5415 -F92D0588 -88B9E738 -A033C767 -A1CA1EFF -5AC07200 -AC60C03D -17FE20F9 -B898B9AC -51AF425E -2706FC42 -F2A258E7 -353652D7 -CF3F89EE -63A13050 -5E6A7997 -153FD92F -1D0E8614 -6E504447 -5AAEC133 -9B6E5499 -64D5EAE6 -A29CFBAB -52B44B68 -8DC7C01A 
-704EB2F1 -395F1F7 -7D897418 -2FC66846 -ECCE81AE -21CD8E31 -B2EFA3D4 -16C4CD41 -D6A21ED0 -944897F9 -F495D730 -B4317C3C -8C074582 -22F6A9D9 -CE4425FB -FB08BCBA -DF07A006 -293AD5BA -BD224A44 -9DA6701B -DAB46DE4 -9F88773B -57CC02C7 -7A6B68E4 -55A54D48 -BCFC1C53 -DF64F920 -A9FE6014 -4C64DB55 -5FE9345F -412A1E48 -45D41945 -23B44D08 -8D5563A2 -26E5E437 -CECDF4D0 -1BE55025 -84329F92 -37C97F8F -C3CDE976 -580955A -C79E1131 -C5BC58E7 -7D14509B -3DE94089 -1B78FE71 -49A0ECD9 -501D09B1 -F30135CD -B0FA41B4 -33B11313 -32AB01B -635EBA76 -666D7FE5 -68CCC93 -59B0ADA5 -B305CBAA -1C553509 -5E564F7C -F057084C -52811FC8 -987465B2 -461DA750 -F0C471BB -3C9D3E64 -73C920AF -355A26B9 -3A1FDD13 -CEA3F7DD -66C0687 -1319291 -9045182D -174C724D -2A491012 -BA53519F -A62B41D8 -F6E1559E -25F93E6F -2A40C5F4 -C63D1AC2 -82598002 -2B81101A -63442848 -3788BB2D -74DDC016 -214CE0F4 -9CBAA8BD -9288E1AC -EF76E528 -719E7BAE -BD579EF6 -4E6B0C62 -6285F757 -9049BDA3 -80BFE3C1 -4344B7A7 -4552F1DD -DE2C0DAC -86346BE2 -A0A897E7 -1797D93 -6CF3C7F0 -7592D9E7 -CFB46F1E -17D6FF93 -87FF1727 -198FC755 -303540EF -78C07416 -46CB391E -8D441653 -3724DA3C -860D4DDF -A99F046E -4B167D86 -E2AFCBE9 -6608F2D2 -4E49A130 -3C64B760 -958BCEB3 -8C784B24 -5E07EF07 -7E6CAC6A -B69765D8 -65897B6D -60A8FB7D -6706E0E1 -142E4310 -15C4944C -F6A075AD -3CF66DF8 -CE1EFE72 -D6495864 -2BDEFA6B -9E511045 -F2E2E9A7 -B71B03EB -15DD8D69 -65E5A555 -52C644AE -301A8F69 -35075232 -17ADE8C4 -A2C808CC -F1A4C57B -D6EE3EF3 -85942F72 -26011F23 -D4211E97 -595E1A12 -6886CE0 -FBD6F396 -D10BD980 -6615476D -4662EB8F -F80BE955 -93A6E68E -4C3D4CAA -5838D0CB -756FB6E4 -F0BC8312 -EB89BE83 -D34E119E -34F860EC -F371DC73 -BB166E0D -CE86AF89 -C177E633 -A19C1D9B -B1DCBF1B -D7310057 -2452939E -120A830 -F92A9928 -64877B92 -3D69A585 -178187B6 -146C0495 -9A3D8886 -C79478AD -9A429976 -29795A97 -32BD0034 -1EE08CD -8982284A -ED362AC4 -4A1AC734 -6FD164B3 -422ADEBA -9374B593 -BBFA8568 -1C0B26A5 -5DF68365 -CFA1D689 -1C9509C2 -1056EAC4 -D492D000 -64076487 -2C1FB65B -9E1DEBC7 -C5AECD05 -39652664 -57A1B9F9 -3652484 -E8CCF72B -CB7EC405 -7DA97E78 -7ACE1B2C -A5DC0B75 -40C14422 -777B17AF -5AA3FEDF -319C2B1C -AB8EEE5F -159D66E5 -3E479D0 -12AF93DE -55EA550A -38853E1F -FB943864 -781FA52E -4FB9C9FA -377D8866 -8411E296 -641D997F -1933684F -27A62DEF -50E15F68 -755BCD7C -5DF3466F -494A937C -8763C6BD -C04B98E0 -E9E067FF -444151AB -C5FC7398 -5EC7D30E -E0610B7E -76CEBB5 -B15D9821 -37B2D1E2 -CC1249BF -3E064388 -246B17B3 -4A342228 -529E849B -F25F250D -31F3E925 -D1112DCA -DA6A8BC9 -2A7789D8 -C0C2C72D -4BB23226 -68166638 -4EC7519F -D559B4B7 -8035E823 -DFB06DE0 -2B4B86 -83D6F12F -84AC7F7B -7139E98B -C42D8AE3 -2992AD9C -E1E24DA1 -838772BD -CA28D517 -3606947F -B9FDFA59 -6C4F8489 -76DBFFD4 -3F0BFDF6 -1B04AD1B -8BA40134 -842A54F6 -621A0DFE -1F3729FC -C53AFEFE -CD5F1E79 -D2C0C70 -30A4FF4F -D384C76 -D73B9B17 -C74DC3F9 -E5ACD113 -901E6D5D -D376A71F -57BA08F9 -17E25669 -F7485021 -BCD1B9C5 -90C1A916 -EEF9DE6E -6AD37907 -40B05A7B -4A56C1D -901093E1 -5424EEE9 -3336300D -8B1767F3 -707A4B23 -37290194 -13A5E016 -C25902C0 -5C04C3AE -B7D84F4D -D57A495F -EE168042 -1584DB78 -7DBFDBD3 -DBE2218D -9EED8CD4 -2A562C0F -C76F7E04 -8FCA82B8 -7211C54F -8E76E82C -9BAF59A6 -C1E7B9CE -28E9E29F -6746FB40 -7841DDA1 -37D07C7 -88A5CF5 -4B0B8A4E diff --git a/finn-rtllib/memstream/sim/memstream_tb.sv b/finn-rtllib/memstream/sim/memstream_tb.sv new file mode 100644 index 0000000000..4b2e850415 --- /dev/null +++ b/finn-rtllib/memstream/sim/memstream_tb.sv @@ -0,0 +1,212 @@ +/** + * Copyright (c) 2023, Xilinx + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * * Neither the name of FINN nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @author Thomas B. Preußer + */ + +module memstream_tb; + localparam int unsigned DEPTH = 256; + localparam int unsigned DATA_WIDTH = 32; + + // Global Control + logic clk = 0; + always #5ns clk = !clk; + logic rst; + + // Configuration Interface + logic [31:0] config_address; + logic config_ce; + logic config_we; + logic [DATA_WIDTH-1:0] config_d0; + uwire config_rack; + uwire [DATA_WIDTH-1:0] config_q0; + + // Streamed Output + logic ordy; + uwire ovld; + uwire [DATA_WIDTH-1:0] odat; + + initial begin + config_address = 'x; + config_ce = 0; + config_we = 0; + config_d0 = 'x; + + ordy = 0; + + rst = 1; + repeat(16) @(posedge clk); + rst <= 0; + + // Write Parameters + config_ce <= 1; + config_we <= 1; + for(int unsigned i = 0; i < DEPTH; i++) begin + config_address <= i; + config_d0 <= i; + @(posedge clk); + end + config_address <= 'x; + config_ce <= 0; + config_we <= 0; + config_d0 <= 'x; + + rst <= 1; + @(posedge clk); + rst <= 0; + + // One Round of Stream Read + ordy <= 1; + for(int unsigned i = 0; i < DEPTH; i++) begin + @(posedge clk iff ovld); + assert(odat == i) else begin + $error("Unexpected output: %0d instead of %0d", odat, i); + $stop; + end + end + ordy <= 0; + + // Full Parameter Readback + if(1) begin + automatic logic [DATA_WIDTH-1:0] Q[$] = {}; + + config_ce <= 1; + for(int unsigned i = 0; i < DEPTH; i++) begin + config_address <= i; + @(posedge clk); + Q.push_back(i); + + if(config_rack) begin + automatic logic [DATA_WIDTH-1:0] exp = Q.pop_front(); + assert(config_q0 == exp) else begin + $error("Readback mismatch: %0d instead of %0d", config_q0, exp); + $stop; + end + end + end + config_address <= 'x; + config_ce <= 0; + + while(Q.size) begin + automatic logic [DATA_WIDTH-1:0] exp = Q.pop_front(); + + @(posedge clk iff config_rack); + assert(config_q0 == exp) else begin + $error("Readback mismatch: %0d instead of %0d", config_q0, exp); + $stop; + end + end + end + + repeat(6) @(posedge clk); + + // Another Round of Stream Read + ordy <= 1; + for(int unsigned i = 0; i < DEPTH; i++) begin 
+ @(posedge clk iff ovld); + assert(odat == i) else begin + $error("Unexpected output: %0d instead of %0d", odat, i); + $stop; + end + end + ordy <= 0; + + // A Round of Stream Read with intermittent Read Backs + if(1) begin + automatic logic [DATA_WIDTH-1:0] Q[$] = {}; + + for(int unsigned i = 0; i < DEPTH; i++) begin + do begin + // Randomly delayed Readiness + if($urandom()%5 != 0) ordy <= 1; + + // Issue and Check Random Read Backs + if($urandom()%9 == 0) begin + automatic int unsigned addr = $urandom() % DEPTH; + config_ce <= 1; + config_address <= addr; + Q.push_back(addr); + end + @(posedge clk); + config_ce <= 0; + config_address <= 'x; + + if(config_rack) begin + automatic logic [DATA_WIDTH-1:0] exp = Q.pop_front(); + assert(config_q0 == exp) else begin + $error("Readback mismatch: %0d instead of %0d", config_q0, exp); + $stop; + end + end + + end while(!ovld || !ordy); + ordy <= 0; + + assert(odat == i) else begin + $error("Unexpected output: %0d instead of %0d", odat, i); + $stop; + end + end + + while(Q.size) begin + automatic logic [DATA_WIDTH-1:0] exp = Q.pop_front(); + + @(posedge clk iff config_rack); + assert(config_q0 == exp) else begin + $error("Readback mismatch: %0d instead of %0d", config_q0, exp); + $stop; + end + end + end + ordy <= 0; + + repeat(2) @(posedge clk); + $display("Test completed."); + $finish; + end + + memstream #( + .DEPTH(DEPTH), + .WIDTH(DATA_WIDTH) + ) dut ( + .clk, .rst, + + .config_address, + .config_ce, + .config_we, + .config_d0, + .config_q0, + .config_rack, + + .ordy, + .ovld, + .odat + ); + +endmodule : memstream_tb diff --git a/finn-rtllib/memstream/sim/tb_memstream.v b/finn-rtllib/memstream/sim/tb_memstream.v deleted file mode 100644 index ad3efad5bd..0000000000 --- a/finn-rtllib/memstream/sim/tb_memstream.v +++ /dev/null @@ -1,369 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -`timescale 1ns/10ps - -module tb_memstream; - -//parameters to enable/disable axi-mm, set number of streams, set readmemh for memory, set per-stream offsets in memory, set per-stream widths -parameter CONFIG_EN = 1; -parameter NSTREAMS = 4;//1 up to 6 - -parameter MEM_DEPTH = 9216; -parameter MEM_WIDTH = 32; -parameter MEM_INIT = "./"; -parameter MEM_CHECK = "golden.dat"; - -//widths per stream -parameter STRM0_WIDTH = 32; -parameter STRM1_WIDTH = 32; -parameter STRM2_WIDTH = 32; -parameter STRM3_WIDTH = 32; -parameter STRM4_WIDTH = 1; -parameter STRM5_WIDTH = 1; - -//depths per stream -parameter STRM0_DEPTH = 2304; -parameter STRM1_DEPTH = 2304; -parameter STRM2_DEPTH = 2304; -parameter STRM3_DEPTH = 2304; -parameter STRM4_DEPTH = 1; -parameter STRM5_DEPTH = 1; - -//offsets for each stream -parameter STRM0_OFFSET = 0; -parameter STRM1_OFFSET = 2304; -parameter STRM2_OFFSET = 4608; -parameter STRM3_OFFSET = 6912; -parameter STRM4_OFFSET = 0; -parameter STRM5_OFFSET = 0; - - -reg clk; -reg rst; - -reg [31:0] config_address = 0; -reg config_ce = 0; -reg config_we = 0; -reg [31:0] config_d0 = 0; -wire [31:0] config_q0; - -//multiple wire AXI Streams -reg m_axis_0_afull; -reg m_axis_0_tready; -wire m_axis_0_tvalid; -wire [STRM0_WIDTH-1:0] m_axis_0_tdata; - -reg m_axis_1_afull; -reg m_axis_1_tready; -wire m_axis_1_tvalid; -wire [STRM1_WIDTH-1:0] m_axis_1_tdata; - -reg m_axis_2_afull; -reg m_axis_2_tready; -wire m_axis_2_tvalid; -wire [STRM2_WIDTH-1:0] m_axis_2_tdata; - -reg m_axis_3_afull; -reg m_axis_3_tready; -wire m_axis_3_tvalid; -wire [STRM3_WIDTH-1:0] m_axis_3_tdata; - -reg m_axis_4_afull; -reg m_axis_4_tready; -wire m_axis_4_tvalid; -wire [STRM4_WIDTH-1:0] m_axis_4_tdata; - -reg m_axis_5_afull; -reg m_axis_5_tready; -wire m_axis_5_tvalid; -wire [STRM5_WIDTH-1:0] m_axis_5_tdata; - -reg [MEM_WIDTH-1:0] golden[MEM_DEPTH-1:0]; -integer ptr0, ptr1, ptr2, ptr3, ptr4, ptr5; -integer done = 0; -reg [5:0] rng; - -//clock -initial begin - clk = 0; - forever #5 clk = ~clk; -end - -initial begin - rst = 1; - config_ce = 0; - m_axis_0_afull = 0; - m_axis_1_afull = 0; - m_axis_2_afull = 0; - m_axis_3_afull = 0; - m_axis_4_afull = 0; - m_axis_5_afull = 0; - m_axis_0_tready = 1; - m_axis_1_tready = 1; - m_axis_2_tready = 1; - m_axis_3_tready = 1; - m_axis_4_tready = 1; - m_axis_5_tready = 1; - repeat(100) @(negedge clk); - rst = 0; - #100 - fork - begin - $display("Starting to generate random AFULL"); - while(~done) begin - rng = $random; - m_axis_0_afull = rng[0]; - m_axis_1_afull = rng[1]; - m_axis_2_afull = rng[2]; - m_axis_3_afull = rng[3]; - m_axis_4_afull = rng[4]; - m_axis_5_afull = rng[5]; - @(negedge clk); - end - end - join -end - - -//DUT -memstream -#( - CONFIG_EN, - NSTREAMS, - MEM_DEPTH, - MEM_WIDTH, - MEM_INIT, - - //widths per stream - STRM0_WIDTH, - STRM1_WIDTH, - STRM2_WIDTH, - STRM3_WIDTH, - STRM4_WIDTH, - STRM5_WIDTH, - - //depths per stream - STRM0_DEPTH, - STRM1_DEPTH, - STRM2_DEPTH, - STRM3_DEPTH, - STRM4_DEPTH, - STRM5_DEPTH, - - //offsets for each stream - STRM0_OFFSET, - STRM1_OFFSET, - STRM2_OFFSET, - STRM3_OFFSET, - STRM4_OFFSET, - STRM5_OFFSET -) -dut -( - clk, - ~rst, - - //optional AXI-Lite interface - config_address, - config_ce, - config_we, - config_d0, - config_q0, - - //multiple output AXI Streams - m_axis_0_afull, - m_axis_0_tready, - m_axis_0_tvalid, - m_axis_0_tdata, - - m_axis_1_afull, - m_axis_1_tready, - m_axis_1_tvalid, - m_axis_1_tdata, - - m_axis_2_afull, - m_axis_2_tready, - m_axis_2_tvalid, - m_axis_2_tdata, - - m_axis_3_afull, - m_axis_3_tready, - 
m_axis_3_tvalid, - m_axis_3_tdata, - - m_axis_4_afull, - m_axis_4_tready, - m_axis_4_tvalid, - m_axis_4_tdata, - - m_axis_5_afull, - m_axis_5_tready, - m_axis_5_tvalid, - m_axis_5_tdata - - -); - -//stream checkers -initial begin - ptr0 = STRM0_OFFSET; - ptr1 = STRM1_OFFSET; - ptr2 = STRM2_OFFSET; - ptr3 = STRM3_OFFSET; - ptr4 = STRM4_OFFSET; - ptr5 = STRM5_OFFSET; - fork - //check stream 0 - begin - $display("Starting stream 0 checker"); - while(~done & (NSTREAMS > 0)) begin - @(negedge clk); - if(m_axis_0_tvalid) begin - if(m_axis_0_tdata != golden[ptr0]) begin - $display("Mismatch on stream 0"); - $stop(); - end - //increment pointer - ptr0 = ptr0 + 1; - //rewind pointer if it's reached end - if(ptr0 == (STRM0_OFFSET + STRM0_DEPTH)) - ptr0 = STRM0_OFFSET; - end - end - end - //check stream 1 - begin - $display("Starting stream 1 checker"); - while(~done & (NSTREAMS > 1)) begin - @(negedge clk); - if(m_axis_1_tvalid) begin - if(m_axis_1_tdata != golden[ptr1]) begin - $display("Mismatch on stream 1"); - $stop(); - end - //increment pointer - ptr1 = ptr1 + 1; - //rewind pointer if it's reached end - if(ptr1 == (STRM1_OFFSET + STRM1_DEPTH)) - ptr1 = STRM1_OFFSET; - end - end - end - - //check stream 2 - begin - $display("Starting stream 2 checker"); - while(~done & (NSTREAMS > 2)) begin - @(negedge clk); - if(m_axis_2_tvalid) begin - if(m_axis_2_tdata != golden[ptr2]) begin - $display("Mismatch on stream 2"); - $stop(); - end - //increment pointer - ptr2 = ptr2 + 1; - //rewind pointer if it's reached end - if(ptr2 == (STRM2_OFFSET + STRM2_DEPTH)) - ptr2 = STRM2_OFFSET; - end - end - end - //check stream 3 - begin - $display("Starting stream 3 checker"); - while(~done & (NSTREAMS > 3)) begin - @(negedge clk); - if(m_axis_3_tvalid) begin - if(m_axis_3_tdata != golden[ptr3]) begin - $display("Mismatch on stream 3"); - $stop(); - end - //increment pointer - ptr3 = ptr3 + 1; - //rewind pointer if it's reached end - if(ptr3 == (STRM3_OFFSET + STRM3_DEPTH)) - ptr3 = STRM3_OFFSET; - end - end - end - //check stream 4 - begin - $display("Starting stream 4 checker"); - while(~done & (NSTREAMS > 4)) begin - @(negedge clk); - if(m_axis_4_tvalid) begin - if(m_axis_4_tdata != golden[ptr4]) begin - $display("Mismatch on stream 4"); - $stop(); - end - //increment pointer - ptr4 = ptr4 + 1; - //rewind pointer if it's reached end - if(ptr4 == (STRM4_OFFSET + STRM4_DEPTH)) - ptr4 = STRM4_OFFSET; - end - end - end - //check stream 5 - begin - $display("Starting stream 5 checker"); - while(~done & (NSTREAMS > 5)) begin - @(negedge clk); - if(m_axis_5_tvalid) begin - if(m_axis_5_tdata != golden[ptr5]) begin - $display("Mismatch on stream 5"); - $stop(); - end - //increment pointer - ptr5 = ptr5 + 1; - //rewind pointer if it's reached end - if(ptr5 == (STRM5_OFFSET + STRM5_DEPTH)) - ptr5 = STRM5_OFFSET; - end - end - end - join -end - -initial begin - done = 0; - $readmemh(MEM_CHECK,golden); -// $dumpfile("wave.vcd"); -// $dumpvars(0,tb_memstream); - @(negedge rst); - #10000000 - $display("Test done!"); - done = 1; - #1000 - $finish(); -end - -endmodule diff --git a/finn-rtllib/memstream/sim/tb_memstream_writes.v b/finn-rtllib/memstream/sim/tb_memstream_writes.v deleted file mode 100644 index c66807454b..0000000000 --- a/finn-rtllib/memstream/sim/tb_memstream_writes.v +++ /dev/null @@ -1,486 +0,0 @@ -/* - Copyright (c) 2020, Xilinx - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of FINN nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -`timescale 1ns/10ps - -module tb_memstream_writes; - -//parameters to enable/disable axi-mm, set number of streams, set readmemh for memory, set per-stream offsets in memory, set per-stream widths -parameter CONFIG_EN = 1; -parameter NSTREAMS = 2;//1 up to 6 - -parameter MEM_DEPTH = 40; -parameter MEM_WIDTH = 70; - -//widths per stream -parameter STRM0_WIDTH = 70; -parameter STRM1_WIDTH = 32; -parameter STRM2_WIDTH = 32; -parameter STRM3_WIDTH = 32; -parameter STRM4_WIDTH = 1; -parameter STRM5_WIDTH = 1; - -//depths per stream -parameter STRM0_DEPTH = 20; -parameter STRM1_DEPTH = 20; -parameter STRM2_DEPTH = 2304; -parameter STRM3_DEPTH = 2304; -parameter STRM4_DEPTH = 1; -parameter STRM5_DEPTH = 1; - -//offsets for each stream -parameter STRM0_OFFSET = 0; -parameter STRM1_OFFSET = 20; -parameter STRM2_OFFSET = 4608; -parameter STRM3_OFFSET = 6912; -parameter STRM4_OFFSET = 0; -parameter STRM5_OFFSET = 0; - - -reg clk; -reg rst; - -wire awready; -reg awvalid; -reg [31:0] awaddr; -reg [2:0] awprot; -//write data -wire wready; -reg wvalid; -reg [31:0] wdata; -reg [3:0] wstrb; -//burst response -reg bready; -wire bvalid; -wire [1:0] bresp; - -//Read channels -//read address -wire arready; -reg arvalid; -reg [31:0] araddr; -reg [2:0] arprot; -//read data -reg rready; -wire rvalid; -wire [1:0] rresp; -wire [31:0] rdata; - -//multiple wire AXI Streams -reg m_axis_0_afull; -reg m_axis_0_tready; -wire m_axis_0_tvalid; -wire [STRM0_WIDTH-1:0] m_axis_0_tdata; - -reg m_axis_1_afull; -reg m_axis_1_tready; -wire m_axis_1_tvalid; -wire [STRM1_WIDTH-1:0] m_axis_1_tdata; - -reg m_axis_2_afull; -reg m_axis_2_tready; -wire m_axis_2_tvalid; -wire [STRM2_WIDTH-1:0] m_axis_2_tdata; - -reg m_axis_3_afull; -reg m_axis_3_tready; -wire m_axis_3_tvalid; -wire [STRM3_WIDTH-1:0] m_axis_3_tdata; - -reg m_axis_4_afull; -reg m_axis_4_tready; -wire m_axis_4_tvalid; -wire [STRM4_WIDTH-1:0] m_axis_4_tdata; - -reg m_axis_5_afull; -reg m_axis_5_tready; -wire m_axis_5_tvalid; -wire [STRM5_WIDTH-1:0] m_axis_5_tdata; - -reg [MEM_WIDTH-1:0] golden[MEM_DEPTH-1:0]; -reg [MEM_WIDTH-1:0] gword; 
-integer ptr0, ptr1, ptr2, ptr3, ptr4, ptr5; -integer done = 0; -integer i, j; -reg [5:0] rng; - -parameter NFOLDS_PER_WORD = (MEM_WIDTH+31)/32; - -task axi_write; - input [MEM_WIDTH-1:0] data; - input [31:0] adr; - begin - for(j=0; j<(1<<$clog2(NFOLDS_PER_WORD)); j=j+1) begin - @(negedge clk); - awvalid = 1; - wvalid = 1; - wdata = data>>(j*32); - awaddr = (adr*(1<<$clog2(NFOLDS_PER_WORD))+j)*4; - fork - begin - @(posedge awready); - @(posedge clk) awvalid = 0; - end - begin - @(posedge wready); - @(posedge clk) wvalid = 0; - end - join - @(posedge clk); - end - end -endtask - -task axi_read; - input [31:0] adr; - output [MEM_WIDTH-1:0] data; - begin - data = 0; - for(j=0; j 0)) begin - @(negedge clk); - if(m_axis_0_tvalid & m_axis_0_tready) begin - if(m_axis_0_tdata != golden[ptr0]) begin - $display("Mismatch on stream 0"); - $stop(); - end - //increment pointer - ptr0 = ptr0 + 1; - //rewind pointer if it's reached end - if(ptr0 == (STRM0_OFFSET + STRM0_DEPTH)) - ptr0 = STRM0_OFFSET; - end - end - end - //check stream 1 - begin - $display("Starting stream 1 checker"); - while(~done & (NSTREAMS > 1)) begin - @(negedge clk); - if(m_axis_1_tvalid & m_axis_1_tready) begin - if(m_axis_1_tdata != golden[ptr1]) begin - $display("Mismatch on stream 1"); - $stop(); - end - //increment pointer - ptr1 = ptr1 + 1; - //rewind pointer if it's reached end - if(ptr1 == (STRM1_OFFSET + STRM1_DEPTH)) - ptr1 = STRM1_OFFSET; - end - end - end - //check stream 2 - begin - $display("Starting stream 2 checker"); - while(~done & (NSTREAMS > 2)) begin - @(negedge clk); - if(m_axis_2_tvalid & m_axis_2_tready) begin - if(m_axis_2_tdata != golden[ptr2]) begin - $display("Mismatch on stream 2"); - $stop(); - end - //increment pointer - ptr2 = ptr2 + 1; - //rewind pointer if it's reached end - if(ptr2 == (STRM2_OFFSET + STRM2_DEPTH)) - ptr2 = STRM2_OFFSET; - end - end - end - //check stream 3 - begin - $display("Starting stream 3 checker"); - while(~done & (NSTREAMS > 3)) begin - @(negedge clk); - if(m_axis_3_tvalid & m_axis_3_tready) begin - if(m_axis_3_tdata != golden[ptr3]) begin - $display("Mismatch on stream 3"); - $stop(); - end - //increment pointer - ptr3 = ptr3 + 1; - //rewind pointer if it's reached end - if(ptr3 == (STRM3_OFFSET + STRM3_DEPTH)) - ptr3 = STRM3_OFFSET; - end - end - end - //check stream 4 - begin - $display("Starting stream 4 checker"); - while(~done & (NSTREAMS > 4)) begin - @(negedge clk); - if(m_axis_4_tvalid & m_axis_4_tready) begin - if(m_axis_4_tdata != golden[ptr4]) begin - $display("Mismatch on stream 4"); - $stop(); - end - //increment pointer - ptr4 = ptr4 + 1; - //rewind pointer if it's reached end - if(ptr4 == (STRM4_OFFSET + STRM4_DEPTH)) - ptr4 = STRM4_OFFSET; - end - end - end - //check stream 5 - begin - $display("Starting stream 5 checker"); - while(~done & (NSTREAMS > 5)) begin - @(negedge clk); - if(m_axis_5_tvalid & m_axis_5_tready) begin - if(m_axis_5_tdata != golden[ptr5]) begin - $display("Mismatch on stream 5"); - $stop(); - end - //increment pointer - ptr5 = ptr5 + 1; - //rewind pointer if it's reached end - if(ptr5 == (STRM5_OFFSET + STRM5_DEPTH)) - ptr5 = STRM5_OFFSET; - end - end - end - join -end - -initial begin - done = 0; - @(negedge rst); - $dumpfile("wave.vcd"); - $dumpvars(0,tb_memstream_writes); - #50000 - $display("Test done!"); - done = 1; - #1000 - $finish(); -end - -endmodule diff --git a/finn-rtllib/memstream/sim/test.sh b/finn-rtllib/memstream/sim/test.sh deleted file mode 100755 index 7cb0497d26..0000000000 --- a/finn-rtllib/memstream/sim/test.sh 
+++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2020, Xilinx -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -iverilog ../hdl/*.v tb_memstream_writes.v -o sim -./sim diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl deleted file mode 100644 index 87565bc561..0000000000 --- a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl +++ /dev/null @@ -1,394 +0,0 @@ - -# Loading additional proc with user specified bodies to compute parameter values. -source [file join [file dirname [file dirname [info script]]] gui/memstream_v1_0.gtcl] - -# Definitional proc to organize widgets for parameters. 
-proc init_gui { IPINST } { - ipgui::add_param $IPINST -name "Component_Name" - #Adding Page - set Page_0 [ipgui::add_page $IPINST -name "Page 0"] - ipgui::add_param $IPINST -name "AXILITE_ADDR_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "CONFIG_EN" -parent ${Page_0} - ipgui::add_param $IPINST -name "MEM_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "MEM_INIT" -parent ${Page_0} - ipgui::add_param $IPINST -name "MEM_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "NSTREAMS" -parent ${Page_0} - ipgui::add_param $IPINST -name "RAM_STYLE" -parent ${Page_0} -widget comboBox - ipgui::add_param $IPINST -name "STRM0_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM0_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM0_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM1_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM1_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM1_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM2_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM2_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM2_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM3_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM3_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM3_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM4_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM4_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM4_WIDTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM5_DEPTH" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM5_OFFSET" -parent ${Page_0} - ipgui::add_param $IPINST -name "STRM5_WIDTH" -parent ${Page_0} - - -} - -proc update_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.MEM_DEPTH PARAM_VALUE.MEM_WIDTH } { - # Procedure called to update AXILITE_ADDR_WIDTH when any of the dependent parameters in the arguments change - set AXILITE_ADDR_WIDTH ${PARAM_VALUE.AXILITE_ADDR_WIDTH} - set MEM_DEPTH ${PARAM_VALUE.MEM_DEPTH} - set MEM_WIDTH ${PARAM_VALUE.MEM_WIDTH} - set values(MEM_DEPTH) [get_property value $MEM_DEPTH] - set values(MEM_WIDTH) [get_property value $MEM_WIDTH] - set_property value [gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE $values(MEM_DEPTH) $values(MEM_WIDTH)] $AXILITE_ADDR_WIDTH -} - -proc validate_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH } { - # Procedure called to validate AXILITE_ADDR_WIDTH - return true -} - -proc update_PARAM_VALUE.CONFIG_EN { PARAM_VALUE.CONFIG_EN } { - # Procedure called to update CONFIG_EN when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.CONFIG_EN { PARAM_VALUE.CONFIG_EN } { - # Procedure called to validate CONFIG_EN - return true -} - -proc update_PARAM_VALUE.MEM_DEPTH { PARAM_VALUE.MEM_DEPTH } { - # Procedure called to update MEM_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.MEM_DEPTH { PARAM_VALUE.MEM_DEPTH } { - # Procedure called to validate MEM_DEPTH - return true -} - -proc update_PARAM_VALUE.MEM_INIT { PARAM_VALUE.MEM_INIT } { - # Procedure called to update MEM_INIT when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.MEM_INIT { PARAM_VALUE.MEM_INIT } { - # Procedure called to validate MEM_INIT - return true -} - -proc update_PARAM_VALUE.MEM_WIDTH { PARAM_VALUE.MEM_WIDTH } { - # Procedure called to update 
MEM_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.MEM_WIDTH { PARAM_VALUE.MEM_WIDTH } { - # Procedure called to validate MEM_WIDTH - return true -} - -proc update_PARAM_VALUE.NSTREAMS { PARAM_VALUE.NSTREAMS } { - # Procedure called to update NSTREAMS when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.NSTREAMS { PARAM_VALUE.NSTREAMS } { - # Procedure called to validate NSTREAMS - return true -} - -proc update_PARAM_VALUE.RAM_STYLE { PARAM_VALUE.RAM_STYLE } { - # Procedure called to update RAM_STYLE when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.RAM_STYLE { PARAM_VALUE.RAM_STYLE } { - # Procedure called to validate RAM_STYLE - return true -} - -proc update_PARAM_VALUE.STRM0_DEPTH { PARAM_VALUE.STRM0_DEPTH } { - # Procedure called to update STRM0_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM0_DEPTH { PARAM_VALUE.STRM0_DEPTH } { - # Procedure called to validate STRM0_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM0_OFFSET { PARAM_VALUE.STRM0_OFFSET } { - # Procedure called to update STRM0_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM0_OFFSET { PARAM_VALUE.STRM0_OFFSET } { - # Procedure called to validate STRM0_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM0_WIDTH { PARAM_VALUE.STRM0_WIDTH } { - # Procedure called to update STRM0_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM0_WIDTH { PARAM_VALUE.STRM0_WIDTH } { - # Procedure called to validate STRM0_WIDTH - return true -} - -proc update_PARAM_VALUE.STRM1_DEPTH { PARAM_VALUE.STRM1_DEPTH } { - # Procedure called to update STRM1_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM1_DEPTH { PARAM_VALUE.STRM1_DEPTH } { - # Procedure called to validate STRM1_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM1_OFFSET { PARAM_VALUE.STRM1_OFFSET } { - # Procedure called to update STRM1_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM1_OFFSET { PARAM_VALUE.STRM1_OFFSET } { - # Procedure called to validate STRM1_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM1_WIDTH { PARAM_VALUE.STRM1_WIDTH } { - # Procedure called to update STRM1_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM1_WIDTH { PARAM_VALUE.STRM1_WIDTH } { - # Procedure called to validate STRM1_WIDTH - return true -} - -proc update_PARAM_VALUE.STRM2_DEPTH { PARAM_VALUE.STRM2_DEPTH } { - # Procedure called to update STRM2_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM2_DEPTH { PARAM_VALUE.STRM2_DEPTH } { - # Procedure called to validate STRM2_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM2_OFFSET { PARAM_VALUE.STRM2_OFFSET } { - # Procedure called to update STRM2_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM2_OFFSET { PARAM_VALUE.STRM2_OFFSET } { - # Procedure called to validate STRM2_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM2_WIDTH { PARAM_VALUE.STRM2_WIDTH } { - # Procedure called to update STRM2_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM2_WIDTH { PARAM_VALUE.STRM2_WIDTH } { - # Procedure called to 
validate STRM2_WIDTH - return true -} - -proc update_PARAM_VALUE.STRM3_DEPTH { PARAM_VALUE.STRM3_DEPTH } { - # Procedure called to update STRM3_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM3_DEPTH { PARAM_VALUE.STRM3_DEPTH } { - # Procedure called to validate STRM3_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM3_OFFSET { PARAM_VALUE.STRM3_OFFSET } { - # Procedure called to update STRM3_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM3_OFFSET { PARAM_VALUE.STRM3_OFFSET } { - # Procedure called to validate STRM3_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM3_WIDTH { PARAM_VALUE.STRM3_WIDTH } { - # Procedure called to update STRM3_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM3_WIDTH { PARAM_VALUE.STRM3_WIDTH } { - # Procedure called to validate STRM3_WIDTH - return true -} - -proc update_PARAM_VALUE.STRM4_DEPTH { PARAM_VALUE.STRM4_DEPTH } { - # Procedure called to update STRM4_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM4_DEPTH { PARAM_VALUE.STRM4_DEPTH } { - # Procedure called to validate STRM4_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM4_OFFSET { PARAM_VALUE.STRM4_OFFSET } { - # Procedure called to update STRM4_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM4_OFFSET { PARAM_VALUE.STRM4_OFFSET } { - # Procedure called to validate STRM4_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM4_WIDTH { PARAM_VALUE.STRM4_WIDTH } { - # Procedure called to update STRM4_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM4_WIDTH { PARAM_VALUE.STRM4_WIDTH } { - # Procedure called to validate STRM4_WIDTH - return true -} - -proc update_PARAM_VALUE.STRM5_DEPTH { PARAM_VALUE.STRM5_DEPTH } { - # Procedure called to update STRM5_DEPTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM5_DEPTH { PARAM_VALUE.STRM5_DEPTH } { - # Procedure called to validate STRM5_DEPTH - return true -} - -proc update_PARAM_VALUE.STRM5_OFFSET { PARAM_VALUE.STRM5_OFFSET } { - # Procedure called to update STRM5_OFFSET when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM5_OFFSET { PARAM_VALUE.STRM5_OFFSET } { - # Procedure called to validate STRM5_OFFSET - return true -} - -proc update_PARAM_VALUE.STRM5_WIDTH { PARAM_VALUE.STRM5_WIDTH } { - # Procedure called to update STRM5_WIDTH when any of the dependent parameters in the arguments change -} - -proc validate_PARAM_VALUE.STRM5_WIDTH { PARAM_VALUE.STRM5_WIDTH } { - # Procedure called to validate STRM5_WIDTH - return true -} - - -proc update_MODELPARAM_VALUE.CONFIG_EN { MODELPARAM_VALUE.CONFIG_EN PARAM_VALUE.CONFIG_EN } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.CONFIG_EN}] ${MODELPARAM_VALUE.CONFIG_EN} -} - -proc update_MODELPARAM_VALUE.NSTREAMS { MODELPARAM_VALUE.NSTREAMS PARAM_VALUE.NSTREAMS } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.NSTREAMS}] ${MODELPARAM_VALUE.NSTREAMS} -} - -proc update_MODELPARAM_VALUE.MEM_DEPTH { MODELPARAM_VALUE.MEM_DEPTH PARAM_VALUE.MEM_DEPTH } { - # Procedure called to set VHDL 
generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.MEM_DEPTH}] ${MODELPARAM_VALUE.MEM_DEPTH} -} - -proc update_MODELPARAM_VALUE.MEM_WIDTH { MODELPARAM_VALUE.MEM_WIDTH PARAM_VALUE.MEM_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.MEM_WIDTH}] ${MODELPARAM_VALUE.MEM_WIDTH} -} - -proc update_MODELPARAM_VALUE.MEM_INIT { MODELPARAM_VALUE.MEM_INIT PARAM_VALUE.MEM_INIT } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.MEM_INIT}] ${MODELPARAM_VALUE.MEM_INIT} -} - -proc update_MODELPARAM_VALUE.RAM_STYLE { MODELPARAM_VALUE.RAM_STYLE PARAM_VALUE.RAM_STYLE } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.RAM_STYLE}] ${MODELPARAM_VALUE.RAM_STYLE} -} - -proc update_MODELPARAM_VALUE.STRM0_WIDTH { MODELPARAM_VALUE.STRM0_WIDTH PARAM_VALUE.STRM0_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM0_WIDTH}] ${MODELPARAM_VALUE.STRM0_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM1_WIDTH { MODELPARAM_VALUE.STRM1_WIDTH PARAM_VALUE.STRM1_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM1_WIDTH}] ${MODELPARAM_VALUE.STRM1_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM2_WIDTH { MODELPARAM_VALUE.STRM2_WIDTH PARAM_VALUE.STRM2_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM2_WIDTH}] ${MODELPARAM_VALUE.STRM2_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM3_WIDTH { MODELPARAM_VALUE.STRM3_WIDTH PARAM_VALUE.STRM3_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM3_WIDTH}] ${MODELPARAM_VALUE.STRM3_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM4_WIDTH { MODELPARAM_VALUE.STRM4_WIDTH PARAM_VALUE.STRM4_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM4_WIDTH}] ${MODELPARAM_VALUE.STRM4_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM5_WIDTH { MODELPARAM_VALUE.STRM5_WIDTH PARAM_VALUE.STRM5_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM5_WIDTH}] ${MODELPARAM_VALUE.STRM5_WIDTH} -} - -proc update_MODELPARAM_VALUE.STRM0_DEPTH { MODELPARAM_VALUE.STRM0_DEPTH PARAM_VALUE.STRM0_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM0_DEPTH}] ${MODELPARAM_VALUE.STRM0_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM1_DEPTH { MODELPARAM_VALUE.STRM1_DEPTH PARAM_VALUE.STRM1_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM1_DEPTH}] ${MODELPARAM_VALUE.STRM1_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM2_DEPTH { MODELPARAM_VALUE.STRM2_DEPTH 
PARAM_VALUE.STRM2_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM2_DEPTH}] ${MODELPARAM_VALUE.STRM2_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM3_DEPTH { MODELPARAM_VALUE.STRM3_DEPTH PARAM_VALUE.STRM3_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM3_DEPTH}] ${MODELPARAM_VALUE.STRM3_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM4_DEPTH { MODELPARAM_VALUE.STRM4_DEPTH PARAM_VALUE.STRM4_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM4_DEPTH}] ${MODELPARAM_VALUE.STRM4_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM5_DEPTH { MODELPARAM_VALUE.STRM5_DEPTH PARAM_VALUE.STRM5_DEPTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM5_DEPTH}] ${MODELPARAM_VALUE.STRM5_DEPTH} -} - -proc update_MODELPARAM_VALUE.STRM0_OFFSET { MODELPARAM_VALUE.STRM0_OFFSET PARAM_VALUE.STRM0_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM0_OFFSET}] ${MODELPARAM_VALUE.STRM0_OFFSET} -} - -proc update_MODELPARAM_VALUE.STRM1_OFFSET { MODELPARAM_VALUE.STRM1_OFFSET PARAM_VALUE.STRM1_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM1_OFFSET}] ${MODELPARAM_VALUE.STRM1_OFFSET} -} - -proc update_MODELPARAM_VALUE.STRM2_OFFSET { MODELPARAM_VALUE.STRM2_OFFSET PARAM_VALUE.STRM2_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM2_OFFSET}] ${MODELPARAM_VALUE.STRM2_OFFSET} -} - -proc update_MODELPARAM_VALUE.STRM3_OFFSET { MODELPARAM_VALUE.STRM3_OFFSET PARAM_VALUE.STRM3_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM3_OFFSET}] ${MODELPARAM_VALUE.STRM3_OFFSET} -} - -proc update_MODELPARAM_VALUE.STRM4_OFFSET { MODELPARAM_VALUE.STRM4_OFFSET PARAM_VALUE.STRM4_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM4_OFFSET}] ${MODELPARAM_VALUE.STRM4_OFFSET} -} - -proc update_MODELPARAM_VALUE.STRM5_OFFSET { MODELPARAM_VALUE.STRM5_OFFSET PARAM_VALUE.STRM5_OFFSET } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.STRM5_OFFSET}] ${MODELPARAM_VALUE.STRM5_OFFSET} -} - -proc update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.AXILITE_ADDR_WIDTH } { - # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - set_property value [get_property value ${PARAM_VALUE.AXILITE_ADDR_WIDTH}] ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} -} From 39e4c313918ff85d1d2fb6105b10bef0424d29ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 18 Apr 2023 20:55:34 +0100 Subject: [PATCH 465/628] Adding FINN instantiation template for 
revised memstream module. --- finn-rtllib/memstream/hdl/memstream_axi.sv | 14 +-- .../memstream/hdl/memstream_axi_wrapper.v | 116 ++++++++++++++++++ 2 files changed, 123 insertions(+), 7 deletions(-) create mode 100644 finn-rtllib/memstream/hdl/memstream_axi_wrapper.v diff --git a/finn-rtllib/memstream/hdl/memstream_axi.sv b/finn-rtllib/memstream/hdl/memstream_axi.sv index 620d9ec1de..ee64bdd057 100644 --- a/finn-rtllib/memstream/hdl/memstream_axi.sv +++ b/finn-rtllib/memstream/hdl/memstream_axi.sv @@ -59,15 +59,15 @@ module memstream_axi #( output logic [1:0] bresp, // AXI-lite Read - output loigc arready, - input loigc arvalid, - input loigc [2:0] arprot, - input loigc [AXILITE_ADDR_WIDTH-1:0] araddr, + output logic arready, + input logic arvalid, + input logic [2:0] arprot, + input logic [AXILITE_ADDR_WIDTH-1:0] araddr, - input loigc rready, - output loigc rvalid, + input logic rready, + output logic rvalid, output logic [ 1:0] rresp, - output loigc [31:0] rdata, + output logic [31:0] rdata, // Continuous output stream input logic m_axis_0_tready, diff --git a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v new file mode 100644 index 0000000000..2982dd8672 --- /dev/null +++ b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v @@ -0,0 +1,116 @@ +/** + * Copyright (c) 2023, Xilinx + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * * Neither the name of FINN nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @author Thomas B. 
Preußer + */ + +module memstream_axi_wrapper #( + parameter DEPTH = $DEPTH$, + parameter WIDTH = $WIDTH$, + + parameter INIT_FILE = $INIT_FILE$, + parameter RAM_STYLE = $RAM_STYLE$, + + localparam AXILITE_ADDR_WIDTH = $clog2(DEPTH * (2**$clog2((WIDTH+31)/32))) + 2 +)( + // Global Control + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0" *) + input ap_clk, + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0" *) + input ap_rst_n, + + // AXI-lite Write + output awready, + input awvalid, + input [2:0] awprot, + input [AXILITE_ADDR_WIDTH-1:0] awaddr, + + output wready, + input wvalid, + input [31:0] wdata, + input [ 3:0] wstrb, + + input bready, + output bvalid, + output [1:0] bresp, + + // AXI-lite Read + output arready, + input arvalid, + input [2:0] arprot, + input [AXILITE_ADDR_WIDTH-1:0] araddr, + + input rready, + output rvalid, + output [ 1:0] rresp, + output [31:0] rdata, + + // Continuous output stream + input m_axis_0_tready, + output m_axis_0_tvalid, + output [((WIDTH+7)/8)*8-1:0] m_axis_0_tdata +); + + memstream_axi #( + .DEPTH(DEPTH), .WIDTH(WIDTH), + .INIT_FILE(INIT_FILE), + .RAM_STYLE(RAM_STYLE) + ) core ( + .clk(ap_clk), .rst(!ap_rst_n), + + // AXI-lite Write + .awready(awready), + .awvalid(awvalid), + .awprot(awprot), + .awaddr(awaddr), + .wready(wready), + .wvalid(wvalid), + .wdata(wdata), + .wstrb(wstrb), + .bready(bready), + .bvalid(bvalid), + .bresp(bresp), + + // AXI-lite Read + .arready(arready), + .arvalid(arvalid), + .arprot(arprot), + .araddr(araddr), + .rready(rready), + .rvalid(rvalid), + .rresp(rresp), + .rdata(rdata), + + // Continuous output stream + .m_axis_0_tready(m_axis_0_tready), + .m_axis_0_tvalid(m_axis_0_tvalid), + .m_axis_0_tdata(m_axis_0_tdata) + ); + +endmodule : memstream_axi_wrapper From aa1ea2e95f9023d70e09ad2f9966a68cf4416a16 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 19 Apr 2023 16:31:09 +0100 Subject: [PATCH 466/628] [Tests] Run linting on rtl swg test --- tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index 2f3ad0a23d..e8236c0c6b 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -204,7 +204,7 @@ def test_fpgadataflow_slidingwindow_rtl( ) if (stride_h > k_h) or (stride_w > k_w) and not parallel_window: pytest.skip( - "Not all combinations for stride > k edge case supported in default mode" + "Not all combinations for stride > k edge case supported in default mode" ) if k_h == 1 and k_w == 1 and simd != ifm_ch: pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") From 1155ac40183efc6da784d22ce9da395e7c36a3ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 21 Apr 2023 15:16:24 +0100 Subject: [PATCH 467/628] Prepare for memstream instantiation as an IP core. 
--- finn-rtllib/memstream/component.xml | 835 ++++++++++++++++++ finn-rtllib/memstream/hdl/memstream_axi.sv | 2 +- .../memstream/hdl/memstream_axi_wrapper.v | 18 +- finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 76 ++ .../fpgadataflow/matrixvectoractivation.py | 42 +- 5 files changed, 932 insertions(+), 41 deletions(-) create mode 100644 finn-rtllib/memstream/component.xml create mode 100644 finn-rtllib/memstream/xgui/memstream_v1_0.tcl diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml new file mode 100644 index 0000000000..191454ed61 --- /dev/null +++ b/finn-rtllib/memstream/component.xml @@ -0,0 +1,835 @@ + + + amd.com + FINN + memstream + 1.0 + + + m_axis_0 + + + + + + + TDATA + + + m_axis_0_tdata + + + + + TVALID + + + m_axis_0_tvalid + + + + + TREADY + + + m_axis_0_tready + + + + + + aximm + + + + + + + + + AWADDR + + + awaddr + + + + + AWPROT + + + awprot + + + + + AWVALID + + + awvalid + + + + + AWREADY + + + awready + + + + + WDATA + + + wdata + + + + + WSTRB + + + wstrb + + + + + WVALID + + + wvalid + + + + + WREADY + + + wready + + + + + BRESP + + + bresp + + + + + BVALID + + + bvalid + + + + + BREADY + + + bready + + + + + ARADDR + + + araddr + + + + + ARPROT + + + arprot + + + + + ARVALID + + + arvalid + + + + + ARREADY + + + arready + + + + + RDATA + + + rdata + + + + + RRESP + + + rresp + + + + + RVALID + + + rvalid + + + + + RREADY + + + rready + + + + + + ap_rst_n + + + + + + + RST + + + ap_rst_n + + + + + + POLARITY + ACTIVE_LOW + + + ASSOCIATED_BUSIF + m_axis_0 + + + + + ap_clk + + + + + + + CLK + + + ap_clk + + + + + + ASSOCIATED_RESET + ap_rst_n + + + ASSOCIATED_BUSIF + m_axis_0:interface_aximm + + + + + + + interface_aximm + interface_aximm + + reg0 + reg0 + 0x0 + 4096 + 32 + register + + + + + + + xilinx_anylanguagesynthesis + Synthesis + :vivado.xilinx.com:synthesis + SystemVerilog + memstream_axi_wrapper + + xilinx_anylanguagesynthesis_view_fileset + + + + viewChecksum + e498d456 + + + + + xilinx_xpgui + UI Layout + :vivado.xilinx.com:xgui.ui + + xilinx_xpgui_view_fileset + + + + viewChecksum + 91d40e29 + + + + + + + ap_clk + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + + + ap_rst_n + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + + + awready + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + awvalid + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 0 + + + + + awprot + + in + + 2 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 0 + + + + + awaddr + + in + + 10 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 0 + + + + + wready + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + wvalid + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 0 + + + + + wdata + + in + + 31 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 0 + + + + + wstrb + + in + + 3 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 1 + + + + + bready + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 0 + + + + + bvalid + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + bresp + + out + + 1 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + + + arready + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + arvalid + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 0 + + + + + arprot + + in + + 2 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 0 + + + + + araddr + + in + + 10 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + 0 + 
+ + + + rready + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 0 + + + + + rvalid + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + rresp + + out + + 1 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + + + rdata + + out + + 31 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + + + m_axis_0_tready + + in + + + std_logic + xilinx_anylanguagesynthesis + + + + 1 + + + + + m_axis_0_tvalid + + out + + + std_logic + xilinx_anylanguagesynthesis + + + + + + m_axis_0_tdata + + out + + 31 + 0 + + + + std_logic_vector + xilinx_anylanguagesynthesis + + + + + + + + DEPTH + Depth + 512 + + + WIDTH + Width + 32 + + + INIT_FILE + Init File + + + + RAM_STYLE + Ram Style + auto + + + AXILITE_ADDR_WIDTH + Axilite Addr Width + 11 + + + + + + choice_list_9d8b0d81 + ACTIVE_HIGH + ACTIVE_LOW + + + + + xilinx_anylanguagesynthesis_view_fileset + + hdl/axilite_if.v + verilogSource + + + hdl/memstream.sv + systemVerilogSource + + + hdl/memstream_axi.sv + systemVerilogSource + + + hdl/memstream_axi_wrapper.v + verilogSource + CHECKSUM_0ce7d8fc + + + + xilinx_xpgui_view_fileset + + xgui/memstream_v1_0.tcl + tclSource + CHECKSUM_91d40e29 + XGUI_VERSION_2 + + + + memstream + + + DEPTH + Depth + 512 + + + + required + + + + + + WIDTH + Width + 32 + + + + required + + + + + + INIT_FILE + Init File + + + + RAM_STYLE + Ram Style + auto + + + Component_Name + memstream_axi_wrapper_v1_0 + + + + + + virtex7 + qvirtex7 + versal + kintex7 + kintex7l + qkintex7 + qkintex7l + akintex7 + artix7 + artix7l + aartix7 + qartix7 + zynq + qzynq + azynq + spartan7 + aspartan7 + virtexu + zynquplus + virtexuplus + virtexuplusHBM + virtexuplus58g + kintexuplus + artixuplus + kintexu + + + /UserIP + + memstream + level_0 + package_project + AMD + 1 + + user.org:user:memstream_axi_wrapper:1.0 + + 2023-04-21T12:20:38Z + + + + + + 2022.1 + + + + + + + + + + + + + + diff --git a/finn-rtllib/memstream/hdl/memstream_axi.sv b/finn-rtllib/memstream/hdl/memstream_axi.sv index ee64bdd057..136bcb1d7e 100644 --- a/finn-rtllib/memstream/hdl/memstream_axi.sv +++ b/finn-rtllib/memstream/hdl/memstream_axi.sv @@ -130,7 +130,7 @@ module memstream_axi #( .odat(m_axis_0_tdata[WIDTH-1:0]) ); if($bits(m_axis_0_tdata) > WIDTH) begin - assign m_axis_0_tdata[$left(m_axis_0_tdata):WIDTH] <= '0; + assign m_axis_0_tdata[$left(m_axis_0_tdata):WIDTH] = '0; end endmodule : memstream_axi diff --git a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v index 2982dd8672..69d6b64dec 100644 --- a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v +++ b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v @@ -31,13 +31,13 @@ */ module memstream_axi_wrapper #( - parameter DEPTH = $DEPTH$, - parameter WIDTH = $WIDTH$, + parameter DEPTH = 512, + parameter WIDTH = 32, - parameter INIT_FILE = $INIT_FILE$, - parameter RAM_STYLE = $RAM_STYLE$, + parameter INIT_FILE = "", + parameter RAM_STYLE = "auto", - localparam AXILITE_ADDR_WIDTH = $clog2(DEPTH * (2**$clog2((WIDTH+31)/32))) + 2 + parameter AXILITE_ADDR_WIDTH = $clog2(DEPTH * (2**$clog2((WIDTH+31)/32))) + 2 )( // Global Control (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0" *) @@ -77,9 +77,15 @@ module memstream_axi_wrapper #( output [((WIDTH+7)/8)*8-1:0] m_axis_0_tdata ); + localparam INIT_FILTERED = +`ifdef SYNTHESIS + RAM_STYLE == "ultra"? 
"" : +`endif + INIT_FILE; + memstream_axi #( .DEPTH(DEPTH), .WIDTH(WIDTH), - .INIT_FILE(INIT_FILE), + .INIT_FILE(INIT_FILTERED), .RAM_STYLE(RAM_STYLE) ) core ( .clk(ap_clk), .rst(!ap_rst_n), diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl new file mode 100644 index 0000000000..7feac1fbe3 --- /dev/null +++ b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl @@ -0,0 +1,76 @@ +# Definitional proc to organize widgets for parameters. +proc init_gui { IPINST } { + ipgui::add_param $IPINST -name "Component_Name" + #Adding Page + set Page_0 [ipgui::add_page $IPINST -name "Page 0"] + ipgui::add_param $IPINST -name "DEPTH" -parent ${Page_0} + ipgui::add_param $IPINST -name "INIT_FILE" -parent ${Page_0} + ipgui::add_param $IPINST -name "RAM_STYLE" -parent ${Page_0} + ipgui::add_param $IPINST -name "WIDTH" -parent ${Page_0} + + +} + +proc update_PARAM_VALUE.DEPTH { PARAM_VALUE.DEPTH } { + # Procedure called to update DEPTH when any of the dependent parameters in the arguments change +} + +proc validate_PARAM_VALUE.DEPTH { PARAM_VALUE.DEPTH } { + # Procedure called to validate DEPTH + return true +} + +proc update_PARAM_VALUE.INIT_FILE { PARAM_VALUE.INIT_FILE } { + # Procedure called to update INIT_FILE when any of the dependent parameters in the arguments change +} + +proc validate_PARAM_VALUE.INIT_FILE { PARAM_VALUE.INIT_FILE } { + # Procedure called to validate INIT_FILE + return true +} + +proc update_PARAM_VALUE.RAM_STYLE { PARAM_VALUE.RAM_STYLE } { + # Procedure called to update RAM_STYLE when any of the dependent parameters in the arguments change +} + +proc validate_PARAM_VALUE.RAM_STYLE { PARAM_VALUE.RAM_STYLE } { + # Procedure called to validate RAM_STYLE + return true +} + +proc update_PARAM_VALUE.WIDTH { PARAM_VALUE.WIDTH } { + # Procedure called to update WIDTH when any of the dependent parameters in the arguments change +} + +proc validate_PARAM_VALUE.WIDTH { PARAM_VALUE.WIDTH } { + # Procedure called to validate WIDTH + return true +} + + +proc update_MODELPARAM_VALUE.DEPTH { MODELPARAM_VALUE.DEPTH PARAM_VALUE.DEPTH } { + # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value + set_property value [get_property value ${PARAM_VALUE.DEPTH}] ${MODELPARAM_VALUE.DEPTH} +} + +proc update_MODELPARAM_VALUE.WIDTH { MODELPARAM_VALUE.WIDTH PARAM_VALUE.WIDTH } { + # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value + set_property value [get_property value ${PARAM_VALUE.WIDTH}] ${MODELPARAM_VALUE.WIDTH} +} + +proc update_MODELPARAM_VALUE.INIT_FILE { MODELPARAM_VALUE.INIT_FILE PARAM_VALUE.INIT_FILE } { + # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value + set_property value [get_property value ${PARAM_VALUE.INIT_FILE}] ${MODELPARAM_VALUE.INIT_FILE} +} + +proc update_MODELPARAM_VALUE.RAM_STYLE { MODELPARAM_VALUE.RAM_STYLE PARAM_VALUE.RAM_STYLE } { + # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value + set_property value [get_property value ${PARAM_VALUE.RAM_STYLE}] ${MODELPARAM_VALUE.RAM_STYLE} +} + +proc update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_WIDTH } { + # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value + # WARNING: There is no corresponding user parameter named "AXILITE_ADDR_WIDTH". Setting updated value from the model parameter. 
+set_property value 11 ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} +} + diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index aa987384dd..68ef4cb6fb 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -867,29 +867,9 @@ def generate_params(self, model, path): self.make_weight_file(weights, "decoupled_npy", weight_filename_sim) if mem_mode == "decoupled": # also save weights as Verilog .dat file - # note that we provide two different .dat files, one for synth - # and one for synthesis. this is because URAM-based weights always - # need zero weights for synthesis, otherwise they get inferred - # as BRAM - weight_filename_rtl_synth = "{}/memblock_synth_0.dat".format( - code_gen_dir - ) - weight_filename_rtl_sim = "{}/memblock_sim_0.dat".format(code_gen_dir) - # sim weights are always the true weights - self.make_weight_file( - weights, "decoupled_verilog_dat", weight_filename_rtl_sim - ) - ram_style = self.get_nodeattr("ram_style") - if ram_style == "ultra": - # UltraRAM must have no memory initializer, or only zeroes - # otherwise BRAM will be inferred instead of URAM - # as a workaround we provide a zero-weight init here - synth_weights = np.zeros_like(weights, dtype=np.float32) - else: - synth_weights = weights - self.make_weight_file( - synth_weights, "decoupled_verilog_dat", weight_filename_rtl_synth - ) + # This file will be ignored when synthesizing UltraScale memory. + weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) + self.make_weight_file(weights, "decoupled_verilog_dat", weight_filename_rtl) else: raise Exception( """Please set mem_mode to "const", "decoupled", or "external", @@ -1387,24 +1367,18 @@ def code_generation_ipi(self): ) cmd.append( "set_property -dict [list " - "CONFIG.NSTREAMS {1} " - "CONFIG.MEM_DEPTH {%d} " - "CONFIG.MEM_WIDTH {%d} " - "CONFIG.MEM_INIT {%s} " + "CONFIG.DEPTH {%d} " + "CONFIG.WIDTH {%d} " + "CONFIG.INIT_FILE {%s} " "CONFIG.RAM_STYLE {%s} " - "CONFIG.STRM0_DEPTH {%d} " - "CONFIG.STRM0_WIDTH {%d} " - "CONFIG.STRM0_OFFSET {0} " "] [get_bd_cells /%s/%s]" % ( self.calc_wmem(), self.get_weightstream_width_padded(), - self.get_nodeattr("code_gen_dir_ipgen") + "/", + self.get_nodeattr("code_gen_dir_ipgen") + "/memblock.dat", self.get_nodeattr("ram_style"), - self.calc_wmem(), - self.get_weightstream_width_padded(), node_name, - strm_inst, + strm_inst ) ) cmd.append( From c179c0cdc03351339287a984ef771e513ee4cce9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 21 Apr 2023 16:20:43 +0100 Subject: [PATCH 468/628] Renaming configuration interface to s_axilite. 
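
The AXI-lite bus interface exported by the packaged IP is renamed from "aximm" to
"s_axilite", presumably to match the naming convention used for configuration ports
elsewhere in the FINN stitched-IP flow; the 4 KiB reg0 address block behind it is
untouched by this diff. This is the port through which the streamed weight/threshold
memory can be written at run time. A rough host-side sketch only — write_u32 is a
hypothetical MMIO helper, and the byte offsets follow the row-major, power-of-two-stride
layout implied by the AXILITE_ADDR_WIDTH expression, so they should be confirmed against
memstream_axi.sv before use:

    def refresh_weights(write_u32, rows, width_bits):
        # rows: one Python int per memory row, packing WIDTH bits little-endian.
        words_per_row = -(-width_bits // 32)            # ceil(WIDTH / 32)
        stride = 1 << (words_per_row - 1).bit_length()  # rounded up to a power of two
        for r, row in enumerate(rows):
            for w in range(words_per_row):
                # assumption: word w of row r sits at byte offset 4*(r*stride + w);
                # verify this mapping against memstream_axi.sv.
                write_u32(4 * (r * stride + w), (row >> (32 * w)) & 0xFFFFFFFF)
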
--- finn-rtllib/memstream/component.xml | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 191454ed61..2705f61908 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -38,7 +38,7 @@ - aximm + s_axilite @@ -247,7 +247,11 @@ ASSOCIATED_BUSIF - m_axis_0:interface_aximm + m_axis_0:s_axilite + + + FREQ_TOLERANCE_HZ + -1 @@ -280,7 +284,7 @@ viewChecksum - e498d456 + 4d23c8e5 @@ -689,7 +693,7 @@ AXILITE_ADDR_WIDTH Axilite Addr Width - 11 + 11 @@ -718,7 +722,7 @@ hdl/memstream_axi_wrapper.v verilogSource - CHECKSUM_0ce7d8fc + CHECKSUM_a3b36ea4 @@ -808,22 +812,22 @@ level_0 package_project AMD - 1 + 2 user.org:user:memstream_axi_wrapper:1.0 - 2023-04-21T12:20:38Z + 2023-04-21T15:18:55Z 2022.1 - + - - - + + + From 763d876eba9240b52d92125772e6972342c0b5c5 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 21 Apr 2023 16:36:13 +0100 Subject: [PATCH 469/628] [finn-rtllib] Re-instantiate FIFO rtl implementation --- finn-rtllib/memstream/hdl/Q_srl.v | 308 ++++++++++++++++++++++++++++++ 1 file changed, 308 insertions(+) create mode 100644 finn-rtllib/memstream/hdl/Q_srl.v diff --git a/finn-rtllib/memstream/hdl/Q_srl.v b/finn-rtllib/memstream/hdl/Q_srl.v new file mode 100644 index 0000000000..11cef604e0 --- /dev/null +++ b/finn-rtllib/memstream/hdl/Q_srl.v @@ -0,0 +1,308 @@ +// original source: +// https://github.com/nachiket/tdfc/blob/master/verilog/queues/Q_srl_oreg3_prefull_SIMPLE.v + + +// Copyright (c) 1999 The Regents of the University of California +// Copyright (c) 2010 The Regents of the University of Pennsylvania +// Copyright (c) 2011 Department of Electrical and Electronic Engineering, Imperial College London +// Copyright (c) 2020 Xilinx +// +// Permission to use, copy, modify, and distribute this software and +// its documentation for any purpose, without fee, and without a +// written agreement is hereby granted, provided that the above copyright +// notice and this paragraph and the following two paragraphs appear in +// all copies. +// +// IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR +// DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING +// LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, +// EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF +// SUCH DAMAGE. +// +// THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +// AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON +// AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO +// PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
+// + +// Q_srl_oreg3_prefull_SIMPLE.v +// +// - In-page queue with parameterizable depth, bit width +// - Stream I/O is triple (data, valid, back-pressure), +// with EOS concatenated into the data +// - Flow control for input & output is combinationally decoupled +// - 2 <= depth <= 256 +// * (depth >= 2) is required to decouple I/O flow control, +// where empty => no produce, full => no consume, +// and depth 1 would ping-pong between the two at half rate +// * (depth <= 256) can be modified +// by changing ''synthesis loop_limit X'' below +// and changing ''addrwidth'' or its log computation +// - 1 <= width +// - Queue storage is in SRL16E, up to depth 16 per LUT per bit-slice, +// plus output register (for fast output) +// - Queue addressing is done by ''addr'' up-down counter +// - Queue fullness is checked by comparator (addr==depth) +// - Queue fullness is pre-computed for next cycle +// - Queue input back-pressure is pre-computed for next cycle +// - Queue output valid (state!=state__empty) is pre-computed for next cycle +// (necessary since SRL data output reg requires non-boolean state) +// - FSM has 3 states (empty, one, more) +// - When empty, continue to emit most recently emitted value (for debugging) +// +// - Queue slots used = / (state==state_empty) ? 0 +// | (state==state_one) ? 1 +// \ (state==state_more) ? addr+2 +// - Queue slots used <= depth +// - Queue slots remaining = depth - used +// = / (state==state_empty) ? depth +// | (state==state_one) ? depth-1 +// \ (state==state_more) ? depth-2-addr +// +// - Synplify 7.1 / 8.0 +// - Eylon Caspi, 9/11/03, 8/18/04, 3/29/05 + + +`ifdef Q_srl +`else +`define Q_srl + + +module Q_srl (clock, reset, i_d, i_v, i_r, o_d, o_v, o_r, count, maxcount); + + parameter depth = 16; // - greatest #items in queue (2 <= depth <= 256) + parameter width = 16; // - width of data (i_d, o_d) + + parameter addrwidth = $clog2(depth); + + input clock; + input reset; + + input [width-1:0] i_d; // - input stream data (concat data + eos) + input i_v; // - input stream valid + output i_r; // - input stream ready + wire i_b; // - input stream back-pressure + + output [width-1:0] o_d; // - output stream data (concat data + eos) + output o_v; // - output stream valid + input o_r; // - output stream ready + wire o_b; // - output stream back-pressure + + output [addrwidth:0] count; // - output number of elems in queue + output [addrwidth:0] maxcount; // - maximum observed count since reset + + reg [addrwidth:0] maxcount_reg; // - maximum count seen until now + reg [addrwidth-1:0] addr, addr_, a_; // - SRL16 address + // for data output + reg shift_en_; // - SRL16 shift enable + reg [width-1:0] srl [depth-2:0]; // - SRL16 memory + reg shift_en_o_; // - SRLO shift enable + reg [width-1:0] srlo_, srlo // - SRLO output reg + /* synthesis syn_allow_retiming=0 */ ; + + parameter state_empty = 2'd0; // - state empty : o_v=0 o_d=UNDEFINED + parameter state_one = 2'd1; // - state one : o_v=1 o_d=srlo + parameter state_more = 2'd2; // - state more : o_v=1 o_d=srlo + // #items in srl = addr+2 + + reg [1:0] state, state_; // - state register + + wire addr_full_; // - true iff addr==depth-2 on NEXT cycle + reg addr_full; // - true iff addr==depth-2 + wire addr_zero_; // - true iff addr==0 + wire o_v_reg_; // - true iff state_empty on NEXT cycle + reg o_v_reg // - true iff state_empty + /* synthesis syn_allow_retiming=0 */ ; + wire i_b_reg_; // - true iff !full on NEXT cycle + reg i_b_reg // - true iff !full + /* synthesis syn_allow_retiming=0 */ ; + + assign addr_full_ 
= (state_==state_more) && (addr_==depth-2); + // - queue full + assign addr_zero_ = (addr==0); // - queue contains 2 (or 1,0) + assign o_v_reg_ = (state_!=state_empty); // - output valid if non-empty + assign i_b_reg_ = addr_full_; // - input bp if full + assign o_d = srlo; // - output data from queue + assign o_v = o_v_reg; // - output valid if non-empty + assign i_b = i_b_reg; // - input bp if full + assign maxcount = maxcount_reg; + + assign i_r = !i_b; + assign o_b = !o_r; + + assign count = (state==state_more ? addr+2 : (state==state_one ? 1 : 0)); + + // - ''always'' block with both FFs and SRL16 does not work, + // since FFs need reset but SRL16 does not + + always @(posedge clock) begin // - seq always: FFs + if (reset) begin + state <= state_empty; + addr <= 0; + addr_full <= 0; + o_v_reg <= 0; + + i_b_reg <= 0; + maxcount_reg <= 0; + + end + else begin + state <= state_; + addr <= addr_; + addr_full <= addr_full_; + o_v_reg <= o_v_reg_; + i_b_reg <= i_b_reg_; + maxcount_reg <= (count > maxcount_reg ? count : maxcount_reg); + end + end // always @ (posedge clock) + + always @(posedge clock) begin // - seq always: srlo + // - infer enabled output reg at end of shift chain + // - input first element from i_d, all subsequent elements from SRL16 + if (reset) begin + srlo <= 0; + end + else begin + if (shift_en_o_) begin + srlo <= srlo_; + end + end + end // always @ (posedge clock) + + always @(posedge clock) begin // - seq always: srl + // - infer enabled SRL16E from shifting srl array + // - no reset capability; srl[] contents undefined on reset + if (shift_en_) begin + // synthesis loop_limit 256 + for (a_=depth-2; a_>0; a_=a_-1) begin + srl[a_] = srl[a_-1]; + end + srl[0] <= i_d; + end + end // always @ (posedge clock or negedge reset) + + always @* begin // - combi always + srlo_ <= 'bx; + shift_en_o_ <= 1'bx; + shift_en_ <= 1'bx; + addr_ <= 'bx; + state_ <= 2'bx; + case (state) + + state_empty: begin // - (empty, will not produce) + if (i_v) begin // - empty & i_v => consume + srlo_ <= i_d; + shift_en_o_ <= 1; + shift_en_ <= 1'bx; + addr_ <= 0; + state_ <= state_one; + end + else begin // - empty & !i_v => idle + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 1'bx; + addr_ <= 0; + state_ <= state_empty; + end + end + + state_one: begin // - (contains one) + if (i_v && o_b) begin // - one & i_v & o_b => consume + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 1; + addr_ <= 0; + state_ <= state_more; + end + else if (i_v && !o_b) begin // - one & i_v & !o_b => cons+prod + srlo_ <= i_d; + shift_en_o_ <= 1; + shift_en_ <= 1; + addr_ <= 0; + state_ <= state_one; + end + else if (!i_v && o_b) begin // - one & !i_v & o_b => idle + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 1'bx; + addr_ <= 0; + state_ <= state_one; + end + else if (!i_v && !o_b) begin // - one & !i_v & !o_b => produce + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 1'bx; + addr_ <= 0; + state_ <= state_empty; + end + end // case: state_one + + state_more: begin // - (contains more than one) + if (addr_full || (depth==2)) begin + // - (full, will not consume) + // - (full here if depth==2) + if (o_b) begin // - full & o_b => idle + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 0; + addr_ <= addr; + state_ <= state_more; + end + else begin // - full & !o_b => produce + srlo_ <= srl[addr]; + shift_en_o_ <= 1; + shift_en_ <= 0; +// addr_ <= addr-1; +// state_ <= state_more; + addr_ <= addr_zero_ ? 0 : addr-1; + state_ <= addr_zero_ ? 
state_one : state_more; + end + end + else begin // - (mid: neither empty nor full) + if (i_v && o_b) begin // - mid & i_v & o_b => consume + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 1; + addr_ <= addr+1; + state_ <= state_more; + end + else if (i_v && !o_b) begin // - mid & i_v & !o_b => cons+prod + srlo_ <= srl[addr]; + shift_en_o_ <= 1; + shift_en_ <= 1; + addr_ <= addr; + state_ <= state_more; + end + else if (!i_v && o_b) begin // - mid & !i_v & o_b => idle + srlo_ <= 'bx; + shift_en_o_ <= 0; + shift_en_ <= 0; + addr_ <= addr; + state_ <= state_more; + end + else if (!i_v && !o_b) begin // - mid & !i_v & !o_b => produce + srlo_ <= srl[addr]; + shift_en_o_ <= 1; + shift_en_ <= 0; + addr_ <= addr_zero_ ? 0 : addr-1; + state_ <= addr_zero_ ? state_one : state_more; + end + end // else: !if(addr_full) + end // case: state_more + + default: begin + srlo_ <= 'bx; + shift_en_o_ <= 1'bx; + shift_en_ <= 1'bx; + addr_ <= 'bx; + state_ <= 2'bx; + end // case: default + + endcase // case(state) + end // always @ * + +endmodule // Q_srl + + +`endif // `ifdef Q_srl From eb31a30ce4a4a8d0b3b6b670ac40ca9524732063 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 21 Apr 2023 16:46:58 +0100 Subject: [PATCH 470/628] [CustomOp] Update Thresholding node to use new memstream implementation --- .../fpgadataflow/thresholding_batch.py | 45 +++++-------------- 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 292f70941a..eab50c2cbc 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -45,8 +45,6 @@ rtlsim_output_to_npy, ) -from . import templates - # ONNX i/o tensor shape assumptions for Thresholding: # input 0 is the input tensor, shape (..., NumChannels) # input 1 is the threshold tensor, shape (NumChannels, n_thres) @@ -59,7 +57,6 @@ class Thresholding_Batch(HLSCustomOp): def __init__(self, onnx_node, **kwargs): super().__init__(onnx_node, **kwargs) - self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): my_attrs = { @@ -457,26 +454,10 @@ def generate_params(self, model, path): weight_filename_sim = "{}/thresholds.npy".format(code_gen_dir) self.make_weight_file(thresholds, "decoupled_npy", weight_filename_sim) # also save weights as Verilog .dat file - # note that we provide two different .dat files, one for synth - # and one for synthesis. this is because URAM-based weights always - # need zero weights for synthesis, otherwise they get inferred - # as BRAM - weight_filename_rtl_synth = "{}/memblock_synth_0.dat".format(code_gen_dir) - weight_filename_rtl_sim = "{}/memblock_sim_0.dat".format(code_gen_dir) - # sim weights are always the true weights + # This file will be ignored when synthesizing UltraScale memory. 
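+            # (UltraRAM cannot take a non-zero memory initializer without the tools
+            # inferring BRAM instead, so the packaged memstream wrapper swaps
+            # INIT_FILE for an empty string at synthesis whenever RAM_STYLE is
+            # "ultra"; a single memblock.dat therefore serves both simulation
+            # and synthesis.)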
+ weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) self.make_weight_file( - thresholds, "decoupled_verilog_dat", weight_filename_rtl_sim - ) - ram_style = self.get_nodeattr("ram_style") - if ram_style == "ultra": - # UltraRAM must have no memory initializer, or only zeroes - # otherwise BRAM will be inferred instead of URAM - # as a workaround we provide a zero-weight init here - synth_thresholds = np.zeros_like(thresholds, dtype=np.float32) - else: - synth_thresholds = thresholds - self.make_weight_file( - synth_thresholds, "decoupled_verilog_dat", weight_filename_rtl_synth + thresholds, "decoupled_verilog_dat", weight_filename_rtl ) else: raise Exception("Unrecognized mem_mode") @@ -843,7 +824,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "xilinx.com:user:memstream:1.0" + strm_vlnv = "amd.com:FINN:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" @@ -851,22 +832,16 @@ def code_generation_ipi(self): ) cmd.append( "set_property -dict [list " - "CONFIG.NSTREAMS {1} " - "CONFIG.MEM_DEPTH {%d} " - "CONFIG.MEM_WIDTH {%d} " - "CONFIG.MEM_INIT {%s} " + "CONFIG.DEPTH {%d} " + "CONFIG.WIDTH {%d} " + "CONFIG.INIT_FILE {%s} " "CONFIG.RAM_STYLE {%s} " - "CONFIG.STRM0_DEPTH {%d} " - "CONFIG.STRM0_WIDTH {%d} " - "CONFIG.STRM0_OFFSET {0} " "] [get_bd_cells /%s/%s]" % ( self.calc_tmem(), self.get_weightstream_width_padded(), - self.get_nodeattr("code_gen_dir_ipgen") + "/", + self.get_nodeattr("code_gen_dir_ipgen") + "/memblock.dat", self.get_nodeattr("ram_style"), - self.calc_tmem(), - self.get_weightstream_width_padded(), node_name, strm_inst, ) @@ -877,11 +852,11 @@ def code_generation_ipi(self): % (node_name, strm_inst, node_name, node_name, sname) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_rst_n]" % (node_name, rst_name, node_name, strm_inst) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_clk]" % (node_name, clk_name, node_name, strm_inst) ) cmd.append( From 232e9147723e09389b5fa1c345bf6098a4fc0efa Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 21 Apr 2023 17:01:21 +0100 Subject: [PATCH 471/628] [CustomOp] Delete old decoupled wrapper and linting on MVAU --- .../custom_op/fpgadataflow/matrixvectoractivation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 68ef4cb6fb..d59b6826c2 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -46,8 +46,6 @@ rtlsim_output_to_npy, ) -from . import templates - # ONNX i/o tensor shape assumptions for MatrixVectorActivation: # input 0 is the input tensor, shape (.., i_size) = (..., MW) # input 1 is the weight tensor, shape (i_size, o_size) = (MW, MH) @@ -62,7 +60,6 @@ class MatrixVectorActivation(HLSCustomOp): def __init__(self, onnx_node, **kwargs): super().__init__(onnx_node, **kwargs) - self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): my_attrs = { @@ -869,7 +866,9 @@ def generate_params(self, model, path): # also save weights as Verilog .dat file # This file will be ignored when synthesizing UltraScale memory. 
weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) - self.make_weight_file(weights, "decoupled_verilog_dat", weight_filename_rtl) + self.make_weight_file( + weights, "decoupled_verilog_dat", weight_filename_rtl + ) else: raise Exception( """Please set mem_mode to "const", "decoupled", or "external", @@ -1378,7 +1377,7 @@ def code_generation_ipi(self): self.get_nodeattr("code_gen_dir_ipgen") + "/memblock.dat", self.get_nodeattr("ram_style"), node_name, - strm_inst + strm_inst, ) ) cmd.append( From a2e8e92892260184a48438fbdd252e17cfe373fd Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 21 Apr 2023 17:18:40 +0100 Subject: [PATCH 472/628] [CustomOp] Update custom ops to use new memstream component --- .../fpgadataflow/channelwise_op_batch.py | 3 - .../fpgadataflow/matrixvectoractivation.py | 2 +- src/finn/custom_op/fpgadataflow/templates.py | 101 ------------------ .../fpgadataflow/vectorvectoractivation.py | 40 ++----- 4 files changed, 9 insertions(+), 137 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index cde66f1ae2..7791647abf 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -39,8 +39,6 @@ rtlsim_output_to_npy, ) -from . import templates - # ONNX i/o tensor shape assumptions for channelwise ops: # input 0 is the input tensor, shape (..., NumChannels) # input 1 is the channelwise parameter tensor, shape (NumChannels, params_per_channel) @@ -87,7 +85,6 @@ class ChannelwiseOp_Batch(HLSCustomOp): def __init__(self, onnx_node, **kwargs): super().__init__(onnx_node, **kwargs) - self.decoupled_wrapper = templates.decoupled_wrapper def get_nodeattr_types(self): my_attrs = { diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index d59b6826c2..9abc933847 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1358,7 +1358,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "xilinx.com:user:memstream:1.0" + strm_vlnv = "amd.com:FINN:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" diff --git a/src/finn/custom_op/fpgadataflow/templates.py b/src/finn/custom_op/fpgadataflow/templates.py index c7bbc3f139..4e03e6daf9 100644 --- a/src/finn/custom_op/fpgadataflow/templates.py +++ b/src/finn/custom_op/fpgadataflow/templates.py @@ -109,107 +109,6 @@ exit 0 """ -# verilog wrapper for decoupled mem mode -decoupled_wrapper = """ -module $TOPNAME$( -ap_clk, -ap_rst_n, -in0_$HLS_SNAME$_TDATA, -in0_$HLS_SNAME$_TVALID, -in0_$HLS_SNAME$_TREADY, -out_$HLS_SNAME$_TDATA, -out_$HLS_SNAME$_TVALID, -out_$HLS_SNAME$_TREADY -); - -input ap_clk; -input ap_rst_n; -input $IN_RANGE$ in0_$HLS_SNAME$_TDATA; -input in0_$HLS_SNAME$_TVALID; -output in0_$HLS_SNAME$_TREADY; -output $OUT_RANGE$ out_$HLS_SNAME$_TDATA; -output out_$HLS_SNAME$_TVALID; -input out_$HLS_SNAME$_TREADY; - -reg [31:0] config_address = 0; -reg config_ce = 0; -reg config_we = 0; -reg [31:0] config_d0 = 0; -wire [31:0] config_q0; - -//multiple wire AXI Streams -wire m_axis_0_afull; -// FIFO count to generate programmable full -wire [5:0] fifo_0_count; -wire m_axis_0_tready; -wire m_axis_0_tvalid; -wire $WEIGHT_RANGE$ m_axis_0_tdata; - -//memstream component 
- -memstream -#( -//parameters to enable/disable axi-mm, set number of streams, set readmemh for -// memory, set per-stream offsets in memory, set per-stream widths -.CONFIG_EN(1), -.NSTREAMS(1), -.MEM_DEPTH($MEM_DEPTH$), -.MEM_WIDTH($WEIGHT_WIDTH$), -.MEM_INIT("./"), -.RAM_STYLE("$RAM_STYLE$"), - -//widths per stream -.STRM0_WIDTH($WEIGHT_WIDTH$), - -//depths per stream -.STRM0_DEPTH($WSTREAM_DEPTH$), - -//offsets for each stream -.STRM0_OFFSET(0) -) -mem -( -.aclk(ap_clk), -.aresetn(ap_rst_n), - -//optional configuration interface compatible with ap_memory -.config_address(config_address), -.config_ce(config_ce), -.config_we(config_we), -.config_d0(config_d0), -.config_q0(config_q0), - -//multiple output AXI Streams, TDATA width rounded to multiple of 8 bits -.m_axis_0_afull(m_axis_0_afull), -.m_axis_0_tready(m_axis_0_tready), -.m_axis_0_tvalid(m_axis_0_tvalid), -.m_axis_0_tdata(m_axis_0_tdata) - - -); - - -//MVA_Stream_Unit - -$LAYER_NAME$ -MVA_Stream_U -( -.ap_clk(ap_clk), //input -.ap_rst_n(ap_rst_n), //input -.in0_$HLS_SNAME$_TDATA(in0_$HLS_SNAME$_TDATA), //$IN_RANGE$ input -.in0_$HLS_SNAME$_TVALID(in0_$HLS_SNAME$_TVALID), //input -.in0_$HLS_SNAME$_TREADY(in0_$HLS_SNAME$_TREADY), //output -.weights_$HLS_SNAME$_TDATA(m_axis_0_tdata), //$WEIGHT_RANGE$ input -.weights_$HLS_SNAME$_TVALID(m_axis_0_tvalid), //input -.weights_$HLS_SNAME$_TREADY(m_axis_0_tready), //output -.out_$HLS_SNAME$_TDATA(out_$HLS_SNAME$_TDATA), //$OUT_RANGE$ output -.out_$HLS_SNAME$_TVALID(out_$HLS_SNAME$_TVALID), //output -.out_$HLS_SNAME$_TREADY(out_$HLS_SNAME$_TREADY) //input -); - -endmodule -""" - ip_package_tcl = """ ## IP Info set Vendor "xilinx.com" diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index da79933f26..afbaad5759 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -580,28 +580,10 @@ def generate_params(self, model, path): self.make_weight_file(weights, "decoupled_npy", weight_filename_sim) if mem_mode == "decoupled": # also save weights as Verilog .dat file - # note that we provide two different .dat files, one for synth - # and one for synthesis. this is because URAM-based weights always - # need zero weights for synthesis, otherwise they get inferred - # as BRAM - weight_filename_rtl_synth = "{}/memblock_synth_0.dat".format( - code_gen_dir - ) - weight_filename_rtl_sim = "{}/memblock_sim_0.dat".format(code_gen_dir) - # sim weights are always the true weights - self.make_weight_file( - weights, "decoupled_verilog_dat", weight_filename_rtl_sim - ) - ram_style = self.get_nodeattr("ram_style") - if ram_style == "ultra": - # UltraRAM must have no memory initializer, or only zeroes - # otherwise BRAM will be inferred instead of URAM - # as a workaround we provide a zero-weight init here - synth_weights = np.zeros_like(weights, dtype=np.float32) - else: - synth_weights = weights + # This file will be ignored when synthesizing UltraScale memory. 
+ weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) self.make_weight_file( - synth_weights, "decoupled_verilog_dat", weight_filename_rtl_synth + weights, "decoupled_verilog_dat", weight_filename_rtl ) else: raise Exception( @@ -1068,7 +1050,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "xilinx.com:user:memstream:1.0" + strm_vlnv = "amd.com:FINN:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" @@ -1076,22 +1058,16 @@ def code_generation_ipi(self): ) cmd.append( "set_property -dict [list " - "CONFIG.NSTREAMS {1} " - "CONFIG.MEM_DEPTH {%d} " - "CONFIG.MEM_WIDTH {%d} " - "CONFIG.MEM_INIT {%s} " + "CONFIG.DEPTH {%d} " + "CONFIG.WIDTH {%d} " + "CONFIG.INIT_FILE {%s} " "CONFIG.RAM_STYLE {%s} " - "CONFIG.STRM0_DEPTH {%d} " - "CONFIG.STRM0_WIDTH {%d} " - "CONFIG.STRM0_OFFSET {0} " "] [get_bd_cells /%s/%s]" % ( self.calc_wmem(), self.get_weightstream_width_padded(), - self.get_nodeattr("code_gen_dir_ipgen") + "/", + self.get_nodeattr("code_gen_dir_ipgen") + "/memblock.dat", self.get_nodeattr("ram_style"), - self.calc_wmem(), - self.get_weightstream_width_padded(), node_name, strm_inst, ) From 91ae41aea2b0fe12aab9b2ad734716de0f934d72 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 24 Apr 2023 10:43:54 +0100 Subject: [PATCH 473/628] [Docs] Move rtd required packages from Dockerfile to requirements.txt --- docker/Dockerfile.finn | 2 -- requirements.txt | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index dbafba2476..f823d3c42b 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -88,8 +88,6 @@ RUN pip install jupyter==1.0.0 --ignore-installed RUN pip install markupsafe==2.0.1 RUN pip install matplotlib==3.3.1 --ignore-installed RUN pip install pytest-dependency==0.5.1 -RUN pip install sphinx==5.0.2 -RUN pip install sphinx_rtd_theme==0.5.0 RUN pip install pytest-xdist[setproctitle]==2.4.0 RUN pip install pytest-parallel==0.1.0 RUN pip install "netron>=5.0.0" diff --git a/requirements.txt b/requirements.txt index 6703c83d97..3cf829a171 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,6 +13,8 @@ pyscaffold==3.2.1 scipy==1.5.2 setupext-janitor>=1.1.2 sigtools==2.0.3 +sphinx==5.0.2 +sphinx_rtd_theme==0.5.0 toposort==1.5 vcdvcd==1.0.5 wget==3.2 From f494b60989447709ab88b313bdaf304d9680b8c3 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 24 Apr 2023 11:37:55 +0100 Subject: [PATCH 474/628] [Docs] Add sphinx and rtd theme installs to setup.cfg --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 1893aa4231..144a6a38d5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -81,6 +81,8 @@ docs = pytest netron vcdvcd + sphinx==5.0.2 + sphinx_rtd_theme==0.5.0 torchvision torch qonnx@git+https://github.com/fastmachinelearning/qonnx@main#egg=qonnx From cd8c6817b26bc319ff9c84fc9a92856e194255dc Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 24 Apr 2023 13:42:37 +0100 Subject: [PATCH 475/628] [rtllib] linting on tcl script --- finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 1 - 1 file changed, 1 deletion(-) diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl index 7feac1fbe3..4ad14af637 100644 --- a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl +++ b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl @@ -73,4 +73,3 @@ proc 
update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_ # WARNING: There is no corresponding user parameter named "AXILITE_ADDR_WIDTH". Setting updated value from the model parameter. set_property value 11 ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} } - From 44aed57b775223be77d8b281315094b8f33b9869 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 24 Apr 2023 13:43:43 +0100 Subject: [PATCH 476/628] [CustomOp] update rst and clk signal connection for updated memstrm --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 4 ++-- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 9abc933847..fd41e1f9ad 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1386,11 +1386,11 @@ def code_generation_ipi(self): % (node_name, strm_inst, node_name, node_name, sname) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_rst_n]" % (node_name, rst_name, node_name, strm_inst) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_clk]" % (node_name, clk_name, node_name, strm_inst) ) cmd.append( diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index afbaad5759..a7aaa186df 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -1078,11 +1078,11 @@ def code_generation_ipi(self): % (node_name, strm_inst, node_name, node_name, sname) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_rst_n]" % (node_name, rst_name, node_name, strm_inst) ) cmd.append( - "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]" + "connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/ap_clk]" % (node_name, clk_name, node_name, strm_inst) ) cmd.append( From 876bd89c97730e08d7dbd4f47004fd988fc35054 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 26 Apr 2023 13:08:23 +0100 Subject: [PATCH 477/628] [notebooks] import os library to use os.environ[] Signed-off-by: Fionn O'Donohoe --- .../cybersecurity/1-train-mlp-with-brevitas.ipynb | 1 + 1 file changed, 1 insertion(+) diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 7bfedf4bbb..0f90b8ee78 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -62,6 +62,7 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "import onnx\n", "import torch\n", "\n", From 19dafd217e6e1d6541a1eb5147865872502fa5c9 Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Wed, 3 May 2023 23:17:01 +0200 Subject: [PATCH 478/628] [SWG] move enum def to top, use enum values for codegen --- finn-rtllib/swg/swg_common.sv | 20 +++++++++++-------- .../swg/swg_template_default_dynamic.sv | 20 +++++++++++-------- .../convolutioninputgenerator_rtl.py | 10 +++++----- 3 files changed, 29 insertions(+), 21 deletions(-) diff --git 
a/finn-rtllib/swg/swg_common.sv b/finn-rtllib/swg/swg_common.sv index ff6778973c..d953078abe 100644 --- a/finn-rtllib/swg/swg_common.sv +++ b/finn-rtllib/swg/swg_common.sv @@ -29,6 +29,18 @@ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +`ifndef FINN_SWG_ENUM_DEFINED +`define FINN_SWG_ENUM_DEFINED +typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H +} state_e; +`endif + // loop controller used for both, "default" and "parallel", implementation styles module swg_controller #( int unsigned LOOP_H_ITERATIONS, @@ -61,14 +73,6 @@ module swg_controller #( ); // state and counters - typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H - } state_e; state_e State = INNERMOST_STATE; state_e state_next; diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv index 412f8689ba..c1647ef699 100644 --- a/finn-rtllib/swg/swg_template_default_dynamic.sv +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -1,3 +1,15 @@ +`ifndef FINN_SWG_ENUM_DEFINED +`define FINN_SWG_ENUM_DEFINED +typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H +} state_e; +`endif + module $TOP_MODULE_NAME$_controller #( int unsigned CNTR_BITWIDTH, int unsigned INCR_BITWIDTH, @@ -62,14 +74,6 @@ module $TOP_MODULE_NAME$_controller #( end // state and counters - typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H - } state_e; state_e State = $INNERMOST_STATE$; state_e state_next; diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index 173a157841..a1a32ba6af 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -617,13 +617,13 @@ def prepare_codegen_default(self): # skip innermost SIMD loop completely if loop_kw_iterations == 1: # skip innermost KW loop completely - code_gen_dict["$INNERMOST_STATE$"] = [str(3)] # STATE_LOOP_KH + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KH"] loop_kh_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(2)] # STATE_LOOP_KW + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_KW"] loop_kw_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(1)] # STATE_LOOP_SIMD + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_SIMD"] loop_simd_iterations -= 1 # -1 because state is initial state cntr_bitwidth = math.ceil( @@ -736,10 +736,10 @@ def prepare_codegen_parallel(self): loop_simd_iterations = 1 if loop_w_iterations == 1: - code_gen_dict["$INNERMOST_STATE$"] = [str(5)] # STATE_LOOP_H + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_H"] loop_h_iterations -= 1 # -1 because state is initial state else: - code_gen_dict["$INNERMOST_STATE$"] = [str(4)] # STATE_LOOP_W + code_gen_dict["$INNERMOST_STATE$"] = ["STATE_LOOP_W"] loop_w_iterations -= 1 # -1 because state is initial state # set head and tail address increment values From 40ce6b8b6a583e86e12873ba6da0d8348aa15f7f Mon Sep 17 00:00:00 2001 From: Yaman Umuroglu Date: Thu, 4 May 2023 10:01:37 +0100 Subject: [PATCH 479/628] [Test] add synth to one 
DSWG testcase to trigger (fixed) error --- .../test_fpgadataflow_convinputgenerator_rtl_dynamic.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index 7f7bf649a9..e586984b31 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -189,6 +189,10 @@ def write_swg_config(sim): "ofm": 64, "depthwise": True, "pad_mode": "SAME_UPPER", + # run synthesis for one configuration + # this helped expose a bug in enum decls previously + # (which config the synth runs on does not matter) + "do_synth": True, } cfg1 = { "idims": [(32, 16), (16, 8)], @@ -198,6 +202,7 @@ def write_swg_config(sim): "ofm": 8, "depthwise": False, "pad_mode": "SAME_UPPER", + "do_synth": False, } cfg2 = { "idims": [(64, 128), (2, 4)], @@ -207,6 +212,7 @@ def write_swg_config(sim): "ofm": 64, "depthwise": True, "pad_mode": "SAME_UPPER", + "do_synth": False, } @@ -215,6 +221,7 @@ def write_swg_config(sim): @pytest.mark.vivado @pytest.mark.fpgadataflow def test_fpgadataflow_conv_dynamic(cfg): + do_synth = cfg["do_synth"] pad_mode = cfg["pad_mode"] depthwise = cfg["depthwise"] idims = cfg["idims"] @@ -292,7 +299,7 @@ def test_fpgadataflow_conv_dynamic(cfg): model = model.transform(GiveReadableTensorNames()) model = model.transform(PrepareIP("xc7z020clg400-1", 5)) model = model.transform(HLSSynthIP()) - model = model.transform(CreateStitchedIP("xc7z020clg400-1", 5)) + model = model.transform(CreateStitchedIP("xc7z020clg400-1", 5, vitis=do_synth)) model.set_metadata_prop("exec_mode", "rtlsim") # loop through experiment configurations From 06ab3eccd0d010c3d3cbe968accc6ec8a6181c2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 5 May 2023 13:50:58 +0100 Subject: [PATCH 480/628] Have IPI recompute AXI-lite address width according to user-defined memory layout. 
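
A generated gui/memstream_v1_0.gtcl procedure now recomputes AXILITE_ADDR_WIDTH inside
IP integrator whenever DEPTH or WIDTH changes, instead of the XGUI script pinning the
model parameter to 11. Note that this first version uses plain log()/log(2) without
rounding, so the result comes out fractional for non-power-of-two memory layouts; the
later "Yet another fix of the address width expression" commit in this series wraps both
logarithms in ceil() so the result rounds up like the RTL's $clog2. A small illustrative
comparison (assuming DEPTH=300, WIDTH=32, i.e. one 32-bit word per row):

    from math import ceil, log2

    depth, words_per_row = 300, 1
    unrounded = 2 + log2(depth * words_per_row)        # ~10.23, what the initial gtcl yields
    rounded   = 2 + ceil(log2(depth * words_per_row))  # 11, with $clog2-style rounding
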
--- finn-rtllib/memstream/component.xml | 149 +++++++++++------- finn-rtllib/memstream/gui/memstream_v1_0.gtcl | 2 + finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 27 +++- 3 files changed, 119 insertions(+), 59 deletions(-) create mode 100644 finn-rtllib/memstream/gui/memstream_v1_0.gtcl diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 2705f61908..7b9eff239f 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -249,10 +249,6 @@ ASSOCIATED_BUSIF m_axis_0:s_axilite - - FREQ_TOLERANCE_HZ - -1 - @@ -288,6 +284,31 @@ + + xilinx_anylanguagebehavioralsimulation + Simulation + :vivado.xilinx.com:simulation + SystemVerilog + memstream_axi_wrapper + + + viewChecksum + c6fe43e9 + + + + + xilinx_implementation + Implementation + :vivado.xilinx.com:implementation + memstream_axi_wrapper + + + viewChecksum + cd434062 + + + xilinx_xpgui UI Layout @@ -298,7 +319,21 @@ viewChecksum - 91d40e29 + 32cad48d + + + + + xilinx_utilityxitfiles + Utility XIT/TTCL + :vivado.xilinx.com:xit.util + + xilinx_utilityxitfiles_view_fileset + + + + viewChecksum + abaee39b @@ -312,6 +347,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -324,6 +360,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -336,6 +373,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -348,6 +386,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -367,6 +406,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -386,6 +426,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -401,6 +442,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -413,6 +455,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -432,6 +475,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -451,6 +495,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -466,6 +511,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -481,6 +527,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -497,6 +544,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -509,6 +557,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -521,6 +570,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -540,6 +590,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -559,6 +610,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -574,6 +626,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -589,6 +642,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -605,6 +659,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -621,6 +676,7 @@ std_logic_vector xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -633,6 +689,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -648,6 +705,7 @@ std_logic xilinx_anylanguagesynthesis + xilinx_anylanguagebehavioralsimulation @@ -664,6 +722,7 @@ std_logic_vector xilinx_anylanguagesynthesis + 
xilinx_anylanguagebehavioralsimulation @@ -693,7 +752,7 @@ AXILITE_ADDR_WIDTH Axilite Addr Width - 11 + 11 @@ -730,10 +789,17 @@ xgui/memstream_v1_0.tcl tclSource - CHECKSUM_91d40e29 + CHECKSUM_32cad48d XGUI_VERSION_2 + + xilinx_utilityxitfiles_view_fileset + + gui/memstream_v1_0.gtcl + GTCL + + memstream @@ -741,25 +807,11 @@ DEPTH Depth 512 - - - - required - - - WIDTH Width 32 - - - - required - - - INIT_FILE @@ -771,6 +823,18 @@ Ram Style auto + + AXILITE_ADDR_WIDTH + Axilite Addr Width + 11 + + + + false + + + + Component_Name memstream_axi_wrapper_v1_0 @@ -778,57 +842,30 @@ - - virtex7 - qvirtex7 - versal - kintex7 - kintex7l - qkintex7 - qkintex7l - akintex7 - artix7 - artix7l - aartix7 - qartix7 - zynq - qzynq - azynq - spartan7 - aspartan7 - virtexu - zynquplus - virtexuplus - virtexuplusHBM - virtexuplus58g - kintexuplus - artixuplus - kintexu - /UserIP memstream - level_0 + level_1 package_project AMD - 2 + 1 user.org:user:memstream_axi_wrapper:1.0 - 2023-04-21T15:18:55Z + 2023-05-05T12:43:17Z 2022.1 - + - - - - + + + + diff --git a/finn-rtllib/memstream/gui/memstream_v1_0.gtcl b/finn-rtllib/memstream/gui/memstream_v1_0.gtcl new file mode 100644 index 0000000000..00fcee6045 --- /dev/null +++ b/finn-rtllib/memstream/gui/memstream_v1_0.gtcl @@ -0,0 +1,2 @@ +# This file is automatically written. Do not modify. +proc gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE {DEPTH WIDTH } {expr 2 + log($DEPTH*pow(2, log(($WIDTH+31)/32)/log(2)))/log(2)} diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl index 4ad14af637..1943a50399 100644 --- a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl +++ b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl @@ -1,8 +1,13 @@ + +# Loading additional proc with user specified bodies to compute parameter values. +source [file join [file dirname [file dirname [info script]]] gui/memstream_v1_0.gtcl] + # Definitional proc to organize widgets for parameters. proc init_gui { IPINST } { ipgui::add_param $IPINST -name "Component_Name" #Adding Page set Page_0 [ipgui::add_page $IPINST -name "Page 0"] + ipgui::add_param $IPINST -name "AXILITE_ADDR_WIDTH" -parent ${Page_0} ipgui::add_param $IPINST -name "DEPTH" -parent ${Page_0} ipgui::add_param $IPINST -name "INIT_FILE" -parent ${Page_0} ipgui::add_param $IPINST -name "RAM_STYLE" -parent ${Page_0} @@ -11,6 +16,22 @@ proc init_gui { IPINST } { } +proc update_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.DEPTH PARAM_VALUE.WIDTH } { + # Procedure called to update AXILITE_ADDR_WIDTH when any of the dependent parameters in the arguments change + + set AXILITE_ADDR_WIDTH ${PARAM_VALUE.AXILITE_ADDR_WIDTH} + set DEPTH ${PARAM_VALUE.DEPTH} + set WIDTH ${PARAM_VALUE.WIDTH} + set values(DEPTH) [get_property value $DEPTH] + set values(WIDTH) [get_property value $WIDTH] + set_property value [gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE $values(DEPTH) $values(WIDTH)] $AXILITE_ADDR_WIDTH +} + +proc validate_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH } { + # Procedure called to validate AXILITE_ADDR_WIDTH + return true +} + proc update_PARAM_VALUE.DEPTH { PARAM_VALUE.DEPTH } { # Procedure called to update DEPTH when any of the dependent parameters in the arguments change } @@ -68,8 +89,8 @@ proc update_MODELPARAM_VALUE.RAM_STYLE { MODELPARAM_VALUE.RAM_STYLE PARAM_VALUE. 
set_property value [get_property value ${PARAM_VALUE.RAM_STYLE}] ${MODELPARAM_VALUE.RAM_STYLE} } -proc update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_WIDTH } { +proc update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.AXILITE_ADDR_WIDTH } { # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value - # WARNING: There is no corresponding user parameter named "AXILITE_ADDR_WIDTH". Setting updated value from the model parameter. -set_property value 11 ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} + set_property value [get_property value ${PARAM_VALUE.AXILITE_ADDR_WIDTH}] ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} } + From 96277dba0bde072be12eeccc9b4186d50ed9f242 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 5 May 2023 13:55:10 +0100 Subject: [PATCH 481/628] [rtllib] Linting of updated tcl script --- finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl index 1943a50399..60cb44c99e 100644 --- a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl +++ b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl @@ -18,7 +18,7 @@ proc init_gui { IPINST } { proc update_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.DEPTH PARAM_VALUE.WIDTH } { # Procedure called to update AXILITE_ADDR_WIDTH when any of the dependent parameters in the arguments change - + set AXILITE_ADDR_WIDTH ${PARAM_VALUE.AXILITE_ADDR_WIDTH} set DEPTH ${PARAM_VALUE.DEPTH} set WIDTH ${PARAM_VALUE.WIDTH} @@ -93,4 +93,3 @@ proc update_MODELPARAM_VALUE.AXILITE_ADDR_WIDTH { MODELPARAM_VALUE.AXILITE_ADDR_ # Procedure called to set VHDL generic/Verilog parameter value(s) based on TCL parameter value set_property value [get_property value ${PARAM_VALUE.AXILITE_ADDR_WIDTH}] ${MODELPARAM_VALUE.AXILITE_ADDR_WIDTH} } - From 0bb289ca20ab33989f434191fb3083a991f6244a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 9 May 2023 11:26:40 +0100 Subject: [PATCH 482/628] Yet another fix of the address width expression for IP integrator. --- finn-rtllib/memstream/component.xml | 20 +++++++++++-------- finn-rtllib/memstream/gui/memstream_v1_0.gtcl | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 7b9eff239f..76f71cf878 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -249,6 +249,10 @@ ASSOCIATED_BUSIF m_axis_0:s_axilite + + FREQ_TOLERANCE_HZ + -1 + @@ -333,7 +337,7 @@ viewChecksum - abaee39b + 923e7b90 @@ -849,23 +853,23 @@ level_1 package_project AMD - 1 + 2 user.org:user:memstream_axi_wrapper:1.0 - 2023-05-05T12:43:17Z + 2023-05-09T10:21:56Z 2022.1 - + - - - - + + + + diff --git a/finn-rtllib/memstream/gui/memstream_v1_0.gtcl b/finn-rtllib/memstream/gui/memstream_v1_0.gtcl index 00fcee6045..271f9df453 100644 --- a/finn-rtllib/memstream/gui/memstream_v1_0.gtcl +++ b/finn-rtllib/memstream/gui/memstream_v1_0.gtcl @@ -1,2 +1,2 @@ # This file is automatically written. Do not modify. 
-proc gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE {DEPTH WIDTH } {expr 2 + log($DEPTH*pow(2, log(($WIDTH+31)/32)/log(2)))/log(2)} +proc gen_USERPARAMETER_AXILITE_ADDR_WIDTH_VALUE {DEPTH WIDTH } {expr 2 + ceil(log($DEPTH*pow(2, ceil(log(($WIDTH+31)/32)/log(2))))/log(2))} From 573c147065e40d1b334ec4d8f92c75380d97ead1 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 11 May 2023 15:25:13 +0100 Subject: [PATCH 483/628] [Jenkins] Update tool versions for CI --- docker/jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index e3e5b5f7f9..2954877c2a 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -5,8 +5,8 @@ node { checkout scm } withEnv([ - "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64", - "FINN_XILINX_VERSION=2022.1", + "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.2_1014_8888/installs/lin64", + "FINN_XILINX_VERSION=2022.2", "FINN_DOCKER_TAG=xilinx/finn:jenkins", "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" From d75e1fd2c4823458714c54e8bbedfb7bf36ef2d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Mon, 15 May 2023 17:42:27 +0100 Subject: [PATCH 484/628] Revised packaging with XCI cleanup and address map generation. --- .../fpgadataflow/create_stitched_ip.py | 95 +++++++++++++++---- 1 file changed, 79 insertions(+), 16 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index d1cb3c4af9..03212a9f15 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -385,6 +385,8 @@ def apply(self, model): "create_project %s %s -part %s" % (prjname, vivado_stitch_proj_dir, self.fpgapart) ) + # no warnings on long module names + tcl.append("set_msg_config -id {[BD 41-1753]} -suppress"); # add all the generated IP dirs to ip_repo_paths ip_dirs_str = " ".join(ip_dirs) tcl.append("set_property ip_repo_paths [%s] [current_project]" % ip_dirs_str) @@ -397,8 +399,7 @@ def apply(self, model): fclk_mhz = 1 / (self.clk_ns * 0.001) fclk_hz = fclk_mhz * 1000000 model.set_metadata_prop("clk_ns", str(self.clk_ns)) - tcl.append("set_property CONFIG.FREQ_HZ %f [get_bd_ports /ap_clk]" % fclk_hz) - tcl.append("regenerate_bd_layout") + tcl.append("set_property CONFIG.FREQ_HZ %d [get_bd_ports /ap_clk]" % round(fclk_hz)) tcl.append("validate_bd_design") tcl.append("save_bd_design") # create wrapper hdl (for rtlsim later on) @@ -450,6 +451,8 @@ def apply(self, model): ) % (vivado_stitch_proj_dir, block_vendor, block_library, block_name) ) + # Allow user to customize clock in deployment of stitched IP + tcl.append("set_property ipi_drc {ignore_freq_hz true} [ipx::current_core]"); # in some cases, the IP packager seems to infer an aperture of 64K or 4G, # preventing address assignment of the DDR_LOW and/or DDR_HIGH segments # the following is a hotfix to remove this aperture during IODMA packaging @@ -544,20 +547,80 @@ def apply(self, model): # add a rudimentary driver mdd to get correct ranges in xparameters.h later on example_data_dir = pk.resource_filename("finn.qnn-data", "mdd-data/") copytree(example_data_dir, vivado_stitch_proj_dir + "/data") - tcl.append("file copy -force data ip/") - tcl.append("ipx::add_file_group -type software_driver {} [ipx::current_core]") - tcl.append( - "set_property type mdd [ipx::add_file data/finn_design.mdd 
" - "[ipx::get_file_groups xilinx_softwaredriver -of_objects " - "[ipx::current_core]]]" - ) - tcl.append( - "set_property type tclSource [ipx::add_file data/finn_design.tcl " - "[ipx::get_file_groups xilinx_softwaredriver -of_objects " - "[ipx::current_core]]]" - ) - tcl.append("ipx::update_checksums [ipx::find_open_core %s]" % block_vlnv) - tcl.append("ipx::save_core [ipx::find_open_core %s]" % block_vlnv) + + ##### + # Core Cleanup Operations + tcl.append(""" +set core [ipx::current_core] + +# Add rudimentary driver +file copy -force data ip/ +set file_group [ipx::add_file_group -type software_driver {} $core] +set_property type mdd [ipx::add_file data/finn_design.mdd $file_group] +set_property type tclSource [ipx::add_file data/finn_design.tcl $file_group] + +# Remove all XCI references to subcores +set impl_files [ipx::get_file_groups xilinx_implementation -of $core] +foreach xci [ipx::get_files -of $impl_files {*.xci}] { + ipx::remove_file [get_property NAME $xci] $impl_files +} + +# Construct a single flat memory map for each AXI-lite interface port +foreach port [get_bd_intf_ports -filter {CONFIG.PROTOCOL==AXI4LITE}] { + set pin $port + set awidth "" + while { $awidth == "" } { + set pins [get_bd_intf_pins -of [get_bd_intf_nets -boundary_type lower -of $pin]] + set kill [lsearch $pins $pin] + if { $kill >= 0 } { set pins [lreplace $pins $kill $kill] } + if { [llength $pins] != 1 } { break } + set pin [lindex $pins 0] + set awidth [get_property CONFIG.ADDR_WIDTH $pin] + } + if { $awidth == "" } { + puts "CRITICAL WARNING: Unable to construct address map for $port." + } { + set range [expr 2**$awidth] + puts "INFO: Building address map for $port 0+:$range" + set name [get_property NAME $port] + set_property range $range [ipx::add_address_block Reg0 [ipx::add_memory_map $name $core]] + set_property slave_memory_map_ref $name [ipx::get_bus_interfaces $name -of $core] + } +} + +# Finalize and Save +ipx::update_checksums $core +ipx::save_core $core + +# Remove stale subcore references from component.xml +file rename -force ip/component.xml ip/component.bak +set ifile [open ip/component.bak r] +set ofile [open ip/component.xml w] +set buf [list] +set kill 0 +while { [eof $ifile] != 1 } { + gets $ifile line + if { [string match {**} $line] == 1 } { + foreach l $buf { puts $ofile $l } + set buf [list $line] + } elseif { [llength $buf] > 0 } { + lappend buf $line + + if { [string match {**} $line] == 1 } { + if { $kill == 0 } { foreach l $buf { puts $ofile $l } } + set buf [list] + set kill 0 + } elseif { [string match {**} $line] == 1 } { + set kill 1 + } + } else { + puts $ofile $line + } +} +close $ifile +close $ofile +"""); + # export list of used Verilog files (for rtlsim later on) tcl.append( "set all_v_files [get_files -filter {USED_IN_SYNTHESIS == 1 " From 5e940096e7299ac18675e8cb9e744250c8c4816f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 16 May 2023 08:45:28 +0100 Subject: [PATCH 485/628] Linting. 
--- .../fpgadataflow/create_stitched_ip.py | 85 ++++++++++--------- 1 file changed, 45 insertions(+), 40 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 03212a9f15..ef1afb95ca 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -386,7 +386,7 @@ def apply(self, model): % (prjname, vivado_stitch_proj_dir, self.fpgapart) ) # no warnings on long module names - tcl.append("set_msg_config -id {[BD 41-1753]} -suppress"); + tcl.append("set_msg_config -id {[BD 41-1753]} -suppress") # add all the generated IP dirs to ip_repo_paths ip_dirs_str = " ".join(ip_dirs) tcl.append("set_property ip_repo_paths [%s] [current_project]" % ip_dirs_str) @@ -399,7 +399,9 @@ def apply(self, model): fclk_mhz = 1 / (self.clk_ns * 0.001) fclk_hz = fclk_mhz * 1000000 model.set_metadata_prop("clk_ns", str(self.clk_ns)) - tcl.append("set_property CONFIG.FREQ_HZ %d [get_bd_ports /ap_clk]" % round(fclk_hz)) + tcl.append( + "set_property CONFIG.FREQ_HZ %d [get_bd_ports /ap_clk]" % round(fclk_hz) + ) tcl.append("validate_bd_design") tcl.append("save_bd_design") # create wrapper hdl (for rtlsim later on) @@ -452,7 +454,7 @@ def apply(self, model): % (vivado_stitch_proj_dir, block_vendor, block_library, block_name) ) # Allow user to customize clock in deployment of stitched IP - tcl.append("set_property ipi_drc {ignore_freq_hz true} [ipx::current_core]"); + tcl.append("set_property ipi_drc {ignore_freq_hz true} [ipx::current_core]") # in some cases, the IP packager seems to infer an aperture of 64K or 4G, # preventing address assignment of the DDR_LOW and/or DDR_HIGH segments # the following is a hotfix to remove this aperture during IODMA packaging @@ -550,7 +552,8 @@ def apply(self, model): ##### # Core Cleanup Operations - tcl.append(""" + tcl.append( + """ set core [ipx::current_core] # Add rudimentary driver @@ -567,25 +570,26 @@ def apply(self, model): # Construct a single flat memory map for each AXI-lite interface port foreach port [get_bd_intf_ports -filter {CONFIG.PROTOCOL==AXI4LITE}] { - set pin $port - set awidth "" - while { $awidth == "" } { - set pins [get_bd_intf_pins -of [get_bd_intf_nets -boundary_type lower -of $pin]] - set kill [lsearch $pins $pin] - if { $kill >= 0 } { set pins [lreplace $pins $kill $kill] } - if { [llength $pins] != 1 } { break } - set pin [lindex $pins 0] - set awidth [get_property CONFIG.ADDR_WIDTH $pin] - } - if { $awidth == "" } { - puts "CRITICAL WARNING: Unable to construct address map for $port." - } { - set range [expr 2**$awidth] - puts "INFO: Building address map for $port 0+:$range" - set name [get_property NAME $port] - set_property range $range [ipx::add_address_block Reg0 [ipx::add_memory_map $name $core]] - set_property slave_memory_map_ref $name [ipx::get_bus_interfaces $name -of $core] - } + set pin $port + set awidth "" + while { $awidth == "" } { + set pins [get_bd_intf_pins -of [get_bd_intf_nets -boundary_type lower -of $pin]] + set kill [lsearch $pins $pin] + if { $kill >= 0 } { set pins [lreplace $pins $kill $kill] } + if { [llength $pins] != 1 } { break } + set pin [lindex $pins 0] + set awidth [get_property CONFIG.ADDR_WIDTH $pin] + } + if { $awidth == "" } { + puts "CRITICAL WARNING: Unable to construct address map for $port." 
+ } { + set range [expr 2**$awidth] + puts "INFO: Building address map for $port: 0+:$range" + set name [get_property NAME $port] + set addr_block [ipx::add_address_block Reg0 [ipx::add_memory_map $name $core]] + set_property range $range $addr_block + set_property slave_memory_map_ref $name [ipx::get_bus_interfaces $name -of $core] + } } # Finalize and Save @@ -599,27 +603,28 @@ def apply(self, model): set buf [list] set kill 0 while { [eof $ifile] != 1 } { - gets $ifile line - if { [string match {**} $line] == 1 } { - foreach l $buf { puts $ofile $l } - set buf [list $line] - } elseif { [llength $buf] > 0 } { - lappend buf $line + gets $ifile line + if { [string match {**} $line] == 1 } { + foreach l $buf { puts $ofile $l } + set buf [list $line] + } elseif { [llength $buf] > 0 } { + lappend buf $line - if { [string match {**} $line] == 1 } { - if { $kill == 0 } { foreach l $buf { puts $ofile $l } } - set buf [list] - set kill 0 - } elseif { [string match {**} $line] == 1 } { - set kill 1 - } - } else { - puts $ofile $line - } + if { [string match {**} $line] == 1 } { + if { $kill == 0 } { foreach l $buf { puts $ofile $l } } + set buf [list] + set kill 0 + } elseif { [string match {**} $line] == 1 } { + set kill 1 + } + } else { + puts $ofile $line + } } close $ifile close $ofile -"""); +""" + ) # export list of used Verilog files (for rtlsim later on) tcl.append( From f15c00d8e1dd94e0a2dc8724317f73b9aa29574c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 16 May 2023 15:54:41 +0100 Subject: [PATCH 486/628] Polish interface attributes. Use all-lower-case finn library name like other modules. --- finn-rtllib/memstream/component.xml | 20 ++++++++----------- .../memstream/hdl/memstream_axi_wrapper.v | 5 +++-- finn-rtllib/memstream/xgui/memstream_v1_0.tcl | 2 -- .../fpgadataflow/matrixvectoractivation.py | 2 +- .../fpgadataflow/thresholding_batch.py | 2 +- .../fpgadataflow/vectorvectoractivation.py | 2 +- 6 files changed, 14 insertions(+), 19 deletions(-) diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 76f71cf878..7965c9ae61 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -1,7 +1,7 @@ amd.com - FINN + finn memstream 1.0 @@ -219,10 +219,6 @@ POLARITY ACTIVE_LOW - - ASSOCIATED_BUSIF - m_axis_0 - @@ -284,7 +280,7 @@ viewChecksum - 4d23c8e5 + 4c694b82 @@ -785,7 +781,7 @@ hdl/memstream_axi_wrapper.v verilogSource - CHECKSUM_a3b36ea4 + CHECKSUM_1dcfa744 @@ -853,20 +849,20 @@ level_1 package_project AMD - 2 + 3 user.org:user:memstream_axi_wrapper:1.0 - 2023-05-09T10:21:56Z + 2023-05-16T13:58:39Z - 2022.1 - + 2022.2 + - + diff --git a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v index 69d6b64dec..2d032ca159 100644 --- a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v +++ b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v @@ -40,9 +40,10 @@ module memstream_axi_wrapper #( parameter AXILITE_ADDR_WIDTH = $clog2(DEPTH * (2**$clog2((WIDTH+31)/32))) + 2 )( // Global Control - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0, ASSOCIATED_RESET ap_rst_n" *) + (* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 ap_clk CLK" *) input ap_clk, - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0" *) + (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_HIGH" *) input ap_rst_n, // AXI-lite Write diff --git a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl 
b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl index 60cb44c99e..e802d81c79 100644 --- a/finn-rtllib/memstream/xgui/memstream_v1_0.tcl +++ b/finn-rtllib/memstream/xgui/memstream_v1_0.tcl @@ -12,8 +12,6 @@ proc init_gui { IPINST } { ipgui::add_param $IPINST -name "INIT_FILE" -parent ${Page_0} ipgui::add_param $IPINST -name "RAM_STYLE" -parent ${Page_0} ipgui::add_param $IPINST -name "WIDTH" -parent ${Page_0} - - } proc update_PARAM_VALUE.AXILITE_ADDR_WIDTH { PARAM_VALUE.AXILITE_ADDR_WIDTH PARAM_VALUE.DEPTH PARAM_VALUE.WIDTH } { diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index fd41e1f9ad..899bce98d2 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1358,7 +1358,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "amd.com:FINN:memstream:1.0" + strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index eab50c2cbc..12e635b3d6 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -824,7 +824,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "amd.com:FINN:memstream:1.0" + strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index a7aaa186df..ede572f1a4 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -1050,7 +1050,7 @@ def code_generation_ipi(self): % (self.get_nodeattr("ip_vlnv"), node_name, node_name) ) # instantiate a streamer and connect it to the HLS IP - strm_vlnv = "amd.com:FINN:memstream:1.0" + strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( "create_bd_cell -type ip -vlnv %s /%s/%s" From e5f4a2c5b54e058e52c42eeb9f0af456e34da40f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Tue, 23 May 2023 21:02:02 +0100 Subject: [PATCH 487/628] Add missing simulation fileset. 
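
The xilinx_anylanguagebehavioralsimulation view of the packaged memstream IP had no
file set attached, so the IP shipped no HDL sources for behavioral simulation. This
adds the missing file set (memstream.sv, memstream_axi.sv, axilite_if.v,
memstream_axi_wrapper.v) and refreshes the affected checksums.

A hedged sketch of how an equivalent file set could be produced through the IP
packager instead of editing component.xml by hand. The ipx:: commands below are the
same ones used elsewhere in this series, but the "simulation" group type and the
interactive usage are assumptions, not part of this patch:

    set core [ipx::current_core]
    set fg   [ipx::add_file_group -type simulation {} $core]
    foreach f {hdl/memstream.sv hdl/memstream_axi.sv hdl/axilite_if.v hdl/memstream_axi_wrapper.v} {
        ipx::add_file $f $fg
    }
    ipx::update_checksums $core
    ipx::save_core $core
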
--- finn-rtllib/memstream/component.xml | 40 +++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 7965c9ae61..8ee591e187 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -290,10 +290,13 @@ :vivado.xilinx.com:simulation SystemVerilog memstream_axi_wrapper + + xilinx_anylanguagebehavioralsimulation_view_fileset + viewChecksum - c6fe43e9 + 4728d76a @@ -319,7 +322,7 @@ viewChecksum - 32cad48d + 6c92393d @@ -784,6 +787,33 @@ CHECKSUM_1dcfa744 + + xilinx_anylanguagebehavioralsimulation_view_fileset + + hdl/memstream.sv + systemVerilogSource + USED_IN_ipstatic + xil_defaultlib + + + hdl/memstream_axi.sv + systemVerilogSource + USED_IN_ipstatic + xil_defaultlib + + + hdl/axilite_if.v + verilogSource + USED_IN_ipstatic + xil_defaultlib + + + hdl/memstream_axi_wrapper.v + verilogSource + USED_IN_ipstatic + xil_defaultlib + + xilinx_xpgui_view_fileset @@ -849,11 +879,11 @@ level_1 package_project AMD - 3 + 4 user.org:user:memstream_axi_wrapper:1.0 - 2023-05-16T13:58:39Z + 2023-05-23T19:59:11Z @@ -862,7 +892,7 @@ 2022.2 - + From 30a0058fb50b5e2525687a28c2df7cae848ba3e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Wed, 24 May 2023 07:40:59 +0100 Subject: [PATCH 488/628] Revised control interface attributes. --- finn-rtllib/axi_info/component.xml | 27 ++++++--- finn-rtllib/axi_info/hdl/axi_info_top.sv | 3 + finn-rtllib/memstream/component.xml | 12 ++-- .../memstream/hdl/memstream_axi_wrapper.v | 2 +- finn-rtllib/swg/swg_template_wrapper.v | 55 +++++++++---------- .../swg/swg_template_wrapper_dynamic.v | 45 ++++++++++++--- 6 files changed, 90 insertions(+), 54 deletions(-) diff --git a/finn-rtllib/axi_info/component.xml b/finn-rtllib/axi_info/component.xml index d22637534f..c7632e2915 100644 --- a/finn-rtllib/axi_info/component.xml +++ b/finn-rtllib/axi_info/component.xml @@ -197,6 +197,10 @@ ASSOCIATED_BUSIF s_axi + + FREQ_TOLERANCE_HZ + -1 + @@ -228,7 +232,7 @@ viewChecksum - 7d682dfc + c9da9874 @@ -244,7 +248,7 @@ viewChecksum - 7d682dfc + c9da9874 @@ -258,7 +262,7 @@ viewChecksum - e11f9727 + 1e654f67 @@ -607,7 +611,7 @@ hdl/axi_info_top.sv systemVerilogSource - CHECKSUM_ec9ff0da + CHECKSUM_db6ccc10 @@ -692,17 +696,22 @@ axi_info_top_v1_0 package_project - 5 - 2022-05-30T14:16:13Z + 6 + 2023-05-24T06:36:33Z - 2022.1 - + 2022.2 + - + + + + + + diff --git a/finn-rtllib/axi_info/hdl/axi_info_top.sv b/finn-rtllib/axi_info/hdl/axi_info_top.sv index ab2cfc8bed..74aebe3ec7 100644 --- a/finn-rtllib/axi_info/hdl/axi_info_top.sv +++ b/finn-rtllib/axi_info/hdl/axi_info_top.sv @@ -38,7 +38,10 @@ module axi_info_top #( bit [31:0] CHECKSUM_COUNT )( //- Global Control ------------------ + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF s_axi, ASSOCIATED_RESET ap_rst_n" *) + (* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 ap_clk CLK" *) input logic ap_clk, + (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_LOW" *) input logic ap_rst_n, //- AXI Lite ------------------------ diff --git a/finn-rtllib/memstream/component.xml b/finn-rtllib/memstream/component.xml index 8ee591e187..722da1d803 100644 --- a/finn-rtllib/memstream/component.xml +++ b/finn-rtllib/memstream/component.xml @@ -280,7 +280,7 @@ viewChecksum - 4c694b82 + 04464096 @@ -296,7 +296,7 @@ viewChecksum - 4728d76a + 9e058959 @@ -784,7 +784,7 @@ hdl/memstream_axi_wrapper.v verilogSource - CHECKSUM_1dcfa744 + CHECKSUM_7caabca7 @@ -879,11 +879,11 @@ level_1 
package_project AMD - 4 + 5 user.org:user:memstream_axi_wrapper:1.0 - 2023-05-23T19:59:11Z + 2023-05-24T06:34:57Z @@ -892,7 +892,7 @@ 2022.2 - + diff --git a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v index 2d032ca159..13f5c82d6e 100644 --- a/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v +++ b/finn-rtllib/memstream/hdl/memstream_axi_wrapper.v @@ -43,7 +43,7 @@ module memstream_axi_wrapper #( (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF m_axis_0, ASSOCIATED_RESET ap_rst_n" *) (* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 ap_clk CLK" *) input ap_clk, - (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_HIGH" *) + (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_LOW" *) input ap_rst_n, // AXI-lite Write diff --git a/finn-rtllib/swg/swg_template_wrapper.v b/finn-rtllib/swg/swg_template_wrapper.v index 0cc3579a25..11fa0a88cb 100644 --- a/finn-rtllib/swg/swg_template_wrapper.v +++ b/finn-rtllib/swg/swg_template_wrapper.v @@ -28,19 +28,19 @@ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -`timescale 1 ns / 1 ps module $TOP_MODULE_NAME$ ( -(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) -input ap_clk, -(* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V" *) -input ap_rst_n, -input [BUF_IN_WIDTH-1:0] in0_V_TDATA, -input in0_V_TVALID, -output in0_V_TREADY, -output [BUF_OUT_WIDTH-1:0] out_V_TDATA, -output out_V_TVALID, -input out_V_TREADY + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V, ASSOCIATED_RESET ap_rst_n" *) + (* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 ap_clk CLK" *) + input ap_clk, + (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_LOW" *) + input ap_rst_n, + input [BUF_IN_WIDTH-1:0] in0_V_TDATA, + input in0_V_TVALID, + output in0_V_TREADY, + output [BUF_OUT_WIDTH-1:0] out_V_TDATA, + output out_V_TVALID, + input out_V_TREADY ); // top-level parameters (set via code-generation) @@ -53,23 +53,20 @@ parameter MMV_OUT = $MMV_OUT$; parameter BUF_IN_WIDTH = BIT_WIDTH * SIMD * MMV_IN; parameter BUF_OUT_WIDTH = BIT_WIDTH * SIMD * MMV_OUT; -$TOP_MODULE_NAME$_impl -#( - .BIT_WIDTH(BIT_WIDTH), - .SIMD(SIMD), - .MMV_IN(MMV_IN), - .MMV_OUT(MMV_OUT) -) -impl -( - .ap_clk(ap_clk), - .ap_rst_n(ap_rst_n), - .in0_V_V_TDATA(in0_V_TDATA), - .in0_V_V_TVALID(in0_V_TVALID), - .in0_V_V_TREADY(in0_V_TREADY), - .out_V_V_TDATA(out_V_TDATA), - .out_V_V_TVALID(out_V_TVALID), - .out_V_V_TREADY(out_V_TREADY) +$TOP_MODULE_NAME$_impl #( + .BIT_WIDTH(BIT_WIDTH), + .SIMD(SIMD), + .MMV_IN(MMV_IN), + .MMV_OUT(MMV_OUT) +) impl ( + .ap_clk(ap_clk), + .ap_rst_n(ap_rst_n), + .in0_V_V_TDATA(in0_V_TDATA), + .in0_V_V_TVALID(in0_V_TVALID), + .in0_V_V_TREADY(in0_V_TREADY), + .out_V_V_TDATA(out_V_TDATA), + .out_V_V_TVALID(out_V_TVALID), + .out_V_V_TREADY(out_V_TREADY) ); -endmodule //TOP_MODULE_NAME +endmodule : $TOP_MODULE_NAME$ diff --git a/finn-rtllib/swg/swg_template_wrapper_dynamic.v b/finn-rtllib/swg/swg_template_wrapper_dynamic.v index ca870ace11..5c09e7c1b4 100644 --- a/finn-rtllib/swg/swg_template_wrapper_dynamic.v +++ b/finn-rtllib/swg/swg_template_wrapper_dynamic.v @@ -1,4 +1,33 @@ -`timescale 1 ns / 1 ps +/****************************************************************************** + * Copyright (C) 2022, Advanced Micro Devices, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ module $TOP_MODULE_NAME$ #( // top-level parameters (set via code-generation) @@ -18,9 +47,10 @@ module $TOP_MODULE_NAME$ #( parameter integer C_s_axilite_ADDR_WIDTH = 6 ) ( - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) + (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite, ASSOCIATED_RESET ap_rst_n" *) + (* X_INTERFACE_INFO = "xilinx.com:signal:clock:1.0 ap_clk CLK" *) input ap_clk, - (* X_INTERFACE_PARAMETER = "ASSOCIATED_BUSIF in0_V:out_V:s_axilite" *) + (* X_INTERFACE_PARAMETER = "POLARITY ACTIVE_LOW" *) input ap_rst_n, input [BUF_IN_WIDTH-1:0] in0_V_TDATA, input in0_V_TVALID, @@ -113,17 +143,14 @@ $TOP_MODULE_NAME$_axilite # ( .cfg_reg15(cfg_last_write) ); -$TOP_MODULE_NAME$_impl -#( +$TOP_MODULE_NAME$_impl #( .BIT_WIDTH(BIT_WIDTH), .SIMD(SIMD), .MMV_IN(MMV_IN), .MMV_OUT(MMV_OUT), .CNTR_BITWIDTH(CNTR_BITWIDTH), .INCR_BITWIDTH(INCR_BITWIDTH) -) -impl -( +) impl ( .ap_clk(ap_clk), .ap_rst_n(ap_rst_n), .in0_V_V_TDATA(in0_V_TDATA), @@ -151,4 +178,4 @@ impl .cfg_last_write(cfg_last_write) ); -endmodule //TOP_MODULE_NAME +endmodule : $TOP_MODULE_NAME$ From 9f6d7b2700e0364c6a4a6a36bbfb234281df03f9 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Wed, 24 May 2023 10:03:04 +0200 Subject: [PATCH 489/628] Pin IPython dependency --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 3cf829a171..5bb4f4abc9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,3 +18,4 @@ sphinx_rtd_theme==0.5.0 toposort==1.5 vcdvcd==1.0.5 wget==3.2 +ipython==8.12.2 From 4443eeb823a0bbc35ec016d1a92574264f3981fe Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 25 May 2023 12:47:27 +0200 Subject: [PATCH 490/628] Update base image to Ubuntu 22.04 --- docker/Dockerfile.finn | 31 +++++++++++++++++++------------ docker/finn_entrypoint.sh | 5 +++-- fetch-repos.sh | 4 ++-- requirements.txt | 14 ++++++-------- run-docker.sh | 5 
++++- 5 files changed, 34 insertions(+), 25 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index f823d3c42b..d69ccc9725 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -26,10 +26,10 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-runtime +FROM ubuntu:jammy-20230126 LABEL maintainer="Yaman Umuroglu " -ARG XRT_DEB_VERSION="xrt_202210.2.13.466_18.04-amd64-xrt" +ARG XRT_DEB_VERSION="xrt_202220.2.14.354_22.04-amd64-xrt" WORKDIR /workspace @@ -57,12 +57,15 @@ RUN apt-get update && \ unzip \ zip \ locales \ - lsb-core + lsb-core \ + python3 \ + python-is-python3 \ + python3-pip RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config RUN locale-gen "en_US.UTF-8" # install Verilator from source to get the right version -RUN apt-get install -y git perl python3 make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlibc zlib1g zlib1g-dev +RUN apt-get install -y git perl make autoconf g++ flex bison ccache libgoogle-perftools-dev numactl perl-doc libfl2 libfl-dev zlib1g zlib1g-dev RUN git clone https://github.com/verilator/verilator RUN cd verilator && \ git checkout v4.224 && \ @@ -81,19 +84,23 @@ RUN rm /tmp/$XRT_DEB_VERSION.deb COPY requirements.txt . RUN pip install -r requirements.txt RUN rm requirements.txt + +# install PyTorch +RUN pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 + # extra Python package dependencies (for testing and interaction) -RUN pip install pygments==2.4.1 -RUN pip install ipykernel==5.5.5 +RUN pip install pygments==2.14.0 +RUN pip install ipykernel==6.21.2 RUN pip install jupyter==1.0.0 --ignore-installed RUN pip install markupsafe==2.0.1 -RUN pip install matplotlib==3.3.1 --ignore-installed +RUN pip install matplotlib==3.7.0 --ignore-installed RUN pip install pytest-dependency==0.5.1 -RUN pip install pytest-xdist[setproctitle]==2.4.0 -RUN pip install pytest-parallel==0.1.0 +RUN pip install pytest-xdist[setproctitle]==3.2.0 +RUN pip install pytest-parallel==0.1.1 RUN pip install "netron>=5.0.0" -RUN pip install pandas==1.1.5 -RUN pip install scikit-learn==0.24.1 -RUN pip install tqdm==4.31.1 +RUN pip install pandas==1.5.3 +RUN pip install scikit-learn==1.2.1 +RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading # extra dependencies from other FINN deps diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index b5c702111a..4e0266ca6b 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -54,8 +54,9 @@ recho () { echo -e "${RED}ERROR: $1${NC}" } -# qonnx -pip install --user -e ${FINN_ROOT}/deps/qonnx +# qonnx (using workaround for https://github.com/pypa/pip/issues/7953) +# to be fixed in future Ubuntu versions (https://bugs.launchpad.net/ubuntu/+source/setuptools/+bug/1994016) +pip install --no-build-isolation --no-warn-script-location -e ${FINN_ROOT}/deps/qonnx # finn-experimental pip install --user -e ${FINN_ROOT}/deps/finn-experimental # brevitas diff --git a/fetch-repos.sh b/fetch-repos.sh index e039ca9144..189693dd17 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="20a34289cf2297d2b2bbbe75d6ac152ece86e3b4" +QONNX_COMMIT="0c980ef410c7c99b33c5b96486233f5a723ca1bc" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" @@ -39,7 +39,7 @@ XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" KV260_BDF_COMMIT="98e0d3efc901f0b974006bc4370c2a7ad8856c79" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" -QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" +QONNX_URL="https://github.com/iksnagreb/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" BREVITAS_URL="https://github.com/Xilinx/brevitas.git" PYVERILATOR_URL="https://github.com/maltanar/pyverilator.git" diff --git a/requirements.txt b/requirements.txt index 3cf829a171..a9e691fea2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,19 +2,17 @@ bitstring==3.1.7 clize==4.1.1 dataclasses-json==0.5.7 gspread==3.6.0 -numpy==1.22.0 +numpy==1.24.1 onnx==1.13.0 onnxoptimizer -onnxruntime==1.11.1 -pre-commit==2.9.2 +onnxruntime==1.15.0 +pre-commit==3.3.2 protobuf==3.20.3 psutil==5.9.4 -pyscaffold==3.2.1 -scipy==1.5.2 +pyscaffold==4.4 +scipy==1.10.1 setupext-janitor>=1.1.2 sigtools==2.0.3 -sphinx==5.0.2 -sphinx_rtd_theme==0.5.0 -toposort==1.5 +toposort==1.7.0 vcdvcd==1.0.5 wget==3.2 diff --git a/run-docker.sh b/run-docker.sh index 381be35293..c24dcec724 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -86,7 +86,7 @@ SCRIPTPATH=$(dirname "$SCRIPT") : ${ALVEO_BOARD="U250"} : ${ALVEO_TARGET_DIR="/tmp"} : ${PLATFORM_REPO_PATHS="/opt/xilinx/platforms"} -: ${XRT_DEB_VERSION="xrt_202210.2.13.466_18.04-amd64-xrt"} +: ${XRT_DEB_VERSION="xrt_202220.2.14.354_22.04-amd64-xrt"} : ${FINN_HOST_BUILD_DIR="/tmp/$DOCKER_INST_NAME"} : ${FINN_DOCKER_TAG="xilinx/finn:$(git describe --always --tags --dirty).$XRT_DEB_VERSION"} : ${FINN_DOCKER_PREBUILT="0"} @@ -201,6 +201,9 @@ DOCKER_EXEC+="-e PYNQ_PASSWORD=$PYNQ_PASSWORD " DOCKER_EXEC+="-e PYNQ_TARGET_DIR=$PYNQ_TARGET_DIR " DOCKER_EXEC+="-e OHMYXILINX=$OHMYXILINX " DOCKER_EXEC+="-e NUM_DEFAULT_WORKERS=$NUM_DEFAULT_WORKERS " +# Workaround for FlexLM issue, see: +# https://community.flexera.com/t5/InstallAnywhere-Forum/Issues-when-running-Xilinx-tools-or-Other-vendor-tools-in-docker/m-p/245820#M10647 +DOCKER_EXEC+="-e LD_PRELOAD=/lib/x86_64-linux-gnu/libudev.so.1 " if [ "$FINN_DOCKER_RUN_AS_ROOT" = "0" ];then DOCKER_EXEC+="-v /etc/group:/etc/group:ro " DOCKER_EXEC+="-v /etc/passwd:/etc/passwd:ro " From a2a0ffedfad96a64fe102d7c9d3c4c01a3288121 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 25 May 2023 17:33:52 +0200 Subject: [PATCH 491/628] Update pre-commit config --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 126a4ac4b2..42a18b2737 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,11 +29,11 @@ exclude: '^docs/conf.py' default_language_version: - python: python3.8 + python: python3.10 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v4.4.0 hooks: - id: trailing-whitespace exclude: '\.dat$' @@ -56,13 +56,13 @@ repos: - id: isort - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.3.0 hooks: - id: black language_version: python3 - repo: https://github.com/PyCQA/flake8 - rev: 3.9.2 + rev: 6.0.0 hooks: - id: flake8 # black-compatible flake-8 config From b1b0db5f55e4430e7c4a5a5de022cbf4e75e2128 Mon Sep 17 00:00:00 2001 
From: Felix Jentzsch Date: Thu, 25 May 2023 17:46:52 +0200 Subject: [PATCH 492/628] Fix linting --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5bb4f4abc9..223138932e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ bitstring==3.1.7 clize==4.1.1 dataclasses-json==0.5.7 gspread==3.6.0 +ipython==8.12.2 numpy==1.22.0 onnx==1.13.0 onnxoptimizer @@ -18,4 +19,3 @@ sphinx_rtd_theme==0.5.0 toposort==1.5 vcdvcd==1.0.5 wget==3.2 -ipython==8.12.2 From 161cc20e25a71806d438290f3dca88a87e1213d2 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Fri, 26 May 2023 11:22:46 +0200 Subject: [PATCH 493/628] Update Brevitas, apply workarounds to fix quicktest --- fetch-repos.sh | 2 +- setup.cfg | 8 ++++---- tests/brevitas/test_brevitas_avg_pool_export.py | 4 ++-- tests/end2end/test_end2end_cybsec_mlp.py | 2 +- tests/transformation/streamline/test_streamline_cnv.py | 2 ++ tests/transformation/streamline/test_streamline_fc.py | 2 ++ tests/transformation/test_infer_data_layouts_cnv.py | 3 ++- 7 files changed, 14 insertions(+), 9 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 189693dd17..ddae4020ed 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -29,7 +29,7 @@ QONNX_COMMIT="0c980ef410c7c99b33c5b96486233f5a723ca1bc" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" -BREVITAS_COMMIT="c65f9c13dc124971f14739349531bbcda5c2a4aa" +BREVITAS_COMMIT="d30ba0d6b3db4a333072624fa3d10827a686488d" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="c17aa478ae574971d115afa9fa4d9c215857d1ac" diff --git a/setup.cfg b/setup.cfg index 50a91498ce..fb070a436e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,12 +34,12 @@ name = finn description = A Framework for Fast, Scalable Quantized Neural Network Inference author = Yaman Umuroglu -author-email = yamanu@xilinx.com +author_email = yamanu@xilinx.com license = new-bsd -long-description = file: README.md -long-description-content-type = text/markdown +long_description = file: README.md +long_description_content_type = text/markdown url = https://xilinx.github.io/finn/ -project-urls = +project_urls = Documentation = https://finn.readthedocs.io/ # Change if running only on Windows, Mac or Linux (comma-separated) platforms = any diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py index 9550031b32..898f1fb732 100644 --- a/tests/brevitas/test_brevitas_avg_pool_export.py +++ b/tests/brevitas/test_brevitas_avg_pool_export.py @@ -31,7 +31,7 @@ import os import torch from brevitas.export import export_qonnx -from brevitas.nn import QuantAvgPool2d, QuantIdentity, QuantReLU +from brevitas.nn import TruncAvgPool2d, QuantIdentity, QuantReLU from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_datatypes import InferDataTypes @@ -73,7 +73,7 @@ def test_brevitas_avg_pool_export( bit_width=input_bit_width, return_quant_tensor=True, ) - quant_avgpool = QuantAvgPool2d( + quant_avgpool = TruncAvgPool2d( kernel_size=kernel_size, stride=stride, bit_width=bit_width, diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index d2a4d0287f..1ab2d01228 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -146,7 +146,7 @@ def test_end2end_cybsec_mlp_export(QONNX_export): 
model.save(export_onnx_path) else: export_finn_onnx( - model_for_export, export_path=export_onnx_path, input_t=input_qt + model_for_export, export_path=export_onnx_path, input_t=input_qt, input_names=["onnx::Mul_0"] ) assert os.path.isfile(export_onnx_path) # fix input datatype diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py index b7d6a825bb..c5d8e2517f 100644 --- a/tests/transformation/streamline/test_streamline_cnv.py +++ b/tests/transformation/streamline/test_streamline_cnv.py @@ -38,6 +38,7 @@ from qonnx.transformation.general import ( GiveReadableTensorNames, GiveUniqueNodeNames, + GiveUniqueParameterTensors, RemoveStaticGraphInputs, RemoveUnusedTensors, ) @@ -69,6 +70,7 @@ def test_streamline_cnv(size, wbits, abits): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(RemoveStaticGraphInputs()) # load one of the test vectors diff --git a/tests/transformation/streamline/test_streamline_fc.py b/tests/transformation/streamline/test_streamline_fc.py index 6131c3b03e..07c3a0f3cb 100644 --- a/tests/transformation/streamline/test_streamline_fc.py +++ b/tests/transformation/streamline/test_streamline_fc.py @@ -39,6 +39,7 @@ from qonnx.transformation.general import ( GiveReadableTensorNames, GiveUniqueNodeNames, + GiveUniqueParameterTensors, RemoveStaticGraphInputs, RemoveUnusedTensors, ) @@ -72,6 +73,7 @@ def test_streamline_fc(size, wbits, abits): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(RemoveStaticGraphInputs()) # load one of the test vectors diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py index 71822a2903..245980f958 100644 --- a/tests/transformation/test_infer_data_layouts_cnv.py +++ b/tests/transformation/test_infer_data_layouts_cnv.py @@ -35,7 +35,7 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames +from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul @@ -57,6 +57,7 @@ def test_infer_data_layouts_cnv(): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) model = model.transform(InferDataLayouts()) From a78cdb2859c2e8b5150ce07e41a907b93cba2bc4 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 29 May 2023 13:36:43 +0100 Subject: [PATCH 494/628] [CustomOp] Update naming of interfaces for code generation --- .../fpgadataflow/addstreams_batch.py | 55 +++++++++--- .../fpgadataflow/channelwise_op_batch.py | 36 +++++--- 
src/finn/custom_op/fpgadataflow/checksum.py | 35 +++++--- src/finn/custom_op/fpgadataflow/concat.py | 46 +++++++--- .../fpgadataflow/convolutioninputgenerator.py | 40 ++++++--- .../convolutioninputgenerator1d.py | 58 +++++++----- .../custom_op/fpgadataflow/downsampler.py | 39 +++++--- .../fpgadataflow/duplicatestreams_batch.py | 41 ++++++--- src/finn/custom_op/fpgadataflow/eltwise.py | 55 +++++++++--- .../custom_op/fpgadataflow/fmpadding_batch.py | 44 +++++++--- .../fpgadataflow/globalaccpool_batch.py | 36 +++++--- src/finn/custom_op/fpgadataflow/iodma.py | 88 ++++++++++++++----- .../fpgadataflow/labelselect_batch.py | 36 +++++--- src/finn/custom_op/fpgadataflow/lookup.py | 49 +++++++---- .../fpgadataflow/matrixvectoractivation.py | 70 ++++++++++----- src/finn/custom_op/fpgadataflow/pool_batch.py | 40 ++++++--- .../streamingdatawidthconverter_batch.py | 46 +++++++--- .../fpgadataflow/streamingmaxpool_batch.py | 47 +++++++--- .../fpgadataflow/thresholding_batch.py | 68 +++++++++----- src/finn/custom_op/fpgadataflow/upsampler.py | 42 ++++++--- .../fpgadataflow/vectorvectoractivation.py | 70 ++++++++++----- 21 files changed, 739 insertions(+), 302 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index af106d9c06..8fbdf9c452 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -268,37 +268,60 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) npy_in = "%s/input_1.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in1);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in1_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in1 ("in1");'.format(self.get_instream_width()) + 'hls::stream> in1_{} ("in1_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): node = self.onnx_node self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, {}, {}, {}, {}> (in0, in1, out, 1);""".format( + """{}<{}, {}, {}, {}, {}> (in0_{}, in1_{}, out_{}, 1);""".format( node.op_type, self.get_nodeattr("PE"), self.get_input_datatype().get_hls_datatype_str(), self.get_input_datatype().get_hls_datatype_str(), self.get_output_datatype().get_hls_datatype_str(), self.get_number_output_values(), + self.hls_sname(), + self.hls_sname(), + self.hls_sname(), ) ] @@ -315,12 +338,13 @@ def dataoutstrm(self): oshape_cpp_str 
= str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -331,24 +355,27 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, hls::stream> &in1, - hls::stream> &out)""".format( + """void {}(hls::stream> &in0_{}, hls::stream> &in1_{}, + hls::stream> &out_{})""".format( self.onnx_node.name, self.get_nodeattr("PE") * self.get_input_datatype().bitwidth(), + self.hls_sname(), self.get_nodeattr("PE") * self.get_input_datatype().bitwidth(), + self.hls_sname(), self.get_nodeattr("PE") * self.get_output_datatype().bitwidth(), + self.hls_sname(), ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=in1 name=in1_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in1_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 7791647abf..71fc37b184 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -486,17 +486,28 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] # note: the innermost dim is reversed for the input self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0, false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -512,10 +523,12 @@ def docompute(self): raise Exception("""Unexpeted input shape""") self.code_gen_dict["$DOCOMPUTE$"] = [ """Thresholding_Batch<{}, NumChannels1, PE1, {}, {}> - (in0, out, threshs, numReps);""".format( + (in0_{}, out_{}, threshs, numReps);""".format( spatial_dim, tmpl_args["TSrcI"], tmpl_args["TDstI"], + self.hls_sname(), + self.hls_sname(), ) ] @@ -536,12 +549,13 @@ def dataoutstrm(self): # note: the innermost dim is not reversed for the output self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), shape_cpp_str, npy_out, ) @@ -552,21 +566,23 @@ 
def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index 99646274fa..c9d16c0011 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -241,17 +241,28 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] # note: the innermost dim is reversed for the input self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0, false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append("ap_uint<32> chk;") # set drain = false for cppsim @@ -259,7 +270,8 @@ def strm_decl(self): def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ - """checksum(in0, out, chk, drain);""" + """checksum(in0_%s, out_%s, chk, drain);""" + % (self.hls_sname(), self.hls_sname()) ] def dataoutstrm(self): @@ -279,12 +291,13 @@ def dataoutstrm(self): # note: the innermost dim is not reversed for the output self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), shape_cpp_str, npy_out, ), @@ -299,18 +312,18 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """using T = ap_uint;\n void {}(hls::stream &in0, - hls::stream &out, ap_uint<32> &chk, ap_uint<1> &drain)""".format( - self.onnx_node.name + """using T = ap_uint;\n void {}(hls::stream &in0_{}, + hls::stream &out_{}, ap_uint<32> &chk, ap_uint<1> &drain)""".format( + self.onnx_node.name, self.hls_sname(), self.hls_sname() ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS interface axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS interface axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS interface axis port=out name=out_" + self.hls_sname() + "#pragma HLS interface axis port=out_" + self.hls_sname() ) 
self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS interface s_axilite port=chk bundle=checksum" diff --git a/src/finn/custom_op/fpgadataflow/concat.py b/src/finn/custom_op/fpgadataflow/concat.py index 8b655b570d..c43e88d59d 100644 --- a/src/finn/custom_op/fpgadataflow/concat.py +++ b/src/finn/custom_op/fpgadataflow/concat.py @@ -278,8 +278,16 @@ def read_npy_data(self): packed_hls_type = "ap_uint<%d>" % packed_bits npy_in = "%s/input_%d.npy" % (code_gen_dir, i) self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in%d);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in, i) + 'npy2apintstream<%s, %s, %d, %s>("%s", in%d_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + i, + self.hls_sname(), + ) ) def strm_decl(self): @@ -288,21 +296,28 @@ def strm_decl(self): for i in range(n_inputs): packed_bits = self.get_instream_width(i) packed_hls_type = "ap_uint<%d>" % packed_bits - stream_name = "in%d" % i + stream_name = "in%d_%s" % (i, self.hls_sname()) self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream<%s> %s ("%s");' % (packed_hls_type, stream_name, stream_name) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [] n_inputs = self.get_n_inputs() - in_stream_names = ["in%d" % x for x in range(n_inputs)] - in_stream_names = ",".join(in_stream_names) - comp_call = "StreamingConcat(%s, out, NumReps);" % (in_stream_names) + in_streams = [] + for i in range(n_inputs): + in_streams.append("in%d_%s" % (i, self.hls_sname())) + in_stream_names = ",".join(in_streams) + comp_call = "StreamingConcat(%s, out_%s, NumReps);" % ( + in_stream_names, + self.hls_sname(), + ) self.code_gen_dict["$DOCOMPUTE$"] = [comp_call] def dataoutstrm(self): @@ -318,12 +333,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -337,10 +353,15 @@ def blackboxfunction(self): in_streams = [] for i in range(n_inputs): iwidth = self.get_instream_width(i) - in_streams.append("hls::stream> &in%d" % (iwidth, i)) + in_streams.append( + "hls::stream> &in%d_%s" % (iwidth, i, self.hls_sname()) + ) in_streams = ",".join(in_streams) total_width = self.get_input_datatype().bitwidth() * self.get_total_elems() - out_stream = "hls::stream> &out" % (total_width) + out_stream = "hls::stream> &out_%s" % ( + total_width, + self.hls_sname(), + ) blackbox_hls = "void %s(%s, %s)" % (self.onnx_node.name, in_streams, out_stream) self.code_gen_dict["$BLACKBOXFUNCTION$"] = [blackbox_hls] @@ -349,12 +370,11 @@ def pragmas(self): pragmas = [] for i in range(n_inputs): pragmas.append( - "#pragma HLS INTERFACE axis port=in%d name=in%d_%s" - % (i, i, self.hls_sname()) + "#pragma HLS INTERFACE axis port=in%d_%s" % (i, self.hls_sname()) ) self.code_gen_dict["$PRAGMAS$"] = pragmas self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff 
--git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index 6cc9208bb8..c80f79a8c9 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -401,17 +401,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -436,15 +447,15 @@ def docompute(self): if self.get_nodeattr("depthwise") == 1: self.code_gen_dict["$DOCOMPUTE$"] = [ """{}_dws (in0, out, numReps, {});""".format( - hls_call, hls_ram_style + OFMDim1, SIMD1, Stride1> (in0_{}, out_{}, numReps, {});""".format( + hls_call, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] else: self.code_gen_dict["$DOCOMPUTE$"] = [ """{} (in0, out, numReps, {});""".format( - hls_call, hls_ram_style + OFMDim1, SIMD1, Stride1> (in0_{}, out_{}, numReps, {});""".format( + hls_call, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] @@ -464,12 +475,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -480,18 +492,18 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out)""".format( - self.onnx_node.name + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{})""".format( + self.onnx_node.name, self.hls_sname(), self.hls_sname() ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index 6e792ca585..43e8df17b4 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -601,17 +601,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % 
(packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -630,40 +641,40 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, numReps, {});""".format( - swu_variant, hls_ram_style + (in0_{}, out_{}, numReps, {});""".format( + swu_variant, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] if swu_variant == "ConvolutionInputGenerator_1D": self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, numReps, {});""".format( - swu_variant, hls_ram_style + (in0_{}, out_{}, numReps, {});""".format( + swu_variant, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] if swu_variant == "ConvolutionInputGenerator_1D_dws": self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, numReps, {});""".format( - swu_variant, hls_ram_style + (in0_{}, out_{}, numReps, {});""".format( + swu_variant, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] if swu_variant == "ConvolutionInputGenerator_1D_dws_stride": self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, numReps, {});""".format( - swu_variant, hls_ram_style + (in0_{}, out_{}, numReps, {});""".format( + swu_variant, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] if swu_variant == "ConvolutionInputGenerator_1D_dws_naive": self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, numReps, {});""".format( - swu_variant, hls_ram_style + (in0_{}, out_{}, numReps, {});""".format( + swu_variant, self.hls_sname(), self.hls_sname(), hls_ram_style ) ] @@ -690,12 +701,13 @@ def dataoutstrm(self): multi_pixel_out = 1 self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", true, 1, %d);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", true, 1, %d);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, multi_pixel_out, @@ -708,26 +720,26 @@ def save_as_npy(self): def blackboxfunction(self): if self.use_parallel_window_output(): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, + """void {}(hls::stream> &in0_{}, hls::stream> - &out)""".format( - self.onnx_node.name + &out_{})""".format( + self.onnx_node.name, self.hls_sname(), self.hls_sname() ) ] else: self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out)""".format( - self.onnx_node.name + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{})""".format( + self.onnx_node.name, self.hls_sname(), self.hls_sname() ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS 
INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index 255606ee7f..d42a076c30 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -212,24 +212,36 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): dim_var = "1D" if (self.get_nodeattr("is1D") == 1) else "2D" + sname = self.hls_sname() self.code_gen_dict["$DOCOMPUTE$"] = [ f"""ConvolutionInputGenerator_{dim_var}_kernel1 (in0, out, numReps);""" + IFMDim, SIMD,Stride> (in0_{sname}, out_{sname}, numReps);""" ] def dataoutstrm(self): @@ -248,12 +260,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -266,16 +279,22 @@ def blackboxfunction(self): packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_hls_type, packed_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_hls_type, + self.hls_sname(), + packed_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py index 312f5e7e4a..0d5d806dc5 100644 --- a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py @@ -309,18 +309,27 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + 
npy_in, + self.hls_sname(), + ) ) def strm_decl(self): n_outputs = self.get_num_output_streams() self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) for i in range(n_outputs): - out_name = "out%d" % i + out_name = "out%d_%s" % (i, self.hls_sname()) self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream> %s ("%s");' % (self.get_outstream_width(), out_name, out_name) @@ -328,8 +337,13 @@ def strm_decl(self): def docompute(self): n_outputs = self.get_num_output_streams() - ostreams = ["out%d" % x for x in range(n_outputs)] - dc = "DuplicateStreamsCustom(in0, %s);" % (",".join(ostreams)) + ostreams = [] + for i in range(n_outputs): + ostreams.append("out%d_%s" % (i, self.hls_sname())) + dc = "DuplicateStreamsCustom(in0_%s, %s);" % ( + self.hls_sname(), + ",".join(ostreams), + ) self.code_gen_dict["$DOCOMPUTE$"] = [dc] def dataoutstrm(self): @@ -346,7 +360,7 @@ def dataoutstrm(self): outstrm_code = [] for i in range(n_outputs): - out_name = "out%d" % i + out_name = "out%d_%s" % (i, self.hls_sname()) npy_out = "%s/output%d.npy" % (code_gen_dir, i) outstrm_code.append( 'apintstream2npy<%s, %s, %d, %s>(%s, %s, "%s");' @@ -371,10 +385,14 @@ def blackboxfunction(self): inp_streams = [] o_stream_w = self.get_outstream_width() i_stream_w = self.get_instream_width() - in_stream = "hls::stream > &in0" % (i_stream_w) + in_stream = "hls::stream > &in0_%s" % (i_stream_w, self.hls_sname()) inp_streams.append(in_stream) for i in range(n_outputs): - out_stream = "hls::stream > &out%d" % (o_stream_w, i) + out_stream = "hls::stream > &out%d_%s" % ( + o_stream_w, + i, + self.hls_sname(), + ) inp_streams.append(out_stream) self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ @@ -387,12 +405,11 @@ def blackboxfunction(self): def pragmas(self): n_outputs = self.get_num_output_streams() self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] for i in range(n_outputs): self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out%d name=out%d_%s" - % (i, i, self.hls_sname()) + "#pragma HLS INTERFACE axis port=out%d_%s" % (i, self.hls_sname()) ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index c96f12f06b..348e314792 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -354,25 +354,45 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type_0, elem_hls_type_0, elem_bits_0, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type_0, + elem_hls_type_0, + elem_bits_0, + npy_type, + npy_in, + self.hls_sname(), + ) ) npy_in = "%s/input_1.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in1);' - % (packed_hls_type_1, elem_hls_type_1, elem_bits_1, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in1_%s);' + % ( + packed_hls_type_1, + elem_hls_type_1, + elem_bits_1, + npy_type, + npy_in, + self.hls_sname(), + ) ) def 
strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width(0)) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(0), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in1 ("in1");'.format(self.get_instream_width(1)) + 'hls::stream> in1_{} ("in1_{}");'.format( + self.get_instream_width(1), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -394,7 +414,7 @@ def docompute(self): out_hls_type, ) self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, {}, {}, {}, {}, {}>(in0, in1, out, {});""".format( + """{}<{}, {}, {}, {}, {}, {}>(in0_{}, in1_{}, out_{}, {});""".format( "StreamingEltwise", self.get_nodeattr("NumChannels"), self.get_nodeattr("PE"), @@ -402,6 +422,9 @@ def docompute(self): slice_in0, slice_in1, slice_out, + self.hls_sname(), + self.hls_sname(), + self.hls_sname(), eltwise_op_str, ) ] @@ -419,12 +442,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -435,24 +459,27 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, hls::stream> &in1, - hls::stream> &out)""".format( + """void {}(hls::stream> &in0_{}, hls::stream> &in1_{}, + hls::stream> &out_{})""".format( self.onnx_node.name, self.get_nodeattr("PE") * self.get_input_datatype(0).bitwidth(), + self.hls_sname(), self.get_nodeattr("PE") * self.get_input_datatype(1).bitwidth(), + self.hls_sname(), self.get_nodeattr("PE") * self.get_output_datatype().bitwidth(), + self.hls_sname(), ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=in1 name=in1_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in1_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index bdb5775c3e..ea9028d925 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -228,17 +228,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): 
self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -256,8 +267,8 @@ def docompute(self): hls_call = node.op_type self.code_gen_dict["$DOCOMPUTE$"] = [ """{} (in0, out, numReps);""".format( - hls_call, in_t + {}> (in0_{}, out_{}, numReps);""".format( + hls_call, in_t, self.hls_sname(), self.hls_sname() ) ] else: @@ -265,8 +276,8 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ """{} (in0, out, numReps);""".format( - hls_call, in_t + SIMD1, {}> (in0_{}, out_{}, numReps);""".format( + hls_call, in_t, self.hls_sname(), self.hls_sname() ) ] @@ -286,12 +297,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -304,16 +316,22 @@ def blackboxfunction(self): packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_hls_type, packed_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_hls_type, + self.hls_sname(), + packed_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index 220856922c..e518507034 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -267,27 +267,40 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), 
self.hls_sname() + ) ) def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ - """AccPool_Batch<{}, {}, {}, {}, {}> (in0, out, 1);""".format( + """AccPool_Batch<{}, {}, {}, {}, {}> (in0_{}, out_{}, 1);""".format( self.get_normal_input_shape()[1], self.get_nodeattr("NumChannels"), self.get_input_datatype().get_hls_datatype_str(), self.get_nodeattr("PE"), self.get_output_datatype().get_hls_datatype_str(), + self.hls_sname(), + self.hls_sname(), ) ] @@ -304,12 +317,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -320,20 +334,22 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out)""".format( + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{})""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 8a756b630d..4b4ad28def 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -47,7 +47,7 @@ # Interfaces # - AXI-MM name specified by intfName unless this is set to "" (empty, the default) -# in which case output AXI-MM are named "out" and input AXI-MM are named "in0" +# in which case output AXI-MM are named "out_V" and input AXI-MM are named "in0_V" # - AXI-MM interface width (in bits) is specified by intfWidth # - AXI-Stream interface width (in bits) is specified by streamWidth # - If inftWidth and streamWidth are not equal, the DMA core performs @@ -254,15 +254,24 @@ def docompute(self): # DWCs depend on AXI MM and out interface width if strmw == intfw: # case 0: AXI MM width = out width, no DWCs needed - self.code_gen_dict["$DOCOMPUTE$"] = [dma_inst_template % ("in0", "out")] + self.code_gen_dict["$DOCOMPUTE$"] = [ + dma_inst_template + % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) + ] elif (strmw % intfw == 0) or (intfw % strmw == 0): # case 1: AXI MM width divisible by out width or vice versa # single DWC + single extra stream needed self.code_gen_dict["$DOCOMPUTE$"] = [ "hls::stream > dma2dwc;" % intfw, - dma_inst_template % ("in0", "dma2dwc"), + dma_inst_template % ("in0_" + self.hls_sname(), "dma2dwc"), dwc_inst_template - % (intfw, strmw, total_bits // intfw, "dma2dwc", "out"), + % ( + intfw, + strmw, + total_bits // intfw, + "dma2dwc", + "out_" + self.hls_sname(), + ), ] else: # case 2: AXI MM width not divisible by out width or vice versa @@ -271,26 +280,41 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ "hls::stream > dma2lcm;" % intfw, "hls::stream > lcm2out;" % width_lcm, - dma_inst_template % ("in0", "dma2lcm"), + dma_inst_template % ("in0_" + self.hls_sname(), "dma2lcm"), dwc_inst_template % 
(intfw, width_lcm, total_bits // intfw, "dma2lcm", "lcm2out"), dwc_inst_template - % (width_lcm, strmw, total_bits // width_lcm, "lcm2out", "out"), + % ( + width_lcm, + strmw, + total_bits // width_lcm, + "lcm2out", + "out_" + self.hls_sname(), + ), ] elif direction == "out": # in0 -> (DWCs) -> IODMA -> AXI MM # DWCs depend on AXI MM and out interface width if strmw == intfw: # case 0: in width = AXI MM width, no DWCs needed - self.code_gen_dict["$DOCOMPUTE$"] = [dma_inst_template % ("in0", "out")] + self.code_gen_dict["$DOCOMPUTE$"] = [ + dma_inst_template + % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) + ] elif (strmw % intfw == 0) or (intfw % strmw == 0): # case 1: AXI MM width divisible by in width or vice versa # single DWC + single extra stream needed self.code_gen_dict["$DOCOMPUTE$"] = [ "hls::stream > dwc2dma;" % intfw, dwc_inst_template - % (strmw, intfw, total_bits // strmw, "in0", "dwc2dma"), - dma_inst_template % ("dwc2dma", "out"), + % ( + strmw, + intfw, + total_bits // strmw, + "in0_" + self.hls_sname(), + "dwc2dma", + ), + dma_inst_template % ("dwc2dma", "out_" + self.hls_sname()), ] else: # case 2: AXI MM width not divisible by out width or vice versa @@ -300,10 +324,16 @@ def docompute(self): "hls::stream > in2lcm;" % width_lcm, "hls::stream > lcm2dma;" % intfw, dwc_inst_template - % (strmw, width_lcm, total_bits // strmw, "in0", "in2lcm"), + % ( + strmw, + width_lcm, + total_bits // strmw, + "in0_" + self.hls_sname(), + "in2lcm", + ), dwc_inst_template % (width_lcm, intfw, total_bits // width_lcm, "in2lcm", "lcm2dma"), - dma_inst_template % ("lcm2dma", "out"), + dma_inst_template % ("lcm2dma", "out_" + self.hls_sname()), ] else: raise Exception("Unknown IODMA direction: %s" % direction) @@ -316,13 +346,25 @@ def blackboxfunction(self): direction = self.get_nodeattr("direction") if direction == "in": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(%s *in0, hls::stream<%s > &out, unsigned int numReps)" - % (self.onnx_node.name, packed_hls_type_in, packed_hls_type_out) + "void %s(%s *in0_%s, hls::stream<%s > &out_%s, unsigned int numReps)" + % ( + self.onnx_node.name, + packed_hls_type_in, + self.hls_sname(), + packed_hls_type_out, + self.hls_sname(), + ) ] elif direction == "out": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, %s *out, unsigned int numReps)" - % (self.onnx_node.name, packed_hls_type_in, packed_hls_type_out) + "void %s(hls::stream<%s > &in0_%s, %s *out_%s, unsigned int numReps)" + % ( + self.onnx_node.name, + packed_hls_type_in, + self.hls_sname(), + packed_hls_type_out, + self.hls_sname(), + ) ] else: raise ValueError("Invalid IODMA direction, please set to in or out") @@ -339,32 +381,36 @@ def pragmas(self): if direction == "in": if intfname == "": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE m_axi offset=slave port=in0" + "#pragma HLS INTERFACE m_axi offset=slave port=in0_" + + self.hls_sname() ) else: self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE m_axi offset=slave port=%s" % (intfname) ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE s_axilite port=in0 bundle=control" + "#pragma HLS INTERFACE s_axilite port=in0_%s bundle=control" + % (self.hls_sname()) ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) elif direction == "out": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=in0 name=in0_" + 
self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ) if intfname == "": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE m_axi offset=slave port=out" + "#pragma HLS INTERFACE m_axi offset=slave port=out_" + + self.hls_sname() ) else: self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE m_axi offset=slave port=%s" % (intfname) ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE s_axilite port=out bundle=control" + "#pragma HLS INTERFACE s_axilite port=out_%s bundle=control" + % (self.hls_sname()) ) else: raise ValueError("Invalid IODMA direction, please set to in or out") diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index 492cd01073..12a88dacd4 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -275,29 +275,42 @@ def read_npy_data(self): # Also notice that StreamingDataWidthConverter_Batch performs LE packing self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0,false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): node = self.onnx_node self.code_gen_dict["$DOCOMPUTE$"] = [ - """{}<{}, {}, {}, {}, {} > (in0, out, 1);""".format( + """{}<{}, {}, {}, {}, {} > (in0_{}, out_{}, 1);""".format( node.op_type, self.get_nodeattr("Labels"), self.get_nodeattr("PE"), self.get_nodeattr("K"), self.get_input_datatype().get_hls_datatype_str(), self.get_output_datatype().get_hls_datatype_str(), + self.hls_sname(), + self.hls_sname(), ) ] @@ -314,12 +327,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -330,21 +344,23 @@ def save_as_npy(self): def blackboxfunction(self): self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream > &out)""".format( + """void {}(hls::stream> &in0_{}, + hls::stream > &out_{})""".format( self.onnx_node.name, self.get_nodeattr("PE"), self.get_input_datatype().bitwidth(), + self.hls_sname(), self.get_output_datatype().bitwidth(), + self.hls_sname(), ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none 
port=return" diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index ed560ac962..ecf630ef7f 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -206,8 +206,15 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def dataoutstrm(self): @@ -226,12 +233,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", %s);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", %s);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, "false", @@ -244,10 +252,14 @@ def save_as_npy(self): def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -255,12 +267,14 @@ def docompute(self): if mem_mode == "const": self.code_gen_dict["$DOCOMPUTE$"] = [ """StreamingLookup(in0, out, embeddings);""" + InputType, EmbeddingType >(in0_%s, out_%s, embeddings);""" + % (self.hls_sname(), self.hls_sname()) ] elif mem_mode == "external": self.code_gen_dict["$DOCOMPUTE$"] = [ - """StreamingLookup_ext(in0, out, mem, size, oob_count, + """StreamingLookup_ext(in0_%s, out_%s, mem, size, oob_count, oob_irq);""" + % (self.hls_sname(), self.hls_sname()) ] def blackboxfunction(self): @@ -271,26 +285,29 @@ def blackboxfunction(self): packed_output_hls_type = "ap_uint<%d>" % obits if mem_mode == "const": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_input_hls_type, packed_output_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_input_hls_type, + self.hls_sname(), + packed_output_hls_type, + self.hls_sname(), + ) ] elif mem_mode == "external": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ "void " + self.onnx_node.name - + "(hls::stream &in0, hls::stream &out, " + + "(hls::stream &in0_%s, hls::stream &out_%s, " + % (self.hls_sname(), self.hls_sname()) + "T_DST const *const mem, unsigned const size, " + "unsigned &oob_count, bool &oob_irq)" ] def pragmas(self): mem_mode = self.get_nodeattr("mem_mode") - my_pragmas = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() - ] - my_pragmas.append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() - ) + my_pragmas = ["#pragma HLS INTERFACE axis port=in0_" + self.hls_sname()] + my_pragmas.append("#pragma HLS INTERFACE axis port=out_" + self.hls_sname()) my_pragmas.append("#pragma HLS INTERFACE ap_ctrl_none port=return") if mem_mode == "const": my_pragmas.append( diff --git 
a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 899bce98d2..fae2d86d88 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1097,8 +1097,15 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] # note: the innermost dim is reversed for the input self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0, false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) mem_mode = self.get_nodeattr("mem_mode") @@ -1112,24 +1119,35 @@ def read_npy_data(self): npy_in = "%s/weights.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, numReps);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", weights_%s, false, numReps);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): mem_mode = self.get_nodeattr("mem_mode") self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) if mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> weights ("weights");'.format( - self.get_weightstream_width() + 'hls::stream> weights_{} ("weights_{}");'.format( + self.get_weightstream_width(), self.hls_sname(), self.hls_sname() ) ) @@ -1149,10 +1167,12 @@ def docompute(self): if mem_mode == "const": self.code_gen_dict["$DOCOMPUTE$"] = [ """Matrix_Vector_Activate_Batch - (in0, out, weights, {}, numReps, {});""".format( + (in0_{}, out_{}, weights, {}, numReps, {});""".format( tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], + self.hls_sname(), + self.hls_sname(), threshs, map_to_hls_mult_style[self.get_nodeattr("resType")], ) @@ -1166,11 +1186,14 @@ def docompute(self): wdtype_hls_str = export_wdt.get_hls_datatype_str() self.code_gen_dict["$DOCOMPUTE$"] = [ """Matrix_Vector_Activate_Stream_Batch - (in0, out, weights, {}, numReps, {});""".format( + (in0_{}, out_{}, weights_{}, {}, numReps, {});""".format( tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], wdtype_hls_str, + self.hls_sname(), + self.hls_sname(), + self.hls_sname(), threshs, map_to_hls_mult_style[self.get_nodeattr("resType")], ) @@ -1199,12 +1222,13 @@ def dataoutstrm(self): # note: the innermost dim is not reversed for the output self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), shape_cpp_str, npy_out, ) @@ -1217,25 +1241,30 @@ def blackboxfunction(self): mem_mode = self.get_nodeattr("mem_mode") if mem_mode == "const": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void 
{}(hls::stream> &in0, - hls::stream> &out + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ """void {}( - hls::stream> &in0, - hls::stream> &weights, - hls::stream> &out + hls::stream> &in0_{}, + hls::stream> &weights_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_weightstream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] @@ -1249,10 +1278,10 @@ def pragmas(self): mem_mode = self.get_nodeattr("mem_mode") ram_style_thresholds = self.get_nodeattr("ram_style_thresholds") self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" @@ -1270,11 +1299,10 @@ def pragmas(self): ) elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=weights name=weights_" - + self.hls_sname() + "#pragma HLS INTERFACE axis port=weights_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=8 variable=weights" + "#pragma HLS stream depth=8 variable=weights_" + self.hls_sname() ) else: diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index 813f13e504..8ccfce7820 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -239,17 +239,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0,false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -281,8 +292,8 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] += [ """Pool_batch, Slice< {} > > - (in0,out, pool_fxn, OFMDimTotal*numReps);""".format( - i_hls_dt, o_hls_dt + (in0_{}, out_{}, pool_fxn, OFMDimTotal*numReps);""".format( + i_hls_dt, o_hls_dt, self.hls_sname(), self.hls_sname() ) ] @@ -302,12 +313,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s",false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, 
elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -323,16 +335,22 @@ def blackboxfunction(self): packed_obits = self.get_outstream_width() packed_out_hls_type = "ap_uint<%d>" % packed_obits self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_in_hls_type, packed_out_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_in_hls_type, + self.hls_sname(), + packed_out_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index a80d2bbefa..dc905658b1 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -236,14 +236,23 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) if self.needs_lcm(): self.code_gen_dict["$STREAMDECLARATIONS$"].append( @@ -252,7 +261,9 @@ def strm_decl(self): ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -263,13 +274,15 @@ def docompute(self): 'hls::stream> intermediate ("intermediate");'.format( self.get_iowidth_lcm() ), - "%s(in0, intermediate, numReps);" % (op), - "%s(intermediate, out, numReps);" - % (op), + "%s(in0_%s, intermediate, numReps);" + % (op, self.hls_sname()), + "%s(intermediate, out_%s, numReps);" + % (op, self.hls_sname()), ] else: self.code_gen_dict["$DOCOMPUTE$"] = [ - "%s(in0, out, numReps);" % (op) + "%s(in0_%s, out_%s, numReps);" + % (op, self.hls_sname(), self.hls_sname()) ] def dataoutstrm(self): @@ -288,12 +301,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -308,16 +322,22 @@ def blackboxfunction(self): out_packed_bits = self.get_outstream_width() out_packed_hls_type = "ap_uint<%d>" % out_packed_bits 
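Both the IODMA hunks above and the StreamingDataWidthConverter_Batch changes here rest on the same width-matching rule: equal producer and consumer bus widths need no converter, widths where one divides the other need a single converter, and anything else is bridged in two hops through the least common multiple of the two widths (the needs_lcm() / get_iowidth_lcm() path). A small illustrative helper, not taken from the patch, that classifies the three cases:

from math import gcd

def dwc_plan(in_width, out_width):
    # return the widths produced by each successive converter stage (empty = pass-through)
    if in_width == out_width:
        return []                              # case 0: no DWC needed
    if in_width % out_width == 0 or out_width % in_width == 0:
        return [out_width]                     # case 1: a single DWC suffices
    lcm = in_width * out_width // gcd(in_width, out_width)
    return [lcm, out_width]                    # case 2: widen to the LCM, then convert down

print(dwc_plan(64, 64))   # []
print(dwc_plan(64, 16))   # [16]
print(dwc_plan(24, 10))   # [120, 10]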
self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, in_packed_hls_type, out_packed_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + in_packed_hls_type, + self.hls_sname(), + out_packed_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index a0e60931ed..78f4095cbe 100755 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -254,17 +254,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -275,7 +286,8 @@ def docompute(self): else: op = "StreamingMaxPool" self.code_gen_dict["$DOCOMPUTE$"] = [ - "%s(in0, out);" % (op) + "%s(in0_%s, out_%s);" + % (op, self.hls_sname(), self.hls_sname()) ] else: dtype = self.get_input_datatype() @@ -285,14 +297,14 @@ def docompute(self): op = "StreamingMaxPool_Precision_1d" self.code_gen_dict["$DOCOMPUTE$"] = [ """%s(in0, out);""" - % (op, dtype_hls, minval_str) + OutputSize, %s, %s>(in0_%s, out_%s);""" + % (op, dtype_hls, minval_str, self.hls_sname(), self.hls_sname()) ] else: op = "StreamingMaxPool_Precision" self.code_gen_dict["$DOCOMPUTE$"] = [ - "%s(in0, out);" - % (op, dtype_hls, minval_str) + "%s(in0_%s, out_%s);" + % (op, dtype_hls, minval_str, self.hls_sname(), self.hls_sname()) ] def dataoutstrm(self): @@ -311,12 +323,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -329,16 +342,22 @@ def blackboxfunction(self): packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_hls_type, packed_hls_type) + "void %s(hls::stream<%s > 
&in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_hls_type, + self.hls_sname(), + packed_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 12e635b3d6..fc5aa61d66 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -613,8 +613,15 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] # note: the innermost dim is reversed for the input self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0, false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) mem_mode = self.get_nodeattr("mem_mode") if mem_mode == "decoupled": @@ -627,23 +634,34 @@ def read_npy_data(self): npy_in = "%s/thresholds.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, ImgDim1);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", weights_%s, false, ImgDim1);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) mem_mode = self.get_nodeattr("mem_mode") if mem_mode == "decoupled": self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> weights ("weights");'.format( - self.get_weightstream_width() + 'hls::stream> weights_{} ("weights_{}");'.format( + self.get_weightstream_width(), self.hls_sname(), self.hls_sname() ) ) @@ -654,10 +672,12 @@ def docompute(self): if mem_mode == "const": self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, threshs, numReps);""".format( + (in0_{}, out_{}, threshs, numReps);""".format( node.op_type, tmpl_args["TSrcI"], tmpl_args["TDstI"], + self.hls_sname(), + self.hls_sname(), ) ] elif mem_mode == "decoupled": @@ -666,10 +686,13 @@ def docompute(self): # - for synth the unit runs continuously anyway (ap_ctrl_none) self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, weights, numReps);""".format( + (in0_{}, out_{}, weights_{}, numReps);""".format( "Thresholding_Stream_Batch", tmpl_args["TSrcI"], tmpl_args["TDstI"], + self.hls_sname(), + self.hls_sname(), + self.hls_sname(), ) ] else: @@ -692,12 +715,13 @@ def dataoutstrm(self): # note: the innermost dim is not reversed for the output 
self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), shape_cpp_str, npy_out, ) @@ -709,24 +733,29 @@ def save_as_npy(self): def blackboxfunction(self): if self.get_nodeattr("mem_mode") == "const": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] elif self.get_nodeattr("mem_mode") == "decoupled": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &weights, - hls::stream> &out + """void {}(hls::stream> &in0_{}, + hls::stream> &weights_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_weightstream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] else: @@ -734,10 +763,10 @@ def blackboxfunction(self): def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" @@ -789,8 +818,7 @@ def pragmas(self): ) elif self.get_nodeattr("mem_mode") == "decoupled": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=weights name=weights_" - + self.hls_sname() + "#pragma HLS INTERFACE axis port=weights_" + self.hls_sname() ) def code_generation_ipi(self): diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index b653b9386e..ab5a734e7c 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -187,17 +187,28 @@ def read_npy_data(self): npy_in = "%s/input_0.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"] = [] self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) def docompute(self): @@ -206,13 +217,15 @@ def docompute(self): if is_2d: self.code_gen_dict["$DOCOMPUTE$"] = [ """UpsampleNearestNeighbour_Batch > (in0, out, numReps);""" + ap_uint > (in0_%s, out_%s, numReps);""" + % (self.hls_sname(), self.hls_sname()) ] else: assert batch == 1, "1D upsampler currently needs numReps=1" self.code_gen_dict["$DOCOMPUTE$"] = [ """UpsampleNearestNeighbour_1D > (in0, out);""" + 
ap_uint > (in0_%s, out_%s);""" + % (self.hls_sname(), self.hls_sname()) ] def dataoutstrm(self): @@ -231,12 +244,13 @@ def dataoutstrm(self): oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}") self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s");' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), oshape_cpp_str, npy_out, ) @@ -249,16 +263,22 @@ def blackboxfunction(self): packed_bits = self.get_instream_width() packed_hls_type = "ap_uint<%d>" % packed_bits self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - "void %s(hls::stream<%s > &in0, hls::stream<%s > &out)" - % (self.onnx_node.name, packed_hls_type, packed_hls_type) + "void %s(hls::stream<%s > &in0_%s, hls::stream<%s > &out_%s)" + % ( + self.onnx_node.name, + packed_hls_type, + self.hls_sname(), + packed_hls_type, + self.hls_sname(), + ) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index ede572f1a4..64fb5dcbe1 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -802,8 +802,15 @@ def read_npy_data(self): self.code_gen_dict["$READNPYDATA$"] = [] # note: the innermost dim is reversed for the input self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", in0, false);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", in0_%s, false);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) mem_mode = self.get_nodeattr("mem_mode") @@ -817,23 +824,34 @@ def read_npy_data(self): npy_in = "%s/weights.npy" % code_gen_dir self.code_gen_dict["$READNPYDATA$"].append( - 'npy2apintstream<%s, %s, %d, %s>("%s", weights, false, numReps);' - % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in) + 'npy2apintstream<%s, %s, %d, %s>("%s", weights_%s, false, numReps);' + % ( + packed_hls_type, + elem_hls_type, + elem_bits, + npy_type, + npy_in, + self.hls_sname(), + ) ) def strm_decl(self): mem_mode = self.get_nodeattr("mem_mode") self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> in0 ("in0");'.format(self.get_instream_width()) + 'hls::stream> in0_{} ("in0_{}");'.format( + self.get_instream_width(), self.hls_sname(), self.hls_sname() + ) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> out ("out");'.format(self.get_outstream_width()) + 'hls::stream> out_{} ("out_{}");'.format( + self.get_outstream_width(), self.hls_sname(), self.hls_sname() + ) ) if mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream> weights ("weights");'.format( - self.get_weightstream_width() + 'hls::stream> weights_{} ("weights_{}");'.format( + self.get_weightstream_width(), self.hls_sname(), self.hls_sname() ) ) @@ -854,10 +872,12 @@ def docompute(self): if mem_mode 
== "const": self.code_gen_dict["$DOCOMPUTE$"] = [ """Vector_Vector_Activate_Batch - (in0, out, weights, {}, numReps, {});""".format( + (in0_{}, out_{}, weights, {}, numReps, {});""".format( tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], + self.hls_sname(), + self.hls_sname(), threshs, map_to_hls_mult_style[self.get_nodeattr("resType")], ) @@ -871,12 +891,15 @@ def docompute(self): wdtype_hls_str = export_wdt.get_hls_datatype_str() self.code_gen_dict["$DOCOMPUTE$"] = [ """{} - (in0, out, weights, {}, numReps, {});""".format( + (in0_{}, out_{}, weights_{}, {}, numReps, {});""".format( "Vector_Vector_Activate_Stream_Batch", tmpl_args["TSrcI"], tmpl_args["TDstI"], tmpl_args["TWeightI"], wdtype_hls_str, + self.hls_sname(), + self.hls_sname(), + self.hls_sname(), threshs, map_to_hls_mult_style[self.get_nodeattr("resType")], ) @@ -904,12 +927,13 @@ def dataoutstrm(self): # note: the innermost dim is not reversed for the output self.code_gen_dict["$DATAOUTSTREAM$"] = [ - 'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s", false);' + 'apintstream2npy<%s, %s, %d, %s>(out_%s, %s, "%s", false);' % ( packed_hls_type, elem_hls_type, elem_bits, npy_type, + self.hls_sname(), shape_cpp_str, npy_out, ) @@ -922,25 +946,30 @@ def blackboxfunction(self): mem_mode = self.get_nodeattr("mem_mode") if mem_mode == "const": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void {}(hls::stream> &in0, - hls::stream> &out + """void {}(hls::stream> &in0_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ """void {}( - hls::stream> &in0, - hls::stream> &weights, - hls::stream> &out + hls::stream> &in0_{}, + hls::stream> &weights_{}, + hls::stream> &out_{} )""".format( self.onnx_node.name, self.get_instream_width(), + self.hls_sname(), self.get_weightstream_width(), + self.hls_sname(), self.get_outstream_width(), + self.hls_sname(), ) ] else: @@ -952,10 +981,10 @@ def blackboxfunction(self): def pragmas(self): mem_mode = self.get_nodeattr("mem_mode") self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE ap_ctrl_none port=return" @@ -973,11 +1002,10 @@ def pragmas(self): ) elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=weights name=weights_" - + self.hls_sname() + "#pragma HLS INTERFACE axis port=weights_" + self.hls_sname() ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=8 variable=weights" + "#pragma HLS stream depth=8 variable=weights_" + self.hls_sname() ) else: raise Exception( From 7070653b608d2b8eeba196f2434b5cafa4fb1911 Mon Sep 17 00:00:00 2001 From: shashwat1198 Date: Mon, 29 May 2023 13:44:48 +0100 Subject: [PATCH 495/628] Draft PR for folding tutorial --- notebooks/advanced/Folding-Tutorial.ipynb | 2334 +++++++++++++++++++++ notebooks/advanced/finn-hw-arch.png | Bin 0 -> 110452 bytes 2 files changed, 2334 insertions(+) create mode 100644 notebooks/advanced/Folding-Tutorial.ipynb create mode 100644 notebooks/advanced/finn-hw-arch.png diff --git 
a/notebooks/advanced/Folding-Tutorial.ipynb b/notebooks/advanced/Folding-Tutorial.ipynb new file mode 100644 index 0000000000..409595d0d8 --- /dev/null +++ b/notebooks/advanced/Folding-Tutorial.ipynb @@ -0,0 +1,2334 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# FINN - Folding\n", + "--------------------------------------\n", + "**Note: To run this notebook, you first need to run the build flow in the 3rd cybersecurity notebook as we utilize one of the intermediate models generated in that process in this notebook.** \n", + "\n", + "This notebook describes the use of FINN parallelization parameters (PE & SIMD) to efficiently streamline models so as to extract the maximum performance out of them.\n", + "\n", + "We'll use the utility function `showInNetron()` to visualize and interact with our network in the Jupyter Notebook and `showSrc()` to show source code of FINN library calls." ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.util.visualization import showInNetron, showSrc" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: The build_flow in the cybsec_mlp notebook comprises a transformation step `step_target_fps_parallelization` that automatically sets custom parallelization parameters needed to achieve a given `target_fps` by invoking the `SetFolding` transformation.\n", + "\n", + "More details of the above step can be found here: https://github.com/Xilinx/finn/blob/main/src/finn/builder/build_dataflow_steps.py#L394\n", + "\n", + "This notebook shows the manual version of this step and explains how these attributes can improve performance and what their effect on resource utilization is, for developers who need to maximize the performance of their network. \n", + "\n", + "* input : the 'step_convert_to_hls.onnx' file (which has already gone through a series of transformation passes), to be analyzed in terms of clock cycles and resource utilization per layer\n", + "* analyze the estimated execution clock cycles and the resource utilization of each layer in the network" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### FINN-style Dataflow Architectures \n", + "\n", + "We start with a quick recap of FINN-style dataflow architectures. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, as illustrated in the figure below taken from the [FINN-R paper](https://arxiv.org/pdf/1809.04570.pdf):\n", + "\n", + "![](finn-hw-arch.png)\n", + "\n", + "In practice, the compute arrays are instantiated by function calls to optimized Vitis HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library. As these function calls can only handle certain patterns/cases, we need to transform the network into an appropriate form so that we can replace network layers with these function calls, which is the goal of the network preparation process." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Part 1 : Loading the ONNX model\n", + "\n", + "The 'onnx' file needs to go through multiple transformations before it can be fed into our estimation functions.\n", + "\n", + "The 'onnx' file loaded here is taken from the cybersecurity end2end example notebook. 
The build_step in the notebook comprises a series of transformations that take place before the onnx file is used for bitstream generation.\n", + "We pick the onnx file `step_convert_to_hls` to which the necessary transformations have been applied for this notebook (network layers mapped to the necessary FINN-HLS blocks; in this case, the `MatrixVectorActivation` units). \n", + "\n", + "More information on these transformations can be found in the tfc_end2end_example notebook.\n", + "\n", + "To interact with the 'onnx' file we use the `ModelWrapper()` helper function. This function gives access to different model attributes and allows us to apply custom transformations to it.\n", + "In the below cell, we load our onnx file and view the cybersecurity MLP network in netron." ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Serving './step_convert_to_hls_folding.onnx' at http://0.0.0.0:5901\n" ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " ], + "text/plain": [ + "" ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } ], + "source": [ + "from qonnx.core.modelwrapper import ModelWrapper\n", + "model = ModelWrapper(\"./step_convert_to_hls.onnx\")\n", + "\n", + "showInNetron(\"./step_convert_to_hls.onnx\",localhost_url='xirxlabs53')" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Part 2 : Parallelisation Attributes : PE & SIMD" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**PE & SIMD determine the amount of time-multiplexing (folding) applied to each of our network layers. \n", + "These parallelization attributes are subject to certain constraints and should be selected accordingly.**\n", + "\n", + "We see how they work through an example of what a matrix-vector multiplication in the `MatrixVectorActivation` layer looks like.\n", + "\n", + "From the below block diagram, we observe that `SIMD` represents the parallelism within a single dot-product computation (the number of multiplications performed in a single clock cycle), while `PE` refers to how many such dot-products execute in parallel.\n", + "\n", + "If `PE` & `SIMD` are set to 2 & 4 for a given layer, that means that within a dot-product 4 multiplications happen in parallel and 2 such dot-products execute in parallel.\n", + "\n", + "The base case of `PE` & `SIMD` both set to 1 means that there is no parallelization, therefore the resource utilization is low (resources can be reused for different multiplication operations) when compared to settings where network layers have higher `PE` & `SIMD` values." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
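
As a minimal illustration (not part of the notebook added by this patch), the following Python sketch spells out the arithmetic described above: with SIMD multiplications per dot-product and PE dot-products in parallel, a layer performs PE x SIMD multiplications per clock cycle. The matrix dimensions MW and MH are hypothetical values chosen only so that the PE = 2, SIMD = 4 example divides evenly.

# Sketch only: relate PE & SIMD to the work done per clock cycle in one
# MatrixVectorActivation layer. MW (matrix width = input size) and MH
# (matrix height = output size) are assumed values for illustration.
MW, MH = 12, 12
PE, SIMD = 2, 4

mults_per_cycle = PE * SIMD           # 4 multiplications per dot-product, 2 dot-products in parallel
total_mults = MW * MH                 # one multiplication per matrix element
cycles = (MH // PE) * (MW // SIMD)    # cycles needed once the work is folded onto PE x SIMD multipliers

print(f"{mults_per_cycle} mults/cycle, {total_mults} mults total, {cycles} cycles per input vector")

Running this prints 8 mults/cycle and 18 cycles, versus 144 cycles for the unparallelized PE = SIMD = 1 baseline.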

\n", + "Question in the third line of the above cell.\n", + "
" + ] + }, + { + "attachments": { + "MVA-1.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABX4AAAMbCAMAAADNe32MAAAACXBIWXMAAB7CAAAewgFu0HU+AAAAV1BMVEX////v7++lpaUgICDd3d0bGxvh4eEAAAAQEBBKSkq7u7syMjLNzc1WVlYNDQ2YmJhnZ2dCQkLx8fG1tbUrKyvU1NSrq6t2dnaIiIg5OTnDw8Po6Oj5+fnQLuJiAAAgAElEQVR4Ae2djXqiOhRFbdVGW387ttXW93/OSYJBxBw4OxFF3dz7jUDOSWAR12Qi4mDAhQRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARI4J4J/H0vl69vhzP4+xrZtb+vr6/ylPbFxot9scvopSw4W9l/zUI9Z2XcQQIkQAIkcELgbW78spv53T9mZ1+3dlfp31djxnbfsogzZrKKO3b/M7EhH0U9J21wgwRIgARIoE7gb2jMdLWaWnH+urKfRdDvTwhdl/od2mVsIxf/Qlnldb+2Yl5tjKF/K1S4SgIkQAICgZUZ+rHs384M9zYmjH4nZnPIeLG+ndj1pRn6Pfv3oVlEFLv1Ne3nZn1I5AsJkAAJkIBMYGy+i8I3Y97tWhj9rsdh9mFppofJh0K/g8HLOKLY/WH4PDLmT26OJSRAAiRAAgWBgzTtxnztphTC6He6MofZh7V5rel38O5nGPYv5fI5GHwHJX/RvuxcJEACJNBO4MOsrTuPy/bw0dv0d1HMPvyZ9b+6fgdjs618FucnJ37crv2bm8DgQgIkQAIk0Erg236Qtvo+DljL0a9VrL/3YWte30/mfl2VczO3+p2MJ+Pif2vqqXn9sh/SmXn8tojWA2EACZAACTwZgdeFdabZrNzEr12Cftd2zc8+7Mzn+eh3ZaZF+PHPnflZmN3O3iXhbhzmQgIkQAIk0EbgZesGrcbs/GA36Hc6mPk7HUZ2nBvTr7s97WSxd5zt7MD3beNnL06KuEECJEACJBAn8Pn+Y+05ccPWMPdr7x4butmHrb0z4nzyYRU+ZzvWtzELP+yd8cbfIxSukQAJkEA7gX8TN59bTj7YyQU/+7BZ7COj351ZDQavm83w8L8dC++CkTdm2d4aI0iABEjgyQm8rkpXLv39DcfJh8GX/ebFl3Pye/3OB3uPr00rv4Xs73yYOyO7ZepugeBCAiRAAiTQSKCwrQ/5NQv7GvTrvrq2MbOtsTcD/6vf+WDFax+88/n29vbn/rf/uVmKw7fdPsxrY5MsJAESIAESGAx+jfOrX368P8Pcr7uzwc4+bCb2Rt766Hc0CSPdQ6p7+Tp8281+e463nlW4cJUESIAE4gTss3a27q7fz59CxNXR75cZe8+GOx/Ge7v8zbYTMz7eKFxWuzZTq+r99PyetDKEKyRAAiRAAoHAi73lwQx37k9/m28YA7vRr5198M+BCHc+2Jhi2fh71EIVh9fR2Ix/thsz4eC3RoabJEACJBAjsLfGdMuu+N5FGP16/W79Z27lnQ8+bjGeLuPfLC6eHDylfWOYuY8ESIAEIgT+Zu+zyGxCJLJl18vv70XqaWmGxSRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiQAEnibr8AMhpMACZAACVyAgP0K3AVqYRUkQAIkQAIgAeoXBMZwEiABErgMAer3MhxZCwmQAAmABIrfHgKTGE4CJEACJJBLgKPfXILMJwESIIEkAtRvEjYmkQAJkEAuAeo3lyDzSYAESCCJgP01i/hzJpNqYxIJkAAJkICSgNVv8URgZTzDSIAESIAELkJgZsJvaV6kOlZCAiRAAiSgI2BHv/wFeR0qRpEACZDAJQl8m8nh5+AuWSvrIgESIAESaCbwPTHLpTGb8LP0zdEsJQESIAESuACB/a/7Oc75fvA7tAL+mfEOiAtAZRUkQAI9JrB/ue0yGs3eX5c/8w/3C8dj//PG+6UVsFmsV9vv39HbbQ+vx1eOh0YCJHCfBP7+bVfTzXjhpNeTZbP6F0a8+/dV8cP0PTi0yXA3/1n+ft7ndeZRkwAJ9IrA/nflh5eF2xaLyeSG/w+Hm/V8tX39fakxenF/QeyG48ktl4r+d9tR7Qi5SQIkQAIQgf1ybKWy2K2W/2ZvdedBNT1F8P5v9Pu9LeZG1vxKyFNcc54kCXREwH2wNfz5Df/M76iVx6v25XtlZ2qmf493ZjwjEiCB6xB4tfLlbV1prF+2CzPkDEQaPGaRwNMT+Ge/1MCBb3I3eNuZMce/yfiYSALPTOBzbLa0b0YP+Pww84x0ppIACTwtga35eNpzv8yJjxaG0w+XQclaSOC5COzM93Od8OXPdsVHAl0eKmskgccnsDeGd5plXuZXM82sgekkQAJPSODPjJ/wrC97yr9md9kKWRsJkMAzEHgzi2c4zU7P8ddsOq2flZMACTwkAeo3/7JSv/kMWQMJPCEB6jf/olO/+QxZAwk8IQHqN/+iU7/5DFkDCTwhAeo3/6JTv/kMWQMJPCEB6jf/olO/+QxZAwk8IQHqN/+iU7/5DFkDCTwhAeo3/6JTv/kMWQMJPCEB6jf/olO/+QxZAwk8IYFL6/fve7l8fTuA/Ptyz6L5+/r6Ksnui40X+2KXUfMXnt++7uJZbNRveXm5QgIkcCsCb/Pil9B2M38EP/7buFu7r/Svfba7+5bzMvxi2mQVXH1+zPZRYnyS7jkW7iEBEiCBMwJ/9leLpqvV1Lr11xX+LNzDEJx+f0LsutTv0C7+J+akX9rY74yR3Rwq5CsJkAAJkMBgZYbel387M3TTBmH0OykfivBiVTyxJUsz9Lz270OzKIbKZ/yctqnfMyzcQQIkQALnBMbh2cFvxrhfAA6j3/U4zD4szfQw+VDodzB4GZv1eU12z6/5MJx8iKLhThIgARKoETjMOdi987WbUgij3+kqzD6szWtNv4N3Y+zwd/9SLp++1v1wMeLotwaYmyRAAiQQJ/Bh1oU7D8Xbw0dv099F8UjGP7P+V9fvwP7YXOWzuGJyYmAnMpYDjn7jnLmXBEiABGoEvo1ZrL6PdyuUo1+rWH/vw9a8vp/M/boK5u6nKpeT8WRc/O9N/e2mJDj6rQHmJgmQAAkIBF4X9uMys1m5iV+7BP2u7Zq/92FnPs9Hv6vIb/X8TSbW4tRvwZF/kgAJkEArgZetvbPMLjs/2A36nQ5m/k6HkR3nxvR7/ls9a/8hHicfWoEzgARIgARKAp/vPxs7geu+7xbmfu08wtDNPmytVM8nH1bntz4s3XwER78lU66QAAmQgJLAv4n353H0W8w+
bBb7yOh3Z1aDwetmMzz8vxt8mfHf/nP/aczo090/zIUESIAESKCJwOtqGYqX/v6Gin6/7DcvvpyT3+t3Ptifurdp5beQ3Z0P7gsXYSmrDFXzlQRIgARIoEagsK3f+et/QDno132vYmNmW2NvBv5Xv/PBivdlMPh8e3v7c//b/wbLcbHYLygPqd8aZW6SAAmQwBmBX+P86pcfP58b5n6ndp+992EzsTMJ9dHvaOLmHuIL73yIc+FeEiABEqgRsM/a2bq7fj9/ChFXR792Ptd7Ntz5MN7b5W+2ndh53lo15Sb1W6LgCgmQAAk0EXixtzyY4c796W/zDWNgN/q1sw/+ORDhzocwt2s2/h61aLW88SyKhTtJgARI4IzAfuvUa8yu+N5FGP16/W79Z27lnQ8+bjGeLhtubeDo94wwd5AACZCAROBv9j4TZxOkJO4nARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggdsRGK39r1nc7gDYMgmQAAk8J4Ev/9Nvz3nuPGsSIAESuCEB+/TJG7bOpkmABEjgaQlQv0976XniJEACtyVA/d6WP1snARJ4WgLU79Neep44CZDAbQnM+NHbbS8AWycBEnhWAhz9PuuV53mTAAncmMCXMQ2/MXTjg2PzJEACJPC4BKx+vx/37HhmJEACJNBbAla/694eHA+MBEiABB6XgNWv2T7u6fHMSIAESKCvBL7NxJgVp3/7en14XCRAAo9K4HVhlq/GbF4p4Ee9xDwvEiCB/hHY//4MjZnvB7ONMeOfXxq4f9eIR0QCJHATAp8vHSx/o9Hs99/r9me6Wdhp3+HSndp+aQVsFrvV9vV3NvrroNkXuv0mfYiNkgAJQATevn/mu42dku16+Vj9lgc2cyPhLpfFeDNdbd8/yxa5QgIkQAJ9IrD/txpbCbqBqTGTLpbh8GM3ndtx7ld9QPryu3XeH44v3+yiOCF3Uh8/X30CzmMhARIgAUfgc+vcO57+LN+//up2vHNEn2+zf9vVznl49+/Oz4WHTwIk8GgE3u2//4fbxx4c7t9Xdlpl/fdo147nQwIkcM8ElvZf5s8wLtxvJ2by2H/J3HM35LGTwBMS+GfMz4NNOEhX8W9txhz/SnS4nwRI4MoEPsdP9OXf/drwB5av3MHYHAmQgERga3ZS0QPuf1sYTj884HXlKZHAXRL4eK4HP66eaKx/l/2RB00Cz0Ngb8xTfSPh1Uyf5+LyTEmABPpM4M2M+3x4Fz+236eaa7k4PlZIAiRwOQJvZnG5yu6gpl+zuYOj5CGSAAk8AQHq9wkuMk+RBEigjwSo3z5eFR4TCZDAExCgfp/gIvMUSYAE+kiA+u3jVeExkQAJPAEB6vcJLjJPkQRIoI8EqN8+XhUeEwmQwBMQoH6f4CLzFEmABPpIgPrt41XhMZEACTwBAer3CS4yT5EESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESOA+CexfcpbP85OuVLg/L73unj4dy3XPnK2RAAn0n8CryVnm5ye4rNQ3XP2eB1xxz+mxzK7YMpsiARIggTYCVr+T1GVhpufVV5VnTTx9OQ+52p7ascwjg/WrHQsbIgESIIFTAq8xhZ6GiFttufvZz8Rs/sT8axbYYzHm45Z/F1zzbNkWCZDAHRBoU2jTKShy3zZmffMp4MM5vA3Nuul0WEYCJEAC1ySgUKh4OJrcv6HZihVcueBtbJZXbpLNXYvAW+UzB66SQDuByMzptTpr2Y5GoWVwbUWV+24W/Zh+sAf/z0w4/VC7iI+ySf22C4cRVQJPod/BtD/D38GOw99z3f5bbSbHbnle3tWe72q7i9xW3sxYfwvl1qz0wf2L/Lvz468QXZltZat5FYltrsmWqsaOuZ2yNT/nKHS5/8y49SiuFfBthtdq6l7a+fo4qtetXeu4Z5uTdi+hX/2hL81KHbx/wW+YwXKwaHfgW/OjPv4Q+PmCfgiDZwwGaM4KGBDpYpVH8C/jnoOANP9Vp9B4O8rcsenNHbf7sRnFz+VZ974vzHg7+xuVy5VA/FuY4UXbtaNf/aEj+k1RHZaDRbuzxDMGgx/4X6F4Bt6KTqnFldXF6qLsRORzTD4M7L8Z9G+MjiPnPTqWjk9VVf3XwtzkbujZwqzwQWXTKVG/TXRsGS5TPANvRSlLf266WF3U8+hXOUhu6T2XKX41kS/qXabqu6zlw8zRf5Je4jz3m4tfB+q35cLgMsUzqN+Wi1AvznGjMndkJvVWb7b9xcnfKvulGV52DFqtvGHdtntp61O/DbxdES5TPANvRTlW9eemi9VFPc/od7Awvbn1bG/Mpd/3Lb2+18Ub83qL49sPzfel26V+W4jiMsUzqN+Wi1AvVo5g62l+W5v70Z/P3gabHh1LFOo1d/6a8U3+Mnrv4N8g1G9Lz8Flimck6VfdBXXjWl3UE41+p5cf67R0Nbl4av7Jhc9Wskq4f+kSjLpol/ptuTK4TPGMJP22HPexWCdWXdQT6VcJ5Mi5w7X5bf653eEZZVQ9NLd5IOiwg3+CUL8tHQGXKZ5B/bZchHqxdgKhnue2tbkplzHW3iX29elYLnE+OXW8mOwvOyQ138lnsdRvy7XAez6eQf22XIR6sVah9Ty3rc1NuUk81t4l9vXpWC5xPjl1/LvRE+C+u7jjnfpt6Qq4TPEM6rflItSLtQqt57ltbS7yHaNYO5fcR/0eadonBxw3rrjWyTWgfluuIC5TPIP6bbkI9WKtQut5blubS/3G6N1+363m5DuZf6d+WzoULlM8g/ptuQj1Yq1C63luW5vbJ/326VhiTK+5b32jO1J25j35NF+/hFTqVwATduMyxTOo30Bb+apVaKw6bW6flNenY4kxvea+W90DPUx/7tF+KH1pnPpt6Tq4TPEM6rflItSLtQqt57ltbW6flNenY4kxvea+yY2+jZjRrv3xVGH4S/22dB1cpngG9dtyEerFWoXW89y2NrdPyuvTscSYXnPfrb6Anf5QYfttZelZPdRvS9fBZYpnUL8tF6FerFVoPc9ta3P7pLw+HUuM6RX32edfXLG1Y1MZtxvbwa80/KV+j4Sja7hM8QzqN4pe3qlVaKwGbW6flNenY4kxveK+lxs9iS69XTf4lYa/1G9L18FlimdQvy0XoV6sVWg9z21rc/ukvD4dS4zpFfd93uhLb5AoT3j4wa8w/IVqRXpByl3KWA4W7YjgGbgYUzLwHOTmR12sLuqJnvmAdPaTt1sHG306lg5OD6kyYxIAaeYsFhJlNbsY/ArDX6hWpBekqA7LwaIdETwDF2NKBp6jlKXvBrpYXRT1W31nXW0deeNd7aBu09Dd6ffVTT24ZRYBRv1GoFR34VMJeAb1WyWuWNdOIMSq0ub2SXl9OpYY0yv
uuzf9hsFvfPhL/bZ0HVymeAb123IR6sVahdbz3LY2t0/K69OxxJhecd+96fcw8ysMf6nflq6DyxTPoH5bLkK9WKvQep7b1ub2SXl9OpYY0yvuuzf9+tse/ORD7Ilp1G9L18FlimdQvy0XoV6sVWg9z21rc/ukvD4dS4zpFffdmX7LmV9r4MX57C/129J1cJniGdRvy0WoF2sVWs9z29rcPimvT8cSY3rFffel3+PMrxsAT884Ub9nSE534DLFM6jfU+atW1qFxirS5vZJeX06lhjTK+67L/36we/ix4xXfv7hbPhL/bZ0HVymeAb123IR6sVahdbz3LY2t0/K69OxxJhecd9d6dcNfhc/L06yIyfgs+Ev9dvSdXCZ4hnUb8tFqBdrFVrPc9va3D4pr0/HEmN6xX13pd9XJ9/BoJCsE3B9+Ev9tnQdXKZ4BvXbchHqxVqF1vPctja3T8rr07HEmF5x3z3pd//x8+fQBMmO5vXn/oYSFUCkF6R8wwzLwaLdCeIZuBhTMvAc5XfU/FXVxeqi+K031Rvl0kHIG+/SbfesvnvS74sd+brlKNm3fbEn/HksCXsaXpFekKI6LAeLdqeFZ+BiTMnAc5Sy9NdSF6uLon4b3h7dFSFvvO6Oohc135N+AzBZsnJJyK28Ir0gRXVYDhbtTgPPwMWYkoHnKGXpr50uVhdF/VbeDtdbRd541zuqm7RE/aqwp6gOy8Gi3UHjGbgYUzLwHKUs/ZXSxeqiqF9V5790EPVbEqV+SxRNKymqw3KwaHeseAYuxpQMPEcpS3+BdLG6KOq3qc93Vkb9lmip3xJF00qK6rAcLNodK56BizElA89RytJfIF2sLor6berznZVRvyVa6rdE0bSSojosB4t2x4pn4GJMycBzlLL0F0gXq4uifpv6fGdl1G+J9lb6/TLD8hjQFfkDNrkk0gbSC1JUh+Vg0e508AxcjCkZeI5Slv4a6mJ1UdRv5G3R/S7kjdf90dy0hVb9fhXLyN9xaw91dNjhXsK+kzP4m80ON4id7K5tvJt1bY9+U5asXBKpHekFKarDcrBodzp4Bi7GlAw8RylLfw11sboo6jfytuh+F/LG6/5obtpCq37H/ukK9o/J6s0daeWJj+bn/ND/fbj49dd5yemenEsgS1YuOW3dbyGHkKI6LAeLdieAZ+BiTMnAc5Sy9FdNF6uL6o1+16PE5W17/s17j6n+B9LZ67mX3u7TsVz63MD6FPqdDN2yMGbs/Ds0xbbbt61962EwsE9D/1itrKJ/W45jZbYtEXKxLFm5JFIb0gtSVIflYNHudPAMXIwpGXiOUpb+GupidVG90e8kjHESXs8efBLp6u6NuYruv8XOPh3LLc6/0marfifmtQj/Xhj3Hd+hWVbSa6svE2/V/dRsaiX1zQ/zXt+l3pYlK5dEKkd6QYrqsBws2p0OnoGLMSUDz1HK0l9DXawuqj/6naQuC45+I2/tu9nVqt9x0K99u5vPFv0uD9r9E34IvsTyZxZnI+eysG1FlqxcEqkT0RcSG5rCcrBo1waegYsxJQPPUcrSg9XF6qJ6o1/dCDZ0rOorH7lTpXF36wr9htHuzDu1Nvrdv5SLdfN2syoIjFsGt1s/kk6kJUtWLok0hegLiQ1NYTlYtGsDz8DFmJKB5yhl6cHqYnVR1G/oq1d9Rf7ZedUDu35jrfotJx8Gv8aMBoONWVbHrZWfvpwcj75t9PvZpudjVZE1WbJySaQaRF9IbGgKy8GiXRt4Bi7GlAw8RylLD1YXq4uifkNfveor9VvibtXvcfJhZZxga6Pf5WQ8GRf/V6Z7f1ru6l2Zj/II8BVZsnJJpBVEX0hsaArLwaJdG3gGLsaUDDxHKUsPVheri6J+Q1+96iv1W+JW6NdPPnx+2aebuxvNhmYxPiwV35b1+ZV3Y75P95xu/cR+JvM0pGlLlqxcEqkP0RcSG5rCcrBo18Y1HoR+nVbm5ecLgZ38qoulfmsE+6S8Ph1LDdO1N1v1W7knxn9RonLfb2W24eSwfxeNd7l8TQ3wdjupudiQJSuXRKpBhIfEhqawHCzatUH9BtLnr9RvjUmflNenY6lhuvZmq34PX7tYjKevfs53aLajt8MSP9jDHWq1wuVPsczd9zLCp3m1GOWmLFm5JFI1Irw01f18lp9Ltq7YG6FbY04Dfoy+/rfDbf1ze/WwBc8YjZpzZmfL1GzP9kk7dLFrXY1L5W1bkd5zwV3auxdiTWpz+6S8Ph1LjOkV9yn0uxzs7X9hqc39vm42w8P/uyLE3p8W+TbcYFe9o3zRODcRmpJeZcm+VRvhOgm0EJhSv9KbrMP91G8Jt1W/xzsfipyafut3PuxXi/j32cLo142Bp2bR+q3k8gjPV+5GvwvgZnorisnC/qf834baX31W1z92X1K0y6LyncViT9ufeEZbKx9ny8QMz/ZJO3SxuqiPFfV7/vbqfA/1WyJu1e/xzocip6bfTzsP8ef+t/+5gLlZ/CsrF1fyun2Tfsdim2cF3U8+IN+rxqc38IzBQDknWmGFZ+Ct6D5OKw5KF6uL4p0PlQt9vVXqt2Tdqt9JbaLW6vc4E1FWE1bszWmzsN7wau8Ljj4trSGlUkT9ehjUb6VP1Fap3xqQPimvT8dSw3TtzVb9no9+t/tyqR+t/WbGv0Nhveh02344c7oD2aJ+PS3qV+401G+NTZ+U16djqWG69qZCv6e3KWwqn2Ys6ke7PhY2f7im/cC23oDfpn49Buo32jv8Tuq3xqZPyuvTsdQwXXuzVb/nH70dFXum32NR8/cuBiP/DbrEk6V+PTjqV+4/1G+NTZ+U16djqWG69marfjs6oEnG5C/16y/KA+l3cXioqaKzzVWxuih+9KYAfvkQ6rdkeiv97lofyF4e4tkK9euRPJB+gW9B6sa1uijq9+y9dY0d1G9J+Vb6ndduqCgPSLFC/XpI1K/cV6jfGps+Ka9Px1LDdO3NW+k3xR2BDfXrSaQgxO/ixTN432/oqMrXnM+htbl9Ul6fjkV5iboKu5V+cy4B9et7wwPpVzWfW7wFdLO6uihOPnSllcZ6c977jRXfX+Gt9Pua8XMX1K/vZw+kX879ppqDo99Ucr3Iu5V+/xn/+MokBtSvx0b9yr2Hc781Nn0acfbpWGqYrr15K/3+msMD0hJOmPr10Khfue9QvzU2fVJen46lhunam7fS71frT9HLJKhfz+aB9Mu5X7m3N5doJx9yJvuajwAvpX5LZrfS76jl1+DKA4ysUL8eygPpl3O/kW6u2kX9qjD1NehW+pUV2k5KzpVLIrXygZMRKGe7eOPZGZJL79AqNNauNpej3xi9m++jflWXIGWkieVg0e6g8Qz8jtyUDDxHOVPrr5QuVhf1RDee/evFg+WL9xonHwoO9k/qt0TRtJKiOiwHi3bHimfgYkzJwHOUd+n6C6SL1UU9lX7T7zRqemOklFG/JTXqt0TRtJKiOiwHi3bHimfgYkzJwHOUY1V/gXSxuqgn0u+v+Wjq31cto35L3NRviaJpJUV1WA4W7Y4Vz8DFmJKB5yhl6S+QLl
YX9UT6HYl3Go1et8t/L4fOP/pyv0Lz9vU1Ouyw/z7++nor9n3ZZfRZFsRWKnmx4mIf9VuyoX5LFE0rKarDcrBod6x4Bi7GlAw8RylLf4F0sbqoJ9Lvnzl7OLfH+ftRPKN7Xvz4187/SvnUmHH5i2Jz47+gal+KZbiVDfytup+J+vXo3R/Ub4miaSVFdVgOFu2OFc/AxZiSgecoZ2r9BdLF6qKeSL8DY0qhVjr6u/0h6/lqtTNm6P1b6Nf9aM37IWo/CfpduB/FHtuioRsNx5Y3+wPTsf21fdRvCYT6LVE0raSoDsvBot2x4hm4GFMy8BzlWNVfIF2sLuqZ9Ds0X5EOvjFTb+XZpHgGy27xY6OmZmJWh+hvsziMfos9n8uF2QjjX6tt6vfATfdC/ao4pagOy8Gi3UHjGbgYUzLwHKUs/ZXSxeqinkm/69jvG3wZc5j1fTUL59Qw+WB/sdzTHgzmZnrQ7/ywx/6ebvx3crdmR/0eIClfqF8VqBTVYTlYtDtoPAMXY0oGnqOUpb9Sulhd1DPpN3KwBScAACAASURBVPr1mVk5JfH5MXVTCjtv1rXZbg6zD5+Lj5+T0a/veBM7ZP58CctnMRj+Wmz+Ub++k6r/oH5VqFJUh+Vg0e6g8QxcjCkZeI5yptZfKV2sLuqZ9BudcP0z/qO241sgjH6328Psw7fZBv2G0a/9nVwzc8PicvEl+42Z/Yr3VxybGAyih1INeJ516ld1rVNUh+Vg0e6g8QxcjCkZeI5yrOqvlC5WF/VM+o2b8cfO1v68H6dyi7lfO/q1P0XuJ4WnZhT0u/L83R8T90thq3FYJr5kZQfOvxz9lpBUK9SvClOK6rAcLNodNJ6BizElA89RytJfKV2sLuqZ9Ls3sd8W31v/2mX3c/hcrhz9Dj787MPnYmf7mRvdzv2f/hIMNpHJ33f3BNl36rcgpP2T+lWRSlEdloNFu4PGM3AxpmTgOUpZ+iuli9VFPZN+7e0M0Q/Mvn42TsCLuf8MLtz5sB0Usw9LO84N+l15/u6PTW3Kwu76Gy/sNzU4+i0Z6VaoXxWnFNVhOVi0O2g8AxdjSgaeo5yp9VdKF6uLeir92o/F/HTCeX//+17Z23l3rvQ4+n3z9z5M7ZA56Lec+x0Mz01e/HI59XtOt3HPnxk3lndVCD0asnYQcq5cUqvCbfKBkxEoZ7uin5ifRZ3uQHOUY1XfiC5WF/VU+rUfjc2/RsflOOFrue63xs3nHu98cKvv9ktZ9kE9Qb8rz98FG/d85tVmePhvs7Kfpq33n/tPp/iTekPKySs/eitxQMIqs/JXctqVc+WSyBFTvxEoZ7tQlboK0BylLP2x6WJ1UU+l38Fs4WYZyuWfxbmd21sYiqWY2z2Ofv3sg5t7KPVbjn7tjb92ouHkzgf7NeVyiX29I7TiX6nfEkfOj/6UlSSsQKKs1S/nyiW1Ktwm9RuBcrYLVamrAM1RytIfmy5WF/Vc+h3MCklOisV9q7iwree69Q8Eruj3z977sHbfyjgb/c7909Ne3v7sf/6Pl3AbhP2C8nhM/Xqgqj/8B5aqyMsGQaKsNS3nyiW1Ktwm9RuBcrYLVamrAM1RztT6Y9PF6qKeTL97O2Bd/Vav8NZMwvMbPvx8bkW/g7Upfkoj6DeMfr/93EO1mnKdc78lCt3K942egv9SfqtRd5zVKFmyckk1/7COfHSFxIamsBws2rWBZ+BiTMnAc5RjVQ9WF6uLejL92m8Sn8jXPnBrbMavbrJ2NDeLk2+92X1LM/a/wRf0u9rv959/7ysjK4P69X1U/8fN5mHKrzvqjzVEypKVS0Ju5RXRFxIbmsBysGjXBp6BizElA89RytKD1cXqop5Lv7+L88c+uPngxWY3tBO3bi64cueDvZfMFjk1B/2Wk7tT8dO1d37rzXdS9R8p72F15U2Bk9hd4E0JxzJZsnLJMbtcQ04diQ0NYDlYtGsDz8DFmJKB5yhl6cHqYnVRz6XfykRv6KJ2/OvuObPLvJixrU4+2NkHP99wot/FZh4eRXmspFx73NGve9S8XUbFY5HtPxeKbf9n2FdiOKy8fQl3+h0D18Xfescd11r7iD4AT9W6LFm5JFIxoi8kNjSF5WDRrg08AxdjSgaeo5yp9WB1sbqop9LvuxnHR62j3/dZvCT05Au/3uxf3BnnUfwlZf+emqz8bLn790JY3DM6I8to0T7CHJsw9x6poMtdUz+vlNSCLFm5JNIQoi8kNjSF5WDRrg08AxdjSgaeoxyrerC6WF1Ub/S7Pt6Oi629FTcshC7X9Lo6/6paU3iHZfep34l72vzQTtaMnTGHpth2+7bRQe7ePsO+za2zG33rwslD+Euj/brLkpVLIrUi+kJiQ1NYDhbt2sAzcDGmZOA5Sll6sLpYXVRv9Gtv2EpepqG/Nb/aX60o7/Ftjuy89B71Owmjxe/i6fND/y2VJlT2iyyt+g3PlWuqp5uy9/SfXpUlK5dETgLRFxIbmsJysGjXBp6BizElA89RytKD1cXqovqj38O9uPjLQr4NIfTD4lU3LXua09HWPeq3uAnEAbFatVM1rfq1Py0dfcjRCdOd+T7Zvt7GfpE8+StLVi6JnBiiLyQ2NIXlYNGuDTwDF2NKBp4zB3qhLpZzv6EXhtftIty3G/bc7PU+9eu+k+0W+4h6+zFlTb/78Oj5l5diGn0/XNjHIrdMPszMIjpvUTTU7Z+r8uek0HZkycolkTYQfSGxoSksB4t2baBfbkjLuUYrU+DzX13snY1+lRMIoWdVXotvRlR2SKvFM3Gk0qvuv0f9lpMP9qlu7ivXG7OsmnN5nDsqfqXJvW1aR7/T9AnY7Av2ZRatX0+MNyJLVi6J1IQID4kNTWE5WLRr4xpivE4rOqUWXHWxujFybyYfrqDf3flNv6GjXvv1HvV7nHwofgavNvpdTsaTcfH/xuH8ds8qahv92l8xle5Zu8IlWZlNWuuyZOWSyPkgwutedXgLeIZ7TsprhETTLjwDb0Wn1OIodbG6qGfS78TNWPZjuU/9+smHzy/7pT93z8DQLMKPfXjfnpL9m0ys2lr0a78GEyY0TrOvs/W5McOk8a8sWbkkckqX0G9lzuc4+3NYs7++crZP3qGLfqvclzS3PwmDLvaZ29UqWtNn9jEt2xm62B+rgVLsDzwe4t9bFxvbGvP+rot6X2o/uIp0n8vtUk8gRJrU5uZ8yT/SbNaue9Rv5d4UO651+i2X8JvQFSZr/2FG4+TDp/0Ib1VJuf7qn33O/vx1VnGA7hhkyb6VTLhCAu0Epk+j35HqZ4B077/cqHvU7+FrF4vx9NXP+donzo/eDssZj2XxdcHY6Pf1xy+rufV5+7Phzmq+6I7Pn9N3SOTvkVh7fdKveKvQwn2bfiEW1wq00WN3m/dhWVTu/A772l4XplpDW/Rw+PExMeMPdJkYmwgsC7M5RK9bl4nZtMas17qo9epp9HurJxvG3r/3qd/lYG//C0tt7vd1Yx89X/y/G3yZ8Z97+Lz9jO7zmFFkVh+MbBa3nHxwx/O3nW6GpYqG4eSaX5v0O25Or
ZZeYvKhWl99HZubxaJdW3gGPiubkoHnKGdqPWBdrC7qieZ+//XiL5riLXKP+j3e+VCcQ02/J3c+2GmFcqkLdlmMfv2f9vmfs6K2u/qT+vWXi/qVey31W2PzWvmh4lrR1TfvUb/HOx8KXDX9ftp5iMOz5/8Gy8OHcnZ6YVjX7wnsH3d/xN0t1K+/ZNSv3HOp3xob6rcGBNyc1O5SsPqtzyuc1Rib+z0Jekn/6tlJPdfdoH497yT9LnjjWaWz9uOf5Nq7FyoHXq5qc/s04uzTsZQgW1bOR79b+/T5wyLkturXzh9uhdwe76Z+/cVJ0i/v+612bOq3SuNa6/ep39NpBHvTVrksBHCNN575HP/tDCG7t7upX39prqRfeLxsP3oDc5RTBf6sdbG6qCf66K1PyuvTsWgld/7RW2lfI+u35ZkP9gdFpFztcd0gjvr10K+kX3i8zDsfwPeEdgIhVq02t0/K69OxxJhecd84+bljVzzIWlPUrwdC/db6RWWTo98KDLfaJ+X16VhqmK69ebMfG8o4UerXw6N+5T5E/dbY9El5fboLo4bp2psp7+FrH2O9PerXE0m5dOisLD6P6w4NbUUpS3/WulhdFOd+PdJr/9GPDzyvfdbR9rY3fORk9IAUO6lfDylJv/BMLp94puiReSHa+dtYK9rcPo1++/QF6BjTK+7r02XRnjb160lRv3KH4ei3xqZP7/NZ+u+M1c7q7jfv8R8C1K/vdtSv/O6jfmts+qRf+f1bO+jH37zHfwjIl08uiVzJp3zkDnhHLj6P60Bz7jfS3eRd2gmEWA3a3D7p1z4KLHYqkX2j1+3y38uhYPTlfp3h7evL/tbPYXn5+nK31tp9bhk1PFD+82sW6gnJfXi9x38IyJKVSyKsn1K/nPut9oR+/NtPq9DqkYd1bW6f9DuYtP0GZXF2v/a3gt0yL34UZ+c/prJPbByXT1uwzwxzPyBqX4pluI0b+M9HHOoJ6Prw+mUiv5XRhwNrOAZZsnJJpDrqNwLlbBc/ejtDcukdWoXG2tXm9kq/ut+de7cPtJ6vVjtjht6/hX7XVrTvBxJ7+8zyQr8L9+hq90j0YeyLZqOxGa5W9inUcTnHuF5pHySsKx1TWzPyMcslkTqp3wiUs13U7xmSS+/QKjTWrja3V/rV9amNmfph7mxSPCxzt3A/sjY1k/JHeuxvVR70u/JsPpf2wf0Rxa59TX9DX0GM4s32QcK62VGeNiwfs1xyWoPfekr9cu632hM4+VClcbX1bSnQhia/jDnM1r6ahXNqmHwofmrYZc7N9KBfNwZ2i/0V+PNniP2aiXfysn93XPTpJ/gKgu1/ypKVSyK1PqV+Ofdb7QnUb5XG1dbfNY8ZnxlzmOP9/Ji6KQX786n2T/szrpvD7MPn4uPnZPRri3/MxGZ9lj9p+2nFGx7ruB/17tO3lzt85o4sWbkk0rWo3wiUs126fyiepqE5ytvEfCO6WF0Uv/V2et2utfVSDmwbWvwrftH9GBFGv9sweP4226DfMPodjPwP+JSfxfm5YT/V/BKbFD5WfqM16lcFPuUeWywHi3YHjWe4f669qs73GIRn4K0oZekPSheri6J+j9f5qmvr2m9HRBv/sR+k/bz7aQNfXsz92tHvyI9w3TTwKOh3Vdbgf5Zidfi5n/F4YksW5u/bPp938VPeMVFG33qF+lVdgRTVYTlYtDtoPMOKkXO/1evNyYcqjeutv5pNuwr31r922f18FQdWjn4HH3724XOxs3MNxZ0P5eh3sDmb/N3b+WAzWVsB79obvR4D3xL1qwKeojosB4t2B41n4OPSlAw8RzlW9VdKF6uL4uhX1fkvH7QfmvnX6Lgch7gnbX39+B+VWMz9nG2482E7KGYflnYIHfS7KvM2Zw+xsVMdZmVbsDeyuVsnerVQv6rLkaI6LAeLdgeNZ+BiTMnAc5Sy9FdKF6uLon5Vnb+DoNnCj2zDH//EJv6+V/Z2Xj9sPY5+38zEJkzNX6nf4+h3eDb6tfotxtpLnyY2dYsC6ldFPUV1WA4W7Q4az8DFmJKB5yhl6a+ULlYXRf2qOn8XQV/u6xPGTIolfI8i2tJ+a/xUcXnng7sJ4n3w4m6fOBv92pkG++nGajM8/LdZDdzkg6/Yfph3mMeItnOLndSvinqK6rAcLNodNJ5hxci53+r15txvlcY11z/tl4dXjTLczmfhgOZ+hvc4+vWzD27uodRvOfq1N/7aR0Kc3vlQ/k7bwpRVhqpv/Er9qi5AiuqwHCzaHTSegY9LUzLwHOVY1V8pXawuiqNfVefvIsh+d6JFhYVtfdtbM7WvFf3+2Xsf1u5bGWej37n/asXL25/9z/9hg+zNEr4aOwvRt7vPqF9V50pRHZaDRbuDxjNwMaZk4DlKWforpYvVRVG/qs7fQdC/9nkAe7dCkOWH92dFv9aoxbetg37D6Pfbzz3UDjh82+3VDGslN9+kflWXIEV1WA4W7Q4az8DFmJKB5yhl6a+ULlYXRf2qOn8HQYVQGyt+GZvxq7slYjQ3i5Nvvdl9SzP2d7AH/a72+/3n3/vK+HFyrd6Xib/l4e/8U7la4PU3n1i/iL6Q2HARsRws2rWBZ1gxcu43XB73yrnfKo3rrf+rPDRSbNXdHbHY7Ib2Ezp/Z0R19Gs/RfPPgQj69Z/juT+msXvY/i3MbvszMWve9yvS1hfIXy2WSyK1I/pCYkNTWA4W7drAM/BxaUoGnqMcq3qwulhdFPUb+uqVX+dnd4fFDuDF3XNml3nxEV1Vv3b2wc83nOh3sZkLt1D4JwcvVr2zr719YxE7817vkyUrl0ROCNEXEhuawnKwaNcGnoGLMSUDz1HK0oPVxeqiqN/QV6/7am8FO/5iRWPTo9/3WWw825gUK3x7/71IPbG6M/ZRvyp4KarDcrBod9B4Bi7GlAw8RylLf6V0sboo6lfV+S8e9N6/Jz9e/Bx1FVK/Kk4pqsNysGh30HiGFSPnfqvXm3O/VRpXWw+PLLtag71tiPpVXZoU1WE5WLQ7aDwDH5emZOA5yrGqv1K6WF1Ub0a/6+PjD7C1t+KW2PY+3Ktfu5jCz91rP8H7jKB+VdctRXVYDhbtDhrPwMWYkoHnKGXpr5QuVhfVG/3a3yxLXtw3EtqXXul32Lsv/7YD7CaC+lVxTVEdloNFu4PGM3AxpmTgOUpZ+iuli9VF9Ue/h2cf4C+L2I2ukS7dK/0a08ePwSLUOt9F/aoQp6gOy8Gi3UHjGVaMnPutXm/O/VZpXGvdfmX4Wk31vR3qV3WFUlSH5WDR7qDxDHxcmpKB5yjHqv5K6WJ1Ub0Z/eomEGI99R5/6XhmNrFTecZ91K/qqqeoDsvBot1B4xm4GFMy8BylLP2V0sXqoqhfVee/dNA/zS9tXrrRftZH/aquS4rqsBws2h30NX6F7TqtrM2v6iq4IF0s9VsD2qe539fiG2u1I3zKTepXddlxOaKjU7yFJP32cu7X/xSt6jq4xw5qVE391nD2Sb99OpYapmtvUr8q4kmqg+5uxFvAM9wPtMi/6xIHgWfgreiUWhyfLlY3Rubk
Q/yad7yX+i0BU78liqaVFNVhOVi0O1Y8AxdjSgaeo1NqcX10sboo6repz3dWRv2WaKnfEkXTSorqsBws2h0rnoGLMSUDz1HK0l8gXawuivpt6vOdlVG/JVrqt0TRtJKiOiwHi3bHit/Di4sxJQPPUcrSXyBdrC6K+m3q852VUb8lWuq3RNG0gssRHZ3iLeAZuBhTMvAcpSz9BdLF6qKo36Y+31kZ9VuipX5LFE0rKSNNLAeLdsdK/cpXjPqtsemT8vp0LDVM196kflXEU1SH5WDR7qDxDHxcmpKB5yhl6a+ULlYXxdGvqvNfOoj6LYlSvyWKphVJdfsXeZmbpVx4VqKLfqs8j9D+Ykt1s1Iir67NUi6MlMxmO7OdoQua82GWhybeW5eN2bbGvL/rot6XygfWNHWN/DLtF4djLWlz+6S8Ph1LjOkV9z2afpOf28fEJyQwpX6v6JrQFPUbSDzcb71d2yENjwi0P8baUFov0kWPh8dlYSbHDeXawlRraE/6+FiYzQe6oDnH+HXrMjEfrTHrtS5qvaJ+SxNcb4X6LVk/2uh3XJ5Z6wryNV9p8qGpESwHi3bt4hn4rGxKBp6jnKn1sHWxuijO/Tb1387KqN8SLfVbomhaSVEdloNFu2PFM3AxpmTgOUpZ+guki9VFUb9Nfb6zMuq3REv9liiaVlJUh+Vg0e5Y8QxcjCkZeI5Slv4C6WJ1UdRvU5/vrIz6LdFSvyWKphX8rlxUj7hMU44Jf4AOnkH9NvWkSJn27oVI6kCb2yfl9elYYkyvuI/6VcHG5XgF/UJPVCtOE5cpntGtfnXPMtNFPc/od98n5fXpWFTv/u6CqF8V2yT9Qs/WxceyKceEyxTP6Fa/umkFXdTz6HewNT+qjn6NIOq3pEz9liiaVlJUh+Vg0e5Y8QxcjCkZeI5Slv4C6WJ1UU+k3x+zberfVy2jfkvc1G+JomkFH5uiesRlmnJM+FgWz6B+m3pSpEw7fxtJVc/9IndZxtq55D7qt6RJ/ZYomlZwOV5Bv08596sb1+qinmj0m/K3aNMbIqeM+i3pUb8liqaVJP1y7rcJaaVMKUufoYvVRT2RfjdmVgF+21Xqt+RP/ZYomlaS9AuNTvEW8Ax8WiAlA89RytJfIF2sLuqJ9GvMS1P/vmoZ9Vvipn5LFE0rKfOsmB6xaHesKceE/xsUz6B+m3pSpOwKc79fBvg2fuQQL7qL+i1xUr8liqYVXI6c+23ieVqmHKv6JF2sLup5Rr9LMz9Ffsst6rekT/2WKJpWkvTLud8mpJUypSx9hi5WF/U8+rVPk67wvvEq9VteAOq3RNG0kqRfzv02Ia2UKWXpM3Sxuqin0e9+Yr4qvG+8Sv2WF4D6LVE0raTMs2LKxqLdsaYcEz6Ti2dw7repJ0XKup/7fTUfkXZvtYv6LclTvyWKphVcjpz7beJ5WqYcq/okXawu6mlGv7s+zT0MqN+y+1O/JYqmlST9cu63CWmlTClLn6GL1UU9i35fzWRfwX3rVeq3vALUb4miaSVJv5z7bUJaKVPK0mfoYnVRT6Lf2aRXg1+Ofo9dn/o9smhYS5lnxZSNRbtDTTkmfCYXz+Dcb0NHihV1PPf7O+nTXWcWAEe/ZS+gfksUTSu4HDn328TztEw5VvVJulhd1KOOfj9H5TJ7nRoz7dPUA/Vb6fxPrF9EqUhsoIuNTrFo10bKMeFjWTyDo9/QA5SvFx/9Lk9+73ux7Zd9Ofo99gvq98iiYS1FdVgOFu0OFc/AxZiSgecox6r++uhidVGPOvpdTsrlY7r9a+jXNyni5EOJnfotUTSt4GNTVI+4TFOOCR/L4hnUb1NPipRdfPQbaaNPu6jf8mpQvyWKphVcjlfQL3RnRXF2uEzxDOq3qSdFyl7NupyrBVfetmYaqbHfu6jf8vpQvyWKppUk/fK+3yaklTLlVIHP0MXqonoz+TA5mazFNqjfSk+6t1XqV3XFkvQLjU7xFvAMfFyakoHnKGX5wPot52rRlQVHv6o3cE+Dnlm/wOg0ZZ4V0yMW7XpTyjHhUwl4BvULvtc59wsCe5zwZ9YvMDrF5ci5X/2b5OlHv+kTCDnq1l+gy0Zy7rfkSf2WKJpWkvQLjK5TxrIpx4SPZfEMjn6belKkLEehObmRQ7nKLuq3xLw3ply/l5U38ZdT5JLIuSH6QmJDU1gOFu3awDNwMaZk4Dkc/YY+A79SvzCyXiUY07PvxLTTkSUrl0RqReZOkdjQFKZHLNq1kXJM+FgWz6B+Qw9QvuYoNCdXeXgXD+Po94h0Ynr3rZjjwcXXZMnKJZGaEOEhsaEpLAeLdm3gGbgYUzLwHI5+Q5+BX6lfGFmvEjZm1qvjURyMLFm5JFItoi8kNjSFjU6xaNdGyjHhY1k8g/oNPUD5mqPQnFzl4V08jKPfI9Kp+T5u3MeaLFm5JHJmiL6Q2NAUloNFuzbwDFyMKRl4Dke/oc/Ar9QvjKxXCat+PYpZw0aWrFwSqRcZbyKxoSlMj1i0a+Ma49LrtLIBfghSF6sU+r9efGkhR6E5uaGjXvuVo98j8a1ZHTfuY02WrFwSOTNEeEhsaArLwaJdG9RvIH3+Sv2eM+nNHur3eCn+mfVx4z7WZMnKJZEzQ4TXverwFvCMwUCppQotPANvRTeiLQ5KF6uL6s0zH/i1i0qPe6pV+72Le7vzTJasXBK5ptRvBMrZLur3DMmld+RMIOTkXvo8tPVx9FshNTS/la17WJUlK5dEzguZz00ZaWI5WLQ7HTwDH5emZOA5yrGqv4a6WF0UR7+Rt0X3u6jfCuOV+als3cOqLFm5JHJeHP1GoJztus7oV3/zo+5GSer37EL2Zwf1W7kWMzO5s9kHWbJySeWEwyr1G0g0vV5Hv19Nh3BSphOrLoqj3xO019qgfqukN/d265ksWbmkesKHdeo3AuVs173qVzWe5o1nZ9f7Cjuo3yrkVzN8qW73fl2WrFwSOSnO/UagnO26V/2qxtPU79n1vsIO6rcKeb8z87uafpAlK5dUT/iwztFvBMrZLur3DMmld+TcvZCTe+nz0NZH/Z6Q+lqY+T2Nf2XJyiUnJ1xsUL8RKGe7qN/PMya6Heq3VI5Cc3J1p3H5KOr3lOnvwox/fis/snpa3LctWbJySeQcqN8IlLNd19GvaqbWH9vV73x4+VidQdHs+J3804TZmByF5uQqD+/iYdRvDelod/rrqrXinm3+DXfCEWH6BX6NQrrHdv8iL2vzKheeleii3yp/R67NsrKlW/0w37rAQ9Rs9mGWM3RBc4bm+9DEe+syNMvWmPd3XdT7sv2ZD38fJulr+XZIY5T+zVFoTq7wJup8N/V7hvh3tRsef2X1rPhOdryd/i3CLRJoJDBt1e/fxlWwgvu/s6/WvzkKzcmFT+pCCdTvhUD2rZrr6/f4V9bZmjGLs33yDl30eHhc7IzRcUO5ZsCcj4+F2XygC5pzjF+3Lguza41Zr3VR61Wbft3Y1y0rsKcX9lX6N0ehObngOV0snPq9GMp+VYRNPgC/dCxNPjSdPpaDRbt28Qz868ApGXiO8ksSHrY
uVhfV/rWLt2GhX9C/wb66Z2nnKDQnt6nzdllG/XZJ94Z1U78t8PEP0vCMR9LvIMm/mH350VtLp2XxnRCgflsuFC5TPOOh9Dv4O4x/F6sWtMdi0L7U7xEd1+6ZAPXbcvVwmeIZj6VffPyL2pf6bem0LL4TAtRvy4XCZYpnpOj38vf9qmpUfekYHP/C9qV+Wzoti++EAKbfC9z328QF+2gMi3bt4hm4GFMy8BzlB2Ueti5WF9X+0ZtvEpr/xe1L/XrK/OPuCWD65Z0Pigt+ndGv6gE5/mh1YtVFKfWLzP8m2Jf6VXRDhtwBAeq35SLhMsUzHm70Oxi8+S9fKO7/TbEv9dvSaVl8JwSo35YLhcsUz3hA/Zafv7Xc/5BkX+q3pdOy+E4IYPrl3K/islK/HpJq/JtmX+pX0Q0ZcgcEMP1y7ldxSa+jX9V9Cv5otU88U9WouvOhgBQ+f2sY/ybal/pVdEOG3AEB6rflIuEyxTMecfLBYg33n5mffRxyqn2p3zhP7r03AtRvyxXDZYpnPKh+j/5dRSEn25f6jfLkzrsjAOr3W32CKffYYjlYtDtwPAMXY0oGnqO8TcxfLV2sLkp749mhm5Tzv7Hxb7p9qd8DX77cOQFIv4i+kNjAEMvBol0beAYuxpQMPEcpSw9WF6uLAvVb3v8Qef5Zhn2p3/CO4et9E6B+W64fPpWAZzyufo/zD/Xxb459qd+WTsviOyFA/bZcKFymeEaKflX3Kfhzu9WdDwXY4+dvJ6Cz7Ev9nrDkRm8JLJfLwxv1e7mMzNxSvy2XDpcpnpGi335/6bgCtfTvB8UOEQAAIABJREFUqrIzz77UbwUlV3tMwH7tc1sc3tqY9fmBUr/nTE724DLFMx5av5X5hxJspn2p35IkV3pNgPqtXB5+9FaBUVvt5qM338jZ+DfXvtRv7eJxs6cEqN/KhaF+KzBqqx3qtz7+zbYv9Vu7eNzsKQHqt3JhqN8KjNpql/odFD8+b/viyraab1/qt3bxuNlTAtRv5cI8kn7v5c6HAv/x+xeXsC/1W+nUXO0xAeq3cnEeSb93c+dDwb/073xhu6RbIrfhVC5V82rOj8Xn5DYfVXel/KH57th2WrPt55u5X8a884H6lftap5MPttnSv969efa1o9/1KHF525qpTKGnJdRvTy9M22EdOnvxwhvP/rXxOivHbyPDMx77xrMD0vL+B98Xc8a+A6vfyUnHxjao37Nezh3dEDjpmNQv9St1s65Hv/b5k+H3h2yfzLOv02/ysuDoV+oD3H9pArarr37ff+3/H5x84OSD3L261+/x+Tu59uVHb/J1ZEmfCFj98ltv4YI8kn7v684HfwXKO87MT7giia85H5/l5CYebnYa536zEd6mAuq3wv2R9Htndz7Yq/Bbma/N9G+OQnNyK33pqqvU71VxX64x6rfCkvqtwKitdj75cBz72j6ZOf7NUWhObo3Z1Tap36uhvmxD1G+F59q8V7Z0q/h9DHjGM9z5cGrf4vtvuisQicpRaE5u5FCusov6vQrmyzdC/VaYXkOMuErdAaJHNjZvlfNqXtXFdjz6Le27DPc/5Mw/5Cg0J7cZdHel1G93bDut+Wb6TRlpYjlYtKOMSi4t5xqt6JRa9CxdbLf6Le37XX/+TnGM4J85Cs3JBQ/zYuHbzMmaix0IK8II7Nbr1yLjZ72LDDg6e95v9xLCW8AzBgOllipXBc/AW9EptTgoXawuCv2ttwOWin0r9/9GumMFY9NqjkJzcpuOqcsy6rdLujesm/ptgY/LFM94dP2e2PcS/s1RaE5uS1/prPgn3D3aWQus+CYEqN8W7LhM8YwH12/Nvhfwb45Cc3Jb+kpnxdRvZ2hvWzH128Iflyme8dj6PbNvvn9zFJqT29JXOiuem8MMYmctsOKbEKB+W7DjMsUzHlq/EftWnn+WNv+bo9Cc3Ja+0lkx/ilzZ4fCii9JgPptoYnLFM94ZP1G7Zs7/s1RaE5uS1/prHhoRp3VzYpvSID6bYG/MfqnKxRV4RlOv1gryvsU/AHpYnVR8J0Pgn2tf4f2nki3pIx/cxSak9vSV7oq/jRm31XdrPeWBKjfFvr4WBbPeNzRr2jfvPFvjkJzclv6SlfFM7PpqmrWe1MC1G8LflymeMbD6rfBvln+zVFoTm5LX+mqeGnmXVXNem9KgPptwY/LFM94VP022jfHvzkKzclt6StdFU9540NXaG9cL/XbcgFwmeIZD6rfFvtm+DdHoTm5LX2lo2I79fvXUdWs9rYEqN8W/rhM8YzH1G+rfdP9m6PQnNyWvtJR8dJEfiWso7ZY7VUJUL8tuNF7EpxKsbsY3AGgOcr7FPy56WJ1UcCdDwr7Jvs3R6E5uS19pZvi/YZzD92QvX2t1G/LNcDHsnjGI45+VfZN9W+OQnNyW/pKN8VbM+RtZ92gvXmt1G/LJcBlimc8oH6V9k30b45Cc3Jb+konxZYk/vvcnRwJK704AavfF/WyNq/q2J35F439G8nLh/mWC89KdNFfs+PyYZbHDeXa0LwqI4uw9/ehWb6jC5ozMa+HJr5bl4lZtsZ8f+uivre6n2lX2zfNvzkKzcm9+NuvvUJ7YVbtUYy4TwJvxVeP+CcJqAhMVfoF7Jvk3xyF5uRe5T3+V/nrfrk2vOf3KtRv04jV70S9GLPIjh0P5cWYptJ6ni5683FcFqa6ddzftIbmrNcLY59xDy5ozjF+3roYM22Nmc91UfOVRr+QfVP8m6PQnNyrvEeXJ38PTrZXaZSN3IQA535bsOMzuXhGt3O/Q9XvwumiVHc+gPZN8G+OQnNyW/rKZYpfy3HG5mO+/LxMpayllwSo35bLgssUz+hWv7pbynRRGv3C9sX9m6PQnNyWvsJiEsAIUL8tvHCZ4hkPpd8E+8L+zVFoTm5LX2ExCWAEqN8WXrhM8YxH0u/X4jB1+d0C9rT4L/z+vGqqM0ehObmnx8wtEsgkQP22AES/j+ZU+szfetvbD/Hcgtm3HP+Ov1quhy/OUWhOrubYGEMCNQL70VttT9iE9Iv8UDsSGw4Gy8GiXRt4Bj4uTcnAc5QztR6sLlYXpZj7LfyL2vfg37HuVx1yFJqTGzoqX0kAICBLVi6JVI/oC4kNTWE5WLRrA8/AxZiSgecoZenB6mJ1UQr9DgZ2/Ivb1/tXad9BjkJzckNH5SsJAARkycolkeoRfSGxoSksB4t2beAZuBhTMvAcpSw9WF2sLkql38Eqxb7Wv1Pd2HdA/YZ3DF/vgYAsWbkkcl6IvpDY0BSWg0W7NvAMXIwpGXiOUpYerC5WF6XTb7ienb3mjGBzcjs7IVb8yARkycolER6IvpDY0BSWg0W7NvAMXIwpGXiOUpYerC5WF0X9hr7KVxLQEpAlK5dE6kb0hcSGprAcLNq1gWc4MaL3MeAZeCtKWXqwulhdFPUb+ipfSUBLQJasXBKpG9EXEhuawnKwaNcGnoGPS1My8BylLD1YXawuivoNfZWvJKAlIEtWLonUjegLiQ1NYTlYtGsDz8DFmJKB5yhl6cHqYnVR1G/oq3wlAS0BWbJySa
RuRF9IbGgKy8GiXRt4Bi7GlAw8RylLD1YXq4uifkNf5SsJaAnIkpVLInUj+kJiQ1NYDhbt2sAzcDGmZOA5Sll6sLpYXRT1G/oqX0lAS0CWrFwSqRvRFxIbmsJysGjXBp6BizElA89RytKD1cXqoqjf0Ff5SgJaArJk5ZJI3Yi+kNjQFJaDRbs28AwnRt75EK6Pff2nedx6Jb6b1VezrvwkBLaq/Lmkbg6ctT4jAVmyckmEE6IvJDY0heVg0a4NPAMfl6Zk4DnKsaoHq4vVRfVGv5PiwT5Jf05Df+MrCVyDgCxZuSRyXIi+kNjQFJaDRbs28AxcjCkZeI5Slh6sLlYX1R/9qn/1qhJofyprsVj0Yvweujxfn4CALFm5JIIF0RcSG5rCcrBo1waegYsxJQPPUcrSg9XF6qJ6o9/0ESy/dBzebny9EgFZsnJJ5NAQfSGxoSksB4t2beAZuBhTMvAcpSw9WF2sLor6DX2VrySgJSBLVi6J1I3oC4kNTWE5WLRrA8/AxZiSgecoZenB6mJ1UdRv6Kt8JQEtAVmyckmkbkRfSGxoCsvBol0beIYTI+98CNfHvvblzgdOPlQuClf7TUCWrFwSOSNEX0hsaArLwaJdG3gGPi5NycBzlGNVD1YXq4uifkNf5SsJaAnIkpVLInUj+kJiQ1NYDhbt2sAzcDGmZOA5Sll6sLpYXRT1G/oqX0lAS0CWrFwSqRvRFxIbmsJysGjXBp6BizElA89RytKD1cXqoqjf0Ff5SgJaArJk5ZJI3Yi+kNjQFJaDRbs2PuB5XFyMKRl4zsT8BWitr7pY6rcVJANIIImALFm5JNIQIjwkNjSF5WDRro2NUf2MeTgc/4rn4Bn4kU3M58lRNm3oYqnfJoYsI4F0ArJk5ZJIa4jwUkaaWA4W7U4nRYxDo/39xwAMzxgM0JyFeQnNtb7qYnVjZE4+tOJmAAnUCMiSlUtqVbhNRL8pqsNysGh3/HjGYKAcFbrqDwuegbeiU2pxQLpY3RiZ+g1Xma8koCUgS1YuidR9//pF7+HFxZiSgefolFpcQl2sLor6jbwtuIsEGgnIkpVLIhXev37xuV98LItnUL+Rzta0K+e5DTm5TcfEMhIQCMiSlUsiVWH6xUea2DfMsGh3Opx8iFzUwy6OfmU2LCGBHAKyZOWSSHuYfvGRJqZHLNqdDp6Bj0tTMvAcpSz9NdTF6qI4+RB5W3AXCTQSkCUrl0QqpH4jUM52cfLhDMmld+RMIOTkXvo8WN9TEJAlK5dEwFC/EShnu6jfMySX3pGj0JzcS58H63sKArJk5ZIImPvXLz4fjcsUz+DkQ6SzNe3KUWhObtMxsYwEBAKyZOWSSFX3r198PhqXKZ5B/UY6W9OuHIXm5DYdE8tIQCAgS1YuiVSF6RcfaWL3MmDR7nT40Vvkoh528aM3mQ1LSCCHgCxZuSTSHqZffKSJ6RGLdqeDZ+Dj0pQMPEcpS38NdbG6KN75EHlbcBcJNBKQJSuXRCqkfiNQznZx8uEMyaV35Ewg5ORe+jxY31MQkCUrl0TAUL8RKGe7qN8zJJfekaPQnNxLnwfrewoCsmTlkgiY+9cvPh+NyxTP4ORDpLM17cpRaE5u0zGxjAQEArJk5ZJIVfevX3w+GpcpnkH9Rjpb064chebkNh0Ty0hAICBLVi6JVIXpFx9pYvcyYNHudPjRW+SiHnbxozeZDUtIIIeALFm5JNIepl98pInpEYt2p4Nn4OPSlAw8RylLfw11sboo3vkQeVtwFwk0EpAlK5dEKqR+I1DOdnHy4QzJpXfkTCDk5F76PFjfUxCQJWtLXtTLzvxTx27MbzT2byQvG/NPLjwr0UV/zY7L0LweN5RrY/NPGVmEvb+Pzes7ukzAnIX5PjTx3boszGtrzPe3Lup7a6Y9eMfkKDQntwenzkO4PwJN+jVcSEBNYEr93t/bn0d8WwKN+p2oF2MW2bHjobwY01Raz9NFbz6Oy8JUt477m9YMmLNeG7Nbo8sCzDEmtDBvXYyZtsbM57qo+Yr6ve1bma3fH4Em/Y71p4N8dIXEhiPAcrBo1waegX8o5n61+C2ckPoVnS+uf1C29y3F/yxiXVm8vNjvo4qApqP+R/024WEZCZwToH49k+voF1WpOzQ0p67f80t+3KOL1UXxzocjV66RgI4A9es5Ub9yd6F+ZTYsIYEcAtSvp0f9yp2I+pXZsIQEcghcTL8z9VHg30lzc7P6+tFod+BY/cWpotMC+ESCawdtRSlLfwq6WF0UJx+KXsE/SUBP4GL61X+TLWWkieVg0Q4WnoGLMSUDz1HK0vcQXawuivrVv+kYSQIFAerXc6B+5TcE9SuzYQkJ5BCgfj096lfuRNSvzIYlJJBDgPr19KhfuRNRvzIblpBADgHq19OjfuVORP3KbFhCAjkELqZf/Z0JKXcZYDlYtMOHZ+AfiqVk4DlKWfo+o4vVRfGjt5y3IXOfk8DF9Ms7H9o7EHoTmasRzVHK0h+sLlYXRf22X39GkMApAerX8+Dkw2m3qG5Rv1UaXCeByxGgfj1L6lfuUtSvzIYlJJBDgPr19KhfuRPdmX7X0DPwq8H9eGC8fCFY8nAEqF9/SalfuWffmX4n6gfEnwf24fc65AvBkocjcDH98s6H9r6BfozmakRzlLL0B6uL1UX15qM35Ln8p8/pn/TiicXt3YgRD0PgYvrlnQ/tfQJVqasRzVHK0h+sLlYX1Rv9po9g+Vtv7V2YERclQP16nJx8kHsV9SuzYQkJ5BCgfj096lfuRNSvzIYlJJBDgPr19KhfuRNRvzIblpBADgHq19OjfuVORP3KbFhCAjkELqZf3vnQfhnQj9FcjWiOUpb+YHWxuih+9NZ+/RlBAqcELqbf+77z4Ro/AY+r1F0qVL/G7E+vcMOWLpb6bUDIIhLIIED9enio5FwSnoNn4K0Yo+8NuljqV0+UkSSAELiBfodmhByhj8VysGjXQIoYJ+YFPA88YzBAc3RKLQ5cF6sbI3PyAewMDCeBwQ30m6I6LAeLdr0AzxgMlKPCSifDM/BWdEotDkoXq4uifisXmqskoCJA/XpM1K/cW6hfmQ1LSCCHwMX0q7/zIUV1WA4W7fDhGfi4NCUDz1HK0vcZXawuiqPfnLchc5+TwMX0q7/zIUV1WA4W7S48noGLMSUDz1HK0vd2Xawuivp9ToHwrHMIUL+eHvUrdyLqV2bDEhLIIUD9enrUr9yJqF+ZDUtIIIcA9evpUb9yJ6J+ZTYsIYEcAtSvp0f9yp2I+pXZsIQEcghcTL+886H9MvC+33ZGmRE5j0zPyc08bKY/J4GL6Zd3PrR3IOq3nVFmRI5Cc3IzD5vpz0mA+vXXnZMPcvfn5IPMhiUkkEOA+vX0qF+5E1G/MhuWkEAOAerX06N+5U5E/cpsWEICOQSoX0+P+pU7EfUrs2EJCeQQuJh+eedD+2XgR2/tjDIjcj4+y8nNPGymPyeBC+kX+bWIlJEmloNFuwuPZ+BPY0jJwHOUY1Xf23Wxuig+8+E5BcKzziFwIf0i+kJiw6lhOVi0awPPwMWYk
oHnKGXpwepidVHUb+irfCUBLQHq15OifuUOQ/3KbFhCAjkEqF9Pj/qVOxH1K7NhCQnkEGjU74t6GZuv7Ni/kbyMza9ceFaii/6aHZex+XfcUK4tzK8ysgh7f1+Y73d0QXOMCS18ty7GtIbYAF3U99ZMczrihXJzPj7Lyb3Q4bOa5yLQpF/DhQTUBKbU73Opg2ebT6BRvxP1YsxiMZlo/p9MpNjxUF6MaSqt5+miNx/HxZjq1nF/0xqas14bs4YXNOcYP29djGkNsQG6qPmK+s1/O7KG5yLQpN+xHgUyd4rEhiPAcrBo1waegd+TkJKB5yhnaj1YXawuinc+hL7KVxLQEqB+PSnqV+4w1K/MhiUkkEOA+vX0qF+5E1G/MhuWkEAOAerX06N+5U5E/cpsWEICOQSoX0+P+pU7EfUrs2EJCeQQoH49PepX7kTUr8yGJSSQQ4D69fSoX7kTUb8yG5aQQA4B6tfTo37lTkT9ymxYQgI5BKhfT4/6lTsR9SuzYQkJ5BCgfj096lfuRNSvzIYlJJBDgPr19KhfuRNRvzIblpBADgHq19OjfuVORP3KbFhCAjkEqF9Pj/qVO9Gd6XcNPQW0GtyPR2bKF4IlD0eA+vWXlPqVe/ad6dc+US956cMTi+ULwZKHI0D9+ktK/co9+870izyZ9PRJpZNePDJTvhAseTgC1K+/pNSv3LPvTL/pI1j+2oXcCVjSCQHq12OlfuXeRf3KbFhCAjkEqF9Pj/qVOxH1K7NhCQnkEKB+PT3qV+5E1K/MhiUkkEOA+vX0qF+5E1G/MhuWkEAOAerX06N+5U5E/cpsWEICOQSoX0+P+pU7EfUrs2EJCeQQoH49vYV5gSniOXgG/EvHL2ahPhFlLPWrJspAEoAIUL8eV5oYPyHUTqVoBpyjVKo/cGUs9QteZoaTgJLADfSbpjpkdIq3gGcMBkotVS4EngG3olSqPyhdrC5qMPjXi++M5Xx1Iie3cpW5SgJaAtSvJ0X9ih2G+hXRsIAE8ghQv54f9St2I+pXRMMCEsgjQP16ftSv2I2oXxENC0ggjwD16/lRv2I3on5FNCwggTwC1K/nR/2K3Yj6FdGwgATyCFC/nh/1K3Yj6ldEwwISyCNA/Xp+1K/YjahfEQ0LSCCPAPXr+VG/YjeifkU0LCCBPALUr+dH/YrdiPoV0bCABPIIUL+eH/UrdiPqV0TDAhLII0D9en7Ur9iNqF8RDQtIII/AhfQ7MX/q40hRHZaDRbsDxzPgpzHYVvjMB3UnSQ3MeW5DTm7q8TLvqQlcTL/6Z3mlqA7LwaLd9cczUmRK/Xb+XstRaE5u5yfGBh6RwIX0i+gLiQ3IsRws2rWBZ1C/4docXvnEsxoQbpJAGwHq1xOifsWOwrlfEQ0LSCCPAPXr+VG/YjeifkU0LCCBPALUr+dH/YrdiPoV0bCABPIIUL+eH/UrdiPqV0TDAhLII0D9en7Ur9iNqF8RDQtIII8A9ev5Ub9iN6J+RTQsIIE8Ao36fVEvC/OWHfs3kpeFmcmFZyW66K/ZcVmY3+OGcs0YZeAh7P3dmHd4AXO+zSI08d22vJpFW4gt10V9f2/5U5t5b0VmPx+BJv0aLiSgJjClfp9PHzzjPAKN+p2oF2PUoRMpdjyUF2PksvMSXfTm47gYU9067m9aM6ap9LxsvTZmDS9ozjF+3rZMjWkLseW6qPl8Rf3mvRWZ/XwEmvQ71uNA5k6R2HAEWA4W7drAM+7kW297TzD+53FWN14+GLj9xyhflfgHv/UmomEBCcQJUL+ey8PqN37Vi706seqiBgPqt4k1y0ggQoD69VCo30jfKHZRvyIaFpBAHgHq1/OjfsVuRP2KaFhAAnkEqF/Pj/oVuxH1K6JhAQnkEaB+PT/qV+xG1K+IhgUkkEeA+vX8qF+xG1G/IhoWkEAeAerX86N+xW5E/YpoWEACeQSoX8+P+hW7EfUromEBCeQRoH49P+pX7EbUr4iGBSSQR4D69fyoX7EbUb8iGhaQQB4B6tfzo37FbkT9imhYQAJ5BKhfz4/6FbsR9SuiYQEJ5BGgfj0/6lfsRtSviIYFJJBHgPr1/KhfsRvdm37X2EPwK9H9eGC8eCFY8HgEqF9/TalfsWvfm37t86STl6lIgQUk0AEB6tdDpX7FvnVv+m16av/5k/mreya9eGC8eCFY8HgEqF9/TalfsWvfm37TR7Cv1K/YC1jQCQHq12OlfsXeRf2KaFhAAnkEqF/Pj/oVuxH1K6JhAQnkEaB+Pb+F+YQ5GoOm4BnwL8r9mYn6qHSx1K8aKANJACNA/XpeVxCjbecKrbyZoboD6GKpXzVQBpIARuAm+sVHmtjoFIt2wBLEuIdz8Az7W8NgK/L1PO8XuljdGJk/tXnOl3tIoIWA/BaUSyJVInOnoFB8a1gOFu0awDPUP8B+pKUdRx4z9D/zHnKQq6aL1Y2Rqd9wBfhKAmoC8ltQLolUTv1GoNR33at+x/UTiW7zh+ajWLiTBGQCsmTlkkht1G8ESn0X9VsncvntnHt3c3Ivfyas8QkIyJKVSyJYqN8IlPou6rdO5PLbOQrNyb38mbDGJyAgS1YuiWChfiNQ6ruo3zqRy2/nKDQn9/JnwhqfgIAsWbkkgoX6jUCp76J+60Quv52j0Jzcy58Ja3wCArJk5ZIIFuo3AqW+i/qtE7n8do5Cc3Ivfyas8QkIyJKVSyJYqN8IlPou6rdO5PLbOQrNyb38mbDGJyAgS1YuiWAxZh/ZG9+VcI8teF8u3gKe8UT3/fLGs3hH5l4SyCQgS1YuiTSJ6AuJDU1hOVi0awPPoH7DtTm88r7fGhBukkAbAVmyckmkTkRfSGxoCsvBol0beAb1G67N4ZX6rQHhJgm0EZAlK5dE6kT0hcSGprAcLNq1gWdQv+HaHF6p3xoQbpJAGwFZsnJJpE5EX0hsaArLwaJdG3gG9RuuzeGV+q0B4SYJtBGQJSuXROpE9IXEhqawHCzatYFnUL/h2hxeqd8aEG6SQBsBWbJySaRORF9IbGgKy8GiXRt4BvUbrs3hlfqtAeEmCbQRkCUrl0TqRPSFxIamsBws2rWBZ1C/4docXqnfGhBukkAbAVmytmSkXoxRh46Q2FCrMTNgwaJdxUjGe7F8m8VhTfuCZ7y/t+R815elmdR3idu6WF3U9/e2F78TnPPViZzctvcZy0kgQqBJv4YLCagJTKnfyPuLu0iggUCjfifqxRh16ESKHQ/lxRi57LxEF735OC7GHNe1a2jOem3MGl7AnN0xft62TM2iLcSW66Lm8xX12/A+YxEJRAg06Vf3ZVNfKTJ3isSGI8ZysGjXBp6RNPer/xXicOLocyK0Pw3k6pevfWhdH8UfG6oy4zoJqAjIb0G5JFIxoi8kNjSF5WDRrg08I0m/i3A+6ldcv/q/NHVXWBdF/aovKQNJIBCQ31xyScitvCL6QmJDE1gOFu3awDOo33BtDq+886EGhJsk0EZAlqxcEqkT0RcSG5rCcrBo1waeQf2Ga3N4pX5rQLhJAm0EZMnKJZE6EX0hsaEpLAeLdm3gGdRv
uDaHV+q3BoSbJNBGQJasXBKpE9EXEhuawnKwaNcGnkH9hmtzeKV+a0C4SQJtBGTJyiWROhF9IbGhKSwHi3Zt4BnUb7g2h1fqtwaEmyTQRkCWrFwSqRPRFxIbmsJysGjXBp5B/YZrc3ilfmtAuEkCbQRkycolkToRfSGxoSksB4t2beAZ1G+4NodX6rcGhJsk0EZAlqxcEqkT0RcSG5rCcrBo1waeQf2Ga3N4pX5rQLhJAm0EZMnKJZE6EX0hsaEpLAeLdm3gGdRvuDaHV+q3BoSbJNBGQJasXBKpE9EXEhuawnKwaNcGnkH9hmtzeKV+a0C4SQJtBGTJyiWROhF9IbGhKSwHi3Zt4BnUb7g2h1fqtwaEmyTQRkCWrFwSqRPRFxIbmsJysGjXBp5B/YZrc3ilfmtAuEkCbQRkycolkToRfSGxoSksB4t2beAZ1G+4NodX6rcGhJsk0EZAlqxcEqkT0RcSG5rCcrBo1waeQf2Ga3N4pX5rQLhJAm0EZMnKJZE6EX0hsaEpLAeLdm3gGdRvuDaHV+q3BoSbJNBGQJasXBKpE9EXEhuawnKwaNcGnkH9hmtzeKV+a0C4SQJtBGTJyiWROhF9IbGhKSwHi3Zt4BnUb7g2h9e+6Hen/eXTs7ifXvxcUg0rNx+ZgCxZuSTCA9EXEhuawnKwaNcGnkH9hmtzeO2Lfu1vCSYv09o5cZMEOiUgS1YuiRwQoi8kNjSF5WDRrg08g/oN1+bw2hf9jrU/lXoWN+Tot3ZNudkxAVmycknkkBB9IbGhKSwHi3Zt4BnUb7g2h9e+6Dd9BPtK/dauKTc7JiBLVi6JHBKiLyQ2NIXlYNGuDTyD+g3X5vBK/daAcJME2gjIkpVLInUi+kJiQ1NYDhbt2sAzqN9wbQ6v1G8NCDdJoI2ALFm5JFInoi8kNjSF5WDRrg08g/oN1+bwSv3WgHCTBNoIyJKVSyJ1IvpCYkNTWA4WbdvYm0VoSf36AufgGQNY8l9moz4DXay2H1C/avAMJIGCgPzmkksi7BDhIbGhKSwHi7ZtXEOM12lFp9QCqy5W2w+o39BZ+UoCSgLym0suiVQNCC/rNZmcAAARtElEQVRlpInlYNHubFL0+2fGERBNu/CMwQDN0Sm1OEpdrC5qMKB+m649y0ggQkCWrFwSqQbQb4rqsBws2p0NnjEYQHw8MjwDb0UrS3dAulhdFPXrLzH/IAGEgKwEuSRSP/UbgVLfBRE9JKM5Wlm66nWxuijqt361uU0CrQTkt7dcEqmU+o1Aqe+CiB6S0RytLF31ulhdFPVbv9rcJoFWAvLbWy6JVEr9RqDUd0FED8lozgy480EXS/3WryO3SeBCBOS3t1xy3vSe+j2HcrYHIRqS0RytLF39ulhdFEe/4YrxlQTUBOS3t1xyXvmLmZzvFPakfMyF5WDR7jDxDPxDsZQMPEcrS3fWuljdGJn6dUS5kABEQJasXHLeAKIvJDa0hOVg0a4NPAMXY0oGnqNTasFVF6uLon5DX+UrCagJyJK1JfuRcpmZhTJyNGqI/ZoJy69ZCCWx3ero3/DE7W+zCKvq11czUccWgTbjG12WYM7WjMsmXpfNy48ZNwf40p+FJmq5XJmdutd1F5jz1LKc3O7OiDU/MIEm/Q7fkp9bzcQ7ILC48DEOe/A+yVFoTm4PTp2HcH8EmvQ7/htqF2O0kcOhHLs5ewL2YcfGGKkotl8bvVuHxZiwpn7dmYU6tgi0GXN0mYI562r8qnmZm0lzgC/VRa1WU+Cei+7eJzkKzcnt7oxY8wMTaNSv+ryRuVMkNhwAloNFuzbwDHxWNiUDz9HO1Lqz1n2opq2RXzp2TLmQAECA+nWwHke/OqUWHUQnVm2N1C/wtmMoCTgC1K+j8Dj61SnVnbP2xjNtjdRvQZV/koCaAPXrUFG/coehfmU2LCGBLALUr8NH/cqdiPqV2bCEBLIIUL8O33PqVzerS/1mvcGYTAIyAerXsXlO/erEqovit97k9xhLSEAgQP06MI+jX92ItugMOrFqa+RHb8JbjLtJQCJA/Toyj6NfnVKL3qCL1UVx9Cu9w7ifBEQC1K9DQ/2KHUT5XDTqVybIEhIQCFC/Dgz1K3QPu5ujX5kNS0ggiwD16/A9p351s7rUb9YbjMkkIBOgfh2b59SvTqy6KE4+yO8xlpCAQID6dWAeR7+6EW3RGXRi1dbIOx+Etxh3k4BEgPp1ZB5HvzqlFr1BF6uL4uhXeodxPwmIBKhfh4b6FTsIP3qT0bCEBPIIUL+OH/Ur9yKOfmU2LCGBLALUr8P3nPrVzepSv1lvMCaTgEyA+nVsnlO/OrHqojj3K7/HWEICAgHq14F5HP3qRrRFZ9CJVVsj73wQ3mLcTQISAerXkXkc/eqUWvQGXawuiqNf6R3G/SQgEqB+HRrqV+wgvPNBRsMSEsgjQP06ftSv3Is4+pXZsIQEsghQvw7fc+pXN6tL/Wa9wZhMAjIB6texeU796sSqi+Lcr/weYwkJCASoXwfmcfSrG9EWnUEnVm2NvPNBeItxNwlIBKhfR+Zx9KtTatEbdLG6qP6MfnfvqcuPmUpvE+4ngS4IUL+OKvUr96170+/EpC/Ur9wPWNIBAerXQaV+5a51b/odf6QuQ45+5W7Aki4IUL+O6nPqVzere2/6TR/BvlK/XSiGdcoEqF/H5jn1qxOrLqo/c7/Ur/xuZ0nPCFC/7oI8jn51I9qiE+rEqq2Rdz707K3Nw+k/AerXXaPH0a9OqUW/1MXqojj67f97nUfYOwLUr7sk1K/cMalfmQ1LSCCLAPXr8MkUZLh4Dp6BH9mv2cmHXCvRxVK/NWzcJIFLEZCVYEtG2mVmFtrQkRj7NROXX7MQy84LlNG/x9vzX83kuKFcg3O+l2byjS5oztZsDk28LtuWHzNsC7HlP2asiFout724byDn7oWc3Eu9H1nPUxFo0m/67evMfBwCC+WpTKnfpzIHT/YCBBr1O1QvxqhDh1LsRr5dfmOMXHhWoozerctlZxblunYFzpmvzWKOLmjOzkxCE6u2ZWrGbSG2XBe1WvXjK7s5I9ic3Au8FVnF8xFo0u9YjUOu5bwKJDZkYzlYtGsDzxioH0MeziEl45iz99W0/+nnc4uwY8PCmm7uVxfFOx8EyNxNAjIBWTxyyXltXcWGlpD6U2SK1V8clfYjqXAOR5Ue97Svoa1oZela1sXqoqjf9mvJCBKoEZDFI5fUqrCbXcWGlpD6saMpWsDqL3JQMVK/4Wp2+ZozgZCT2+U5se6HJSCLRy45h9FVbGgJqf9a+tV+Gyycw2CAZ+DK1o5V3VHpYnVRHP0erzPXSEBJQBabXHJedVexoSWk/mvpl6PfcHWKV37p+JQHt0iglYAsNrnkvNKuYkNLSP2PpV90xKwdqzqyulhdFEe/oa/ylQTUBGSxySXnlXcVG1pC6n8s/aJjbK0sHVldrC6K+g19la8koCYgi00uOa+8q9jQElI/9Ruotb3qxKqLon7baLOcBM4IyGKTS84q4Z0P50gie9CRrKsCzdHK0tWti9VFUb+OKBcSgAj
IkpVLzhvoKja0hNR/rdEvOivLOx/C1ezyNefmsZzcLs+JdT8sAVlscsk5jK5iQ0tI/dfSLzouxUey7uzRVrRjVVe3LlYXxdGvI8qFBCACstjkkvMGuooNLSH1P5Z+0TG2VpaOrC5WF0X9hr7KVxJQE5DFJpecV95VbGgJqf+x9MvRb+gDutecCYScXN3RMYoETgjIYpNLTirwG13FhpaQ+qnfQK3tVTeu1UVx9NtGm+UkcEZAFptcclYJ73w4RxLZg45kXRVojlaWrm5drC6K+nVEuZAARECWrFxy3kBXsaElpP5rjX7RWVne+RCuZpevORMIObldnhPrflgCstjkknMYXcWGlpD6r6VfdFyKj2Td2aOtaMeqrm5drC6Ko19HlAsJqAi8/CvCSrHtv2sP6S5LFPV1FRuaRup/LP2iY2ytLB1ZXawuivoNfZWvJNBOYP3hBXwQ2345/KnlIMrrKjYcElL/Y+mXo9/QB3SvORMIObm6o2MUCRQEZvbn06yAvdisfM3ir0YGUV5XseGQkPqp30Ct7VU3rtVFcfTbRpvlJFAhMLW/YLt7t2Lbvw7tr9nWB7+d3c2AqbQ4YCwHi3Yt4Bn4rGxKBp6jlaU7a12sLor6dUS5kICSwK//AfEPM7E/PWzM4qWehiipq9hwTEj9KTLF6i+OCp2V5Z0P4Wp2+ZozgZCT2+U5se5HJOCGv+VyNviFRoSIvpDYgB3LwaJdG3gGPi5NycBztGNVd9a6WF0UR7+OKBcS0BIohr+FgM8Hv5CSEH0hseFUsBws2rWBZ+BiTMnAR8xaWbqz1sXqoqhfR5QLCagJrMux7/nML6YkRF9IbDgVLAeLdm3gGSkyRe9icEeG5mhl6erWxeqiqF9HlAsJqAkch7+RwS+kJERfSGw4FSwHi3Zt4Bm4GFMy8BytLN1Z62J1UdSvI8qFBPQEyuHv+cwvpiREX0hsOBUsB4t2beAZuBhTMvAcrSzdWetidVHUryPKhQT0BMLwNzb4hZSE6AuJDaeC5WDRrg08A5+VTcmgfkMP0L7m3L2Qk6s9PsaRQEngMPyNDX4hJSH6QmLDgWI5WLRrA8/AxZiSgedox6rurHWxuiiOfh1RLiQAECiGv9HBL6QkRF9IbDgVLAeLdm3gGbgYUzLwEbNWlu6sdbG6KOrXEeVCAggBP/yNDn4hJSH6QmLDqWA5WLRrA89IkSl6F4M7MjRHK0tXty5WF0X9OqJcSAAh8G7vPYsPfiElIfpCYsOpYDlYtGsDz8DFmJKB52hl6c5aF6uLon4dUS4kABHYRZ72UFSAKKmr2HAqSP0pMsXqL44KHZfiKnXtoK1oZenq1sXqoqhfR5QLCUAE3qXBLzQiRPSFxIZTwXKwaNcGnoHPyqZkUL+hB2hfc+5eyMnVHh/jSKBKYB2f+cWUhOgLiQ0HiuVg0a4NPAMXY0oGnqMdq7qz1sXqovoz+t29py4/ZuqwcCGBqxGY1Z/zG1pGlNRVbMqxpMgUOf5wTOi0AK5S1xL6XDWtLF3dulhdVH/0e/wmPb5G/bp+waUHBBAldRUbMCD1P5Z+UclrZenI6mJ1UX3R7/s6Z9mGDsdXErgtAUR5XcUGAkj91G+g1vaqE6suqi/6bTtnlpPAXRBAlNdVbACF1E/9Bmptrzqx6qKo3zbaLCcBgIBV3ki7/HYUG9pH6h+NsGjXBp4xGv0zm3B4ylc8A2/l23woj2Y00sXqokajJT+4At5dDCWBRgJv+CcXzHhqAvzgqvENxUIS0BP4G0/Ui/2pzk5iQ6VI/ZMJFu3awDNScq7RCtKGLlYX5Siu9J2LkSRAApci8DfcqavC5nGLarEcLNq1gBx/OFH0noS0G8/QVmbDeTjA1lddrHbut7U5BpAACdyaAC5H9MO0lBZwKqgYr6Nf/DzaMqjfNkIsJ4G7IZAy0sRysOhUcF9DdK4Tz7DKhltJPR8pTzdGlrK5nwRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgAS6I/C1XC7/XPV/dmW5d2tul19xG1xIgARI4CoEvryJrtJUTxqZGWO+3bEs7YqZubWVMWP3+uTLF/8KevIewNO/KoGv+cdV2+tDY/uFMT/uQOZOv1u39mGM/lfcXcJjLu+bVwr4MS8tz6p/BL6sgP7177C6PqK1MTvXxsTp1/2o8d6+Lrtu9R7q35khBXwPF4rHePcEnHzN8w1+B4OtPW87ynOTEMYs7Nqvff26++t5gRP4Z0FQwBcAySpIoJHAyP/Tu5gEbQx8vEJn25m3sB0HH9Y49euv884CoYAfr8vzjHpFYDS3E6B2ecbBb5hrmNoZB+tfO/lr/ybi1K/vn274axeOgHv1buXBPBSBkf2kv1j8HQAPdW6ak7FjvPnAfQL3Zech7OSvnQTm1G8Bzg9/KWBNL2IMCSQQeCvlazYJ6TdP+Xw5XT7hI/qx4zs34Tt287+L/ZvVjZ2M4GIJHIa/FDB7Awl0QKAiX3v/1ffJ4r6GcLJsa8tPfVnVlnl9ma5ry0dt2Qxry6S2FLMk1gfSgv8d4hzz92PMyg98Z9/WwRmkX94fadkcMXMKIqNXMJUEzgn8tNrs+Pbr6dqi5md/E9n5mTbs+bRn9s/+M/vVT/tu7b8G3O1nqYv7JO9Bl+FvKhTmkQAJnBH4O/Hv+nSwWhvKrlb1wW5tMLzdnoyV7cbr9/fp///qI8NZbfka1ZbTmYWXF3xu4eykz3bYr1m4GZg3/823qRWx//LFWZhux6w2nr/vzXHlbxJ+C0PXAxhFAloCVQH7z/vtja92cX8Wa37zsf8oZr/drMXITjxY4XDq93DBX47/OtrYfxxwIQESuCyBl3IEvHjSLxvY2V67+G8eD92a++4FF0fgx+FwC+XLDkEC3RB4+fFfuH3a213/vGH89639QHjdDeb7q/XvMPilfO/v2vGI74fAy7YQ8JMOf/2Y98VdLj8Qzpn6vZ9rrjjSH//3EuWrQMUQEsggUAj4Sb/t5ca8xR0TfiDMD/iLjuQHv5RvxruKqSSgJPDpRsDPOfz9t1uvD190m9sbkzn1W3QZO/ilfJXvHoaRQCaB/Xb8pMPfTHCPmf6yoHwf88ryrPpJ4HNrb37lQgKOwCtvNWNHIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIA
ESIAESIAESIAESIAESIAESIIGuCPwH9j3d4Kg1fY0AAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![MVA-1.png](attachment:MVA-1.png)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Based on the above block diagram we find that the `PE` & `SIMD` parallelization attributes are subject to the following constraints. \n", + "If `W` is the width of the input and `H` is the height of the output in a Matrix-Vector Computation then:\n", + "\n", + " W % SIMD == 0\n", + " H % PE == 0\n", + " \n", + "For the above example, H = 12 and W = 12. The demonstrated PE & SIMD values adhere to the above constraints.\n", + "\n", + "We also define a term referred to as total folding, which is defined as:\n", + "\n", + " Total folding = (H/PE) x (W/SIMD)\n", + "\n", + "The goal of adjusting these parameters is to get an almost balanced pipeline, i.e. equalizing the rate of producers and consumers in the generated dataflow architecture.\n", + "This can be achieved (or almost achieved) by keeping the `total folding` parameter approximately constant across all layers.\n", + "\n", + "We now explore how these parameters affect the estimated clock cycles and the resource utilization of the generated dataflow architectures.\n", + "We start with a naive case where the `PE` & `SIMD` values across all layers are 1 and observe the above-mentioned numbers.\n", + "We use the utility functions `exp_cycles_per_layer()` and `res_estimation()` to estimate the number of clock cycles and the resource utilization of each network layer." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
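
The constraints and the total folding formula above can be captured in a short sketch (not part of the patched notebook); the helper name total_folding is made up for illustration and simply encodes the two divisibility checks and the formula.

# Sketch only: check the folding constraints and compute total folding for one layer.
def total_folding(H, W, PE, SIMD):
    assert H % PE == 0, "PE must evenly divide the output height H"
    assert W % SIMD == 0, "SIMD must evenly divide the input width W"
    return (H // PE) * (W // SIMD)

print(total_folding(12, 12, 2, 4))  # the H = W = 12 example above: 18
print(total_folding(12, 12, 1, 1))  # naive baseline PE = SIMD = 1: 144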
\n", + "Should this line be added? (The `exp_cycles_per_layer` formula is equal to the total folding in this case, as the number of input vectors is 1 and the mmv value is also 1.)\n", + "
" ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer\n", + "from finn.analysis.fpgadataflow.res_estimation import res_estimation" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now individually extract the `MatrixVectorActivation` blocks from the onnx file and set the config values manually (although, as mentioned in the introduction, this can also be done automatically by the `SetFolding` transformation).\n", + "\n", + "In the first step, we set the `PE` & `SIMD` values for all the layers to be '1' to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", + "\n", + "We use the `getCustomOp()` helper function to wrap each node; the `set_nodeattr()` function of the returned instance then lets us set these values." ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from qonnx.custom_op.registry import getCustomOp\n", + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", + "config = [\n", + " (1, 1, [16], [64], \"block\"),\n", + " (1, 1, [64], [64], \"auto\"),#8,8\n", + " (1, 1, [64], [64], \"auto\"),#8,8\n", + " (1, 1, [64], [1], \"distributed\"),\n", + "]\n", + "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n", + " fcl_inst = getCustomOp(fcl)\n", + " fcl_inst.set_nodeattr(\"PE\", pe)\n", + " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", + " fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n", + " fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n", + " fcl_inst.set_nodeattr(\"ram_style\", ramstyle)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After setting these parameters, we save the model and view it using `Netron`.\n", + "We can observe the values we set in the above step by clicking on any of the nodes and observing their properties." ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping http://0.0.0.0:5901\n", + "Serving './cybsec_PE_SIMD_not_modified.onnx' at http://0.0.0.0:5901\n" ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " ], + "text/plain": [ + "" ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } ], + "source": [ + "model.save(\"./cybsec_PE_SIMD_not_modified.onnx\")\n", + "showInNetron(\"./cybsec_PE_SIMD_not_modified.onnx\",localhost_url='xirxlabs53')" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We pass our model to the `exp_cycles_per_layer()` and `res_estimation()` functions, which iteratively go through all the layers in the graph, measure the expected execution clock cycles and resource utilization for each of them, and return a dictionary with the calculated values."
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "cycles_dict = []\n", + "cycles_dict = exp_cycles_per_layer(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3cAAAHWCAYAAADU7HB0AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABpGklEQVR4nO3deVhV5d7/8c8GBQcmZxzIsVQUJSckcyhJHNOyQTPFsTS0lHI6ldpo2SnNnBpOUuenOaYNThEqalKZirOmpmkqOAKKCgj37w8f1nELKii6afd+Xde6nrPvda+1vmvtffv0YU02Y4wRAAAAAOBvzcXRBQAAAAAAbh3hDgAAAACcAOEOAAAAAJwA4Q4AAAAAnADhDgAAAACcAOEOAAAAAJwA4Q4AAAAAnADhDgAAAACcAOEOAAAAAJwA4Q6AU2jVqpVatWrl6DLy1cGDB2Wz2RQZGVmg1pWTyMhI2Ww2/fbbb7dl/fnpdh+La9m7d6/atGkjb29v2Ww2LV68+I5u/05o1aqV6tat6+gyCrSs39+///3vm1reZrNp3Lhx+VsUAKdBuANwW2X9R/+1pp9//jnX69q5c6fGjRungwcP3r6Cb8K0adPueFDA309YWJi2bdumt956S//973/VqFEjR5fk9I4ePapx48YpLi7O0aUAwB1RyNEFAPhneP3111W1atVs7TVq1Mj1Onbu3KnXXntNrVq1UpUqVezm/fDDD7da4k2bNm2aSpcurd69ezusBhRsFy5cUGxsrF5++WUNHjzY0eX8Yxw9elSvvfaaqlSposDAQEeXAwC3HeEOwB3Rrl2723qmws3N7batG7hVJ06ckCT5+Pjk2zpTUlJUvHjxfFsfbq/MzEylpaU5uozb6uLFi3Jzc5OLCxeGAY7C6ANQYMyZM0cNGzaUp6envLy8FBAQoA8//FDS5cs7H3/8cUnSAw88YF3WuXr1aknZ77lbvXq1bDab5s2bp9dee00VK1aUp6enHnvsMSUlJSk1NVVDhw5V2bJl5eHhoT59+ig1NdWunpkzZ+rBBx9U2bJl5e7uLn9/f02fPt2uT5UqVbRjxw7FxMRYNV1ZR2JiooYOHSo/Pz+5u7urRo0aevfdd5WZmWm3nsTERPXu3Vve3t7y8fFRWFiYEhMTc33sEhMTNWzYMFWpUkXu7u6qVKmSevXqpZMnT153uZUrV6p58+YqXry4fHx81LlzZ+3atStbvyNHjqhfv36qUKGC3N3dVbVqVQ0aNOi6/7F65swZNWnSRJUqVdKePXtuuv5z586pePHieuGFF7It99dff8nV1VXjx4+/5WOxe/duPfbYYypZsqSKFCmiRo0a6dtvv7Xrk56ertdee0133323ihQpolKlSun+++9XVFTUNdc7btw4Va5cWZI0fPhw2Ww2uzPPmzdvVrt27eTl5SUPDw+1bt062+XKWZc3x8TE6LnnnlPZsmVVqVKl6+5Pamqqxo4dqxo1asjd3V1+fn4aMWLETf3OsyxbtkwtW7a0xmjjxo01e/bsbP127typBx54QMWKFVPFihU1YcKE69aaxWazafDgwVq8eLHq1q0rd3d31alTR8uXL8/W98iRI+rbt6/KlStn9fv888+t+atXr1bjxo0lSX369LHGZ2RkpCZPnixXV1e7Mfb+++/LZrMpIiLCasvIyJCnp6dGjhxptaWkpOjFF1+0xnTNmjX173//W8aYHPdl1qxZqlOnjtzd3XPcD0kyxuiZZ56Rm5ubvv7661wdqyx//vmnnnvuOdWsWVNFixZVqVKl9Pjjj9tdvv7HH3/IZrNp4sSJ2ZZfv369bDabvvrqK6vtRsdW+t+/sXPmzNErr7yiihUrqlixYkpOTs5T/QDyF2fuANwRSUlJ2f7j2mazqVSpUpKkqKgode/eXa1bt9a7774rSdq1a5d++uknvfDCC2rRooWef/55TZ48Wf/6179Uu3ZtSbL+77WMHz9eRYsW1ahRo7Rv3z599NFHKly4sFxcXHTmzBmNGzdOP//8syIjI1W1alWNGTPGWnb69OmqU6eOHn74YRUqVEjfffednnvuOWVmZio8PFySNGnSJA0ZMkQeHh56+eWXJUnlypWTJJ0/f14tW7bUkSNH9Oyzz+quu+7S+vXrNXr0aB07dkyTJk2SdPk/7Dp37qx169Zp4MCBql27thYtWqSwsLBcHdtz586pefPm2rVrl/r27asGDRro5MmT+vbbb/XXX3+pdOnSOS73448/ql27dqpWrZrGjRunCxcu6KOPPlKzZs20adMmK4AcPXpUTZo0UWJiop555hnVqlVLR44c0YIFC3T+/Pkcz5qePHlSDz30kE6fPq2YmBhVr179pusPDAzUI488orlz5+qDDz6Qq6urtexXX30lY4x69OhxS8dix44datasmSpWrKhRo0apePHimjdvnrp06aKFCxfqkUcekXQ5qI0fP179+/dXkyZNlJycrN9++02bNm3SQw89lOO6H330Ufn4+GjYsGHq3r272rdvLw8PD2u7zZs3l5eXl0aMGKHChQvr448/VqtWrRQTE6OgoCC7dT333HMqU6aMxowZo5SUlGse08zMTD388MNat26dnnnmGdWuXVvbtm3TxIkT9fvvv9s9zCU3v3PpcsDs27ev6tSpo9GjR8vHx0ebN2/W8uXL9dRTT1n9zpw5o7Zt2+rRRx/VE088oQULFmjkyJEKCAhQu3btrllzlnXr1unrr7/Wc889J09PT02ePFldu3bVoUOHrH8vEhIS1LRpUytAlSlTRsuWLVO/fv2UnJysoUOHqnbt2nr99dc1ZswYPfPMM2revLkk6b777lNSUpIyMzO1bt06dezYUZK0du1aubi4aO3atVYtmzdv1rlz59SiRQtJl8fqww8/rFWrVqlfv34KDAzUihUrNHz4cB05ciRbeFq5cqXmzZunwYMHq3Tp0tkuJ5cuB8i+fftq7ty5WrRokTp06HDDY3SlDRs2aP369erWrZsqVaqkgwcPavr06WrVqpV27typYsWKqVq1amrWrJlmzZqlYcOG2S0/a9YseXp6qnPnzrk+tld644035ObmppdeekmpqalcRQE4mgGA22jmzJlGUo6Tu7u71e+FF14wXl5e5tKlS9dc1/z5840ks2rVqmzzWrZsaVq2bGl9XrVqlZFk6tata9LS0qz27t27G5vNZtq1a2e3fHBwsKlcubJd2/nz57NtJzQ01FSrVs2urU6dOnbbzvLGG2+Y4sW
[... base64 PNG image data omitted: notebook output, bar chart of estimated clock cycles per network layer ...]",
+      "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "\n", + "layers = list(cycles_dict.keys())\n", + "cycles = list(cycles_dict.values())\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(layers, cycles, color ='blue', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"Clock Cycles\")\n", + "plt.title(\"Estimated clock cycles for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "res_dict = []\n", + "res_dict = res_estimation(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABeo0lEQVR4nO3deXxN1/7/8feRSEJGQRJDzFVCIkoRU7SGGKuXVqkSY1WjLb60dDC2TWe0F61W0Vuq5rYuVWOoeSw1U1NLUENiqJBk/f7wy76OBAlhG17Px+M8mrPW2nt/9jlnpd7Zw3EYY4wAAAAAALbJYXcBAAAAAPCgI5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAG4KXXq1FGdOnXsLiNb7d+/Xw6HQ+PHj7e7FFvxOmTe+PHj5XA4tH///huO/fnnnxUeHi4PDw85HA6dPn36ttd3pzkcDvXo0cPuMu5qaZ+ZdevWZXnZJUuWyOFwaMmSJdlfGADbEcyA+0za//Sv9Vi1alWm17Vt2zYNGjQoU//ovJNGjRpla2hI+8fRtGnTrjnmev9AnTZtmvWPq7R1ZeaBe9eJEyfUqlUr5cqVSyNHjtR//vMfeXp62l3WfW/FihUaNGjQfRmCAdx/XO0uAMDtMWTIEBUvXjxde6lSpTK9jm3btmnw4MGqU6eOihUr5tT3yy+/3GqJN23UqFHKly+fOnToYFsN2aVs2bL6z3/+49TWv39/eXl56Y033rCpKmS3tWvX6syZMxo6dKjq1atndzkPjBUrVmjw4MHq0KGD/Pz87C4HAK6LYAbcpxo1aqTKlSvftvW7ubndtnU/SAIDA/Xcc885tb333nvKly9funbcu44dOyZJ2RoOzp07x1G3e8iFCxfu+9+bfCaBW8OpjMADbPLkyapUqZK8vb3l4+Oj0NBQjRgxQtLlUyKffvppSdJjjz1mnU6Xdm3D1deYpZ2SN2XKFA0ePFiFChWSt7e3nnrqKSUkJCgpKUk9e/ZUQECAvLy81LFjRyUlJTnVM27cOD3++OMKCAiQu7u7QkJCNHr0aKcxxYoV09atWxUXF2fVdGUdp0+fVs+ePRUcHCx3d3eVKlVK77//vlJTU53Wc/r0aXXo0EG+vr7y8/NTdHT0PXm609GjR+Xq6qrBgwen69u5c6ccDof+/e9/S5JOnjypPn36KDQ0VF5eXvLx8VGjRo3022+/3XA717qmsEOHDumOpqampmr48OEqV66cPDw8FBgYqG7duunUqVNO49atW6eoqCjly5dPuXLlUvHixdWpU6cb1uJwODRo0KB07cWKFXM6inrp0iUNHjxYDz30kDw8PJQ3b17VrFlT8+fPd1pux44deuqpp+Tv7y8PDw9VrlxZP/74Y7r1b926VY8//rhy5cqlwoUL6+233073ucpInTp1FB0dLUl69NFH5XA4nOqcOnWqKlWqpFy5clmB/K+//nJaR4cOHeTl5aW9e/eqcePG8vb2Vtu2ba+73b/++kudOnVSYGCg3N3dVa5cOX399ddOYy5evKgBAwaoUqVK8vX1laenp2rVqqXFixenW19qaqpGjBih0NBQeXh4KH/+/GrYsGGG10rNmjVL5cuXt7b7888/3/B1uvJ3yDvvvKPChQvLw8NDdevW1Z49e9KNX716tRo2bChfX1/lzp1bkZGRWr58udU/aNAg9e3bV5JUvHhx6/fF/v371aJFCz3yyCNO62vWrJkcDofTe7969Wo5HA7NnTvXavvjjz/09NNPy9/fX7lz51a1atX03//+N8N9mTx5st58800VKlRIuXPnVmJiYob7furUKVWpUkWFCxfWzp07b/haXWnZsmV6+umnVaRIEbm7uys4OFi9evXSP//8Y40ZN26cHA6HNm7cmG75d999Vy4uLk6fuRu9ttLl19fhcGjbtm169tlnlSdPHtWsWTNLtQNwxhEz4D6VkJCgv//+26nN4XAob968kqT58+erTZs2qlu3rt5//31J0vbt27V8+XK98sorql27tl5++WV9+umnev3111W2bFlJsv57LbGxscqVK5f69eunPXv26LPPPlPOnDmVI0cOnTp1SoMGDdKqVas0fvx4FS9eXAMGDLCWHT16tMqVK6cnnnhCrq6u+umnn/Tiiy8qNTVVMTExkqThw4frpZdecjrVLzAwUJJ0/vx5RUZG6q+//lK3bt1UpEgRrVixQv3799eRI0c0fPhwSZIxRs2bN9evv/6qF154QWXLltXMmTOtfzzfSwIDAxUZGakpU6Zo4MCBTn3ff/+9XFxcrID9xx9/aNasWXr66adVvHhxHT16VF988YUiIyO1bds2FSxYMFtq6tatm8aPH6+OHTvq5Zdf1r59+/Tvf/9bGzdu1PLly5UzZ04dO3ZMDRo0UP78+dWvXz/5+flp//79mjFjRrbUIF3+h2NsbKy6dOmiKlWqKDExUevWrdOGDRtUv359SZfDVo0aNVSoUCH169dPnp6emjJlip588klNnz5d//rXvyRJ8fHxeuyxx5ScnGyNGzNmjHLlynXDOt544w09/PDDGjNmjHWKccmSJSXJep0effRRxcbG6ujRoxoxYoSWL1+ujRs3Oh1hS05OVlRUlGrWrKmPPvpIuXPnvuY2jx49qmrVqlnXOubPn19z585V586dlZiYqJ49e0qSEhMT9dVXX6lNmzbq2rWrzpw5o7FjxyoqKkpr1qxReHi4tc7OnTtr/PjxatSok
bp06aLk5GQtW7ZMq1atcjo6/+uvv2rGjBl68cUX5e3trU8//VQtW7bUwYMHrd8/1/Pee+8pR44c6tOnjxISEvTBBx+obdu2Wr16tTVm0aJFatSokSpVqqSBAwcqR44c1h92li1bpipVqqhFixbatWuXvvvuOw0bNkz58uWTJOXPn1+1atXSDz/8oMTERPn4+MgYo+XLlytHjhxatmyZnnjiCUmXQ0+OHDlUo0YN63WtXr26zp8/r5dffll58+bVhAkT9MQTT2jatGnW5yXN0KFD5ebmpj59+igpKSnDI2Z///236tevr5MnTyouLs76bGTW1KlTdf78eXXv3l158+bVmjVr9Nlnn+nPP//U1KlTJUlPPfWUYmJiNHHiRFWsWNFp+YkTJ6pOnToqVKhQpl/bKz399NN66KGH9O6778oYk6XaAVzFALivjBs3zkjK8OHu7m6Ne+WVV4yPj49JTk6+5rqmTp1qJJnFixen64uMjDSRkZHW88WLFxtJpnz58ubixYtWe5s2bYzD4TCNGjVyWj4iIsIULVrUqe38+fPpthMVFWVKlCjh1FauXDmnbacZOnSo8fT0NLt27XJq79evn3FxcTEHDx40xhgza9YsI8l88MEH1pjk5GRTq1YtI8mMGzcu3bqvlLavU6dOveYYSSYmJibDvuu9rtfbv2v54osvjCSzZcsWp/aQkBDz+OOPW88vXLhgUlJSnMbs27fPuLu7myFDhji1Xf06XP1+p4mOjnZ6H5ctW2YkmYkTJzqN+/nnn53aZ86caSSZtWvXZno/00gyAwcOTNdetGhREx0dbT2vUKGCadKkyXXXVbduXRMaGmouXLhgtaWmpprq1aubhx56yGrr2bOnkWRWr15ttR07dsz4+voaSWbfvn3X3U7avLxyfy9evGgCAgJM+fLlzT///GO1z54920gyAwYMsNqio6ONJNOvX7/rbidN586dTYECBczff//t1N66dWvj6+trzbXk5GSTlJTkNObUqVMmMDDQdOrUyWpbtGiRkWRefvnldNtKTU21fpZk3NzczJ49e6y23377zUgyn3322XVrTptXZcuWdappxIgRTp/v1NRU89BDD5moqCinbZ8/f94UL17c1K9f32r78MMPM3x/1q5daySZOXPmGGOM2bx5s5Fknn76aVO1alVr3BNPPGEqVqxoPU/7HCxbtsxqO3PmjClevLgpVqyYNb/S9qVEiRLpfq9d+Vk4cuSIKVeunClRooTZv3//dV+fK9d75e+OjH5vxsbGGofDYQ4cOGC1tWnTxhQsWNDpd8CGDRuc5npWXtuBAwcaSaZNmzY3rBtA5nAqI3CfGjlypObPn+/0uPJ0HD8/P507dy7daV23qn379sqZM6f1vGrVqjLGpDtFrWrVqjp06JCSk5OttiuPPqQd8YuMjNQff/yhhISEG2576tSpqlWrlvLkyaO///7betSrV08pKSlaunSpJGnOnDlydXVV9+7drWVdXFz00ksv3fR+26lFixZydXXV999/b7X9/vvv2rZtm5555hmrzd3dXTlyXP61n5KSohMnTsjLy0sPP/ywNmzYkC21TJ06Vb6+vqpfv77Te1CpUiV5eXlZp8ilHQmaPXu2Ll26lC3bvpqfn5+2bt2q3bt3Z9h/8uRJLVq0SK1atdKZM2esWk+cOKGoqCjt3r3bOr1rzpw5qlatmtPRgvz589/wdMLrWbdunY4dO6YXX3xRHh4eVnuTJk1UpkyZdKfHSXL6zF6LMUbTp09Xs2bNZIxxeh+ioqKUkJBgvd8uLi7WUZzU1FSdPHlSycnJqly5stNnYvr06XI4HOmOykpKd8fQevXqOR31CQsLk4+Pj/74448b1i5JHTt2dDqyVKtWLUmylt+0aZN2796tZ599VidOnLD27dy5c6pbt66WLl16w1NMK1asKC8vL+t3wrJly1S4cGG1b99eGzZs0Pnz52WM0a+//mptX7r8OahSpYrTKXteXl56/vnntX//fm3bts1pO9HR0dc8qvrnn38qMjJSly5d0tKlS1W0aNFMvT5Xu3L9586d099//63q1avLGON06mL79u11+PBhp9NUJ06cqFy5cqlly5aSbu61feGFF26qbgDpcSojcJ+qUqXKdW/+8eKLL2rKlClq1KiRChUqpAYNGqhVq1Zq2LDhLW23SJEiTs99fX0lScHBwenaU1NTlZCQYJ3etHz5cg0cOFArV67U+fPnncYnJCRY67qW3bt3a/PmzcqfP3+G/Wk3YDhw4IAKFCggLy8vp/6HH374BnuXvbLrFvj58uVT3bp1NWXKFA0dOlTS5dMYXV1d1aJFC2tc2jVCo0aN0r59+5SSkmL1ZeYUs8zYvXu3EhISFBAQkGF/2nsQGRmpli1bavDgwRo2bJjq1KmjJ598Us8++6zc3d2zpZYhQ4aoefPmKl26tMqXL6+GDRuqXbt2CgsLkyTt2bNHxhi99dZbeuutt65Zb6FChXTgwAFVrVo1Xf+tfGYOHDhwzXWUKVNGv/76q1Obq6urChcufMP1Hj9+XKdPn9aYMWM0ZsyYDMekvQ+SNGHCBH388cfasWOHU0i+8q6ue/fuVcGCBeXv73/D7V/9O0CS8uTJk+4aw8wunydPHkmylk8L2tc79TghIcFaLiMuLi6KiIjQsmXLJF0OZrVq1VLNmjWVkpKiVatWKTAwUCdPnnQKZtf6HKSd4n3gwAGVL1/eas/ozrhp2rVrJ1dXV23fvl1BQUHXHHcjBw8e1IABA/Tjjz+me42v/INW/fr1VaBAAU2cOFF169ZVamqqvvvuOzVv3lze3t6Sbu61vd4+AsgaghnwgAoICNCmTZs0b948zZ07V3PnztW4cePUvn17TZgw4abX6+LikqV28/+vSdi7d6/q1q2rMmXK6JNPPlFwcLDc3Nw0Z84cDRs2LFM3WUhNTVX9+vX16quvZthfunTpTO7FrXN3d3e6+P5KaaHzyqMkt6p169bq2LGjNm3apPDwcE2ZMkV169a1rquRLl/k/9Zbb6lTp04aOnSo/P39lSNHDvXs2fOGr6/D4cjw+pErw510+T0ICAjQxIkTM1xPWmhO+x64VatW6aefftK8efPUqVMnffzxx1q1alW60JwZV9dSu3Zt7d27Vz/88IN++eUXffXVVxo2bJg+//xzdenSxdrnPn36KCoqKsN1ZuXrJW63K494Xk/afj333HPX/Ad2Wjj99ttv1aFDBz355JPq27evAgIC5OLiotjYWO3du/em6rzRXL/V5dP278MPP3S6Bu5Kmfn81KxZU++8844uXLigZcuW6Y033pCfn5/Kly+vZcuWWdeuXhnMsup61yC2aNFC33zzjUaMGKHY2NibWn9KSop1fdprr72mMmXKyNPTU3/99Zc6dOjgNK9dXFz07LPP6ssvv9SoUaO0fPlyHT582Onurzfz2mbmOksAmUMwAx5gbm5uatasmZo1a6bU1FS9+OKL+uKLL/TWW2+pVKlSd/RLjX/66SclJSXpxx9/dPqLeUZ3h7tWXSVLltTZs2dv+D1RRYsW1cKF
C3X27Fmnf2Rk9W5oN9rGtdaX1n6zpy5l5Mknn1S3bt2s0xl37dql/v37O42ZNm2aHnvsMY0dO9ap/fTp004BLiN58uTJ8FS0tKM+aUqWLKkFCxaoRo0amfoHW7Vq1VStWjW98847mjRpktq2bavJkyerS5cu163l6jtoXrx4UUeOHEk31t/fXx07dlTHjh119uxZ1a5dW4MGDVKXLl1UokQJSVLOnDkz9ZnJ6JTIW/nMpL3/O3fu1OOPP55uvTf7+cifP7+8vb2VkpJyw/2aNm2aSpQooRkzZjjNq6tPWSxZsqTmzZunkydPZuqo2e2Udpqkj4/PDffver/DatWqpYsXL+q7777TX3/9ZQWw2rVrW8GsdOnSVkCTrj2vd+zYYfVn1ksvvaRSpUppwIAB8vX1Vb9+/TK9bJotW7Zo165dmjBhgtq3b2+1X+sU9fbt2+vjjz/WTz/9pLlz5yp//vxOf5TIymsLIPtxjRnwgDpx4oTT8xw5clh/RU+7jX3a99HcidvIp/2V/Mq/qickJGjcuHHpxnp6emZYU6tWrbRy5UrNmzcvXd/p06et69kaN26s5ORkp1vxp6Sk6LPPPrvV3bA0btxYq1at0vr169PVMXHiRIWHh9/S6UtX8/PzU1RUlKZMmaLJkyfLzc1NTz75pNMYFxeXdEctpk6dmu7W7BkpWbKkduzYoePHj1ttv/32W7pbaLdq1UopKSnWKZVXSk5Ott63U6dOpasl7S/0V3+NQka1pF0blGbMmDHpjphd/Rn38vJSqVKlrPUHBASoTp06+uKLLzIMdVfua9r7uWbNGqf+ax0ZzIzKlSsrICBAn3/+udM+z507V9u3b1eTJk1uar0uLi5q2bKlpk+frt9//z1d/5X7ldG8W716tVauXOm0TMuWLWWMyfBrGTJ7JCy7VKpUSSVLltRHH32ks2fPpuu/cv+u9zusatWqypkzp95//335+/urXLlyki4HtlWrVikuLi7d0bLGjRtrzZo1Tq/PuXPnNGbMGBUrVkwhISFZ2pe33npLffr0Uf/+/dN9NUhmZPT+GWOsrz25WlhYmMLCwvTVV19p+vTpat26tVxd//c3+qy8tgCyH0fMgPvU3Llzrb/iXql69eoqUaKEunTpopMnT+rxxx9X4cKFdeDAAX322WcKDw+3rpcIDw+Xi4uL3n//fSUkJMjd3d36nrHs1qBBA+sIXrdu3XT27Fl9+eWXCggISPeP5kqVKmn06NF6++23VapUKQUEBOjxxx9X37599eOPP6pp06bq0KGDKlWqpHPnzmnLli2aNm2a9u/fr3z58qlZs2aqUaOG+vXrp/379yskJEQzZszI1A1GrjR9+vQMX+Po6Gj169dPU6dOVe3atdWtWzeVKVNGhw8f1vjx43XkyJEMA+eteuaZZ/Tcc89p1KhRioqKSvdlxk2bNtWQIUPUsWNHVa9eXVu2bNHEiROtI0fX06lTJ33yySeKiopS586ddezYMX3++ecqV66c03czRUZGqlu3boqNjdWmTZvUoEED5cyZU7t379bUqVM1YsQIPfXUU5owYYJGjRqlf/3rXypZsqTOnDmjL7/8Uj4+PmrcuPF1a+nSpYteeOEFtWzZUvXr19dvv/2mefPmpTvqFxISojp16qhSpUry9/fXunXrNG3aNPXo0cMaM3LkSNWsWVOhoaHq2rWrSpQooaNHj2rlypX6888/re94e/XVV/Wf//xHDRs21CuvvGLdLr9o0aLavHnzDV+/jKSFgo4dOyoyMlJt2rSxbpdfrFgx9erV66bWK12+5fzixYtVtWpVde3aVSEhITp58qQ2bNigBQsW6OTJk5IufyZmzJihf/3rX2rSpIn27dunzz//XCEhIU7/MH/sscfUrl07ffrpp9q9e7caNmyo1NRULVu2TI899pjTa3q75ciRQ1999ZUaNWqkcuXKqWPHjipUqJD++usvLV68WD4+Pvrpp58kXf5dIV3+yoLWrVsrZ86catasmTw9PZU7d25VqlRJq1atsr7DTLp8xOzcuXM6d+5cumDWr18/fffdd2rUqJFefvll+fv7a8KECdq3b5+mT5+eqVNNr/bhhx8qISFBMTEx8vb2ztIXy5cpU0YlS5ZUnz599Ndff8nHx0fTp0+/7vV87du3V58+fSQp3bay8toCuA3u9G0gAdxe17tdvq64LfK0adNMgwYNTEBAgHFzczNFihQx3bp1M0eOHHFa35dffmlKlChhXFxcnG7TfK3b5V99C/mMbhNuzP9utXz8+HGr7ccffzRhYWHGw8PDFCtWzLz//vvm66+/Tne76/j4eNOkSRPj7e1tJDnVcebMGdO/f39TqlQp4+bmZvLly2eqV69uPvroI6fb+J84ccK0a9fO+Pj4GF9fX9OuXTuzcePGLN0u/1qPtFtp//nnn6ZLly6mUKFCxtXV1fj7+5umTZuaVatWXXf9Wb1dfprExESTK1cuI8l8++236fovXLhg/u///s8UKFDA5MqVy9SoUcOsXLky3XuZ0e3yjTHm22+/NSVKlDBubm4mPDzczJs3L93t8tOMGTPGVKpUyeTKlct4e3ub0NBQ8+qrr5rDhw8bYy7fprtNmzamSJEixt3d3QQEBJimTZuadevW3XA/U1JSzGuvvWby5ctncufObaKiosyePXvS3S7/7bffNlWqVDF+fn4mV65cpkyZMuadd95x+hwYY8zevXtN+/btTVBQkMmZM6cpVKiQadq0qZk2bZrTuM2bN5vIyEjj4eFhChUqZIYOHWrGjh1707fLT/P999+bihUrGnd3d+Pv72/atm1r/vzzT6cx0dHRxtPT84avzZWOHj1qYmJiTHBwsMmZM6cJCgoydevWNWPGjLHGpKammnfffdcULVrUuLu7m4oVK5rZs2dn+L4mJyebDz/80JQpU8a4ubmZ/Pnzm0aNGpn169dbY3SNr4m4+r3JyLV+h1zr87hx40bTokULkzdvXuPu7m6KFi1qWrVqZRYuXOg0bujQoaZQoUImR44c6d6rvn37Gknm/fffd1qmVKlSRpLZu3dvujr37t1rnnrqKePn52c8PDxMlSpVzOzZszO1L8Zk/FlISUkxbdq0Ma6urmbWrFk3fI2uvF3+tm3bTL169YyXl5fJly+f6dq1q/UVBRn9Ljty5IhxcXExpUuXvuZ2MvPaZvQ7HMCtcRjDtwECAAA8CP7++28VKFBAAwYMuObdSAHYg2vMAAAAHhDjx49XSkqK2rVrZ3cpAK7CNWYAAAD3uUWLFmnbtm1655139OSTT6pYsWJ2lwTgKpzKCAAAcJ+rU6eOVqxYoRo1aujbb79VoUKF7C4JwFUIZgAAAABgM64xAwAAAACbEcwAAAAAwGbc/ENSamqqDh8+LG9vb+sLJgEAAAA8eIwxOnPmjAoWLHhTXxx/swhmkg4fPqzg4GC7ywAAAABwlzh06JAKFy58x7ZHMJPk7e0t6fKL7+PjY3M1AAAAAOySmJio4OBgKyPcKQQzyTp90cfHh2AGAAAA4I5f4sTNPwAAAADAZgQzAAAAALAZwQw
AAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbOZqdwFIzzHYYXcJtjIDjd0lAAAAAHcUR8wAAAAAwGZ3TTB777335HA41LNnT6vtwoULiomJUd68eeXl5aWWLVvq6NGjTssdPHhQTZo0Ue7cuRUQEKC+ffsqOTn5DlcPAAAAADfvrghma9eu1RdffKGwsDCn9l69eumnn37S1KlTFRcXp8OHD6tFixZWf0pKipo0aaKLFy9qxYoVmjBhgsaPH68BAwbc6V0AAAAAgJtmezA7e/as2rZtqy+//FJ58uSx2hMSEjR27Fh98sknevzxx1WpUiWNGzdOK1as0KpVqyRJv/zyi7Zt26Zvv/1W4eHhatSokYYOHaqRI0fq4sWL19xmUlKSEhMTnR4AAAAAYBfbg1lMTIyaNGmievXqObWvX79ely5dcmovU6aMihQpopUrV0qSVq5cqdDQUAUGBlpjoqKilJiYqK1bt15zm7GxsfL19bUewcHB2bxXAAAAAJB5tgazyZMna8OGDYqNjU3XFx8fLzc3N/n5+Tm1BwYGKj4+3hpzZShL60/ru5b+/fsrISHBehw6dOgW9wQAAAAAbp5tt8s/dOiQXnnlFc2fP18eHh53dNvu7u5yd3e/o9sEAAAAgGux7YjZ+vXrdezYMT3yyCNydXWVq6ur4uLi9Omnn8rV1VWBgYG6ePGiTp8+7bTc0aNHFRQUJEkKCgpKd5fGtOdpYwAAAADgbmdbMKtbt662bNmiTZs2WY/KlSurbdu21s85c+bUwoULrWV27typgwcPKiIiQpIUERGhLVu26NixY9aY+fPny8fHRyEhIXd8nwAAAADgZth2KqO3t7fKly/v1Obp6am8efNa7Z07d1bv3r3l7+8vHx8fvfTSS4qIiFC1atUkSQ0aNFBISIjatWunDz74QPHx8XrzzTcVExPDqYoAAAAA7hm2BbPMGDZsmHLkyKGWLVsqKSlJUVFRGjVqlNXv4uKi2bNnq3v37oqIiJCnp6eio6M1ZMgQG6sGAAAAgKxxGGOM3UXYLTExUb6+vkpISJCPj4/d5cgx2GF3CbYyAx/4jyQAAABsYlc2sP17zAAAAADgQUcwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm93V32MGANmBr6DgKyiQ/ZhXzCsA2YsjZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANrM1mI0ePVphYWHy8fGRj4+PIiIiNHfuXKu/Tp06cjgcTo8XXnjBaR0HDx5UkyZNlDt3bgUEBKhv375KTk6+07sCAAAAADfN1c6NFy5cWO+9954eeughGWM0YcIENW/eXBs3blS5cuUkSV27dtWQIUOsZXLnzm39nJKSoiZNmigoKEgrVqzQkSNH1L59e+XMmVPvvvvuHd8fAAAAALgZtgazZs2aOT1/5513NHr0aK1atcoKZrlz51ZQUFCGy//yyy/atm2bFixYoMDAQIWHh2vo0KF67bXXNGjQILm5uWW4XFJSkpKSkqzniYmJ2bRHAAAAAJB1d801ZikpKZo8ebLOnTuniIgIq33ixInKly+fypcvr/79++v8+fNW38qVKxUaGqrAwECrLSoqSomJidq6des1txUbGytfX1/rERwcfHt2CgAAAAAywdYjZpK0ZcsWRURE6MKFC/Ly8tLMmTMVEhIiSXr22WdVtGhRFSxYUJs3b9Zrr72mnTt3asaMGZKk+Ph4p1AmyXoeHx9/zW32799fvXv3tp4nJiYSzgAAAADYxvZg9vDDD2vTpk1KSEjQtGnTFB0drbi4OIWEhOj555+3xoWGhqpAgQKqW7eu9u7dq5IlS970Nt3d3eXu7p4d5QMAAADALbP9VEY3NzeVKlVKlSpVUmxsrCpUqKARI0ZkOLZq1aqSpD179kiSgoKCdPToUacxac+vdV0aAAAAANxtbA9mV0tNTXW6MceVNm3aJEkqUKCAJCkiIkJbtmzRsWPHrDHz58+Xj4+PdTokAAAAANztbD2VsX///mrUqJGKFCmiM2fOaNKkSVqyZInmzZunvXv3atKkSWrcuLHy5s2rzZs3q1evXqpdu7bCwsIkSQ0aNFBISIjatWunDz74QPHx8XrzzTcVExPDqYoAAAAA7hm2BrNjx46pffv2OnLkiHx9fRUWFqZ58+apfv36OnTokBYsWKDhw4fr3LlzCg4OVsuWLfXmm29ay7u4uGj27Nnq3r27IiIi5OnpqejoaKfvPQMAAACAu52twWzs2LHX7AsODlZcXNwN11G0aFHNmTMnO8sCAAAA7ijHYIfdJdjKDDR2l2C7u+4aMwAAAAB40BDMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGa2BrPRo0crLCxMPj4+8vHxUUREhObOnWv1X7hwQTExMcqbN6+8vLzUsmVLHT161GkdBw8eVJMmTZQ7d24FBASob9++Sk5OvtO7AgAAAAA3zdZgVrhwYb333ntav3691q1bp8cff1zNmzfX1q1bJUm9evXSTz/9pKlTpyouLk6HDx9WixYtrOVTUlLUpEkTXbx4UStWrNCECRM0fvx4DRgwwK5dAgAAAIAscxhjjN1FXMnf318ffvihnnrqKeXPn1+TJk3SU089JUnasWOHypYtq5UrV6patWqaO3eumjZtqsOHDyswMFCS9Pnnn+u1117T8ePH5ebmlqltJiYmytfXVwkJCfLx8blt+5ZZjsEOu0uwlRl4V30kcR9gTjGnkP2YV8wrZC/m1N0zp+zKBnfNNWYpKSmaPHmyzp07p4iICK1fv16XLl1SvXr1rDFlypRRkSJFtHLlSknSypUrFRoaaoUySYqKilJiYqJ11C0jSUlJSkxMdHoAAAAAgF1sD2ZbtmyRl5eX3N3d9cILL2jmzJkKCQ
lRfHy83Nzc5Ofn5zQ+MDBQ8fHxkqT4+HinUJbWn9Z3LbGxsfL19bUewcHB2btTAAAAAJAFtgezhx9+WJs2bdLq1avVvXt3RUdHa9u2bbd1m/3791dCQoL1OHTo0G3dHgAAAABcj6vdBbi5ualUqVKSpEqVKmnt2rUaMWKEnnnmGV28eFGnT592Omp29OhRBQUFSZKCgoK0Zs0ap/Wl3bUxbUxG3N3d5e7uns17AgAAAAA3x/YjZldLTU1VUlKSKlWqpJw5c2rhwoVW386dO3Xw4EFFRERIkiIiIrRlyxYdO3bMGjN//nz5+PgoJCTkjtcOAAAAADfD1iNm/fv3V6NGjVSkSBGdOXNGkyZN0pIlSzRv3jz5+vqqc+fO6t27t/z9/eXj46OXXnpJERERqlatmiSpQYMGCgkJUbt27fTBBx8oPj5eb775pmJiYjgiBgAAAOCeYWswO3bsmNq3b68jR47I19dXYWFhmjdvnurXry9JGjZsmHLkyKGWLVsqKSlJUVFRGjVqlLW8i4uLZs+ere7duysiIkKenp6Kjo7WkCFD7NolAAAAAMgyW4PZ2LFjr9vv4eGhkSNHauTIkdccU7RoUc2ZMye7SwMAAACAO+auu8YMAAAAAB40BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm7nezEK7d+/W4sWLdezYMaWmpjr1DRgwIFsKAwAAAIAHRZaD2Zdffqnu3bsrX758CgoKksPhsPocDgfBDAAAAACyKMvB7O2339Y777yj11577XbUAwAAAAAPnCxfY3bq1Ck9/fTTt6MWAAAAAHggZTmYPf300/rll19uRy0AAAAA8EDK8qmMpUqV0ltvvaVVq1YpNDRUOXPmdOp/+eWXs604AAAAAHgQZDmYjRkzRl5eXoqLi1NcXJxTn8PhIJgBAAAAQBZlOZjt27fvdtQBAAAAAA+sW/qCaWOMjDHZVQsAAAAAPJBuKph98803Cg0NVa5cuZQrVy6FhYXpP//5T3bXBgAAAAAPhCwHs08++UTdu3dX48aNNWXKFE2ZMkUNGzbUCy+8oGHDhmVpXbGxsXr00Ufl7e2tgIAAPfnkk9q5c6fTmDp16sjhcDg9XnjhBacxBw8eVJMmTZQ7d24FBASob9++Sk5OzuquAQAAAIAtsnyN2WeffabRo0erffv2VtsTTzyhcuXKadCgQerVq1em1xUXF6eYmBg9+uijSk5O1uuvv64GDRpo27Zt8vT0tMZ17dpVQ4YMsZ7nzp3b+jklJUVNmjRRUFCQVqxYoSNHjqh9+/bKmTOn3n333azuHgAAAADccVkOZkeOHFH16tXTtVevXl1HjhzJ0rp+/vlnp+fjx49XQECA1q9fr9q1a1vtuXPnVlBQUIbr+OWXX7Rt2zYtWLBAgYGBCg8P19ChQ/Xaa69p0KBBcnNzy1JNAAAAAHCnZflUxlKlSmnKlCnp2r///ns99NBDt1RMQkKCJMnf39+pfeLEicqXL5/Kly+v/v376/z581bfypUrFRoaqsDAQKstKipKiYmJ2rp1a4bbSUpKUmJiotMDAAAAAOyS5SNmgwcP1jPPPKOlS5eqRo0akqTly5dr4cKFGQa2zEpNTVXPnj1Vo0YNlS9f3mp/9tlnVbRoURUsWFCbN2/Wa6+9pp07d2rGjBmSpPj4eKdQJsl6Hh8fn+G2YmNjNXjw4JuuFQAAAACyU5aDWcuWLbV69WoNGzZMs2bNkiSVLVtWa9asUcWKFW+6kJiYGP3+++/69ddfndqff/556+fQ0FAVKFBAdevW1d69e1WyZMmb2lb//v3Vu3dv63liYqKCg4NvrnAAAAAAuEVZDmaSVKlSJX377bfZVkSPHj00e/ZsLV26VIULF77u2KpVq0qS9uzZo5IlSyooKEhr1qxxGnP06FFJuuZ1ae7u7nJ3d8+GygEAAADg1mXqGrMrr8G6+tqsW7lWyxijHj16aObMmVq0aJGKFy9+w2U2bdokSSpQoIAkKSIiQlu2bNGxY8esMfPnz5ePj49CQkKyVA8AAAAA2CFTR8zy5MmjI0eOKCAgQH5+fnI4HOnGGGPkcDiUkpKS6Y3HxMRo0qRJ+uGHH+Tt7W1dE+br66tcuXJp7969mjRpkho3bqy8efNq8+bN6tWrl2rXrq2wsDBJUoMGDRQSEqJ27drpgw8+UHx8vN58803FxMRwVAwAAADAPSFTwWzRokXWnRIXL16cbRsfPXq0pMtfIn2lcePGqUOHDnJzc9OCBQs0fPhwnTt3TsHBwWrZsqXefPNNa6yLi4tmz56t7t27KyIiQp6enoqOjnb63jMAAAAAuJtlKphFRkZaPxcvXlzBwcHpjpoZY3To0KEsbdwYc93+4OBgxcXF3XA9RYsW1Zw5c7K0bQAAAAC4W2T5e8yKFy+u48ePp2s/efJkpq4RAwAAAAA4y3IwS7uW7Gpnz56Vh4dHthQFAAAAAA+STN8uP+17vxwOh9566y3lzp3b6ktJSdHq1asVHh6e7QUCAAAAwP0u08Fs48aNki4fMduyZYvc3NysPjc3N1WoUEF9+vTJ/goBAAAA4D6X6WCWdjfGjh07asSIEfLx8bltRQEAAADAgyTTwSzNuHHjbkcdAAAAAPDAynIwk6R169ZpypQpOnjwoC5evOjUN2PGjGwpDAAAAAAeFFm+K+PkyZNVvXp1bd++XTNnztSlS5e0detWLVq0SL6+vrejRgAAAAC4r2U5mL377rsaNmyYfvrpJ7m5uWnEiBHasWOHWrVqpSJFityOGgEAAADgvpblYLZ37141adJE0uW7MZ47d04Oh0O9evXSmDFjsr1AAAAAALjfZTmY5cmTR2fOnJEkFSpUSL///rsk6fTp0zp//nz2VgcAAAAAD4As3/yjdu3amj9/vkJDQ/X000/rlVde0aJFizR//nzVrVv3dtQIAAAAAPe1LAezf//737pw4YIk6Y033lDOnDm1YsUKtWzZUm+++Wa2FwgAAAAA97ssBzN/f3/r5xw5cqhfv37ZWhAAAAAAPGiyfI3Zhg0btGXLFuv5Dz/8oCeffFKvv/56uu80AwAAAADcWJaDWbdu3bRr1y5J0h9//KFnnnlGuXPn1tSpU/Xqq69me4EAAAAAcL/LcjDbtWuXwsPDJUlTp05VZGSkJk2apPHjx2v69OnZXR8AAAAA3PeyHMyMMUpNTZUkLViwQI0bN5YkBQcH6++//87e6gAAAADgAZDlYFa5cmW9/fbb+s9//qO4uDjry6b37dunwMDAbC8QAAAAAO53WQ5mw4cP14YNG9SjRw+98cYbKlWqlCRp2rRpql69erYXCAAAAAD3uyzfLj8sLMzproxpPvzwQ
7m4uGRLUQAAAADwIMlyMLsWDw+P7FoVAAAAADxQMhXM/P39tWvXLuXLl0958uSRw+G45tiTJ09mW3EAAAAA8CDIVDAbNmyYvL29JV2+xgwAAAAAkH0yFcyio6Mz/BkAAAAAcOsyFcwSExMzvUIfH5+bLgYAAAAAHkSZCmZ+fn7Xva5MuvzF0w6HQykpKdlSGAAAAAA8KDIVzBYvXny76wAAAACAB1amgllkZOTtrgMAAAAAHliZCmabN29W+fLllSNHDm3evPm6Y8PCwrKlMAAAAAB4UGQqmIWHhys+Pl4BAQEKDw+Xw+GQMSbdOK4xAwAAAICsy1Qw27dvn/Lnz2/9DAAAAADIPpkKZkWLFrV+PnDggKpXry5XV+dFk5OTtWLFCqexAAAAAIAby5HVBR577DGdPHkyXXtCQoIee+yxbCkKAAAAAB4kWQ5mad9XdrUTJ07I09MzS+uKjY3Vo48+Km9vbwUEBOjJJ5/Uzp07ncZcuHBBMTExyps3r7y8vNSyZUsdPXrUaczBgwfVpEkT5c6dWwEBAerbt6+Sk5OzumsAAAAAYItMncooSS1atJB0+QYfHTp0kLu7u9WXkpKizZs3q3r16lnaeFxcnGJiYvToo48qOTlZr7/+uho0aKBt27ZZIa9Xr17673//q6lTp8rX11c9evRQixYttHz5cmvbTZo0UVBQkFasWKEjR46offv2ypkzp959990s1QMAAAAAdsh0MPP19ZV0+YiZt7e3cuXKZfW5ubmpWrVq6tq1a5Y2/vPPPzs9Hz9+vAICArR+/XrVrl1bCQkJGjt2rCZNmqTHH39ckjRu3DiVLVtWq1atUrVq1fTLL79o27ZtWrBggQIDAxUeHq6hQ4fqtdde06BBg+Tm5palmgAAAADgTst0MBs3bpwkqVixYurTp0+WT1vMjISEBEmSv7+/JGn9+vW6dOmS6tWrZ40pU6aMihQpopUrV6patWpauXKlQkNDFRgYaI2JiopS9+7dtXXrVlWsWDHddpKSkpSUlGQ9T0xMzPZ9AQAAAIDMyvI1ZgMHDrwtoSw1NVU9e/ZUjRo1VL58eUlSfHy83Nzc5Ofn5zQ2MDBQ8fHx1pgrQ1laf1pfRmJjY+Xr62s9goODs3lvAAAAACDzMh3M8uTJI39//3SP4sWLKyoqSvPnz7+lQmJiYvT7779r8uTJt7SezOjfv78SEhKsx6FDh277NgEAAADgWjJ9KuPw4cMzbD99+rTWr1+vpk2batq0aWrWrFmWi+jRo4dmz56tpUuXqnDhwlZ7UFCQLl68qNOnTzsdNTt69KiCgoKsMWvWrHFaX9pdG9PGXM3d3d3p5iUAAAAAYKdMB7Po6Ojr9oeHhys2NjZLwcwYo5deekkzZ87UkiVLVLx4caf+SpUqKWfOnFq4cKFatmwpSdq5c6cOHjyoiIgISVJERITeeecdHTt2TAEBAZKk+fPny8fHRyEhIZmuBQAAAADskuVrzK6ladOm2rFjR5aWiYmJ0bfffqtJkybJ29tb8fHxio+P1z///CPp8p0gO3furN69e2vx4sVav369OnbsqIiICFWrVk2S1KBBA4WEhKhdu3b67bffNG/ePL355puKiYnhqBgAAACAe0Kmj5jdSFJSUpZvTT969GhJUp06dZzax40bpw4dOkiShg0bphw5cqhly5ZKSkpSVFSURo0aZY11cXHR7Nmz1b17d0VERMjT01PR0dEaMmTILe0PAAAAANwp2RbMxo4dq/Dw8CwtY4y54RgPDw+NHDlSI0eOvOaYokWLas6cOVnaNgAAAADcLTIdzHr37p1he0JCgjZs2KBdu3Zp6dKl2VYYAAAAADwoMh3MNm7cmGG7j4+P6tevrxkzZqS7eQcAAAAA4MYyHcwWL158O+sAAAAAgAdWtt2VEQAAAABwcwhmAAAAAGAzghkAAAAA2IxgBgAAAAA2y3Qw69Spk86cOXM7awEAAACAB1Kmg9mECRP0zz//3M5aAAAAAOCBlOlgZoy5nXUAAAAAwAMr099jJklnzpyRh4fHdcf4+PjcUkEAAAAA8KDJUjArXbr0NfuMMXI4HEpJSbnlogAAAADgQZKlYDZt2jT5+/vfrloAAAAA4IGUpWBWo0YNBQQE3K5aAAAAAOCBxPeYAQAAAIDNMh3MihYtKhcXl9tZCwAAAAA8kDJ9KuO+fftuZx0AAAAA8MDKdDDLkyePHA5HunZfX1+VLl1affr0Uf369bO1OAAAAAB4EGQ6mA0bNizDYHb69GmtX79eTZs21bRp09SsWbNsLRAAAAAA7neZDmYdOnS4bn94eLhiY2MJZgAAAACQRdl2V8amTZtqx44d2bU6AAAAAHhgZFswS0pKkpubW3atDgAAAAAeGNkWzMaOHavw8PDsWh0AAAAAPDAyfY1Z7969M2xPSEjQhg0btGvXLi1dujTbCgMAAACAB0Wmg9nGjRszbPfx8VH9+vU1Y8YMFS9ePNsKAwAAAIAHRaaD2eLFi6/b/+eff+r555/XmDFjbrkoAAAAAHiQZNs1ZidOnNDYsWOza3UAAAAA8MDItmAGAAAAALg5BDMAAAAAsBnBDAAAAABslumbf7Ro0eK6/adPn77VWgAAAADggZTpYObr63vD/vbt299yQQAAAADwoMl0MBs3btztrAMAAAAAHlhcYwYAAAAANrM1mC1dulTNmjVTwYIF5XA4NGvWLKf+Dh06yOFwOD0aNmzoNObkyZNq27atfHx85Ofnp86dO+vs2bN3cC8AAAAA4NbYGszOnTunChUqaOTIkdcc07BhQx05csR6fPfdd079bdu21datWzV//nzNnj1bS5cu1fPPP3+7SwcAAACAbJPpa8xuh0aNGqlRo0bXHePu7q6goKAM+7Zv366ff/5Za9euVeXKlSVJn332mRo3bqyPPvpIBQsWzPaaAQAAACC73fXXmC1ZskQBAQF6+OGH1b17d504ccLqW7lypfz8/KxQJkn16tVTjhw5tHr16muuMykpSYmJiU4PAAAAALDLXR3MGjZsqG+++UYLFy7U+++/r7i4ODVq1EgpKSmSpPj4eAUEBDgt4+rqKn9/f8XHx19zvbGxsfL19bUewcHBt3U/AAAAAOB6bD2V8UZat25t/RwaGqqwsDCVLFlSS5YsUd26dW96vf3791fv3r2t54mJiYQzAAAAALa5q4+YXa1EiRLKly+f9uzZI0kKCgrSsWPHnMYkJyfr5MmT17wuTbp83ZqPj4/TAwAAAADsck8Fsz///FMnTpxQgQIFJEkRERE6ffq01q9fb41ZtGiRUlNTVbVqVbvKBAAAAIAssfVUxrNnz1pHvyRp37592rRpk/z9/eXv76/BgwerZcuWCgoK0t69e/Xqq6+qVKlSioqKkiSVLVtWDRs2VNeuXfX555/r0qVL6tGjh1q3bs0dGQEAAADcM2w9YrZu3TpVrFhRFStWlCT17t1bFStW1IABA+Ti4qLNmzfriSeeUOnSpdW5c2dVqlRJy5Ytk7u7u7WOiRMnqkyZMqpbt64aN26smjVrasyYMXbtEgAAAABk
+      [... remainder of base64-encoded PNG data omitted; the rendered output is the bar chart "Estimated LUT values used for each network layer" (estimated LUT utilisation per network layer) ...]",
+      "text/plain": [
+       ""
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layers = list(res_dict.keys())\n", + "utilisation = list(res_dict.values())\n", + "lut_values = [] #Initializing a list to store LUT values.\n", + "for i in range(len(layers)):\n", + " x = list(utilisation[i].values()) #Extracting the resource utilisation for each layer as a list.\n", + " lut_values.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", + " \n", + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(layers, lut_values, color ='green', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"LUT Utilisation\")\n", + "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note, from the above result we observe that the bottleneck in the execution of the model on hardware would come from the execution of the first layer which takes estimated 38400 clock cycles to execute one set of its inputs.\n", + "No matter how quickly the layers execute the (throughput or latency?) will be defined by the first layer's execution latency.\n", + "\n", + "So our goal to adjust the folding parameters would be to expand the computation of the first layer to reduce its latency at the expense an of increase in resource utilization." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "Question in the first line of the above cell.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# QuickNote : StreamingDataWidthConverter Layer" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Instream Width = 1 Outstream Width = 2\n", + "Instream Width = 2 Outstream Width = 2\n", + "Instream Width = 2 Outstream Width = 2\n", + "Instream Width = 2 Outstream Width = 1\n" + ] + } + ], + "source": [ + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "for fcl in fc_layers:\n", + " fcl_inst = getCustomOp(fcl)\n", + " print('Instream Width =',(fcl_inst.get_instream_width()),'Outstream Width =',int(fcl_inst.get_outstream_width()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also view the `instream_width` and `outstream_width` of each layer using the `get_instream_width()` and `get_outstream_width()` helper functions. These widths are of particular importance as for a (balanced pipeline?) these width's should be the same.\n", + "\n", + "For example, the outwidth of a given layer of the network should match the inwidth of the next layer for the (pipeline to be stable?). If they are not the same then the FINN compiler adds an extra `streamingdatawidthconverter` (which increases the overall resource utilization of the design slightly) layer to make sure these widths match.\n", + "\n", + "Note, that if these widths are the same then even if we call the `InsertDWC()` transformation on our model (responsible for adding the above layer), the datawidth conversion layers will not be a part of our model as shown in the below cells. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "Question in the first and the second line of the above cell.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from finn.transformation.fpgadataflow.insert_dwc import InsertDWC\n", + "model = model.transform(InsertDWC())" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping http://0.0.0.0:5901\n", + "Serving './cybsec_DWC_not_inserted.onnx' at http://0.0.0.0:5901\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.save(\"./cybsec_DWC_not_inserted.onnx\")\n", + "showInNetron(\"./cybsec_DWC_not_inserted.onnx\",localhost_url='xirxlabs53')#localhost_url='xirxlabs60'" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "class MatrixVectorActivation(HLSCustomOp):\n", + " \"\"\"Class that corresponds to finn-hls Matrix_Vector_Activate(_Stream)_Batch\n", + " function.\"\"\"\n", + "\n", + " def __init__(self, onnx_node):\n", + " super().__init__(onnx_node)\n", + " self.decoupled_wrapper = templates.decoupled_wrapper\n", + "\n", + " def get_nodeattr_types(self):\n", + " my_attrs = {\n", + " \"PE\": (\"i\", True, 0),\n", + " \"SIMD\": (\"i\", True, 0),\n", + " \"MW\": (\"i\", True, 0),\n", + " \"MH\": (\"i\", True, 0),\n", + " \"resType\": (\"s\", False, \"lut\", {\"auto\", \"lut\", \"dsp\"}),\n", + " \"ActVal\": (\"i\", False, 0),\n", + " # FINN DataTypes for inputs, weights, outputs\n", + " \"inputDataType\": (\"s\", True, \"\"),\n", + " \"weightDataType\": (\"s\", True, \"\"),\n", + " \"outputDataType\": (\"s\", True, \"\"),\n", + " # FINN DataType for accumulator -- auto-computed and updated\n", + " \"accDataType\": (\"s\", False, \"INT32\"),\n", + " # use xnor-popcount for binary weights/inputs, thus treating them\n", + " # as bipolar\n", + " \"binaryXnorMode\": (\"i\", False, 0, {0, 1}),\n", + " # no-activation mode (produce accumulators)\n", + " \"noActivation\": (\"i\", False, 0, {0, 1}),\n", + " # number of input vectors, examples:\n", + " # [1] is a single vector (like a FC layer with batch=1)\n", + " # [4] is four vectors (like a FC layer with batch=4)\n", + " # [1, 4, 4] is four * four vectors (like a conv layer with batch=1)\n", + " \"numInputVectors\": (\"ints\", False, [1]),\n", + " # memory mode for the FC weights\n", + " # const -- embedded weights, default, long compile/synth times\n", + " # decoupled -- streaming weights with weight streamer packaged inside IP\n", + " # external -- streaming weights with external streamer\n", + " \"mem_mode\": (\"s\", False, \"const\", {\"const\", \"decoupled\", \"external\"}),\n", + " # FPGA resource type for memories in decoupled mode\n", + " # auto -- let Vivado decide\n", + " # block -- use BRAM\n", + " # distributed -- use LUTRAM\n", + " # ultra -- use UltraRAM (URAM), must have runtime_writeable_weights=1\n", + " # see also https://www.xilinx.com/support/answers/38070.html\n", + " \"ram_style\": (\n", + " \"s\",\n", + " False,\n", + " \"auto\",\n", + " {\"auto\", \"block\", \"distributed\", \"ultra\"},\n", + " ),\n", + " # FPGA resource type for threshold memories (if noActivation is False)\n", + " # auto -- let Vivado decide\n", + " # block -- use BRAM\n", + " # distributed -- use LUTRAM\n", + " \"ram_style_thresholds\": (\n", + " \"s\",\n", + " False,\n", + " 
\"auto\",\n", + " {\"auto\", \"block\", \"distributed\"},\n", + " ),\n", + " # (mem_mode = decoupled only) whether weights will be writable through\n", + " # an AXI-lite interface during runtime\n", + " # 1 for enabled, 0 for disabled.\n", + " # see finn-rtllib/memstream/doc/README for more about the memory\n", + " # address map used for writable weights\n", + " # IMPORTANT: After using AXI lite to either read or write the weights,\n", + " # always \"flush\" the accelerator by first passing a dummy input\n", + " # vector through the accelerator. This will get rid of any old\n", + " # weight data from the weight FIFOs.\n", + " \"runtime_writeable_weights\": (\"i\", False, 0, {0, 1}),\n", + " }\n", + " my_attrs.update(super().get_nodeattr_types())\n", + " return my_attrs\n", + "\n", + " def calc_wmem(self):\n", + " \"\"\"Calculates and returns WMEM.\"\"\"\n", + " mw = self.get_nodeattr(\"MW\")\n", + " mh = self.get_nodeattr(\"MH\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", + " assert mw % simd == 0, \"Requirement MW divisable by SIMD is violated.\"\n", + " wmem = mw * mh // (pe * simd)\n", + " return wmem\n", + "\n", + " def calc_tmem(self):\n", + " \"\"\"Calculates and returns TMEM.\"\"\"\n", + " if self.get_nodeattr(\"noActivation\") == 1:\n", + " return 0\n", + " else:\n", + " mh = self.get_nodeattr(\"MH\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " return mh // pe\n", + "\n", + " def make_shape_compatible_op(self, model):\n", + " oshape = self.get_normal_output_shape()\n", + " return super().make_const_shape_op(oshape)\n", + "\n", + " def infer_node_datatype(self, model):\n", + " node = self.onnx_node\n", + " idt = model.get_tensor_datatype(node.input[0])\n", + " if idt != self.get_input_datatype():\n", + " warn_str = \"inputDataType changing for %s: %s -> %s \" % (\n", + " node.name,\n", + " str(self.get_input_datatype()),\n", + " str(idt),\n", + " )\n", + " warnings.warn(warn_str)\n", + " self.set_nodeattr(\"inputDataType\", idt.name)\n", + " # set output datatype from property\n", + " odt = self.get_output_datatype()\n", + " model.set_tensor_datatype(node.output[0], odt)\n", + "\n", + " def verify_node(self):\n", + " info_messages = []\n", + " # verify that \"backend\" is set to \"fpgadataflow\"\n", + " backend_value = self.get_nodeattr(\"backend\")\n", + " if backend_value == \"fpgadataflow\":\n", + " info_messages.append(\"Attribute backend is set correctly\")\n", + " else:\n", + " info_messages.append('Attribute backend should be set to \"fpgadataflow\"')\n", + "\n", + " # verify that all necessary attributes exist\n", + " # TODO collect automatically from get_nodeattr_types\n", + " try:\n", + " self.get_nodeattr(\"code_gen_dir_cppsim\")\n", + " self.get_nodeattr(\"executable_path\")\n", + " self.get_nodeattr(\"resType\")\n", + " self.get_nodeattr(\"MW\")\n", + " self.get_nodeattr(\"MH\")\n", + " self.get_nodeattr(\"SIMD\")\n", + " self.get_nodeattr(\"PE\")\n", + " self.get_nodeattr(\"inputDataType\")\n", + " self.get_nodeattr(\"weightDataType\")\n", + " self.get_nodeattr(\"outputDataType\")\n", + " info_messages.append(\"All necessary attributes exist\")\n", + " except Exception:\n", + " info_messages.append(\n", + " \"\"\"The required MatrixVectorActivation attributes do not exist.\"\"\"\n", + " )\n", + "\n", + " # verify the number of inputs depending on noActivation value\n", + " # check noActivation value to determine the number of inputs\n", + " no_act 
= self.get_nodeattr(\"noActivation\")\n", + "\n", + " if no_act == 1:\n", + " if len(self.onnx_node.input) == 2:\n", + " info_messages.append(\"The number of inputs is correct\")\n", + " else:\n", + " info_messages.append(\n", + " \"\"\"MatrixVectorActivation needs in no\n", + " activation mode 2 inputs (data input and weights)\"\"\"\n", + " )\n", + " elif no_act == 0:\n", + " if len(self.onnx_node.input) == 3:\n", + " info_messages.append(\"The number of inputs is correct\")\n", + " else:\n", + " info_messages.append(\n", + " \"\"\"MatrixVectorActivation needs 3 inputs\n", + " (data input and weights and threshold values)\"\"\"\n", + " )\n", + " else:\n", + " info_messages.append(\n", + " \"\"\"noActivation attribute contains {} should\n", + " be 0 or 1\"\"\".format(\n", + " no_act\n", + " )\n", + " )\n", + "\n", + " return info_messages\n", + "\n", + " def uram_estimation(self):\n", + " P = self.get_nodeattr(\"PE\")\n", + " Q = self.get_nodeattr(\"SIMD\")\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " D_in = self.get_nodeattr(\"MW\")\n", + " D_out = self.get_nodeattr(\"MH\")\n", + " omega = (D_in * D_out) / (Q * P)\n", + " mem_width = Q * W * P\n", + " mmode = self.get_nodeattr(\"mem_mode\")\n", + " mstyle = self.get_nodeattr(\"ram_style\")\n", + " if (\n", + " (mmode == \"decoupled\" and mstyle != \"ultra\")\n", + " or (mmode == \"const\" and self.calc_wmem() <= 128)\n", + " or (mmode == \"external\")\n", + " ):\n", + " return 0\n", + " width_multiplier = math.ceil(mem_width / 72)\n", + " depth_multiplier = math.ceil(omega / 4096)\n", + " return width_multiplier * depth_multiplier\n", + "\n", + " def bram_estimation(self):\n", + " \"\"\"Calculates resource estimation for BRAM based on:\n", + " - FINN-R: An End-to-End Deep-Learning Framework for Fast\n", + " Exploration of Quantized Neural Networks\n", + " - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,\n", + " Y. Umuroglu, M. Leeser and K. Vissers\n", + " - 12. 
Sep 2018\n", + " \"\"\"\n", + " # TODO add in/out FIFO contributions\n", + " P = self.get_nodeattr(\"PE\")\n", + " Q = self.get_nodeattr(\"SIMD\")\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " D_in = self.get_nodeattr(\"MW\")\n", + " D_out = self.get_nodeattr(\"MH\")\n", + " omega = (D_in * D_out) / (Q * P)\n", + " mem_width = Q * W * P\n", + " mmode = self.get_nodeattr(\"mem_mode\")\n", + " mstyle = self.get_nodeattr(\"ram_style\")\n", + " if (\n", + " (mmode == \"decoupled\" and mstyle in [\"distributed\", \"ultra\"])\n", + " or (mmode == \"const\" and self.calc_wmem() <= 128)\n", + " or (mmode == \"external\")\n", + " ):\n", + " return 0\n", + " # assuming SDP mode RAMB18s (see UG573 Table 1-10)\n", + " # assuming decoupled (RTL) memory, which is more efficient than const (HLS)\n", + " if mem_width == 1:\n", + " return math.ceil(omega / 16384)\n", + " elif mem_width == 2:\n", + " return math.ceil(omega / 8192)\n", + " elif mem_width <= 4:\n", + " return (math.ceil(omega / 4096)) * (math.ceil(mem_width / 4))\n", + " elif mem_width <= 9:\n", + " return (math.ceil(omega / 2048)) * (math.ceil(mem_width / 9))\n", + " elif mem_width <= 18 or omega > 512:\n", + " return (math.ceil(omega / 1024)) * (math.ceil(mem_width / 18))\n", + " else:\n", + " return (math.ceil(omega / 512)) * (math.ceil(mem_width / 36))\n", + "\n", + " def bram_efficiency_estimation(self):\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " D_in = self.get_nodeattr(\"MW\")\n", + " D_out = self.get_nodeattr(\"MH\")\n", + " bram16_est = self.bram_estimation()\n", + " if bram16_est == 0:\n", + " return 1\n", + " wbits = W * D_in * D_out\n", + " bram16_est_capacity = bram16_est * 36 * 512\n", + " return wbits / bram16_est_capacity\n", + "\n", + " def uram_efficiency_estimation(self):\n", + " \"\"\"Function for URAM efficiency estimation: actual parameter storage\n", + " needed divided by the allocated URAM storage (from estimation)\"\"\"\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " D_in = self.get_nodeattr(\"MW\")\n", + " D_out = self.get_nodeattr(\"MH\")\n", + " uram_est = self.uram_estimation()\n", + " if uram_est == 0:\n", + " return 1\n", + " wbits = W * D_in * D_out\n", + " uram_est_capacity = uram_est * 72 * 4096\n", + " return wbits / uram_est_capacity\n", + "\n", + " def lut_estimation(self):\n", + " \"\"\"Calculates resource estimations for LUTs based on:\n", + " - FINN-R: An End-to-End Deep-Learning Framework for Fast\n", + " Exploration of Quantized Neural Networks\n", + " - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,\n", + " Y. Umuroglu, M. Leeser and K. Vissers\n", + " - 12. 
Sep 2018\n", + " \"\"\"\n", + " # TODO add in/out FIFO contributions\n", + " P = self.get_nodeattr(\"PE\")\n", + " Q = self.get_nodeattr(\"SIMD\")\n", + " MW = self.get_nodeattr(\"MW\")\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " # determine tdt with input and weight data types\n", + " idt = self.get_input_datatype()\n", + " A = idt.bitwidth()\n", + " # parameters from experiments in paper mentioned above\n", + " c0 = 300\n", + " c1 = 1.1\n", + " c2 = 0\n", + " mmode = self.get_nodeattr(\"mem_mode\")\n", + " mstyle = self.get_nodeattr(\"ram_style\")\n", + " if (mmode == \"decoupled\" and mstyle == \"distributed\") or (\n", + " mmode == \"const\" and self.calc_wmem() <= 128\n", + " ):\n", + " c2 = (P * Q * W) * math.ceil(self.calc_wmem() / 64)\n", + "\n", + " # multiplication\n", + " res_type = self.get_nodeattr(\"resType\")\n", + " if res_type == \"dsp\":\n", + " mult_luts = 0\n", + " else:\n", + " mult_luts = Q * (2 * math.ceil((W + A) / 6) - 1) * (W + A)\n", + " # adder tree\n", + " addertree_luts = (W + A) * (2 * Q - 1)\n", + " # accumulator\n", + " acc_bits = W + A + np.ceil(math.log(MW, 2))\n", + " acc_luts = acc_bits\n", + " # thresholds and threshold comparators\n", + " thr_luts = 0\n", + " comp_luts = 0\n", + " noact = self.get_nodeattr(\"noActivation\")\n", + " if noact == 0:\n", + " odt = self.get_output_datatype()\n", + " B = odt.bitwidth()\n", + " thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)\n", + " comp_luts = (2**B - 1) * acc_bits\n", + "\n", + " return int(\n", + " c0\n", + " + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts))\n", + " + c2\n", + " )\n", + "\n", + " def dsp_estimation(self):\n", + " # multiplication\n", + " P = self.get_nodeattr(\"PE\")\n", + " res_type = self.get_nodeattr(\"resType\")\n", + " Q = self.get_nodeattr(\"SIMD\")\n", + " wdt = self.get_weight_datatype()\n", + " W = wdt.bitwidth()\n", + " idt = self.get_input_datatype()\n", + " A = idt.bitwidth()\n", + " if res_type == \"dsp\":\n", + " mult_dsp = P * Q * np.ceil((W + A) / 48) # TODO: more accurate modelling\n", + " else:\n", + " mult_dsp = 0\n", + " return int(mult_dsp)\n", + "\n", + " def get_exp_cycles(self):\n", + " pe = self.get_nodeattr(\"PE\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " num_inp_vec = self.get_nodeattr(\"numInputVectors\")\n", + " mh = self.get_nodeattr(\"MH\")\n", + " mw = self.get_nodeattr(\"MW\")\n", + " # since mmv != 1 is not supported yet, we set mmv for now to 1\n", + " mmv = 1\n", + " exp_cycles = (mh / pe) * (mw / simd) * np.prod(num_inp_vec) / mmv\n", + " return int(exp_cycles)\n", + "\n", + " def get_input_datatype(self, ind=0):\n", + " \"\"\"Returns FINN DataType of input.\"\"\"\n", + " # when performing FIFO insertion on an FC layer with ext weights, the ind\n", + " # parameter can be > 0 (referring to the weights) so handle that here\n", + " if ind == 0:\n", + " return DataType[self.get_nodeattr(\"inputDataType\")]\n", + " elif ind == 1:\n", + " return DataType[self.get_nodeattr(\"weightDataType\")]\n", + " else:\n", + " raise Exception(\"Undefined input ind for this layer type\")\n", + "\n", + " def get_weight_datatype(self):\n", + " \"\"\"Returns FINN DataType of weights.\"\"\"\n", + " return DataType[self.get_nodeattr(\"weightDataType\")]\n", + "\n", + " def get_output_datatype(self, ind=0):\n", + " \"\"\"Returns FINN DataType of output.\"\"\"\n", + " return DataType[self.get_nodeattr(\"outputDataType\")]\n", + "\n", + " def get_instream_width(self, ind=0):\n", + " i_bits = 
self.get_input_datatype().bitwidth()\n", + " in_width = i_bits * self.get_nodeattr(\"SIMD\")\n", + " return in_width\n", + "\n", + " def get_outstream_width(self, ind=0):\n", + " o_bits = self.get_output_datatype().bitwidth()\n", + " out_width = o_bits * self.get_nodeattr(\"PE\")\n", + " return out_width\n", + "\n", + " def get_weightstream_width(self):\n", + " \"\"\"Returns weight stream width. Used only in decoupled mode.\"\"\"\n", + " if (\n", + " self.get_nodeattr(\"mem_mode\") == \"decoupled\"\n", + " or self.get_nodeattr(\"mem_mode\") == \"external\"\n", + " ):\n", + " pe = self.get_nodeattr(\"PE\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " wp = self.get_weight_datatype().bitwidth()\n", + " w_width = pe * simd * wp\n", + " return w_width\n", + " else:\n", + " return 0\n", + "\n", + " def get_weightstream_width_padded(self):\n", + " \"\"\"Returns weight stream width padded to a multiple of 8. This is required\n", + " by the AXI Stream spec. Used in decoupled mode.\"\"\"\n", + " weight_width = self.get_weightstream_width()\n", + " return roundup_to_integer_multiple(weight_width, 8)\n", + "\n", + " def get_ap_int_max_w(self):\n", + " # base class impl (max of inp/out stream widths)\n", + " max_of_io = super().get_ap_int_max_w()\n", + " # decoupled mode weight stream\n", + " weightstream = self.get_weightstream_width()\n", + " # single PE weight entry\n", + " weight_bits = self.get_weight_datatype().bitwidth()\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " single_pe_w = simd * weight_bits\n", + " return max([weightstream, max_of_io, single_pe_w])\n", + "\n", + " def get_folded_input_shape(self, ind=0):\n", + " mw = self.get_nodeattr(\"MW\")\n", + " mh = self.get_nodeattr(\"MH\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " sf = mw // simd\n", + " nf = mh // pe\n", + " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", + "\n", + " if ind == 0:\n", + " # calculate shape of input 0\n", + " folded_input_shape = tuple(vecs + [sf, simd])\n", + " elif ind == 1 and self.get_nodeattr(\"mem_mode\") == \"external\":\n", + " # calculate shape of input 1 (weights)\n", + " folded_input_shape = tuple(vecs + [sf * nf, simd * pe])\n", + " else:\n", + " raise Exception(\"Undefined input shape for requested input\")\n", + "\n", + " return folded_input_shape\n", + "\n", + " def get_folded_output_shape(self, ind=0):\n", + " mh = self.get_nodeattr(\"MH\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " nf = mh // pe\n", + " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", + " folded_output_shape = tuple(vecs + [nf, pe])\n", + " return folded_output_shape\n", + "\n", + " def get_normal_input_shape(self, ind=0):\n", + " mw = self.get_nodeattr(\"MW\")\n", + " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", + " normal_input_shape = tuple(vecs + [mw])\n", + " return normal_input_shape\n", + "\n", + " def get_normal_output_shape(self, ind=0):\n", + " mh = self.get_nodeattr(\"MH\")\n", + " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", + " normal_output_shape = tuple(vecs + [mh])\n", + " return normal_output_shape\n", + "\n", + " def get_number_output_values(self):\n", + " nf = np.prod(self.get_folded_output_shape()[:-1])\n", + " return nf\n", + "\n", + " def get_template_param_values(self):\n", + " \"\"\"Returns the template parameter values according to input, output and weight\n", + " data types.\"\"\"\n", + " ret = dict()\n", + " inp_hls_str = self.get_input_datatype().get_hls_datatype_str()\n", + " out_hls_str = 
self.get_output_datatype().get_hls_datatype_str()\n", + " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", + " # out_is_binary = self.get_output_datatype() == DataType[\"BINARY\"]\n", + " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", + " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", + " if (inp_is_binary or wt_is_binary) and (not bin_xnor_mode):\n", + " raise Exception(\"True binary (non-bipolar) inputs not yet supported\")\n", + " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", + " # out_is_bipolar = self.get_output_datatype() == DataType[\"BIPOLAR\"]\n", + " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", + " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", + " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", + " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", + " # fill in TSrcI and TWeightI\n", + " # TODO check these with Giulio\n", + " # TODO handle non-bipolar binary inputs\n", + " if inp_is_bipolar and wt_is_bipolar:\n", + " ret[\"TSrcI\"] = \"Recast\"\n", + " ret[\"TWeightI\"] = \"Identity\"\n", + " elif (not inp_is_bipolar) and wt_is_bipolar:\n", + " ret[\"TSrcI\"] = \"Slice<%s>\" % inp_hls_str\n", + " ret[\"TWeightI\"] = \"Recast\"\n", + " elif inp_is_bipolar and (not wt_is_bipolar):\n", + " ret[\"TSrcI\"] = \"Recast\"\n", + " ret[\"TWeightI\"] = \"Identity\"\n", + " elif (not inp_is_bipolar) and (not wt_is_bipolar):\n", + " ret[\"TSrcI\"] = \"Slice<%s>\" % inp_hls_str\n", + " ret[\"TWeightI\"] = \"Identity\"\n", + "\n", + " # fill in TDstI\n", + " ret[\"TDstI\"] = \"Slice<%s>\" % out_hls_str\n", + "\n", + " return ret\n", + "\n", + " def get_hls_compatible_weight_tensor(self, orig_weight_matrix):\n", + " \"\"\"Convert the original numpy weight matrix orig_weight_matrix into\n", + " a form suitable for passing to the hlslib call:\n", + " * ensure MH % PE == 0 and MW % SIMD == 0\n", + " * for bipolar {-1,+1} weights, convert to binary {0, 1}\n", + " * interleave rows between PEs\n", + " * reshape into (1, PE, WMEM, SIMD) and return\n", + " \"\"\"\n", + " mw = self.get_nodeattr(\"MW\")\n", + " mh = self.get_nodeattr(\"MH\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " wmem = self.calc_wmem()\n", + " assert orig_weight_matrix.shape == (\n", + " mw,\n", + " mh,\n", + " ), \"\"\"Weights matrix doesn't\n", + " have expected shape (mw, mh)\"\"\"\n", + " assert mw % simd == 0, \"Requirement MH divisable by SIMD is violated.\"\n", + " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", + " # start by transposing the original weight matrix, since ONNX and\n", + " # finn-hlslib use different assumptions\n", + " # ONNX uses (in_features, out_features) and matmul(x, W)\n", + " # finn-hlslib uses (out_features, in_features) and matmul(W, x)\n", + " ret = orig_weight_matrix.T\n", + " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", + " # convert bipolar to binary\n", + " ret = (ret + 1) / 2\n", + " # interleave rows between PEs and reshape\n", + " # distribute rows between PEs\n", + " ret = interleave_matrix_outer_dim_from_partitions(ret, pe)\n", + " # create SIMD as innermost dimension and add a dummy outer dim\n", + " ret = ret.reshape(1, pe, wmem, simd)\n", + " # reverse the SIMD dimension\n", + " ret = np.flip(ret, axis=-1)\n", + " return ret\n", + "\n", + " def minimize_accumulator_width(self, model):\n", + " weights = 
model.get_initializer(self.onnx_node.input[1])\n", + " # since in the calculation the values of the weight matrix are used,\n", + " # for the bipolar case they need to be converted to bipolar\n", + " if self.get_nodeattr(\"binaryXnorMode\"):\n", + " weights = 2 * weights - 1\n", + " if len(self.onnx_node.input) > 2:\n", + " thresholds = model.get_initializer(self.onnx_node.input[2])\n", + " else:\n", + " thresholds = None\n", + " idt = self.get_input_datatype()\n", + " # calculate minimum and maximum values of accumulator\n", + " (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt)\n", + " if thresholds is not None:\n", + " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", + " # set threshold datatype (and accumulator datatype implicitly)\n", + " min_threshold = thresholds.min()\n", + " max_threshold = thresholds.max()\n", + " # clip threshold values\n", + " clip_upper = None\n", + " clip_lower = None\n", + " if max_threshold > acc_max + 1:\n", + " clip_upper = acc_max + 1\n", + " if min_threshold < acc_min:\n", + " clip_lower = acc_min\n", + " if (clip_lower is not None) or (clip_upper is not None):\n", + " warnings.warn(\"Clipping some thresholds in %s\" % self.onnx_node.name)\n", + " thresholds = np.clip(thresholds, clip_lower, clip_upper)\n", + " model.set_initializer(self.onnx_node.input[2], thresholds)\n", + " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", + " min_threshold = thresholds.min()\n", + " max_threshold = thresholds.max()\n", + " # get range required by threshold values\n", + " tdt_min = min(acc_min, min_threshold)\n", + " tdt_max = max(acc_max, max_threshold)\n", + " if tdt_min < 0:\n", + " if abs(tdt_min) > tdt_max:\n", + " tdt = DataType.get_smallest_possible(tdt_min)\n", + " else:\n", + " tdt = DataType.get_smallest_possible(-tdt_max - 1)\n", + " else:\n", + " tdt = DataType.get_smallest_possible(tdt_max)\n", + " assert np.vectorize(tdt.allowed)(\n", + " threshold_tensor\n", + " ).all(), \"Thresholds in %s can't be expressed with type %s\" % (\n", + " self.onnx_node.name,\n", + " str(tdt),\n", + " )\n", + " self.set_nodeattr(\"accDataType\", tdt.name)\n", + " else:\n", + " if acc_min < 0:\n", + " if abs(acc_min) > acc_max:\n", + " adt = DataType.get_smallest_possible(acc_min)\n", + " else:\n", + " adt = DataType.get_smallest_possible(-acc_max - 1)\n", + " else:\n", + " adt = DataType.get_smallest_possible(acc_max)\n", + " # ensure a datatype divisible by 8-bits in case this is the last node\n", + " bw = roundup_to_integer_multiple(adt.bitwidth(), 8)\n", + " new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw))\n", + " adt = DataType[new_adt_name]\n", + " self.set_nodeattr(\"accDataType\", adt.name)\n", + " # for no-activation nodes, output dt = acc dt\n", + " self.set_nodeattr(\"outputDataType\", adt.name)\n", + " return DataType[self.get_nodeattr(\"accDataType\")]\n", + "\n", + " def get_hls_compatible_threshold_tensor(self, orig_thres_matrix):\n", + " \"\"\"Convert the original numpy weight matrix orig_weight_matrix into\n", + " a form suitable for passing to the hlslib call:\n", + " * ensure MH % PE == 0\n", + " * for bipolar weights&inputs, ensure thresholds are positive\n", + " * interleave rows between PEs\n", + " * reshape into (PE, TMEM, n_thres_steps) and return\n", + " \"\"\"\n", + " mh = self.get_nodeattr(\"MH\")\n", + " pe = self.get_nodeattr(\"PE\")\n", + " tmem = mh // pe\n", + " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", + " assert (\n", + " 
orig_thres_matrix.ndim == 2\n", + " ), \"\"\"Threshold matrix dimension is\n", + " not as expected (2).\"\"\"\n", + " n_thres_steps = orig_thres_matrix.shape[1]\n", + " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", + " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", + " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", + " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", + " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", + " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", + " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", + " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", + " if inp_is_bipolar and wt_is_bipolar:\n", + " # ensure all thresholds are nonnegative\n", + " assert (orig_thres_matrix >= 0).all()\n", + " # ensure all thresholds are integer\n", + " assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all()\n", + " ret = orig_thres_matrix\n", + " # workaround for vivado_hls threshold bug\n", + " if ret[0][0] == 0 and n_thres_steps == 1:\n", + " ret = np.copy(ret)\n", + " ret[0][0] = 1\n", + " warnings.warn(\n", + " \"Setting 0-valued first threshold to 1 to avoid vivado_hls bug\"\n", + " )\n", + " # ensure channels = mh , duplicating if necessary\n", + " if ret.shape[0] == 1:\n", + " ret = np.tile(ret, (mh, 1))\n", + " assert (\n", + " ret.shape[0] == mh\n", + " ), \"Channels of threshold matrix are not as expected (mh)\"\n", + " # distribute rows between PEs\n", + " ret = interleave_matrix_outer_dim_from_partitions(ret, pe)\n", + " assert (\n", + " ret.shape[0] == pe\n", + " ), \"\"\"First dimension after distribution of the\n", + " rows between PEs is not as expected (pe)\"\"\"\n", + " assert (\n", + " ret.shape[1] == tmem\n", + " ), \"\"\"Second dimension after distribution of the\n", + " rows between PEs is not as expected (tmem)\"\"\"\n", + " assert (\n", + " ret.shape[2] == n_thres_steps\n", + " ), \"\"\"Third dimension after distribution of the\n", + " rows between PEs is not as expected (n_thres_steps)\"\"\"\n", + " return ret.reshape(1, pe, tmem, n_thres_steps)\n", + "\n", + " def make_weight_file(self, weights, weight_file_mode, weight_file_name):\n", + " \"\"\"Produce a file containing given weights in appropriate format for this\n", + " layer. 
This file can be used for either synthesis or run-time reconfig\n", + " of weights.\n", + "\n", + " Arguments:\n", + "\n", + " * weights : numpy array with weights to be put into the file\n", + " * weight_file_mode : one of {hls_header, decoupled_verilog_dat,\n", + " decoupled_runtime}\n", + " * weight_file_name : filename for the weight file to be generated\n", + "\n", + " \"\"\"\n", + " # convert weights into hlslib-compatible format\n", + " weight_tensor = self.get_hls_compatible_weight_tensor(weights)\n", + " export_wdt = self.get_weight_datatype()\n", + " # we have converted bipolar weights to binary for export,\n", + " # so use it as such for weight generation\n", + " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", + " export_wdt = DataType[\"BINARY\"]\n", + " if weight_file_mode == \"hls_header\":\n", + " weight_hls_code = numpy_to_hls_code(\n", + " weight_tensor, export_wdt, \"weights\", True, True\n", + " )\n", + " # write weights into C++ header file as dictated by finn-hlslib\n", + " f_weights = open(weight_file_name, \"w\")\n", + " if export_wdt.bitwidth() != 1:\n", + " f_weights.write(\n", + " \"const FixedPointWeights<{},{},{},{}> weights = \".format(\n", + " self.get_nodeattr(\"SIMD\"),\n", + " export_wdt.get_hls_datatype_str(),\n", + " self.get_nodeattr(\"PE\"),\n", + " self.calc_wmem(),\n", + " )\n", + " )\n", + " else:\n", + " f_weights.write(\n", + " \"const BinaryWeights<{},{},{}> weights = \".format(\n", + " self.get_nodeattr(\"SIMD\"),\n", + " self.get_nodeattr(\"PE\"),\n", + " self.calc_wmem(),\n", + " )\n", + " )\n", + " f_weights.write(weight_hls_code)\n", + " f_weights.close()\n", + " elif \"decoupled\" in weight_file_mode:\n", + " # create a weight stream for various flavors of decoupled mode:\n", + " # transpose weight tensor from (1, PE, WMEM, SIMD) to (1, WMEM, PE, SIMD)\n", + " weight_tensor_unflipped = np.transpose(weight_tensor, (0, 2, 1, 3))\n", + " # reverse SIMD flip for saving weights in .npy\n", + " weight_tensor_simd_flipped = np.flip(weight_tensor_unflipped, axis=-1)\n", + " # PE flip for saving weights in .dat\n", + " weight_tensor_pe_flipped = np.flip(weight_tensor_unflipped, axis=-2)\n", + " # reshape weight tensor (simd_flipped and pe_flipped) to desired shape\n", + " pe = self.get_nodeattr(\"PE\")\n", + " simd = self.get_nodeattr(\"SIMD\")\n", + " # simd_flipped\n", + " weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape(\n", + " 1, -1, pe * simd\n", + " )\n", + " weight_tensor_simd_flipped = weight_tensor_simd_flipped.copy()\n", + " # flipped\n", + " weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape(\n", + " 1, -1, pe * simd\n", + " )\n", + " weight_tensor_pe_flipped = weight_tensor_pe_flipped.copy()\n", + " if weight_file_mode == \"decoupled_npy\":\n", + " # save weight stream into npy for cppsim\n", + " np.save(weight_file_name, weight_tensor_simd_flipped)\n", + " elif weight_file_mode == \"decoupled_verilog_dat\":\n", + " # convert weight values into hexstring\n", + " weight_width = self.get_weightstream_width()\n", + " # pad to nearest 4 bits to get hex strings\n", + " weight_width_padded = roundup_to_integer_multiple(weight_width, 4)\n", + " weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string(\n", + " weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix=\"\"\n", + " )\n", + " # add zeroes to pad out file to 1024 entries\n", + " weight_stream = weight_tensor_pe_flipped.flatten()\n", + " weight_stream = weight_stream.copy()\n", + " with open(weight_file_name, \"w\") as f:\n", + " for val 
in weight_stream:\n", + " f.write(val + \"\\n\")\n", + " elif weight_file_mode == \"decoupled_runtime\":\n", + " # memstream axi-lite interface will map each mem line to\n", + " # one or multiple 32-bit words\n", + " weight_width = self.get_weightstream_width()\n", + " words_per_memwidth = 2 ** math.ceil(math.log2(weight_width / 32))\n", + " if words_per_memwidth < 1:\n", + " words_per_memwidth = 1\n", + " weight_width_padded = words_per_memwidth * 32\n", + " # first, pack and ensure padding to 32 bits\n", + " weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string(\n", + " weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix=\"\"\n", + " )\n", + " weight_stream = weight_tensor_pe_flipped.flatten()\n", + " weight_stream = weight_stream.copy()\n", + " with open(weight_file_name, \"w\") as f:\n", + " for val in weight_stream:\n", + " # split into groups of 8 hex digits (= 32 bits)\n", + " words_32b = textwrap.wrap(val, 8)\n", + " words_32b.reverse()\n", + " for word_32b in words_32b:\n", + " f.write(word_32b + \"\\n\")\n", + " else:\n", + " raise Exception(\"Unknown weight_file_mode\")\n", + "\n", + " else:\n", + " raise Exception(\"Unknown weight_file_mode\")\n", + "\n", + " def generate_params(self, model, path):\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " code_gen_dir = path\n", + " # weights, if not external\n", + " weights = model.get_initializer(self.onnx_node.input[1])\n", + " if mem_mode == \"const\":\n", + " # save hlslib-compatible weights in params.h\n", + " weight_filename = \"{}/params.h\".format(code_gen_dir)\n", + " self.make_weight_file(weights, \"hls_header\", weight_filename)\n", + " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " weight_filename_sim = \"{}/weights.npy\".format(code_gen_dir)\n", + " # save decoupled weights for cppsim\n", + " self.make_weight_file(weights, \"decoupled_npy\", weight_filename_sim)\n", + " if mem_mode == \"decoupled\":\n", + " # also save weights as Verilog .dat file\n", + " # note that we provide two different .dat files, one for synth\n", + " # and one for synthesis. 
this is because URAM-based weights always\n", + " # need zero weights for synthesis, otherwise they get inferred\n", + " # as BRAM\n", + " weight_filename_rtl_synth = \"{}/memblock_synth_0.dat\".format(\n", + " code_gen_dir\n", + " )\n", + " weight_filename_rtl_sim = \"{}/memblock_sim_0.dat\".format(code_gen_dir)\n", + " # sim weights are always the true weights\n", + " self.make_weight_file(\n", + " weights, \"decoupled_verilog_dat\", weight_filename_rtl_sim\n", + " )\n", + " ram_style = self.get_nodeattr(\"ram_style\")\n", + " if ram_style == \"ultra\":\n", + " # UltraRAM must have no memory initializer, or only zeroes\n", + " # otherwise BRAM will be inferred instead of URAM\n", + " # as a workaround we provide a zero-weight init here\n", + " synth_weights = np.zeros_like(weights, dtype=np.float32)\n", + " else:\n", + " synth_weights = weights\n", + " self.make_weight_file(\n", + " synth_weights, \"decoupled_verilog_dat\", weight_filename_rtl_synth\n", + " )\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", + " currently no other parameter value is supported!\"\"\"\n", + " )\n", + "\n", + " # save thresholds in thresh.h\n", + " if len(self.onnx_node.input) > 2:\n", + " thresholds = model.get_initializer(self.onnx_node.input[2])\n", + " if thresholds is not None:\n", + " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", + " # use UINT32 threshold export for bipolar times bipolar\n", + " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", + " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", + " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", + " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", + " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", + " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", + " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", + " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", + " # get computed threshold datatype from attribute\n", + " tdt = DataType[self.get_nodeattr(\"accDataType\")]\n", + "\n", + " assert np.vectorize(tdt.allowed)(\n", + " threshold_tensor\n", + " ).all(), \"Thresholds in %s can't be expressed with type %s\" % (\n", + " self.onnx_node.name,\n", + " str(tdt),\n", + " )\n", + " thresholds_hls_code = numpy_to_hls_code(\n", + " threshold_tensor, tdt, \"thresholds\", False, True\n", + " )\n", + " # write thresholds into thresh.h\n", + " f_thresh = open(\"{}/thresh.h\".format(code_gen_dir), \"w\")\n", + " tdt_hls = tdt.get_hls_datatype_str()\n", + " # use binary to export bipolar activations\n", + " export_odt = self.get_output_datatype()\n", + " if self.get_output_datatype() == DataType[\"BIPOLAR\"]:\n", + " export_odt = DataType[\"BINARY\"]\n", + " odt_hls = export_odt.get_hls_datatype_str()\n", + " f_thresh.write(\n", + " \"static ThresholdsActivation<{},{},{},{},{},{},{}> threshs \\\n", + " = \".format(\n", + " self.calc_tmem(),\n", + " self.get_nodeattr(\"PE\"),\n", + " threshold_tensor.shape[-1],\n", + " tdt_hls,\n", + " odt_hls,\n", + " self.get_nodeattr(\"ActVal\"),\n", + " \"comp::less_equal<%s, %s>\" % (tdt_hls, tdt_hls),\n", + " )\n", + " )\n", + " f_thresh.write(thresholds_hls_code)\n", + " f_thresh.close()\n", + "\n", + " def execute_node(self, context, graph):\n", + " mode = self.get_nodeattr(\"exec_mode\")\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " node = 
self.onnx_node\n", + "\n", + " # TODO ensure codegen dir exists\n", + " if mode == \"cppsim\":\n", + " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", + " elif mode == \"rtlsim\":\n", + " code_gen_dir = self.get_nodeattr(\"code_gen_dir_ipgen\")\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Invalid value for attribute exec_mode! Is currently set to: {}\n", + " has to be set to one of the following value (\"cppsim\", \"rtlsim\")\"\"\".format(\n", + " mode\n", + " )\n", + " )\n", + "\n", + " # create a npy file fore each input of the node (in_ind is input index)\n", + " in_ind = 0\n", + " for inputs in node.input:\n", + " # it is assumed that the first input of the node is the data input\n", + " # the second input are the weights\n", + " # the third input are the thresholds\n", + " if in_ind == 0:\n", + " assert (\n", + " str(context[inputs].dtype) == \"float32\"\n", + " ), \"\"\"Input datatype is\n", + " not float32 as expected.\"\"\"\n", + " expected_inp_shape = self.get_folded_input_shape()\n", + " reshaped_input = context[inputs].reshape(expected_inp_shape)\n", + " if self.get_input_datatype() == DataType[\"BIPOLAR\"]:\n", + " # store bipolar activations as binary\n", + " reshaped_input = (reshaped_input + 1) / 2\n", + " export_idt = DataType[\"BINARY\"]\n", + " else:\n", + " export_idt = self.get_input_datatype()\n", + " # make copy before saving the array\n", + " reshaped_input = reshaped_input.copy()\n", + " np.save(\n", + " os.path.join(code_gen_dir, \"input_{}.npy\".format(in_ind)),\n", + " reshaped_input,\n", + " )\n", + " elif in_ind > 2:\n", + " raise Exception(\"Unexpected input found for MatrixVectorActivation\")\n", + " in_ind += 1\n", + "\n", + " if mode == \"cppsim\":\n", + " # execute the precompiled model\n", + " super().exec_precompiled_singlenode_model()\n", + " # load output npy file\n", + " super().npy_to_dynamic_output(context)\n", + " # reinterpret binary output as bipolar where needed\n", + " if self.get_output_datatype() == DataType[\"BIPOLAR\"]:\n", + " out = context[node.output[0]]\n", + " out = 2 * out - 1\n", + " context[node.output[0]] = out\n", + " assert (\n", + " context[node.output[0]].shape == self.get_normal_output_shape()\n", + " ), \"cppsim did not produce expected output shape\"\n", + " elif mode == \"rtlsim\":\n", + " sim = self.get_rtlsim()\n", + " nbits = self.get_instream_width()\n", + " inp = npy_to_rtlsim_input(\n", + " \"{}/input_0.npy\".format(code_gen_dir), export_idt, nbits\n", + " )\n", + " super().reset_rtlsim(sim)\n", + " super().toggle_clk(sim)\n", + " if mem_mode == \"external\" or mem_mode == \"decoupled\":\n", + " wnbits = self.get_weightstream_width()\n", + " export_wdt = self.get_weight_datatype()\n", + " # we have converted bipolar weights to binary for export,\n", + " # so use it as such for weight generation\n", + " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", + " export_wdt = DataType[\"BINARY\"]\n", + " wei = npy_to_rtlsim_input(\n", + " \"{}/weights.npy\".format(code_gen_dir), export_wdt, wnbits\n", + " )\n", + " num_w_reps = np.prod(self.get_nodeattr(\"numInputVectors\"))\n", + " io_dict = {\n", + " \"inputs\": {\"in0\": inp, \"weights\": wei * num_w_reps},\n", + " \"outputs\": {\"out\": []},\n", + " }\n", + " self.rtlsim_multi_io(sim, io_dict)\n", + " output = io_dict[\"outputs\"][\"out\"]\n", + " else:\n", + " output = self.rtlsim(sim, inp)\n", + " odt = self.get_output_datatype()\n", + " target_bits = odt.bitwidth()\n", + " packed_bits = self.get_outstream_width()\n", + " out_npy_path = 
\"{}/output.npy\".format(code_gen_dir)\n", + " out_shape = self.get_folded_output_shape()\n", + " rtlsim_output_to_npy(\n", + " output, out_npy_path, odt, out_shape, packed_bits, target_bits\n", + " )\n", + "\n", + " # load and reshape output\n", + " output = np.load(out_npy_path)\n", + " oshape = self.get_normal_output_shape()\n", + " output = np.asarray([output], dtype=np.float32).reshape(*oshape)\n", + " context[node.output[0]] = output\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Invalid value for attribute exec_mode! Is currently set to: {}\n", + " has to be set to one of the following value (\"cppsim\", \"rtlsim\")\"\"\".format(\n", + " mode\n", + " )\n", + " )\n", + "\n", + " def global_includes(self):\n", + " self.code_gen_dict[\"$GLOBALS$\"] = ['#include \"weights.hpp\"']\n", + " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"activations.hpp\"']\n", + "\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " if mem_mode not in [\"const\", \"decoupled\", \"external\"]:\n", + " raise Exception(\n", + " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", + " currently no other parameter value is supported!\"\"\"\n", + " )\n", + " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"mvau.hpp\"']\n", + " if self.calc_tmem() != 0:\n", + " # TODO find a better way of checking for no pregenerated thresholds\n", + " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"thresh.h\"']\n", + "\n", + " def defines(self, var):\n", + " # Only ipgen mode: Make sure that SIMD parameter satisfies minimum requirements.\n", + " if var == \"ipgen\":\n", + " SIMD = self.get_nodeattr(\"SIMD\")\n", + " MW = self.get_nodeattr(\"MW\")\n", + " condition = SIMD >= (MW / 1024)\n", + " msg = (\n", + " f\"HLS synthesis of MatrixVectorActivation requires: \"\n", + " f\"SIMD >= MW / 1024. 
This is not fulfilled with: SIMD={SIMD} \"\n", + " f\"and MW={MW} for node: {self.onnx_node.name}.\"\n", + " )\n", + " assert condition, msg\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " numInputVectors = list(self.get_nodeattr(\"numInputVectors\"))\n", + " numReps = np.prod(numInputVectors)\n", + " self.code_gen_dict[\"$DEFINES$\"] = [\n", + " \"\"\"#define MW1 {}\\n #define MH1 {}\\n\n", + " #define SIMD1 {}\\n #define PE1 {}\\n #define WMEM1 {}\\n\n", + " #define TMEM1 {}\\n #define numReps {}\"\"\".format(\n", + " self.get_nodeattr(\"MW\"),\n", + " self.get_nodeattr(\"MH\"),\n", + " self.get_nodeattr(\"SIMD\"),\n", + " self.get_nodeattr(\"PE\"),\n", + " self.calc_wmem(),\n", + " self.calc_tmem(),\n", + " numReps,\n", + " )\n", + " ]\n", + " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " wdt = self.get_weight_datatype()\n", + " self.code_gen_dict[\"$DEFINES$\"].append(\n", + " \"#define WP1 {}\\n\".format(wdt.bitwidth())\n", + " )\n", + "\n", + " def read_npy_data(self):\n", + " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", + " dtype = self.get_input_datatype()\n", + " if dtype == DataType[\"BIPOLAR\"]:\n", + " # use binary for bipolar storage\n", + " dtype = DataType[\"BINARY\"]\n", + " elem_bits = dtype.bitwidth()\n", + " packed_bits = self.get_instream_width()\n", + " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", + " elem_hls_type = dtype.get_hls_datatype_str()\n", + " npy_type = \"float\"\n", + " npy_in = \"%s/input_0.npy\" % code_gen_dir\n", + " self.code_gen_dict[\"$READNPYDATA$\"] = []\n", + " # note: the innermost dim is reversed for the input\n", + " self.code_gen_dict[\"$READNPYDATA$\"].append(\n", + " 'npy2apintstream<%s, %s, %d, %s>(\"%s\", in0, false);'\n", + " % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in)\n", + " )\n", + "\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " wdt = self.get_weight_datatype()\n", + " elem_bits = wdt.bitwidth()\n", + " packed_bits = self.get_weightstream_width()\n", + " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", + " elem_hls_type = wdt.get_hls_datatype_str()\n", + " npy_type = \"float\"\n", + " npy_in = \"%s/weights.npy\" % code_gen_dir\n", + "\n", + " self.code_gen_dict[\"$READNPYDATA$\"].append(\n", + " 'npy2apintstream<%s, %s, %d, %s>(\"%s\", weights, false, numReps);'\n", + " % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in)\n", + " )\n", + "\n", + " def strm_decl(self):\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " self.code_gen_dict[\"$STREAMDECLARATIONS$\"] = []\n", + " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", + " 'hls::stream> in0 (\"in0\");'.format(self.get_instream_width())\n", + " )\n", + " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", + " 'hls::stream> out (\"out\");'.format(self.get_outstream_width())\n", + " )\n", + "\n", + " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", + " 'hls::stream> weights (\"weights\");'.format(\n", + " self.get_weightstream_width()\n", + " )\n", + " )\n", + "\n", + " def docompute(self):\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " map_to_hls_mult_style = {\n", + " \"auto\": \"ap_resource_dflt()\",\n", + " \"lut\": \"ap_resource_lut()\",\n", + " \"dsp\": \"ap_resource_dsp()\",\n", + " }\n", + " tmpl_args = self.get_template_param_values()\n", + " if self.calc_tmem() == 0:\n", + " odtype_hls_str = 
self.get_output_datatype().get_hls_datatype_str()\n", + " threshs = \"PassThroughActivation<%s>()\" % odtype_hls_str\n", + " else:\n", + " threshs = \"threshs\"\n", + " if mem_mode == \"const\":\n", + " self.code_gen_dict[\"$DOCOMPUTE$\"] = [\n", + " \"\"\"Matrix_Vector_Activate_Batch\n", + " (in0, out, weights, {}, numReps, {});\"\"\".format(\n", + " tmpl_args[\"TSrcI\"],\n", + " tmpl_args[\"TDstI\"],\n", + " tmpl_args[\"TWeightI\"],\n", + " threshs,\n", + " map_to_hls_mult_style[self.get_nodeattr(\"resType\")],\n", + " )\n", + " ]\n", + " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " wdt = self.get_weight_datatype()\n", + " if wdt == DataType[\"BIPOLAR\"]:\n", + " export_wdt = DataType[\"BINARY\"]\n", + " else:\n", + " export_wdt = wdt\n", + " wdtype_hls_str = export_wdt.get_hls_datatype_str()\n", + " self.code_gen_dict[\"$DOCOMPUTE$\"] = [\n", + " \"\"\"Matrix_Vector_Activate_Stream_Batch\n", + " (in0, out, weights, {}, numReps, {});\"\"\".format(\n", + " tmpl_args[\"TSrcI\"],\n", + " tmpl_args[\"TDstI\"],\n", + " tmpl_args[\"TWeightI\"],\n", + " wdtype_hls_str,\n", + " threshs,\n", + " map_to_hls_mult_style[self.get_nodeattr(\"resType\")],\n", + " )\n", + " ]\n", + "\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", + " currently no other parameter value is supported!\"\"\"\n", + " )\n", + "\n", + " def dataoutstrm(self):\n", + " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", + " dtype = self.get_output_datatype()\n", + " if dtype == DataType[\"BIPOLAR\"]:\n", + " # use binary for bipolar storage\n", + " dtype = DataType[\"BINARY\"]\n", + " elem_bits = dtype.bitwidth()\n", + " packed_bits = self.get_outstream_width()\n", + " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", + " elem_hls_type = dtype.get_hls_datatype_str()\n", + " npy_type = \"float\"\n", + " npy_out = \"%s/output.npy\" % code_gen_dir\n", + " shape = self.get_folded_output_shape()\n", + " shape_cpp_str = str(shape).replace(\"(\", \"{\").replace(\")\", \"}\")\n", + "\n", + " # note: the innermost dim is not reversed for the output\n", + " self.code_gen_dict[\"$DATAOUTSTREAM$\"] = [\n", + " 'apintstream2npy<%s, %s, %d, %s>(out, %s, \"%s\", false);'\n", + " % (\n", + " packed_hls_type,\n", + " elem_hls_type,\n", + " elem_bits,\n", + " npy_type,\n", + " shape_cpp_str,\n", + " npy_out,\n", + " )\n", + " ]\n", + "\n", + " def save_as_npy(self):\n", + " self.code_gen_dict[\"$SAVEASCNPY$\"] = []\n", + "\n", + " def blackboxfunction(self):\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " if mem_mode == \"const\":\n", + " self.code_gen_dict[\"$BLACKBOXFUNCTION$\"] = [\n", + " \"\"\"void {}(hls::stream> &in0,\n", + " hls::stream> &out\n", + " )\"\"\".format(\n", + " self.onnx_node.name,\n", + " self.get_instream_width(),\n", + " self.get_outstream_width(),\n", + " )\n", + " ]\n", + " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " self.code_gen_dict[\"$BLACKBOXFUNCTION$\"] = [\n", + " \"\"\"void {}(\n", + " hls::stream> &in0,\n", + " hls::stream> &weights,\n", + " hls::stream> &out\n", + " )\"\"\".format(\n", + " self.onnx_node.name,\n", + " self.get_instream_width(),\n", + " self.get_weightstream_width(),\n", + " self.get_outstream_width(),\n", + " )\n", + " ]\n", + "\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Please set mem_mode to \"const\" or \"decoupled\", currently no other\n", + " parameter value is supported!\"\"\"\n", + " )\n", + "\n", + " def pragmas(self):\n", + " 
mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " ram_style_thresholds = self.get_nodeattr(\"ram_style_thresholds\")\n", + " self.code_gen_dict[\"$PRAGMAS$\"] = [\n", + " \"#pragma HLS INTERFACE axis port=in0 name=in0_\" + self.hls_sname()\n", + " ]\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " \"#pragma HLS INTERFACE axis port=out name=out_\" + self.hls_sname()\n", + " )\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " \"#pragma HLS INTERFACE ap_ctrl_none port=return\"\n", + " )\n", + "\n", + " if mem_mode == \"const\":\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append('#include \"params.h\"')\n", + " # the weight tensor is ap_uint [PE][WMEM]\n", + " # partition for parallel access along the PE dimension (dim 1)\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " (\n", + " \"#pragma HLS ARRAY_PARTITION variable=weights.m_weights \"\n", + " \"complete dim=1\"\n", + " )\n", + " )\n", + " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " \"#pragma HLS INTERFACE axis port=weights name=weights_\"\n", + " + self.hls_sname()\n", + " )\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " \"#pragma HLS stream depth=8 variable=weights\"\n", + " )\n", + "\n", + " else:\n", + " raise Exception(\n", + " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or external,\n", + " currently no other parameter value is supported!\"\"\"\n", + " )\n", + "\n", + " # the threshold tensor is acc_type [PE][TMEM][N_THRES]\n", + " # partition for parallel access along PE and N_THRES\n", + " # dimensions (dims 1 and 3)\n", + " if self.calc_tmem() != 0:\n", + " # TODO find a better way of checking for no pregenerated thresholds\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " (\n", + " \"#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds \"\n", + " \"complete dim=1\"\n", + " )\n", + " )\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " (\n", + " \"#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds \"\n", + " \"complete dim=3\"\n", + " )\n", + " )\n", + " # add resource pragma for thresholds if set\n", + " if ram_style_thresholds == \"distributed\":\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " (\n", + " \"#pragma HLS RESOURCE variable=threshs.m_thresholds \"\n", + " \"core=ROM_2P_LUTRAM\"\n", + " )\n", + " )\n", + " elif ram_style_thresholds == \"block\":\n", + " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", + " (\n", + " \"#pragma HLS RESOURCE variable=threshs.m_thresholds \"\n", + " \"core=ROM_2P_BRAM\"\n", + " )\n", + " )\n", + " elif ram_style_thresholds == \"auto\":\n", + " # no pragma needed\n", + " pass\n", + " else:\n", + " raise Exception(\n", + " \"Unrecognized ram_style_thresholds value:\" + ram_style_thresholds\n", + " )\n", + "\n", + " def code_generation_ipi(self):\n", + " cmd = []\n", + " # add streamer if needed\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " if mem_mode == \"decoupled\":\n", + " runtime_writable = self.get_nodeattr(\"runtime_writeable_weights\") == 1\n", + " if self.get_nodeattr(\"ram_style\") == \"ultra\":\n", + " assert (\n", + " runtime_writable == 1\n", + " ), \"Layer with URAM weights must have runtime_writeable_weights=1\"\n", + " node_name = self.onnx_node.name\n", + " sname = self.hls_sname()\n", + " # create a hierarchy for this layer, with the same port names\n", + " clk_name = self.get_verilog_top_module_intf_names()[\"clk\"][0]\n", + " rst_name = self.get_verilog_top_module_intf_names()[\"rst\"][0]\n", + " dout_name = 
self.get_verilog_top_module_intf_names()[\"m_axis\"][0][0]\n", + " din_name = self.get_verilog_top_module_intf_names()[\"s_axis\"][0][0]\n", + " cmd.append(\"create_bd_cell -type hier %s\" % node_name)\n", + " cmd.append(\"create_bd_pin -dir I -type clk /%s/%s\" % (node_name, clk_name))\n", + " cmd.append(\"create_bd_pin -dir I -type rst /%s/%s\" % (node_name, rst_name))\n", + " cmd.append(\n", + " \"create_bd_intf_pin -mode Master \"\n", + " \"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s\"\n", + " % (node_name, dout_name)\n", + " )\n", + " cmd.append(\n", + " \"create_bd_intf_pin -mode Slave \"\n", + " \"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s\" % (node_name, din_name)\n", + " )\n", + " # instantiate the hls ip\n", + " cmd.append(\n", + " \"create_bd_cell -type ip -vlnv %s /%s/%s\"\n", + " % (self.get_nodeattr(\"ip_vlnv\"), node_name, node_name)\n", + " )\n", + " # instantiate a streamer and connect it to the HLS IP\n", + " strm_vlnv = \"xilinx.com:user:memstream:1.0\"\n", + " strm_inst = node_name + \"_wstrm\"\n", + " cmd.append(\n", + " \"create_bd_cell -type ip -vlnv %s /%s/%s\"\n", + " % (strm_vlnv, node_name, strm_inst)\n", + " )\n", + " cmd.append(\n", + " \"set_property -dict [list \"\n", + " \"CONFIG.NSTREAMS {1} \"\n", + " \"CONFIG.MEM_DEPTH {%d} \"\n", + " \"CONFIG.MEM_WIDTH {%d} \"\n", + " \"CONFIG.MEM_INIT {%s} \"\n", + " \"CONFIG.RAM_STYLE {%s} \"\n", + " \"CONFIG.STRM0_DEPTH {%d} \"\n", + " \"CONFIG.STRM0_WIDTH {%d} \"\n", + " \"CONFIG.STRM0_OFFSET {0} \"\n", + " \"] [get_bd_cells /%s/%s]\"\n", + " % (\n", + " self.calc_wmem(),\n", + " self.get_weightstream_width_padded(),\n", + " self.get_nodeattr(\"code_gen_dir_ipgen\") + \"/\",\n", + " self.get_nodeattr(\"ram_style\"),\n", + " self.calc_wmem(),\n", + " self.get_weightstream_width_padded(),\n", + " node_name,\n", + " strm_inst,\n", + " )\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_intf_net [get_bd_intf_pins %s/%s/m_axis_0] \"\n", + " \"[get_bd_intf_pins %s/%s/weights_%s]\"\n", + " % (node_name, strm_inst, node_name, node_name, sname)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]\"\n", + " % (node_name, rst_name, node_name, strm_inst)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]\"\n", + " % (node_name, clk_name, node_name, strm_inst)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]\"\n", + " % (node_name, rst_name, node_name, node_name, rst_name)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]\"\n", + " % (node_name, clk_name, node_name, node_name, clk_name)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", + " \"[get_bd_intf_pins %s/%s/%s]\"\n", + " % (node_name, din_name, node_name, node_name, din_name)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", + " \"[get_bd_intf_pins %s/%s/%s]\"\n", + " % (node_name, dout_name, node_name, node_name, dout_name)\n", + " )\n", + " if runtime_writable:\n", + " # expose axi lite interface for writeable weights\n", + " axilite_name = self.get_verilog_top_module_intf_names()[\"axilite\"][0]\n", + " cmd.append(\n", + " \"create_bd_intf_pin -mode Slave \"\n", + " \"-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s\"\n", + " % (node_name, axilite_name)\n", + " )\n", + " cmd.append(\n", + " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", + " \"[get_bd_intf_pins %s/%s/%s]\"\n", + " 
% (node_name, axilite_name, node_name, strm_inst, axilite_name)\n", + " )\n", + " # TODO calculate and pass in segment size here\n", + " cmd.append(\"assign_bd_address\")\n", + " cmd.append(\"save_bd_design\")\n", + " elif mem_mode == \"const\" or mem_mode == \"external\":\n", + " # base class impl sufficient for const/external modes\n", + " return super().code_generation_ipi()\n", + " else:\n", + " raise Exception(\"Unrecognized mem_mode for MatrixVectorActivation\")\n", + " return cmd\n", + "\n", + " def get_verilog_top_module_intf_names(self):\n", + " intf_names = super().get_verilog_top_module_intf_names()\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " sname = self.hls_sname()\n", + " if mem_mode == \"external\":\n", + " intf_names[\"s_axis\"].append(\n", + " (\"weights_\" + sname, self.get_weightstream_width_padded())\n", + " )\n", + " if mem_mode == \"decoupled\":\n", + " # only expose axilite interface if attribute is set\n", + " runtime_writable = self.get_nodeattr(\"runtime_writeable_weights\") == 1\n", + " if runtime_writable:\n", + " intf_names[\"axilite\"] = [\"s_axilite\"]\n", + " return intf_names\n", + "\n", + " def get_op_and_param_counts(self):\n", + " in_features = self.get_nodeattr(\"MW\")\n", + " out_features = self.get_nodeattr(\"MH\")\n", + " weight_bits = self.get_weight_datatype().bitwidth()\n", + " inp_bits = self.get_input_datatype().bitwidth()\n", + " num_inp_vec = self.get_nodeattr(\"numInputVectors\")\n", + " num_repetitions = int(np.prod(num_inp_vec))\n", + " mac_count = in_features * out_features * num_repetitions\n", + " # cannonicalize op type: highest bitwidth operand first s.t.\n", + " # e.g. mac_8bx4b and mac_4bx8b don't appear as two different op types\n", + " bw1 = min(inp_bits, weight_bits)\n", + " bw2 = max(inp_bits, weight_bits)\n", + " mac_op_type = \"op_mac_%dbx%db\" % (bw1, bw2)\n", + " weight_param_type = \"param_weight_%db\" % (weight_bits)\n", + " weight_count = in_features * out_features\n", + " ret_dict = {mac_op_type: mac_count, weight_param_type: weight_count}\n", + " if self.get_nodeattr(\"noActivation\") == 0:\n", + " tdt = DataType[self.get_nodeattr(\"accDataType\")]\n", + " thres_bits = tdt.bitwidth()\n", + " thres_param_type = \"param_threshold_%db\" % (thres_bits)\n", + " thres_count = out_features\n", + " ret_dict[thres_param_type] = thres_count\n", + " return ret_dict\n", + "\n", + " def derive_characteristic_fxns(self, period):\n", + " n_inps = np.prod(self.get_folded_input_shape()[:-1])\n", + " io_dict = {\n", + " \"inputs\": {\n", + " \"in0\": [0 for i in range(n_inps)],\n", + " },\n", + " \"outputs\": {\"out\": []},\n", + " }\n", + " mem_mode = self.get_nodeattr(\"mem_mode\")\n", + " if mem_mode in [\"decoupled\", \"external\"]:\n", + " n_weight_inps = self.calc_wmem()\n", + " num_w_reps = np.prod(self.get_nodeattr(\"numInputVectors\"))\n", + " io_dict[\"inputs\"][\"weights\"] = [\n", + " 0 for i in range(num_w_reps * n_weight_inps)\n", + " ]\n", + " super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict)\n", + "\n" + ] + } + ], + "source": [ + "#To view the source code of the matrix vector activation function\n", + "from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation\n", + "showSrc(MatrixVectorActivation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Modify Parameters\n", + "\n", + "We now modify the parallelization attributes of the first network layer to reduce its overall latency." 
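Before touching the attributes, it can help to print the matrix dimensions and the current folding of every `MatrixVectorActivation` node, so the effect of the new `PE`/`SIMD` values is easy to verify. The cell below is a small sketch added for illustration and is not part of the original notebook: it assumes `model` is the `ModelWrapper` loaded earlier and that `getCustomOp` is importable from `qonnx.custom_op.registry`, and it reuses the same `MW`, `MH`, `SIMD` and `PE` node attributes that appear in the source listing above.

```python
# Illustrative sketch: report per-layer dimensions and total folding,
# where total folding = (MW / SIMD) * (MH / PE) for a MatrixVectorActivation layer.
from qonnx.custom_op.registry import getCustomOp  # assumed import path

for node in model.get_nodes_by_op_type("MatrixVectorActivation"):
    inst = getCustomOp(node)
    mw = inst.get_nodeattr("MW")      # matrix width  (input features)
    mh = inst.get_nodeattr("MH")      # matrix height (output features)
    simd = inst.get_nodeattr("SIMD")  # input-side parallelism
    pe = inst.get_nodeattr("PE")      # output-side parallelism
    total_fold = (mw // simd) * (mh // pe)
    print(f"{node.name}: MW={mw} MH={mh} SIMD={simd} PE={pe} -> total folding {total_fold}")
```

Running this once before and once after the configuration cell below makes the folding reduction directly visible.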
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\") \n", + "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", + "config = [\n", + " (2, 5, [16], [64], \"block\"),\n", + " (1, 1, [64], [64], \"auto\"),#8,8\n", + " (1, 1, [64], [64], \"auto\"),#8,8\n", + " (1, 1, [64], [1], \"distributed\"),\n", + "]\n", + "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n", + " fcl_inst = getCustomOp(fcl)\n", + " fcl_inst.set_nodeattr(\"PE\", pe)\n", + " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", + " fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n", + " fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n", + " fcl_inst.set_nodeattr(\"ram_style\", ramstyle)\n", + " num_inp_vec = fcl_inst.get_nodeattr(\"numInputVectors\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We again save the model and view it. On expanding the first `MatrixVectorActivation` we can view the updated `PE` & `SIMD` parameters for that layer." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping http://0.0.0.0:5901\n", + "Serving './cybsec_PE_SIMD_modified.onnx' at http://0.0.0.0:5901\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.save(\"./cybsec_PE_SIMD_modified.onnx\")\n", + "showInNetron(\"./cybsec_PE_SIMD_modified.onnx\",localhost_url='xirxlabs53')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the above total folding formula, we have reduced the total folding of our layer from `600 x 64` to `120 x 32`. Hence, resulting in an estimated `10x` decrease in the execution latency of our layer. \n", + "This can be observed in the new estimated clock cycles." 
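To make the `10x` figure above concrete, the arithmetic can be spelled out directly. This is an illustrative sketch rather than part of the original flow; the `600`/`64` layer dimensions and the `PE=2`, `SIMD=5` choice are the ones used in the configuration cell above.

```python
# Worked check of the folding reduction for the first layer.
mw, mh = 600, 64        # first-layer matrix dimensions (MW x MH)
simd, pe = 5, 2         # parallelization chosen in the config cell above

fold_default = mw * mh                 # SIMD=1, PE=1 -> 600 * 64 = 38400
fold_new = (mw // simd) * (mh // pe)   # 120 * 32 = 3840

print(fold_default, fold_new, fold_default // fold_new)  # 38400 3840 10
```

Since the estimated latency of a layer scales with its total folding, this corresponds to the roughly 10x reduction in expected clock cycles reported by `exp_cycles_per_layer` in the next cells.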
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "cycles_dict_updated = []\n", + "cycles_dict_updated = exp_cycles_per_layer(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA28AAAHWCAYAAADglbFoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABknklEQVR4nO3de3zO9f/H8edlszls1+a4mS2nFcYQwnIsMoz4pvqSmFBh9EVJvt9y6qD0LVLR6Zvp+yPH6CA0pyGrhDkTIsJG2OY4s71/f7jt83XZsM3m2sXjfrtdt7ren/fn83l9Ptf1nj33OdmMMUYAAAAAgEKtiLMLAAAAAADcGOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDUCh16pVK7Vq1crZZeSrAwcOyGazKTo6ulAtKzvR0dGy2Wz69ddfC2T5+amg98W17NmzR23btpWPj49sNpsWLlx4S9d/K7Rq1Uq1a9d2dhmFWub379///nee5rfZbBozZkz+FgXgtkJ4A5Bnmb/UX+v1008/5XhZO3bs0JgxY3TgwIGCKzgPpkyZcsuDAFxPZGSktm7dqtdff13//e9/1bBhQ2eXdNs7cuSIxowZo/j4eGeXAgC3jLuzCwDg+saNG6cqVapkaQ8ODs7xMnbs2KGxY8eqVatWqly5ssO0H3744WZLzLMpU6aobNmy6t27t9NqQOF2/vx5xcXF6V//+pcGDRrk7HLuGEeOHNHYsWNVuXJl1atXz9nlAMAtQXgDcNPat29foEcaPDw8CmzZwM06fvy4JMnX1zfflnn27FmVLFky35aHgpWRkaGLFy86u4wCdeHCBXl4eKhIEU7aApyJEQjglpg1a5YaNGggb29v2e12hYaG6r333pN0+fTLxx57TJL0wAMPWKddrlq1SlLWa95WrVolm82mOXPmaOzYsapYsaK8vb316KOPKjk5WampqRoyZIjKly8vLy8vPfXUU0pNTXWoZ9q0aXrwwQdVvnx5eXp6KiQkRFOnTnXoU7lyZW3fvl2xsbFWTVfWkZSUpCFDhigoKEienp4KDg7WW2+9pYyMDIflJCUlqXfv3vLx8ZGvr68iIyOVlJSU432XlJSkoUOHqnLlyvL09FRgYKB69eqlv/7667rzrVixQs2bN1fJkiXl6+urzp07a+fOnVn6HT58WH379lVAQIA8PT1VpUoVDRgw4Lq/jJ46dUqNGjVSYGCgdu/enef6z5w5o5IlS+of//hHlvn+/PNPubm5afz48Te9L3bt2qVHH31UpUuXVrFixdSwYUN98803Dn3S0tI0duxY3X333SpWrJjKlCmjZs2aKSYm5prLHTNmjCpVqiRJGj58uGw2m8OR402bNql9+/ay2+3y8vJS69ats5xOnHn6cWxsrAYOHKjy5csrMDDwutuTmpqq0aNHKzg4WJ6engoKCtKLL76Yp+95psWLF6tly5bWGL3vvvs0c+bMLP127NihBx54QCVKlFDFihU1YcKE69aayWazadCgQVq4cKFq164tT09P1apVS0uWLMnS9/Dhw+rTp4/8/Pysfp9//rk1fdWqVbrvvvskSU899ZQ1PqOjozV58mS5ubk5jLF33nlHNptNw4YNs9rS09Pl7e2tESNGWG1nz57V888/b43p6tWr69///reMMdluy4wZM1SrVi15enpmux2SZIzRM888Iw8PD3311Vc52leZ/vjjDw0cOFDVq1dX8eLFVaZMGT322GMOp5f//vvvstlsmjhxYpb5161bJ5vNpi+//NJqu9G+lf73M3bWrFl6+eWXVbFiRZUoUUIpKSm5qh9A/uPIG4CblpycnOWXZ5vNpjJlykiSYmJi1L17d7Vu3VpvvfWWJGnnzp368ccf9Y9//EMtWrTQc889p8mTJ+uf//ynatasKUnWf69l/PjxKl68uF566SXt3btX77//vooWLaoiRYro1KlTGjNmjH766SdFR0erSpUqGjVqlDXv1KlTVatWLT388MNyd3fXt99+q4EDByojI0NRUVGSpEmTJmnw4MHy8vLSv/71L0mSn5+fJOncuXNq2bKlDh8+rGeffVZ33XWX1q1bp5EjR+ro0aOaNGmSpMu/uHXu3Flr165V//79VbNmTS1YsECRkZE52rdnzpxR8+bNtXPnTvXp00f169fXX3/9pW+++UZ//vmnypYtm+18y5YtU/v27VW1alWNGTNG58+f1/vvv6+mTZtq48aNVsA4cuSIGjVqpKSkJD3zzDOqUaOGDh8+rHnz5uncuXPZHvX866+/9NBDD+nkyZOKjY1VtWrV8lx/vXr19Le//U2zZ8/Wu+++Kzc3N2veL7/8UsYY9ejR46b2xfbt29W0aVNVrFhRL730kkqWLKk5c+aoS5cumj9/vv72t79JuhzExo8fr379+qlRo0ZKSUnRr7/+qo0bN+qhhx7KdtmPPPKIfH19NXToUHXv3l0dOnSQl5eXtd7mzZvLbrfrxRdfVNGiRfXxxx+rVatWio2NVePGjR2WNXDgQJUrV06jRo3S2bNnr7lPMzIy9PDDD2vt2rV65plnVLNmTW3dulUTJ07Ub7/95nCzlJx8z6XLAbJPnz6qVauWRo4cKV9fX23atElLlizRE088YfU7deqU2rVrp0ceeUSPP/645s2bpxEjRig0NFTt27e/Zs2Z1q5dq6+++koDBw6Ut7e3Jk+erK5du+rgwYPWz4vExEQ1adLECkjlypXT4sWL1bdvX6WkpGjIkCGqWbOmxo0bp1GjRumZZ55R8+bNJUn333+/kpOTlZGRobVr16pjx46SpDVr1qhIkSJas2aNVcumTZt05swZtWjRQtLlsfrwww9r5cqV6tu3r+rVq6elS5dq+PDhOnz4cJZwtGLFCs2ZM0eDBg1S2bJls5zuLV0OiH369NHs2bO1YMECRURE3HAfXWn9+vVat26dunXrpsDAQB04cEBTp05Vq1attGPHDpUoUUJVq1ZV06ZNNWPGDA0dOtRh/hkzZsjb21udO3fO8b690quvvioPDw+98MILSk1N5SwIoDAwAJBH06ZNM5KyfXl6elr9/vGPfxi73W4uXbp0zWXNnTvXSDIrV67MMq1ly5amZcuW1vuVK1caSaZ27drm4sWLVnv37t2NzWYz7du3d5g/LCzMVKpUyaH
t3LlzWdYTHh5uqlat6tBWq1Yth3VnevXVV03JkiXNb7/95tD+0ksvGTc3N3Pw4EFjjDELFy40ksyECROsPpcuXTLNmzc3ksy0adOyLPtKo0aNMpLMV199lWVaRkaGMcaY/fv3Z1lWvXr1TPny5c2JEyests2bN5siRYqYXr16WW29evUyRYoUMevXr7/m8jM/5/Xr15ujR4+aWrVqmapVq5oDBw5ct/ac1r906VIjySxevNhhep06dRz2fV73RevWrU1oaKi5cOGCQ//777/f3H333VZb3bp1TURExA236WqZ63z77bcd2rt06WI8PDzMvn37rLYjR44Yb29v06JFC6stc/82a9bsumMk03//+19TpEgRs2bNGof2jz76yEgyP/74o9WWk+95UlKS8fb2No0bNzbnz5936Ju5X425PA4lmS+++MJqS01NNf7+/qZr1643rFuS8fDwMHv37rXaNm/ebCSZ999/32rr27evqVChgvnrr78c5u/WrZvx8fGxtmn9+vXZjqH09HRjt9vNiy++aG1DmTJlzGOPPWbc3NzM6dOnjTHGvPvuu6ZIkSLm1KlTxpj/jdXXXnvNYXmPPvqosdlsDnVLMkWKFDHbt2936HvldyEtLc38/e9/N8WLFzdLly694f7JXO7o0aOt99l9fnFxcVk+h48//thIMjt37rTaLl68aMqWLWsiIyOttpzu28yfsVWrVs22BgDOw2mTAG7ahx9+qJiYGIfX4sWLrem+vr46e/bsdU8/y4tevXqpaNGi1vvGjRvLGKM+ffo49GvcuLEOHTqkS5cuWW3Fixe3/j/zyGHLli31+++/Kzk5+Ybrnjt3rpo3b65SpUrpr7/+sl5t2rRRenq6Vq9eLUn6/vvv5e7urgEDBljzurm5afDgwTnaxvnz56tu3brW0aEr2Wy2bOc5evSo4uPj1bt3b5UuXdpqr1Onjh566CF9//33ki4fwVm4cKE6deqU7TWLVy//zz//VMuWLZWWlqbVq1dbpwvebP1t2rRRQECAZsyYYU3btm2btmzZoieffDJXy7rayZMntWLFCj3++OM6ffq09TmdOHFC4eHh2rNnjw4fPizp8vd0+/bt2rNnzw2360bS09P1ww8/qEuXLqpatarVXqFCBT3xxBNau3ZtllPQnn76aYcjj9cyd+5c1axZUzVq1HD47j344IOSpJUrV1p9c/I9j4mJ0enTp/XSSy+pWLFiDuu6er96eXk5fCYeHh5q1KiRfv/99xvWLV3+rK88UlunTh3Z7XZrfmOM5s+fr06dOskY47B94eHhSk5O1saNG6+7jiJFiuj++++3xuDOnTt14sQJvfTSSzLGKC4uTtLlo3G1a9e2rlX8/vvv5ebmpueee85hec8//7yMMQ4/0ySpZcuWCgkJybaGixcv6rHHHtN3332n77//Xm3bts3R/rnalZ9fWlqaTpw4oeDgYPn6+jrsh8cff1zFihVzGENLly7VX3/9ZX1eedm3kZGRDjUAcD5OmwRw0xo1anTdG5YMHDhQc+bMUfv27VWxYkW1bdtWjz/+uNq1a3dT673rrrsc3vv4+EiSgoKCsrRnZGQoOTnZOjXrxx9/1OjRoxUXF6dz58459E9OTraWdS179uzRli1bVK5cuWynHzt2TNLla1YqVKhgnUqXqXr16jfYusv27dunrl275qhvpj/++OOa66hZs6aWLl2qs2fP6syZM0pJScnxs7t69uwpd3d37dy5U/7+/jmaJyf1FylSRD169NDUqVN17tw5lShRQjNmzFCxYsWsayFzuqyr7d27V8YYvfLKK3rllVey7XPs2DFVrFhR48aNU+fOnXXPPfeodu3aateunXr27Kk6derkap3S5ZuYnDt37pqfQUZGhg4dOqRatWpZ7dndsTU7e/bs0c6dO2/43ZNy9j3ft2+fJOXoexAYGJgl0JUqVUpbtmzJUe1Xj9nM+U+dOiXp8n5LSkrSJ598ok8++STbZVy5fdfSvHlz63ThNWvWqEKFCqpfv77q1q2rNWvW6KGHHtLatWv1+OOPW/P88ccfCggIkLe3t8OyMk/fzhxXma73eY0fP15nzpzR4sWLb+oZlefPn9f48eM1bdo0HT582OHauyv/yOTr66tOnTpp5syZevXVVyVdPmWyYsWKVqjPy77N6XcSwK1DeANQ4MqXL6/4+HgtXbpUixcv1uLFizVt2jT16tVL06dPz/Nyr3WU4lrtmb/47Nu3T61bt1aNGjX07rvvKigoSB4eHvr+++81ceLELDccyU5GRoYeeughvfjii9lOv+eee3K4Fa7jkUce0RdffKH33nvP4SYi+aFXr156++23tXDhQnXv3l0zZ85Ux44dbxiibyTzs3zhhRcUHh6ebZ/MR1q0aNFC+/bt09dff60ffvhBn332mSZOnKiPPvpI/fr1u6k6ciKnRzgyMjIUGhqqd999N9vpmX+8yI/v+dVuNLZudv7Mmp588slrXheakzDdrFkzpaWlKS4uTmvWrLGuiWvevLnWrFmjXbt26fjx41Z7Xlzv8woPD9eSJUs0YcIEtWrVKssRzZwaPHiwpk2bpiFDhigsLMx6CHy3bt2yfH69evXS3LlztW7dOoWGhuqbb77RwIEDrbtD5mXfctQNKHwIbwBuCQ8PD3Xq1EmdOnVSRkaGBg4cqI8//livvPKKgoODr3naW0H49ttvlZqaqm+++cbhSMCVp5tlulZd1apV05kzZ9SmTZvrrqtSpUpavny5zpw543D07UZ3aLxyPdu2bctR3yvXea117Nq1S2XLllXJkiVVvHhx2e32HC9/8ODBCg4O1qhRo+Tj46OXXnop3+qvXbu27r33Xs2YMUOBgYE6ePCg3n///Twt60qZpywWLVr0hp+VJJUuXVpPPfWUnnrqKetmFmPGjMl1eCtXrpxKlChxzc+gSJEiWY4Q51S1atW0efNmtW7d+rrjJqff88zTGLdt25arZzMWhHLlysnb21vp6ek3/Lyut+2NGjWSh4eH1qxZozVr1mj48OGSLgf0Tz/9VMuXL7feZ6pUqZKWLVum06dPOxx927VrlzU9p5o0aaL+/furY8eOeuyxx7RgwQK5u+f+V6558+YpMjJS77zzjtV24cKFbO9W265dO5UrV04zZsxQ48aNde7cOfXs2dOanpt9C6Dw4po3AAXuxIkTDu+LFCli/YU389bmmc+0ys0t9PMq86//V5+CNG3atCx9S5YsmW1Njz/+uOLi4rR06dIs05KSkqzr6zp06KBLly453J49PT09SzC5lq5du2rz5s1asGBBlmnXOtpRoUIF1atXT9OnT3eofdu2bfrhhx/UoUMHSZc/hy5duujbb7/Vr7/+mqPlv/LKK3rhhRc0cuTIa95yPq/19+zZUz/88IMmTZqkMmXKZLl7YV72Rfny5dWqVSt9/PHHOnr0aJbpmc9ok7J+T728vBQcHJzl9vs54ebmprZt2+rrr792uK17YmKiZs6cqWbNmslut+d6udLl797hw4f16aefZpl2/vx5606VOf2et23bVt7e3ho/fr
wuXLjgMC2nR9Tyi5ubm7p27ar58+dnG9Sv/Lyu9zOjWLFiuu+++/Tll1/q4MGDDkfezp8/r8mTJ6tatWqqUKGCNU+HDh2Unp6uDz74wGFZEydOlM1my9HdNK/Upk0bzZo1S0uWLFHPnj3zfKTz6s/g/fffV3p6epa+7u7u6t69u+bMmaPo6GiFhoY6HEnLzb4FUHhx5A3ATVu8eLH11+kr3X///apatar69eunkydP6sEHH1RgYKD++OMPvf/++6pXr551PUm9evXk5uamt956S8nJyfL09LSeT5Xf2rZtax0JfPbZZ3XmzBl9+umnKl++fJZf8Bs0aKCpU6fqtddeU3BwsMqXL68HH3xQw4cP1zfffKOOHTuqd+/eatCggc6ePautW7dq3rx5OnDggMqWLatOnTqpadOmeumll3TgwAGFhIToq6++ytFNUaTLzw6bN2+eHnvsMfXp00cNGjTQyZMn9c033+ijjz5S3bp1s53v7bffVvv27RUWFqa+fftajwrw8fHRmDFjrH5vvPGGfvjhB7Vs2dK67fzRo0c1d+5crV27NtsHT7/99ttKTk5WVFSUvL29HW5gcTP1P/HEE3rxxRe1YMECDRgwwOFmNDezLz788EM1a9ZMoaGhevrpp1W1alUlJiYqLi5Of/75pzZv3ixJCgkJUatWrdSgQQOVLl1av/76q+bNm6dBgwZdc/uu57XXXlNMTIyaNWumgQMHyt3dXR9//LFSU1Nz/Gy07PTs2VNz5sxR//79tXLlSjVt2lTp6enatWuX5syZo6VLl6phw4Y5/p7b7XZNnDhR/fr103333acnnnhCpUqV0ubNm3Xu3LmbOrU5L958802tXLlSjRs31tNPP62QkBCdPHlSGzdu1LJly3Ty5ElJl48Y+vr66qOPPpK3t7dKliypxo0bW9dpNW/eXG+++aZ8fHwUGhoq6XKYr169unbv3q3evXs7rLdTp0564IEH9K9//UsHDhxQ3bp19cMPP+jrr7/WkCFDrvtIjGvp0qWLdYq43W7Xxx9/nKv5O3bsqP/+97/y8fFRSEiI4uLitGzZMuva3av16tVLkydP1sqVK63Hslwpp/sWQCF2a29uCeB2cr1HBeiKW3jPmzfPtG3b1pQvX954eHiYu+66yzz77LPm6NGjDsv79NNPTdWqVY2bm5vDYwOu9aiAuXPnZlvP1be9Hz16tJFkjh8/brV98803pk6dOqZYsWKmcuXK5q233jKff/65kWT2799v9UtISDARERHG29vbSHKo4/Tp02bkyJEmODjYeHh4mLJly5r777/f/Pvf/3Z4hMGJEydMz549jd1uNz4+PqZnz55m06ZNOXpUQOb8gwYNMhUrVjQeHh4mMDDQREZGWrf7zu72+MYYs2zZMtO0aVNTvHhxY7fbTadOncyOHTuyLP+PP/4wvXr1MuXKlTOenp6matWqJioqyqSmpl5zv6anp5vu3bsbd3d3s3Dhwpuq/0odOnQwksy6devydV/s27fP9OrVy/j7+5uiRYuaihUrmo4dO5p58+ZZfV577TXTqFEj4+vra4oXL25q1KhhXn/9dYfPMjvXelSAMcZs3LjRhIeHGy8vL1OiRAnzwAMPZNm2a31vr+fixYvmrbfeMrVq1TKenp6mVKlSpkGDBmbs2LEmOTnZ6pfT73lm3/vvv9/6vjRq1Mh8+eWX1vSWLVuaWrVqZaklMjIyy6M4siPJREVFZWmvVKmSw+3sjTEmMTHRREVFmaCgIFO0aFHj7+9vWrdubT755BOHfl9//bUJCQkx7u7uWT73RYsWGUlZHh3Sr18/I8n85z//yVLL6dOnzdChQ01AQIApWrSoufvuu83bb7/t8MiE623Ltb4LU6ZMMZLMCy+8kO2+uXK5Vz4q4NSpU+app54yZcuWNV5eXiY8PNzs2rUr232WqVatWqZIkSLmzz//zHZ6TvbttX7GAnA+mzG3+JwIAACu4W9/+5u2bt2qvXv3OrsUwCXde++9Kl26tHVdH4DbC9e8AQAKhaNHj2rRokUON1kAkHO//vqr4uPj1atXL2eXAqCAcOQNAOBU+/fv148//qjPPvtM69ev1759+3L8HDkAl29GtGHDBr3zzjv666+/9Pvvv+f58QQACjeOvAEAnCo2NlY9e/bU/v37NX36dIIbkEvz5s3TU089pbS0NH355ZcEN+A2xpE3AAAAAHABHHkDAAAAABdAeAMAAAAAF8BDunMgIyNDR44ckbe3t2w2m7PLAQAAAOAkxhidPn1aAQEBKlLk1h4LI7zlwJEjRxQUFOTsMgAAAAAUEocOHVJgYOAtXSfhLQe8vb0lXf6A7Ha7k6sBAAAA4CwpKSkKCgqyMsKtRHjLgcxTJe12O+ENAAAAgFMup+KGJQAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4ALcnV0AABQGNpuzK3AuY5xdAW5HjCtnVwDgdsORNwAAAABwAYQ3AAAAAHABhDcAAAAAcAGENwAAAABwAYUmvL355puy2WwaMmSI1XbhwgVFRUWpTJky8vLyUteuXZWYmOgw38GDBxUREaESJUqofPnyGj58uC5duuTQZ9WqVapfv748PT0VHBys6OjoW7BFAAAAAJB/CkV4W79+vT7++GPVqVPHoX3o0KH69ttvNXfuXMXGxurIkSN65JFHrOnp6emKiIjQxYsXtW7dOk2fPl3R0dEaNWqU1Wf//v2KiIjQAw88oPj4eA0ZMkT9+vXT0qVLb9n2AQAAAMDNshnj3BvZnjlzRvXr19eUKVP02muvqV69epo0aZKSk5NVrlw5zZw5U48++qgkadeuXapZs6bi4uLUpEkTLV68WB07dtSRI0fk5+cnSfroo480YsQIHT9+XB4eHhoxYoQWLVqkbdu2Wevs1q2bkpKStGTJkhzVmJKSIh8fHyUnJ8tut+f/TgDgdNzS3NkV4HbEuHJ2BQAKgjOzgdOPvEVFRSkiIkJt2rRxaN+wYYPS0tIc2mvUqKG77rpLcXFxkqS4uDiFhoZawU2SwsPDlZKSou3bt1t9rl52eHi4tYzspKamKiUlxeEFAAAAAM7k1Id0z5o1Sxs3btT69euzTEtISJCHh4d8fX0d2v38/JSQkGD1uTK4ZU7PnHa9PikpKTp//ryKFy+eZd3jx4/X2LFj87xdAAAAAJDfnHbk7dChQ/rHP/6hGTNmqFixYs4qI1sjR45UcnKy9Tp06JCzSwIAAABwh3NaeNuwYYOOHTum+vXry93dXe7u7oqNjdXkyZPl7u4uPz8/Xbx4UUlJSQ7zJSYmyt/fX5Lk7++f5e6Tme9v1Mdut2d71E2SPD09ZbfbHV4AAAAA4ExOC2+tW7fW1q1bFR8fb70aNmyoHj16WP9ftGhRLV++3Jpn9+7dOnjwoMLCwiRJYWFh2rp1q44dO2b1iYmJk
d1uV0hIiNXnymVk9slcBgAAAAC4Aqdd8+bt7a3atWs7tJUsWVJlypSx2vv27athw4apdOnSstvtGjx4sMLCwtSkSRNJUtu2bRUSEqKePXtqwoQJSkhI0Msvv6yoqCh5enpKkvr3768PPvhAL774ovr06aMVK1Zozpw5WrRo0a3dYAAAAAC4CU69YcmNTJw4UUWKFFHXrl2Vmpqq8PBwTZkyxZru5uam7777TgMGDFBYWJhKliypyMhIjRs3zupTpUoVLVq0SEOHDtV7772nwMBAffbZZwoPD3fGJgEAAABAnjj9OW+ugOe8Abc/nkfl7ApwO2JcObsCAAXhjn7OGwAAAADgxghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAgr1Q7pxbTw7x9kVAAAAALcWR94AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABTg1vE2dOlV16tSR3W6X3W5XWFiYFi9ebE1v1aqVbDabw6t///4Oyzh48KAiIiJUokQJlS9fXsOHD9elS5cc+qxatUr169eXp6engoODFR0dfSs2DwAAAADyjbszVx4YGKg333xTd999t4wxmj59ujp37qxNmzapVq1akqSnn35a48aNs+YpUaKE9f/p6emKiIiQv7+/1q1bp6NHj6pXr14qWrSo3njjDUnS/v37FRERof79+2vGjBlavny5+vXrpwoVKig8PPzWbjAAAAAA5JHNGGOcXcSVSpcurbffflt9+/ZVq1atVK9ePU2aNCnbvosXL1bHjh115MgR+fn5SZI++ugjjRgxQsePH5eHh4dGjBihRYsWadu2bdZ83bp1U1JSkpYsWZLtclNTU5Wammq9T0lJUVBQkJKTk2W32/NvY2+CzebsCpyrcH1rcTtgTDm7AtyOGFfOrgBAQUhJSZGPj49TskGhueYtPT1ds2bN0tmzZxUWFma1z5gxQ2XLllXt2rU1cuRInTt3zpoWFxen0NBQK7hJUnh4uFJSUrR9+3arT5s2bRzWFR4erri4uGvWMn78ePn4+FivoKCg/NpMAAAAAMgTp542KUlbt25VWFiYLly4IC8vLy1YsEAhISGSpCeeeEKVKlVSQECAtmzZohEjRmj37t366quvJEkJCQkOwU2S9T4hIeG6fVJSUnT+/HkVL148S00jR47UsGHDrPeZR94AAAAAwFmcHt6qV6+u+Ph4JScna968eYqMjFRsbKxCQkL0zDPPWP1CQ0NVoUIFtW7dWvv27VO1atUKrCZPT095enoW2PIBAAAAILecftqkh4eHgoOD1aBBA40fP15169bVe++9l23fxo0bS5L27t0rSfL391diYqJDn8z3/v7+1+1jt9uzPeoGAAAAAIWR08Pb1TIyMhxuFnKl+Ph4SVKFChUkSWFhYdq6dauOHTtm9YmJiZHdbrdOvQwLC9Py5csdlhMTE+NwXR0AAAAAFHZOPW1y5MiRat++ve666y6dPn1aM2fO1KpVq7R06VLt27dPM2fOVIcOHVSmTBlt2bJFQ4cOVYsWLVSnTh1JUtu2bRUSEqKePXtqwoQJSkhI0Msvv6yoqCjrtMf+/fvrgw8+0Isvvqg+ffpoxYoVmjNnjhYtWuTMTQcAAACAXHFqeDt27Jh69eqlo0ePysfHR3Xq1NHSpUv10EMP6dChQ1q2bJkmTZqks2fPKigoSF27dtXLL79sze/m5qbvvvtOAwYMUFhYmEqWLKnIyEiH58JVqVJFixYt0tChQ/Xee+8pMDBQn332Gc94AwAAAOBSCt1z3gojZz7L4Vp4do6zK8DthjHl7ApwO2JcObsCAAWB57wBAAAAAK6L8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAuwKnhberUqapTp47sdrvsdrvCwsK0ePFia/qFCxcUFRWlMmXKyMvLS127dlViYqLDMg4ePKiIiAiVKFFC5cuX1/Dhw3Xp0iWHPqtWrVL9+vXl6emp4OBgRUdH34rNAwAAAIB849TwFhgYqDfffFMbNmzQr7/+qgcffFCdO3fW9u3bJUlDhw7Vt99+q7lz5yo2NlZHjhzRI488Ys2fnp6uiIgIXbx4UevWrdP06dMVHR2tUaNGWX3279+viIgIPfDAA4qPj9eQIUPUr18/LV269JZvLwAAAADklc0YY5xdxJVKly6tt99+W48++qjKlSunmTNn6tFHH5Uk7dq1SzVr1lRcXJyaNGmixYsXq2PHjjpy5Ij8/PwkSR999JFGjBih48ePy8PDQyNGjNCiRYu0bds2ax3dunVTUlKSlixZkqOaUlJS5OPjo+TkZNnt9vzf6Dyw2ZxdgXMVrm8tbgeMKWdXgNsR48rZFQAoCM7MBoXmmrf09HTNmjVLZ8+eVVhYmDZs2KC0tDS1adPG6lOjRg3dddddiouLkyTFxcUpNDTUCm6SFB4erpSUFOvoXVxcnMMyMvtkLiM7qampSklJcXgBAAAAgDM5Pbxt3bpVXl5e8vT0VP/+/bVgwQKFhIQoISFBHh4e8vX1dejv5+enhIQESVJCQoJDcMucnjnten1SUlJ0/vz5bGsaP368fHx8rFdQUFB+bCoAAAAA5JnTw1v16tUVHx+vn3/+WQMGDFBkZKR27Njh1JpGjhyp5ORk63Xo0CGn1gMAAAAA7s4uwMPDQ8HBwZKkBg0aaP369Xrvvff097//XRcvXlRSUpLD0bfExET5+/tLkvz9/fXLL784LC/zbpRX9rn6DpWJiYmy2+0qXrx4tjV5enrK09MzX7YPAAAAAPKD04+8XS0jI0Opqalq0KCBihYtquXLl1vTdu/erYMHDyosLEySFBYWpq1bt+rYsWNWn5iYGNntdoWEhFh9rlxGZp/MZQAAAACAK3DqkbeRI0eqffv2uuuuu3T69GnNnDlTq1at0tKlS+Xj46O+fftq2LBhKl26tOx2uwYPHqywsDA1adJEktS2bVuFhISoZ8+emjBhghISEvTyyy8rKirK
OnLWv39/ffDBB3rxxRfVp08frVixQnPmzNGiRYucuekAAAAAkCtODW/Hjh1Tr169dPToUfn4+KhOnTpaunSpHnroIUnSxIkTVaRIEXXt2lWpqakKDw/XlClTrPnd3Nz03XffacCAAQoLC1PJkiUVGRmpcePGWX2qVKmiRYsWaejQoXrvvfcUGBiozz77TOHh4bd8ewEAAAAgrwrdc94KI57zVvjwrUV+Y0w5uwLcjhhXzq4AQEHgOW8AAAAAgOsivAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAvIdXjbuHGjtm7dar3/+uuv1aVLF/3zn//UxYsX87U4AAAAAMBluQ5vzz77rH777TdJ0u+//65u3bqpRIkSmjt3rl588cV8LxAAAAAAkIfw9ttvv6levXqSpLlz56pFixaaOXOmoqOjNX/+/PyuDwAAAACgPIQ3Y4wyMjIkScuWLVOHDh0kSUFBQfrrr7/ytzoAAAAAgKQ8hLeGDRvqtdde03//+1/FxsYqIiJCkrR//375+fnle4EAAAAAgDyEt0mTJmnjxo0aNGiQ/vWvfyk4OFiSNG/ePN1///35XiAAAAAAIA/hrU6dOtq6dauSk5M1evRoq/3tt9/W9OnTc7Ws8ePH67777pO3t7fKly+vLl26aPfu3Q59WrVqJZvN5vDq37+/Q5+DBw8qIiJCJUqUUPny5TV8+HBdunTJoc+qVatUv359eXp6Kjg4WNHR0bnbcAAAAABwojw95y0pKUmfffaZRo4cqZMnT0qSduzYoWPHjuVqObGxsYqKitJPP/2kmJgYpaWlqW3btjp79qxDv6efflpHjx61XhMmTLCmpaenKyIiQhcvXtS6des0ffp0RUdHa9SoUVaf/fv3KyIiQg888IDi4+M1ZMgQ9evXT0uXLs3L5gMAAADALWczxpjczLBlyxa1bt1avr6+OnDggHbv3q2qVavq5Zdf1sGDB/XFF1/kuZjjx4+rfPnyio2NVYsWLSRdPvJWr149TZo0Kdt5Fi9erI4dO+rIkSPWNXcfffSRRowYoePHj8vDw0MjRozQokWLtG3bNmu+bt26KSkpSUuWLLlhXSkpKfLx8VFycrLsdnuety8/2WzOrsC5cvetBW6MMeXsCnA7Ylw5uwIABcGZ2SDXR96GDRump556Snv27FGxYsWs9g4dOmj16tU3VUxycrIkqXTp0g7tM2bMUNmyZVW7dm2NHDlS586ds6bFxcUpNDTU4WYp4eHhSklJ0fbt260+bdq0cVhmeHi44uLisq0jNTVVKSkpDi8AAAAAcCb33M6wfv16ffzxx1naK1asqISEhDwXkpGRoSFDhqhp06aqXbu21f7EE0+oUqVKCggI0JYtWzRixAjt3r1bX331lSQpISEhy10uM99n1nOtPikpKTp//ryKFy/uMG38+PEaO3ZsnrcFAAAAAPJbrsObp6dntkeifvvtN5UrVy7PhURFRWnbtm1au3atQ/szzzxj/X9oaKgqVKig1q1ba9++fapWrVqe13c9I0eO1LBhw6z3KSkpCgoKKpB1AQAAAEBO5Pq0yYcffljjxo1TWlqaJMlms+ngwYMaMWKEunbtmqciBg0apO+++04rV65UYGDgdfs2btxYkrR3715Jkr+/vxITEx36ZL739/e/bh+73Z7lqJt0OaDa7XaHFwAAAAA4U67D2zvvvKMzZ86ofPnyOn/+vFq2bKng4GB5e3vr9ddfz9WyjDEaNGiQFixYoBUrVqhKlSo3nCc+Pl6SVKFCBUlSWFiYtm7d6nCny5iYGNntdoWEhFh9li9f7rCcmJgYhYWF5apeAAAAAHCWXN9tMtPatWu1ZcsWnTlzRvXr189yQ5CcGDhwoGbOnKmvv/5a1atXt9p9fHxUvHhx7du3TzNnzlSHDh1UpkwZbdmyRUOHDlVgYKBiY2MlXX5UQL169RQQEKAJEyYoISFBPXv2VL9+/fTGG29IuvyogNq1aysqKkp9+vTRihUr9Nxzz2nRokUKDw+/YZ3cbbLw4Q5eyG+MKWdXgNsR48rZFQAoCM7MBnkOb/my8mv8VJ82bZp69+6tQ4cO6cknn9S2bdt09uxZBQUF6W9/+5tefvllhx31xx9/aMCAAVq1apVKliypyMhIvfnmm3J3/98lfatWrdLQoUO1Y8cOBQYG6pVXXlHv3r1zVCfhrfDhH0TkN8aUsyvA7Yhx5ewKABSEQh/eJk+enOMFPvfcczdVUGFEeCt8+AcR+Y0x5ewKcDtiXDm7AgAFodCHt5xciyZdPpL2+++/33RRhQ3hrfDhH0TkN8aUsyvA7Yhx5ewKABQEZ2aDHD0qYP/+/QVdBwAAAADgOnJ9t0kAAAAAwK2X6/DWtWtXvfXWW1naJ0yYoMceeyxfigIAAAAAOMp1eFu9erU6dOiQpb19+/ZavXp1vhQFAAAAAHCU6/B25swZeXh4ZGkvWrSoUlJS8qUoAAAAAICjXIe30NBQzZ49O0v7rFmzFBISki9FAQAAAAAc5ehuk1d65ZVX9Mgjj2jfvn168MEHJUnLly/Xl19+qblz5+Z7gQAAAACAPIS3Tp06aeHChXrjjTc0b948FS9eXHXq1NGyZcvUsmXLgqgRAAAAAO54OXpI952Oh3QXPnxrkd8YU86uALcjxpWzKwBQEJyZDXJ9zVtkZCR3lQQAAACAWyzX4S05OVlt2rTR3XffrTfeeEOHDx8uiLoAAAAAAFfIdXhbuHChDh8+rAEDBmj27NmqXLmy2rdvr3nz5iktLa0gagQAAACAO16uw5sklStXTsOGDdPmzZv1888/Kzg4WD179lRAQICGDh2qPXv25HedAAAAAHBHy1N4y3T06FHFxMQoJiZGbm5u6tChg7Zu3aqQkBBNnDgxv2oEAAAAgDtersNbWlqa5s+fr44dO6pSpUqaO3euhgwZoiNHjmj69OlatmyZ5syZo3HjxhVEvQAAAABwR8r1c94qVKigjIwMde/eXb/88ovq1auXpc8DDzwgX1/ffCgPAAAAACDlIbxNnDhRjz32mIoVK3bNPr6+vtq/f/9NFQYAAAAA+J8cnzaZnp6uLVu26NFHH80S3M6dO6ctW7YoIyMj3wsEAAAAAOQivP33v/9Vnz595OHhkWWah4eH+vTpo5kzZ+ZrcQAAAACAy3Ic3v7zn//ohRdekJubW5Zp7u7uevHFF/XJJ5/ka3EAAAAAgMtyHN52796tJk2aXHP6fffdp507d+ZLUQAAAAAARzkOb2fPnlVKSso1p58+fVrnzp3Ll6IAAAAAAI5yHN7uvvturVu37pr
T165dq7vvvjtfigIAAAAAOMpxeHviiSf08ssva8uWLVmmbd68WaNGjdITTzyRr8UBAAAAAC6zGWNMTjqmpaWpbdu2Wrt2rdq0aaMaNWpIknbt2qVly5apadOmiomJUdGiRQu0YGdISUmRj4+PkpOTZbfbnV2OJMlmc3YFzpWzby2Qc4wpZ1eA2xHjytkVACgIzswGOQ5v0uUAN3HiRM2cOVN79uyRMUb33HOPnnjiCQ0ZMiTbxwjcDghvhQ//ICK/MaacXQFuR4wrZ1cAoCC4THi7UxHeCh++tchvjClnV4DbEePK2RUAKAjOzAY5vuYNAAAAAOA8hDcAAAAAcAGENwAAAABwAYQ3AAAAAHABuQ5v27Ztu+a0hQsX3kwtAAAAAIBryHV4Cw8P1/79+7O0z58/Xz169MiXogAAAAAAjnId3vr166c2bdooISHBaps9e7Z69eql6OjoXC1r/Pjxuu++++Tt7a3y5curS5cu2r17t0OfCxcuKCoqSmXKlJGXl5e6du2qxMREhz4HDx5URESESpQoofLly2v48OG6dOmSQ59Vq1apfv368vT0VHBwcK5rBQAAAABnynV4Gzt2rDp06KA2bdro5MmTmjlzpp566il98cUXeuyxx3K1rNjYWEVFRemnn35STEyM0tLS1LZtW509e9bqM3ToUH377beaO3euYmNjdeTIET3yyCPW9PT0dEVEROjixYtat26dpk+frujoaI0aNcrqs3//fkVEROiBBx5QfHy8hgwZon79+mnp0qW53XwAAAAAcIo8P6S7R48eWr9+vQ4fPqyZM2eqc+fON13M8ePHVb58ecXGxqpFixZKTk5WuXLlNHPmTD366KOSpF27dqlmzZqKi4tTkyZNtHjxYnXs2FFHjhyRn5+fJOmjjz7SiBEjdPz4cXl4eGjEiBFatGiRw/V63bp1U1JSkpYsWXLDunhId+HDg0+R3xhTzq4AtyPGlbMrAFAQnJkN3HPS6ZtvvsnS9sgjj2jNmjXq3r27bDab1efhhx/OczHJycmSpNKlS0uSNmzYoLS0NLVp08bqU6NGDd11111WeIuLi1NoaKgV3KTL1+UNGDBA27dv17333qu4uDiHZWT2GTJkSLZ1pKamKjU11XqfkpKS520CAAAAgPyQo/DWpUuXa077/PPP9fnnn0uSbDab0tPT81RIRkaGhgwZoqZNm6p27dqSpISEBHl4eMjX19ehr5+fn3XNXUJCgkNwy5yeOe16fVJSUnT+/HkVL17cYdr48eM1duzYPG0HAAAAABSEHF3zlpGRkaNXXoObJEVFRWnbtm2aNWtWnpeRX0aOHKnk5GTrdejQIWeXBAAAAOAOl6MjbwVt0KBB+u6777R69WoFBgZa7f7+/rp48aKSkpIcjr4lJibK39/f6vPLL784LC/zbpRX9rn6DpWJiYmy2+1ZjrpJkqenpzw9PfNl2wAAAAAgP+T6bpPPPfecJk+enKX9gw8+uOY1ZNdijNGgQYO0YMECrVixQlWqVHGY3qBBAxUtWlTLly+32nbv3q2DBw8qLCxMkhQWFqatW7fq2LFjVp+YmBjZ7XaFhIRYfa5cRmafzGUAAAAAQGGX6/A2f/58NW3aNEv7/fffr3nz5uVqWVFRUfq///s/zZw5U97e3kpISFBCQoLOnz8vSfLx8VHfvn01bNgwrVy5Uhs2bNBTTz2lsLAwNWnSRJLUtm1bhYSEqGfPntq8ebOWLl2ql19+WVFRUdbRs/79++v333/Xiy++qF27dmnKlCmaM2eOhg4dmtvNBwAAAACnyPWjAooVK6Zt27YpODjYoX3v3r2qXbu2Lly4kPOVX+MewtOmTVPv3r0lXX5I9/PPP68vv/xSqampCg8P15QpU6xTIiXpjz/+0IABA7Rq1SqVLFlSkZGRevPNN+Xu/r+zQletWqWhQ4dqx44dCgwM1CuvvGKt40Z4VEDhw+2Xkd8YU86uALcjxpWzKwBQEJyZDXId3mrXrq3+/ftr0KBBDu3vv/++pk6dqh07duRrgYUB4a3w4R9E5DfGlLMrwO2IceXsCgAUhEL/nLcrDRs2TIMGDdLx48f14IMPSpKWL1+ud955R5MmTcrv+gAAAAAAykN469Onj1JTU/X666/r1VdflSRVrlxZU6dOVa9evfK9QAAAAABAHk6bvNLx48dVvHhxeXl55WdNhQ6nTRY+nIqC/MaYcnYFuB0xrpxdAYCC4FKnTWY6fvy4du/eLUmqUaOGypYtm29FAQAAAAAc5fpRAWfPnlWfPn1UoUIFtWjRQi1atFCFChXUt29fnTt3riBqBAAAAIA7Xq7D27BhwxQbG6tvv/1WSUlJSkpK0tdff63Y2Fg9//zzBVEjAAAAANzxcn3NW9myZTVv3jy1atXKoX3lypV6/PHHdfz48fysr1DgmrfCh+sIkN8YU86uALcjxpWzKwBQEJyZDXJ95O3cuXPy8/PL0l6+fHlOmwQAAACAApLr8BYWFqbRo0frwoULVtv58+c1duxYhYWF5WtxAAAAAIDLcn23yffee0/h4eEKDAxU3bp1JUmbN29WsWLFtHTp0nwvEAAAAACQh/BWu3Zt7dmzRzNmzNCuXbskSd27d1ePHj1UvHjxfC8QAAAAAJDH57yVKFFCTz/9dH7XAgAAAAC4hhyFt2+++SbHC3z44YfzXAwAAAAAIHs5Cm9dunTJ0cJsNpvS09Nvph4AAAAAQDZyFN4yMjIKug4AAAAAwHXk+lEBAAAAAIBbL8fhbcWKFQoJCVFKSkqWacnJyapVq5ZWr16dr8UBAAAAAC7LcXibNGmSnn76adnt9izTfHx89Oyzz2rixIn5WhwAAAAA4LIch7fNmzerXbt215zetm1bbdiwIV+KAgAAAAA4ynF4S0xMVNGiRa853d3dXcePH8+XogAAAAAAjnIc3ipWrKht27Zdc/qWLVtUoUKFfCkKAAAAAOAox+GtQ4cOeuWVV3ThwoUs086fP6/Ro0erY8eO+VocAAAAAOAymzHG5KRjYmKi6tevLzc3Nw0aNEjVq1eXJO3atUsffvih0tPTtXHjRvn5+RVowc6QkpIiHx8fJScnZ3vDFmew2ZxdgXPl7FsL5BxjytkV4HbEuHJ2BQAKgjOzQY4e0i1Jfn5+WrdunQYMGKCRI0cqM/PZbDaFh4frww8/vC2DGwAAAAAUBjkOb5JUqVIlff/99zp16pT27t0rY4zuvvtulSpVqqDqAwAAAAAol+EtU6lSpXTffffldy0AAAAAgGvI8Q1LAAAAAADOQ3gDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABfg1PC2evVqderUSQEBAbLZbFq4cKHD9N69e8tmszm82rVr59Dn5MmT6tGjh+x2u3x9fdW3b1+dOXPGoc+WLVvUvHlzFStWTEFBQZowYUJBbxoAAAAA5CunhrezZ8+qbt26+vDDD6/Zp127djp69Kj1+vLLLx2m9+jRQ9u3b1dMTIy+++47rV69Ws8884w1PSUlRW3btl
WlSpW0YcMGvf322xozZow++eSTAtsuAAAAAMhv7s5cefv27dW+ffvr9vH09JS/v3+203bu3KklS5Zo/fr1atiwoSTp/fffV4cOHfTvf/9bAQEBmjFjhi5evKjPP/9cHh4eqlWrluLj4/Xuu+86hLwrpaamKjU11XqfkpKSxy0EAAAAgPxR6K95W7VqlcqXL6/q1atrwIABOnHihDUtLi5Ovr6+VnCTpDZt2qhIkSL6+eefrT4tWrSQh4eH1Sc8PFy7d+/WqVOnsl3n+PHj5ePjY72CgoIKaOsAAAAAIGcKdXhr166dvvjiCy1fvlxvvfWWYmNj1b59e6Wnp0uSEhISVL58eYd53N3dVbp0aSUkJFh9/Pz8HPpkvs/sc7WRI0cqOTnZeh06dCi/Nw0AAAAAcsWpp03eSLdu3az/Dw0NVZ06dVStWjWtWrVKrVu3LrD1enp6ytPTs8CWDwAAAAC5VaiPvF2tatWqKlu2rPbu3StJ8vf317Fjxxz6XLp0SSdPnrSuk/P391diYqJDn8z317qWDgAAAAAKG5cKb3/++adOnDihChUqSJLCwsKUlJSkDRs2WH1WrFihjIwMNW7c2OqzevVqpaWlWX1iYmJUvXp1lSpV6tZuAAAAAADkkVPD25kzZxQfH6/4+HhJ0v79+xUfH6+DBw/qzJkzGj58uH766ScdOHBAy5cvV+fOnRUcHKzw8HBJUs2aNdWuXTs9/fTT+uWXX/Tjjz9q0KBB6tatmwICAiRJTzzxhDw8PNS3b19t375ds2fP1nvvvadhw4Y5a7MBAAAAINdsxhjjrJWvWrVKDzzwQJb2yMhITZ06VV26dNGmTZuUlJSkgIAAtW3bVq+++qrDDUhOnjypQYMG6dtvv1WRIkXUtWtXTZ48WV5eXlafLVu2KCoqSuvXr1fZsmU1ePBgjRgxIsd1pqSkyMfHR8nJybLb7Te30fnEZnN2Bc7lvG8tbleMKWdXgNsR48rZFQAoCM7MBk4Nb66C8Fb48K1FfmNMObsC3I4YV86uAEBBcGY2cKlr3gAAAADgTkV4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABfg1PC2evVqderUSQEBAbLZbFq4cKHDdGOMRo0apQoVKqh48eJq06aN9uzZ49Dn5MmT6tGjh+x2u3x9fdW3b1+dOXPGoc+WLVvUvHlzFStWTEFBQZowYUJBbxoAAAAA5CunhrezZ8+qbt26+vDDD7OdPmHCBE2ePFkfffSRfv75Z5UsWVLh4eG6cOGC1adHjx7avn27YmJi9N1332n16tV65plnrOkpKSlq27atKlWqpA0bNujtt9/WmDFj9MknnxT49gEAAABAfrEZY4yzi5Akm82mBQsWqEuXLpIuH3ULCAjQ888/rxdeeEGSlJycLD8/P0VHR6tbt27auXOnQkJCtH79ejVs2FCStGTJEnXo0EF//vmnAgICNHXqVP3rX/9SQkKCPDw8JEkvvfSSFi5cqF27duWotpSUFPn4+Cg5OVl2uz3/Nz4PbDZnV+BcheNbi9sJY8rZFeB2xLhydgUACoIzs0GhveZt//79SkhIUJs2baw2Hx8fNW7cWHFxcZKkuLg4+fr6WsFNktq0aaMiRYro559/tvq0aNHCCm6SFB4ert27d+vUqVPZrjs1NVUpKSkOLwAAAABwpkIb3hISEiRJfn5+Du1+fn7WtISEBJUvX95huru7u0qXLu3QJ7tlXLmOq40fP14+Pj7WKygo6OY3CAAAAABuQqENb840cuRIJScnW69Dhw45uyQAAAAAd7hCG978/f0lSYmJiQ7tiYmJ1jR/f38dO3bMYfqlS5d08uRJhz7ZLePKdVzN09NTdrvd4QUAAAAAzlRow1uVKlXk7++v5cuXW20pKSn6+eefFRYWJkkKCwtTUlKSNmzYYPVZsWKFMjIy1LhxY6vP6tWrlZaWZvWJiYlR9erVVapUqVu0NQAAAABwc5wa3s6cOaP4+HjFx8dLunyTkvj4eB08eFA2m01DhgzRa6+9pm+++UZbt25Vr169FBAQYN2RsmbNmmrXrp2efvpp/fLLL/rxxx81aNAgdevWTQEBAZKkJ554Qh4eHurbt6+2b9+u2bNn67333tOwYcOctNUAAAAAkHtOfVTAqlWr9MADD2Rpj4yMVHR0tIwxGj16tD755BMlJSWpWbNmmjJliu655x6r78mTJzVo0CB9++23KlKkiLp27arJkyfLy8vL6rNlyxZFRUVp/fr1Klu2rAYPHqwRI0bkuE4eFVD4cPtl5DfGlLMrwO2IceXsCgAUBGdmg0LznLfCjPBW+PCtRX5jTDm7AtyOGFfOrgBAQeA5bwAAAACA6yK8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyjU4W3MmDGy2WwOrxo1aljTL1y4oKioKJUpU0ZeXl7q2rWrEhMTHZZx8OBBRUREqESJEipfvryGDx+uS5cu3epNAQAAAICb4u7sAm6kVq1aWrZsmfXe3f1/JQ8dOlSLFi3S3Llz5ePjo0GDBumRRx7Rjz/+KElKT09XRESE/P39tW7dOh09elS9evVS0aJF9cYbb9zybQEAAACAvCr04c3d3V3+/v5Z2pOTk/Wf//xHM2fO1IMPPihJmjZtmmrWrKmffvpJTZo00Q8//KAdO3Zo2bJl8vPzU7169fTqq69qxIgRGjNmjDw8PG715gAAAABAnhTq0yYlac+ePQoICFDVqlXVo0cPHTx4UJK0YcMGpaWlqU2bNlbfGjVq6K677lJcXJwkKS4uTqGhofLz87P6hIeHKyUlRdu3b7/mOlNTU5WSkuLwAgAAAABnKtThrXHjxoqOjtaSJUs0depU7d+/X82bN9fp06eVkJAgDw8P+fr6Oszj5+enhIQESVJCQoJDcMucnjntWsaPHy8fHx/rFRQUlL8bB
gAAAAC5VKhPm2zfvr31/3Xq1FHjxo1VqVIlzZkzR8WLFy+w9Y4cOVLDhg2z3qekpBDgAAAAADhVoT7ydjVfX1/dc8892rt3r/z9/XXx4kUlJSU59ElMTLSukfP3989y98nM99ldR5fJ09NTdrvd4QUAAAAAzuRS4e3MmTPat2+fKlSooAYNGqho0aJavny5NX337t06ePCgwsLCJElhYWHaunWrjh07ZvWJiYmR3W5XSEjILa8fAAAAAPKqUJ82+cILL6hTp06qVKmSjhw5otGjR8vNzU3du3eXj4+P+vbtq2HDhql06dKy2+0aPHiwwsLC1KRJE0lS27ZtFRISop49e2rChAlKSEjQyy+/rKioKHl6ejp56wAAAAAg5wp1ePvzzz/VvXt3nThxQuXKlVOzZs30008/qVy5cpKkiRMnqkiRIuratatSU1MVHh6uKVOmWPO7ubnpu+++04ABAxQWFqaSJUsqMjJS48aNc9YmAQAAAECe2IwxxtlFFHYpKSny8fFRcnJyobn+zWZzdgXOxbcW+Y0x5ewKcDtiXDm7AgAFwZnZwKWueQMAAACAOxXhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcgLuzCwAAAABwYzabsytwLmOcXYHzceQNAAAAAFzAHRXePvzwQ1WuXFnFihVT48aN9csvvzi7JAAAAADIkTsmvM2ePVvDhg3T6NGjtXHjRtWtW1fh4eE6duyYs0sDAAAAgBu6Y8Lbu+++q6efflpPPfWUQkJC9NFHH6lEiRL6/PPPnV0aAAAAANzQHXHDkosXL2rDhg0aOXKk1VakSBG1adNGcXFxWfqnpqYqNTXVep+cnCxJSklJKfhikSN8FED+YkwB+Y9xBeSvwjKmMjOBccIdVO6I8PbXX38pPT1dfn5+Du1+fn7atWtXlv7jx4/X2LFjs7QHBQUVWI3IHR8fZ1cA3F4YU0D+Y1wB+auwjanTp0/L5xYXdUeEt9waOXKkhg0bZr3PyMjQyZMnVaZMGdnu9Hu06vJfG4KCgnTo0CHZ7XZnlwO4PMYUkP8YV0D+Ykz9jzFGp0+fVkBAwC1f9x0R3sqWLSs3NzclJiY6tCcmJsrf3z9Lf09PT3l6ejq0+fr6FmSJLslut9/xgxfIT4wpIP8xroD8xZi67FYfcct0R9ywxMPDQw0aNNDy5cuttoyMDC1fvlxhYWFOrAwAAAAAcuaOOPImScOGDVNkZKQaNmyoRo0aadKkSTp79qyeeuopZ5cGAAAAADd0x4S3v//97zp+/LhGjRqlhIQE1atXT0uWLMlyExPcmKenp0aPHp3l1FIAecOYAvIf4wrIX4ypwsFmnHGPSwAAAABArtwR17wBAAAAgKsjvAEAAACACyC8AQAAAIALILwVoMqVK2vSpEnOLsPlHDhwQDabTfHx8QW+Lj4j18NnljeMK1wLn1feMKZwPXxmecO4ygFzm4uMjDSSzLPPPptl2sCBA40kExkZmaNl7d+/30gymzZtylH/Y8eOmbNnz+aob8eOHU14eHi201avXm0kmc2bN+doWdeycuVKI8mcOnXqppZztXPnzplSpUqZMmXKmAsXLuRq3sjISNO5c2eHtkuXLpmjR4+atLS0fKtx2rRpxsfHJ0t7bj6j/PLBBx+YSpUqGU9PT9OoUSPz888/39L15wfG1f8wrnyytN/qcRUbG2s6duxoKlSoYCSZBQsW3LJ15xfG1P8wpnyytN/qMfXGG2+Yhg0bGi8vL1OuXDnTuXNns2vXrlu2/vzCuPofxpVPlvZbPa6mTJliQkNDjbe3t/H29jZNmjQx33//fa6Xc0cceQsKCtKsWbN0/vx5q+3ChQuaOXOm7rrrrnxf38WLFyVJ5cqVU4kSJXI0T9++fRUTE6M///wzy7Rp06apYcOGqlOnTr7WmVfGGF26dMl6P3/+fNWqVUs1atTQwoULb3r5bm5u8vf3l7t7wT/JIjefUX6YPXu2hg0bptGjR2vjxo2qW7euwsPDdezYsVtWQ35hXOUvxlXenT17VnXr1tWHH354y9ZZEBhT+YsxlXexsbGKiorSTz/9pJiYGKWlpalt27Y6e/bsLashvzCu8hfjKu8CAwP15ptvasOGDfr111/14IMPqnPnztq+fXvuFpTPobLQyUz1tWvXNv/3f/9ntc+YMcPUqVPHdO7c2fqry+LFi03Tpk2Nj4+PKV26tImIiDB79+615pHk8GrZsqXDOl577TVToUIFU7lyZWOMMZUqVTITJ040xlz+i0fRokXN6tWrreW99dZbply5ciYhIcGkpaUZPz8/8+qrrzrUf/r0aePl5WWmTp1qjDFmzZo1plmzZqZYsWImMDDQDB482Jw5c8bqf+HCBfPiiy+awMBA4+HhYapVq2Y+++wz6y9GV74yt/vChQtm8ODBply5csbT09M0bdrU/PLLL9YyM/9a8/3335v69eubokWLmpUrV1rTW7VqZT766CMzdepU89BDD2X5DLZt22YiIiKMt7e38fLyMs2aNTN79+41o0ePzlLTypUrHf66lZ6ebipWrGimTJnisMyNGzcam81mDhw4YIwx5p133jG1a9c2JUqUMIGBgWbAgAHm9OnTDvVf+Ro9enSWz8gYY/744w/z8MMPm5IlSxpvb2/z2GOPmYSEBGv66NGjTd26dc0XX3xhKlWqZOx2u/n73/9uUlJSsmx3dho1amSioqKs9+np6SYgIMCMHz8+R/MXFowrxlVhGldXkgsfeWNMMaYK45gy5vIRCkkmNjY2T/M7C+OKcVWYx5UxxpQqVcp89tlnuZrnjglv7777rmndurXV3rp1azNx4kSHgTtv3jwzf/58s2fPHrNp0ybTqVMnExoaatLT040xxvzyyy9Gklm2bJk5evSoOXHihLUOLy8v07NnT7Nt2zazbds2Y0zWL8Xw4cNNpUqVTFJSktm4caPx8PAwX3/9tcP0atWqmYyMDKvt888/N8WLFzdJSUlm7969pmTJkmbixInmt99+Mz/++KO59957Te/eva3+jz/+uAkKCjJfffWV2bdvn1m2bJmZNWuWuXTpkpk/f76RZHbv3m2OHj1qkpKSjDHGPPfccyYgIMB8//33Zvv27SYyMtKUKlXK2r7ML36dOnXMDz/8YPbu3WtN27t3r/H09DQnT540J06cMMWKFbMGkzHG/Pnnn6Z06dLmkUceMevXrze7d+82n3/+udm1a5c5ffq0efzxx027du3M0aNHzdGjR01qamqWUxNe
eOEF06xZM4fP9fnnn3domzhxolmxYoXZv3+/Wb58ualevboZMGCAMcaY1NRUM2nSJGO32631ZA7qKz+j9PR0U69ePdOsWTPz66+/mp9++sk0aNDA+gFtzOWB6+XlZR555BGzdetWs3r1auPv72/++c9/XvM7mCk1NdW4ubll+cWyV69e5uGHH77h/IUJ44pxVVjG1dVcPbwxphhThW1MGWPMnj17jCSzdevWPM3vLIwrxlVhHVeXLl0yX375pfHw8DDbt2/P1bx3THg7duyY8fT0NAcOHDAHDhwwxYoVM8ePH3cYuFc7fvy4ww+ra53vHBkZafz8/ExqaqpD+9UDNzU11dSrV888/vjjJiQkxDz99NMO/Xfu3Gn95SFT8+bNzZNPPmmMMaZv377mmWeecZhnzZo1pkiRIub8+fNm9+7dRpKJiYnJdnuyO9/5zJkzpmjRombGjBlW28WLF01AQICZMGGCw3wLFy7Mssx//vOfpkuXLtb7zp07W3/RMMaYkSNHmipVqpiLFy9mW1N25ztfvZ83bdpkbDab+eOPP4wxxvpLTOZforIzd+5cU6ZMGev9tc53vvIz+uGHH4ybm5s5ePCgNX379u1GkvVXqNGjR5sSJUo4/JVl+PDhpnHjxtesJdPhw4eNJLNu3TqH9uHDh5tGjRrdcP7ChHH1P4wrnyz9buW4upqrhzfGFGOqsI2p9PR0ExERYZo2bZrreZ2NcfU/jCufLP2cMa62bNliSpYsadzc3IyPj49ZtGhRjufNdEdc8yZdPq81IiJC0dHRmjZtmiIiIlS2bFmHPnv27FH37t1VtWpV2e12Va5cWZJ08ODBGy4/NDRUHh4e1+3j4eGhGTNmaP78+bpw4YImTpzoML1GjRq6//779fnnn0uS9u7dqzVr1qhv376SpM2bNys6OlpeXl7WKzw8XBkZGdq/f7/i4+Pl5uamli1b5nS3aN++fUpLS1PTpk2ttqJFi6pRo0bauXOnQ9+GDRs6vE9PT9f06dP15JNPWm1PPvmkoqOjlZGRIUmKj49X8+bNVbRo0RzXdLV69eqpZs2amjlzpqTL5+IfO3ZMjz32mNVn2bJlat26tSpWrChvb2/17NlTJ06c0Llz53K8np07dyooKEhBQUFWW0hIiHx9fR32ReXKleXt7W29r1Chgktes5YfGFfZY1z9D+MqdxhT2WNM/c+tHlNRUVHatm2bZs2alet5CwvGVfYYV/9zq8ZV9erVFR8fr59//lkDBgxQZGSkduzYkeP5pTvsUQF9+vRRdHS0pk+frj59+mSZ3qlTJ508eVKffvqpfv75Z/3888+S/nfx6fWULFkyRzWsW7dOknTy5EmdPHkyy/S+fftq/vz5On36tKZNm6Zq1apZA/HMmTN69tlnFR8fb702b96sPXv2qFq1aipevHiOasirq7dx6dKlOnz4sP7+97/L3d1d7u7u6tatm/744w8tX75ckvKtph49elgDd+bMmWrXrp3KlCkj6fJtZTt27Kg6depo/vz52rBhg3Xjgpx8drl19Q8hm81m/aC6nrJly8rNzU2JiYkO7YmJifL398/XGm8lxtXNYVxdltdxdTtiTN0cxtRl+TGmBg0apO+++04rV65UYGBgfpZ3yzGubg7j6rKbHVceHh4KDg5WgwYNNH78eNWtW1fvvfdermq4o8Jbu3btdPHiRaWlpSk8PNxh2okTJ7R79269/PLLat26tWrWrKlTp0459Mn8q0p6enqe1r9v3z4NHTpUn376qRo3bqzIyMgsH/jjjz+uIkWKaObMmfriiy/Up08f2Ww2SVL9+vW1Y8cOBQcHZ3l5eHgoNDRUGRkZio2NzXb92dVfrVo1eXh46Mcff7Ta0tLStH79eoWEhFx3e/7zn/+oW7duDj9I4uPj1a1bN/3nP/+RJNWpU0dr1qxRWlraNWvKyf584okntG3bNm3YsEHz5s1Tjx49rGkbNmxQRkaG3nnnHTVp0kT33HOPjhw5kuv11KxZU4cOHdKhQ4esth07digpKemG+yInPDw81KBBA+uHmiRlZGRo+fLlCgsLu+nlOwvjinF1PQU9rm5HjCnG1PXcijFljNGgQYO0YMECrVixQlWqVMmX5ToT44pxdT3O+rcqIyNDqampuZsp1ydaupirz6dNTk42ycnJ1vvM853T09NNmTJlzJNPPmn27Nljli9fbu677z6H6yfS0tJM8eLFzWuvvWYSEhKsiz2zO2fXGMdzaS9dumSaNGliunbtaowx5siRI6ZMmTLWOcVX6tu3rylVqpRxc3Mzhw8ftto3b95sihcvbqKiosymTZvMb7/9ZhYuXOhw98LevXuboKAgs2DBAvP777+blStXmtmzZxtjLl84arPZTHR0tDl27Jh1weY//vEPExAQYBYvXuxwserJkyeNMdmfJ33s2DFTtGhRs3jx4iz1f//998bT09OcOHHC/PXXX6ZMmTLWxaq//fab+eKLL6znxbz++uvmrrvuMrt27TLHjx83Fy9evOZ55U2bNjV169Y13t7e5ty5c1Z7fHy8kWQmTZpk9u3bZ7744gtTsWJFh5p//PFH60Lj48ePW8/1uPIzysjIMPXq1TPNmzc3GzZsMD///HO2F6vWrVvXoa6JEyeaSpUqZdkP2Zk1a5bx9PQ00dHRZseOHeaZZ54xvr6+DnczcgWMK8aVMYVnXJ0+fdps2rTJbNq0yUgy7777rtm0aZN1jYQrYEwxpowpPGNqwIABxsfHx6xatcq6ycPRo0cdtscVMK4YV8YUnnH10ksvmdjYWLN//36zZcsW89JLLxmbzWZ++OGHHM2f6Y4Lb1e78mLVmJgYU7NmTePp6Wnq1KljVq1aleXi908//dQEBQWZIkWKZLlN7NWu/FKMHTvWVKhQwfz111/W9Pnz5xsPDw8THx/vMN+6deuMJNOhQ4csy/zll1/MQw89ZLy8vEzJkiVNnTp1zOuvv25NP3/+vBk6dKipUKGC8fDwMMHBwebzzz+3po8bN874+/sbm81mbff58+fN4MGDTdmyZa97m9grB+6///1v4+vrm+1FqKmpqcbX19e89957xpjLP3Datm1rSpQoYby9vU3z5s3Nvn37jDGXfwBkbo+yuU3slaZMmWIkmV69emVZ57vvvmsqVKhgihcvbsLDw80XX3yRpeb+/fubMmXK5MttYq+Um4FrjDHvv/++ueuuu4yHh4dp1KiR+emnn3I8b2HBuGJcZSoM4yq7W0FLOX/4bmHAmGJMZSoMYyq78STJTJs2LUfzFxaMK8ZVpsIwrvr06WMqVapkPDw8TLly5Uzr1q1zHdyMMcZmjDG5O1YHAAAAALjV7qhr3gAAAADAVRHegHxy8OBBh1v4Xv3Kye2GAThiXAH5izEF5L9bOa44bRLIJ5cuXdKBAweuOb1y5cpyd3e/dQUBtwHGFZC/GFNA/ruV44rwBgAAAAAugNMmAQAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AALiBVq1aaciQIc4uAwBwhyO8AQAKTO/evWWz2fTmm286tC9
cuFA2my1Xy6pcubImTZqUj9UVnAMHDshmsyk+Pt7ZpQAAbiOENwBAgSpWrJjeeustnTp1ytml5NrFixedXUK+SktLc3YJAICbQHgDABSoNm3ayN/fX+PHj79uv7Vr16p58+YqXry4goKC9Nxzz+ns2bOSLp+2+Mcff2jo0KGy2Wyy2WwyxqhcuXKaN2+etYx69eqpQoUKDsv09PTUuXPnJEkHDx5U586d5eXlJbvdrscff1yJiYlW/zFjxqhevXr67LPPVKVKFRUrVizbWhctWiQfHx/NmDEjT/tk37596ty5s/z8/OTl5aX77rtPy5Yts6aPGzdOtWvXzjJfvXr19Morr1jvP/vsM9WsWVPFihVTjRo1NGXKFGta5tG/2bNnq2XLlipWrJhmzJihP/74Q506dVKpUqVUsmRJ1apVS99//32etgMAcGsR3gAABcrNzU1vvPGG3n//ff3555/Z9tm3b5/atWunrl27asuWLZo9e7bWrl2rQYMGSZK++uorBQYGaty4cTp69KiOHj0qm82mFi1aaNWqVZKkU6dOaefOnTp//rx27dolSYqNjdV9992nEiVKKCMjQ507d9bJkycVGxurmJgY/f777/r73//uUMvevXs1f/58ffXVV9me9jhz5kx1795dM2bMUI8ePfK0T86cOaMOHTpo+fLl2rRpk9q1a6dOnTrp4MGDkqQ+ffpo586dWr9+vTXPpk2btGXLFj311FOSpBkzZmjUqFF6/fXXtXPnTr3xxht65ZVXNH36dId1vfTSS/rHP/6hnTt3Kjw8XFFRUUpNTdXq1au1detWvfXWW/Ly8srTdgAAbi13ZxcAALj9/e1vf1O9evU0evRo/ec//8kyffz48erRo4d1U5C7775bkydPVsuWLTV16lSVLl1abm5u8vb2lr+/vzVfq1at9PHHH0uSVq9erXvvvVf+/v5atWqVatSooVWrVqlly5aSpOXLl2vr1q3av3+/goKCJElffPGFatWqpfXr1+u+++6TdPlUyS+++ELlypXLUueHH36of/3rX/r222+t5eZF3bp1VbduXev9q6++qgULFuibb77RoEGDFBgYqPDwcE2bNs2qa9q0aWrZsqWqVq0qSRo9erTeeecdPfLII5KkKlWqaMeOHfr4448VGRlpLXvIkCFWH+ny0ceuXbsqNDRUkqzlAQAKP468AQBuibfeekvTp0/Xzp07s0zbvHmzoqOj5eXlZb3Cw8OVkZGh/fv3X3OZLVu21I4dO3T8+HHFxsaqVatWatWqlVatWqW0tDStW7dOrVq1kiTt3LlTQUFBVnCTpJCQEPn6+jrUVKlSpWyD27x58zR06FDFxMTcVHCTLh95e+GFF1SzZk35+vrKy8tLO3futI68SdLTTz+tL7/8UhcuXNDFixc1c+ZM9enTR5J09uxZ7du3T3379nXYZ6+99pr27dvnsK6GDRs6vH/uuef02muvqWnTpho9erS2bNlyU9sCALh1CG8AgFuiRYsWCg8P18iRI7NMO3PmjJ599lnFx8dbr82bN2vPnj2qVq3aNZcZGhqq0qVLKzY21iG8xcbGav369UpLS9P999+fqzpLliyZbfu9996rcuXK6fPPP5cxJlfLvNoLL7ygBQsW6I033tCaNWsUHx+v0NBQhxukdOrUSZ6enlqwYIG+/fZbpaWl6dFHH5V0eX9J0qeffuqwz7Zt26affvrputvTr18//f777+rZs6e2bt2qhg0b6v3337+p7QEA3BqcNgkAuGXefPNN1atXT9WrV3dor1+/vnbs2KHg4OBrzuvh4aH09HSHNpvNpubNm+vrr7/W9u3b1axZM5UoUUKpqan6+OOP1bBhQyu81KxZU4cOHdKhQ4eso287duxQUlKSQkJCblh7tWrV9M4776hVq1Zyc3PTBx98kNvNt/z444/q3bu3/va3v0m6HMYOHDjg0Mfd3V2RkZGaNm2aPDw81K1bNxUvXlyS5Ofnp4CAAP3+++95uu4uKChI/fv3V//+/TVy5Eh9+umnGjx4cJ63BwBwaxDeAAC3TGhoqHr06KHJkyc7tI8YMUJNmjTRoEGD1K9fP5UsWVI7duxQTEyMFZIqV66s1atXq1u3bvL09FTZsmUlXb7u7fnnn1fDhg2tG2+0aNFCM2bM0PDhw611tGnTxlr/pEmTdOnSJQ0cOFAtW7bMcmrhtdxzzz1auXKlWrVqJXd39xs+d2737t1Z2mrVqqW7775bX331lTp16iSbzaZXXnlFGRkZWfr269dPNWvWlHQ58F1p7Nixeu655+Tj46N27dopNTVVv/76q06dOqVhw4Zds6YhQ4aoffv2uueee3Tq1CmtXLnSWgcAoHDjtEkAwC01bty4LEGlTp06io2N1W+//abmzZvr3nvv1ahRoxQQEOAw34EDB1StWjWHa9Jatmyp9PR069o26XKgu7rNZrPp66+/VqlSpdSiRQu1adNGVatW1ezZs3NVf/Xq1bVixQp9+eWXev7556/bt1u3brr33nsdXomJiXr33XdVqlQp3X///erUqZPCw8NVv379LPPffffduv/++1WjRg01btzYYVq/fv302Wefadq0aQoNDVXLli0VHR2tKlWqXLem9PR0RUVFqWbNmmrXrp3uueceh0cMAAAKL5u52RP3AQBAgTDG6O6779bAgQOvezQNAHBn4LRJAAAKoePHj2vWrFlKSEiwnu0GALizEd4AACiEypcvr7Jly+qTTz5RqVKlnF0OAKAQILwBAFAIcVUDAOBq3LAEAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXMD/A+rVYcrBq9R7AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layers_updated = list(cycles_dict_updated.keys())\n", + "cycles_updated = list(cycles_dict_updated.values())\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(layers_updated, cycles_updated, color ='blue', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"Clock Cycles\")\n", + "plt.title(\"Estimated clock cycles for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "res_dict_updated = []\n", + "res_dict_updated = res_estimation(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABaUElEQVR4nO3de3zP9f//8ft759nRsM1hmFNMYyLMoSmHEVJEfMScSpoKH4rvJ+ei+nyK6oMijT6Rckjlk0oIOR9L5pxTZVPG5pCx7fn7w2+vj7cNG+Mlu10vl/cl7+fz+Xq9Hq/3+/1cu+91eDuMMUYAAAAAANu42F0AAAAAABR2BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwDXpUmTJmrSpIndZRSogwcPyuFwaMaMGXaXYiteh7ybMWOGHA6HDh48eM2xX331laKiouTl5SWHw6GTJ0/e9PpuNYfDof79+9tdxm0t+zOzadOmfC/73XffyeFw6Lvvviv4wgDYjmAG3GGy/6d/pce6devyvK7ExESNGjUqT7903kqTJ0+2NTRk/3I0b968K4652i+o8+bNs365yl5XXh746zp+/Lg6deokb29vTZo0Sf/5z3/k4+Njd1l3vDVr1mjUqFF3ZAgGcOdxs7sAADfHmDFjFB4enqO9UqVKeV5HYmKiRo8erSZNmqh8+fJOfd98882NlnjdJk+erOLFi6tHjx621VBQqlWrpv/85z9ObcOGDZOvr6/+8Y9/2FQVCtrGjRt16tQpjR07Vs2aNbO7nEJjzZo1Gj16tHr06KHAwEC7ywGAqyKYAXeoVq1aqU6dOjdt/R4eHjdt3YVJSEiIHn/8cae2V155RcWLF8/Rjr+uY8eOSVKBhoMzZ85w1O0v5Ny5c3f8z00+k8CN4VRGoBCbM2eOateuLT8/P/n7+ysyMlJvvvmmpIunRHbs2FGSdP/991un02Vf23D5NWbZp+R98sknGj16tEqXLi0/Pz89+uijSk1NVXp6ugYMGKDg4GD5+vqqZ8+eSk9Pd6onISFBDzzwgIKDg+Xp6amIiAhNmTLFaUz58uW1Y8cOrVixwqrp0jpOnjypAQMGKCwsTJ6enqpUqZJeffVVZWVlOa3n5MmT6tGjhwICAhQYGKi4uLi/5OlOycnJcnNz0+jRo3P07d69Ww6HQ//+978lSSkpKRo8eLAiIyPl6+srf39/tWrVSj/88MM1t3Olawp79OiR42hqVlaWJk6cqOrVq8vLy0shISHq27evTpw44TRu06ZNio2NVfHixeXt7a3w8HD16tXrmrU4HA6NGjUqR3v58uWdjqJeuHBBo0ePVuXKleXl5aVixYqpUaNGWrJkidNyu3bt0qOPPqqgoCB5eXmpTp06+vzzz3Osf8eOHXrggQfk7e2tMmXK6KWXXsrxucpNkyZNFBcXJ0m699575XA4nOqcO3euateuLW9vbyuQ//rrr07r6NGjh3x9fbV//349+OCD8vPzU9euXa+63V9//VW9evVSSEiIPD09Vb16db3//vtOY86fP68RI0aodu3aCggIkI+Pjxo3bqzly5fnWF9WVpbefPNNRUZGysvLSyVKlFDLli1zvVZq4cKFuvvuu63tfvXVV9d8nS79GfLyyy+rTJky8vLyUtOmTbVv374c49evX6+WLVsqICBARYoUUUxMjFavXm31jxo1SkOGDJEkhYeHWz8vDh48qPbt2+uee+5xWl/btm3lcDic3vv169fL4XBo8eLFVtvPP/+sjh07KigoSEWKFFH9+vX13//+N9d9mTNnjl588UWVLl1aRYoUUVpaWq77fuLECdWtW1dlypTR7t27r/laXWrVqlXq2LGjypYtK09PT4WFhWngwIH6888/rTEJCQlyOBzaunVrjuXHjRsnV1dXp8/ctV5b6eLr63A4lJiYqL/97W8qWrSoGjVqlK/aATjjiBlwh0pNTdUff/zh1OZwOFSsWDFJ0pIlS9SlSxc1bdpUr776qiRp586dWr16tZ577jndd999evbZZ/XWW2/p//7v/1StWjVJsv57JePHj5e3t7eGDh2qffv26e2335a7u7tcXFx04sQJjRo1SuvWrdOMGTMUHh6uESNGWMtOmTJF1atX10MPPSQ3Nzd98cUXevrpp5WVlaX4+HhJ0sSJE/XMM884neoXEhIiSTp79qxiYmL066+/qm/fvipbtqzWrFmjYcOG6ejRo5o4caIkyRijdu3a6fvvv9dTTz2latWq6dNPP7V+ef4rCQkJUUxMjD755BONHDnSqe/jjz+Wq6urFbB//vlnLVy4UB07dlR4eLiSk5P17rvvKiYmRomJiSpVqlSB1NS3b1/NmDFDPXv21LPPPqsDBw7o3//+t7Zu3arVq1fL3d1dx44dU4sWLVSiRAkNHTpUgYGBOnjwoBYsWFAgNUgXf3EcP368+vTpo7p16yotLU2bNm3Sli1b1Lx5c0kXw1bDhg1VunRpDR06VD4+Pvrkk0/08MMPa/78+XrkkUckSUlJSbr//vuVkZFhjZs6daq8vb2vWcc//vEP3XXXXZo6dap1inHFihUlyXqd7r33Xo0fP17Jycl68803tXr1am3dutXpCFtGRoZiY2PVqFEj/etf/1KRIkWuuM3k5GTVr1/futaxRIkSWrx4sXr37q20tDQNGDBAkpSWlqb33ntPXbp00RNPPKFTp05p+vTpio2N1YYNGx
QVFWWts3fv3poxY4ZatWqlPn36KCMjQ6tWrdK6deucjs5///33WrBggZ5++mn5+fnprbfeUocOHXT48GHr58/VvPLKK3JxcdHgwYOVmpqq1157TV27dtX69eutMcuWLVOrVq1Uu3ZtjRw5Ui4uLtYfdlatWqW6deuqffv22rNnjz766CNNmDBBxYsXlySVKFFCjRs31meffaa0tDT5+/vLGKPVq1fLxcVFq1at0kMPPSTpYuhxcXFRw4YNrde1QYMGOnv2rJ599lkVK1ZMM2fO1EMPPaR58+ZZn5dsY8eOlYeHhwYPHqz09PRcj5j98ccfat68uVJSUrRixQrrs5FXc+fO1dmzZ9WvXz8VK1ZMGzZs0Ntvv61ffvlFc+fOlSQ9+uijio+P16xZs1SrVi2n5WfNmqUmTZqodOnSeX5tL9WxY0dVrlxZ48aNkzEmX7UDuIwBcEdJSEgwknJ9eHp6WuOee+454+/vbzIyMq64rrlz5xpJZvny5Tn6YmJiTExMjPV8+fLlRpK5++67zfnz5632Ll26GIfDYVq1auW0fHR0tClXrpxT29mzZ3NsJzY21lSoUMGprXr16k7bzjZ27Fjj4+Nj9uzZ49Q+dOhQ4+rqag4fPmyMMWbhwoVGknnttdesMRkZGaZx48ZGkklISMix7ktl7+vcuXOvOEaSiY+Pz7Xvaq/r1fbvSt59910jyWzfvt2pPSIiwjzwwAPW83PnzpnMzEynMQcOHDCenp5mzJgxTm2Xvw6Xv9/Z4uLinN7HVatWGUlm1qxZTuO++uorp/ZPP/3USDIbN27M835mk2RGjhyZo71cuXImLi7Oel6zZk3TunXrq66radOmJjIy0pw7d85qy8rKMg0aNDCVK1e22gYMGGAkmfXr11ttx44dMwEBAUaSOXDgwFW3kz0vL93f8+fPm+DgYHP33XebP//802pftGiRkWRGjBhhtcXFxRlJZujQoVfdTrbevXubkiVLmj/++MOpvXPnziYgIMCaaxkZGSY9Pd1pzIkTJ0xISIjp1auX1bZs2TIjyTz77LM5tpWVlWX9W5Lx8PAw+/bts9p++OEHI8m8/fbbV605e15Vq1bNqaY333zT6fOdlZVlKleubGJjY522ffbsWRMeHm6aN29utf3zn//M9f3ZuHGjkWS+/PJLY4wxP/74o5FkOnbsaOrVq2eNe+ihh0ytWrWs59mfg1WrVlltp06dMuHh4aZ8+fLW/MrelwoVKuT4uXbpZ+Ho0aOmevXqpkKFCubgwYNXfX0uXe+lPzty+7k5fvx443A4zKFDh6y2Ll26mFKlSjn9DNiyZYvTXM/Pazty5EgjyXTp0uWadQPIG05lBO5QkyZN0pIlS5wel56OExgYqDNnzuQ4retGde/eXe7u7tbzevXqyRiT4xS1evXq6ciRI8rIyLDaLj36kH3ELyYmRj///LNSU1Ovue25c+eqcePGKlq0qP744w/r0axZM2VmZmrlypWSpC+//FJubm7q16+ftayrq6ueeeaZ695vO7Vv315ubm76+OOPrbaffvpJiYmJeuyxx6w2T09Pubhc/LGfmZmp48ePy9fXV3fddZe2bNlSILXMnTtXAQEBat68udN7ULt2bfn6+lqnyGUfCVq0aJEuXLhQINu+XGBgoHbs2KG9e/fm2p+SkqJly5apU6dOOnXqlFXr8ePHFRsbq71791qnd3355ZeqX7++09GCEiVKXPN0wqvZtGmTjh07pqefflpeXl5We+vWrVW1atUcp8dJcvrMXokxRvPnz1fbtm1ljHF6H2JjY5Wammq9366urtZRnKysLKWkpCgjI0N16tRx+kzMnz9fDocjx1FZSTnuGNqsWTOnoz41atSQv7+/fv7552vWLkk9e/Z0OrLUuHFjSbKW37Ztm/bu3au//e1vOn78uLVvZ86cUdOmTbVy5cprnmJaq1Yt+fr6Wj8TVq1apTJlyqh79+7asmWLzp49K2OMvv/+e2v70sXPQd26dZ1O2fP19dWTTz6pgwcPKjEx0Wk7cXFxVzyq+ssvvygmJkYXLlzQypUrVa5cuTy9Ppe7dP1nzpzRH3/8oQYNGsgY43TqYvfu3fXbb785naY6a9YseXt7q0OHDpKu77V96qmnrqtuADlxKiNwh6pbt+5Vb/7x9NNP65NPPlGrVq1UunRptWjRQp06dVLLli1vaLtly5Z1eh4QECBJCgsLy9GelZWl1NRU6/Sm1atXa+TIkVq7dq3Onj3rND41NdVa15Xs3btXP/74o0qUKJFrf/YNGA4dOqSSJUvK19fXqf+uu+66xt4VrIK6BX7x4sXVtGlTffLJJxo7dqyki6cxurm5qX379ta47GuEJk+erAMHDigzM9Pqy8spZnmxd+9epaamKjg4ONf+7PcgJiZGHTp00OjRozVhwgQ1adJEDz/8sP72t7/J09OzQGoZM2aM2rVrpypVqujuu+9Wy5Yt1a1bN9WoUUOStG/fPhljNHz4cA0fPvyK9ZYuXVqHDh1SvXr1cvTfyGfm0KFDV1xH1apV9f333zu1ubm5qUyZMtdc7++//66TJ09q6tSpmjp1aq5jst8HSZo5c6Zef/117dq1yykkX3pX1/3796tUqVIKCgq65vYv/xkgSUWLFs1xjWFely9atKgkWctnB+2rnXqcmppqLZcbV1dXRUdHa9WqVZIuBrPGjRurUaNGyszM1Lp16xQSEqKUlBSnYHalz0H2Kd6HDh3S3XffbbXndmfcbN26dZObm5t27typ0NDQK467lsOHD2vEiBH6/PPPc7zGl/5Bq3nz5ipZsqRmzZqlpk2bKisrSx999JHatWsnPz8/Sdf32l5tHwHkD8EMKKSCg4O1bds2ff3111q8eLEWL16shIQEde/eXTNnzrzu9bq6uuar3fz/axL279+vpk2bqmrVqnrjjTcUFhYmDw8Pffnll5owYUKebrKQlZWl5s2b6/nnn8+1v0qVKnncixvn6enpdPH9pbJD56VHSW5U586d1bNnT23btk1RUVH65JNP1LRpU+u6GuniRf7Dhw9Xr169NHbsWAUFBcnFxUUDBgy45uvrcDhyvX7k0nAnXXwPgoODNWvWrFzXkx2as78Hbt26dfriiy/09ddfq1evXnr99de1bt26HKE5Ly6v5b777tP+/fv12Wef6ZtvvtF7772nCRMm6J133lGfPn2sfR48eLBiY2NzXWd+vl7iZrv0iOfVZO/X448/fsVfsLPD6YcffqgePXro4Ycf1pAhQxQcHCxXV1eNHz9e+/fvv646rzXXb3T57P375z//6XQN3KXy8vlp1KiRXn75ZZ07d06rVq3SP/7xDwUGBuruu+/WqlWrrGtXLw1m+XW1axDbt2+vDz74QG+++abGjx9/XevPzMy0rk974YUXVLVqVfn4+OjXX39Vjx49nOa1q6ur/va3v2natGmaPHmyVq9erd9++83p7q/X89rm5TpLAHlDMAMKMQ8PD7Vt21Zt27ZVVlaWnn76ab377rsaPny4KlWqdEu/1PiLL75Qenq6Pv/8c6e/mOd2d7gr1
VWxYkWdPn36mt8TVa5cOS1dulSnT592+iUjv3dDu9Y2rrS+7PbrPXUpNw8//LD69u1rnc64Z88eDRs2zGnMvHnzdP/992v69OlO7SdPnnQKcLkpWrRorqeiZR/1yVaxYkV9++23atiwYZ5+Yatfv77q16+vl19+WbNnz1bXrl01Z84c9enT56q1XH4HzfPnz+vo0aM5xgYFBalnz57q2bOnTp8+rfvuu0+jRo1Snz59VKFCBUmSu7t7nj4zuZ0SeSOfmez3f/fu3XrggQdyrPd6Px8lSpSQn5+fMjMzr7lf8+bNU4UKFbRgwQKneXX5KYsVK1bU119/rZSUlDwdNbuZsk+T9Pf3v+b+Xe1nWOPGjXX+/Hl99NFH+vXXX60Adt9991nBrEqVKlZAk648r3ft2mX159UzzzyjSpUqacSIEQoICNDQoUPzvGy27du3a8+ePZo5c6a6d+9utV/pFPXu3bvr9ddf1xdffKHFixerRIkSTn+UyM9rC6DgcY0ZUEgdP37c6bmLi4v1V/Ts29hnfx/NrbiNfPZfyS/9q3pqaqoSEhJyjPXx8cm1pk6dOmnt2rX6+uuvc/SdPHnSup7twQcfVEZGhtOt+DMzM/X222/f6G5YHnzwQa1bt06bN2/OUcesWbMUFRV1Q6cvXS4wMFCxsbH65JNPNGfOHHl4eOjhhx92GuPq6prjqMXcuXNz3Jo9NxUrVtSuXbv0+++/W20//PBDjltod+rUSZmZmdYplZfKyMiw3rcTJ07kqCX7L/SXf41CbrVkXxuUberUqTmOmF3+Gff19VWlSpWs9QcHB6tJkyZ69913cw11l+5r9vu5YcMGp/4rHRnMizp16ig4OFjvvPOO0z4vXrxYO3fuVOvWra9rva6ururQoYPmz5+vn376KUf/pfuV27xbv3691q5d67RMhw4dZIzJ9WsZ8nokrKDUrl1bFStW1L/+9S+dPn06R/+l+3e1n2H16tWTu7u7Xn31VQUFBal69eqSLga2devWacWKFTmOlj344IPasGGD0+tz5swZTZ06VeXLl1dERES+9mX48OEaPHiwhg0bluOrQfIit/fPGGN97cnlatSooRo1aui9997T/Pnz1blzZ7m5/e9v9Pl5bQEUPI6YAXeoxYsXW3/FvVSDBg1UoUIF9enTRykpKXrggQdUpkwZHTp0SG+//baioqKs6yWioqLk6uqqV199VampqfL09LS+Z6ygtWjRwjqC17dvX50+fVrTpk1TcHBwjl+aa9eurSlTpuill15SpUqVFBwcrAceeEBDhgzR559/rjZt2qhHjx6qXbu2zpw5o+3bt2vevHk6ePCgihcvrrZt26phw4YaOnSoDh48qIiICC1YsCBPNxi51Pz583N9jePi4jR06FDNnTtX9913n/r27auqVavqt99+04wZM3T06NFcA+eNeuyxx/T4449r8uTJio2NzfFlxm3atNGYMWPUs2dPNWjQQNu3b9esWbOsI0dX06tXL73xxhuKjY1V7969dezYMb3zzjuqXr2603czxcTEqG/fvho/fry2bdumFi1ayN3dXXv37tXcuXP15ptv6tFHH9XMmTM1efJkPfLII6pYsaJOnTqladOmyd/fXw8++OBVa+nTp4+eeuopdejQQc2bN9cPP/ygr7/+OsdRv4iICDVp0kS1a9dWUFCQNm3apHnz5ql///7WmEmTJqlRo0aKjIzUE088oQoVKig5OVlr167VL7/8Yn3H2/PPP6///Oc/atmypZ577jnrdvnlypXTjz/+eM3XLzfZoaBnz56KiYlRly5drNvlly9fXgMHDryu9UoXbzm/fPly1atXT0888YQiIiKUkpKiLVu26Ntvv1VKSoqki5+JBQsW6JFHHlHr1q114MABvfPOO4qIiHD6xfz+++9Xt27d9NZbb2nv3r1q2bKlsrKytGrVKt1///1Or+nN5uLiovfee0+tWrVS9erV1bNnT5UuXVq//vqrli9fLn9/f33xxReSLv6skC5+ZUHnzp3l7u6utm3bysfHR0WKFFHt2rW1bt066zvMpItHzM6cOaMzZ87kCGZDhw7VRx99pFatWunZZ59VUFCQZs6cqQMHDmj+/Pl5OtX0cv/85z+Vmpqq+Ph4+fn55euL5atWraqKFStq8ODB+vXXX+Xv76/58+df9Xq+7t27a/DgwZKUY1v5eW0B3AS3+jaQAG6uq90uX5fcFnnevHmmRYsWJjg42Hh4eJiyZcuavn37mqNHjzqtb9q0aaZChQrG1dXV6TbNV7pd/uW3kM/tNuHG/O9Wy7///rvV9vnnn5saNWoYLy8vU758efPqq6+a999/P8ftrpOSkkzr1q2Nn5+fkeRUx6lTp8ywYcNMpUqVjIeHhylevLhp0KCB+de//uV0G//jx4+bbt26GX9/fxMQEGC6detmtm7dmq/b5V/pkX0r7V9++cX06dPHlC5d2ri5uZmgoCDTpk0bs27duquuP7+3y8+WlpZmvL29jSTz4Ycf5ug/d+6c+fvf/25KlixpvL29TcOGDc3atWtzvJe53S7fGGM+/PBDU6FCBePh4WGioqLM119/neN2+dmmTp1qateubby9vY2fn5+JjIw0zz//vPntt9+MMRdv092lSxdTtmxZ4+npaYKDg02bNm3Mpk2brrmfmZmZ5oUXXjDFixc3RYoUMbGxsWbfvn05bpf/0ksvmbp165rAwEDj7e1tqlatal5++WWnz4Exxuzfv990797dhIaGGnd3d1O6dGnTpk0bM2/ePKdxP/74o4mJiTFeXl6mdOnSZuzYsWb69OnXfbv8bB9//LGpVauW8fT0NEFBQaZr167ml19+cRoTFxdnfHx8rvnaXCo5OdnEx8ebsLAw4+7ubkJDQ03Tpk3N1KlTrTFZWVlm3Lhxply5csbT09PUqlXLLFq0KNf3NSMjw/zzn/80VatWNR4eHqZEiRKmVatWZvPmzdYYXeFrIi5/b3JzpZ8hV/o8bt261bRv394UK1bMeHp6mnLlyplOnTqZpUuXOo0bO3asKV26tHFxccnxXg0ZMsRIMq+++qrTMpUqVTKSzP79+3PUuX//fvPoo4+awMBA4+XlZerWrWsWLVqUp30xJvfPQmZmpunSpYtxc3MzCxcuvOZrdOnt8hMTE02zZs2Mr6+vKV68uHniiSesryjI7WfZ0aNHjaurq6lSpcoVt5OX1za3n+EAbozDGL4NEAAAoDD4448/VLJkSY0YMeKKdyMFYA+uMQMAACgkZsyYoczMTHXr1s3uUgBchmvMAAAA7nDLli1TYmKiXn75ZT388MMqX7683SUBuAynMgIAANzhmjRpojVr1qhhw4b68MMPVbp0abtLAnAZghkAAAAA2IxrzAAAAADAZgQzAAAAALAZN/+QlJWVpd9++01+fn7WF0wCAAAAKHyMMTp16pRKlSp1XV8cf70IZpJ+++03hYWF2V0GAAAAgNvEkSNHVKZMmVu2PYKZJD8/P0kXX3x/f3+bqwEAAABgl7S0NIWFhVkZ4VYhmEnW6Yv+/v4EMwAAAAC3/BIn
bv4BAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM1sD2a//vqrHn/8cRUrVkze3t6KjIzUpk2brH5jjEaMGKGSJUvK29tbzZo10969e53WkZKSoq5du8rf31+BgYHq3bu3Tp8+fat3BQAAAACui63B7MSJE2rYsKHc3d21ePFiJSYm6vXXX1fRokWtMa+99preeustvfPOO1q/fr18fHwUGxurc+fOWWO6du2qHTt2aMmSJVq0aJFWrlypJ5980o5dAgAAAIB8cxhjjF0bHzp0qFavXq1Vq1bl2m+MUalSpfT3v/9dgwcPliSlpqYqJCREM2bMUOfOnbVz505FRERo48aNqlOnjiTpq6++0oMPPqhffvlFpUqVumYdaWlpCggIUGpqKl8wDQAAABRidmUDW4+Yff7556pTp446duyo4OBg1apVS9OmTbP6Dxw4oKSkJDVr1sxqCwgIUL169bR27VpJ0tq1axUYGGiFMklq1qyZXFxctH79+ly3m56errS0NKcHAAAAANjF1mD2888/a8qUKapcubK+/vpr9evXT88++6xmzpwpSUpKSpIkhYSEOC0XEhJi9SUlJSk4ONip383NTUFBQdaYy40fP14BAQHWIywsrKB3DQAAAADyzNZglpWVpXvuuUfjxo1TrVq19OSTT+qJJ57QO++8c1O3O2zYMKWmplqPI0eO3NTtAQAAAMDV2BrMSpYsqYiICKe2atWq6fDhw5Kk0NBQSVJycrLTmOTkZKsvNDRUx44dc+rPyMhQSkqKNeZynp6e8vf3d3oAAAAAgF1sDWYNGzbU7t27ndr27NmjcuXKSZLCw8MVGhqqpUuXWv1paWlav369oqOjJUnR0dE6efKkNm/ebI1ZtmyZsrKyVK9evVuwFwAAAABwY9zs3PjAgQPVoEEDjRs3Tp06ddKGDRs0depUTZ06VZLkcDg0YMAAvfTSS6pcubLCw8M1fPhwlSpVSg8//LCki0fYWrZsaZ0CeeHCBfXv31+dO3fO0x0ZAQAAAMButt4uX5IWLVqkYcOGae/evQoPD9egQYP0xBNPWP3GGI0cOVJTp07VyZMn1ahRI02ePFlVqlSxxqSkpKh///764osv5OLiog4dOuitt96Sr69vnmq43W6X7xjtsLsEW5mRtn4kAQAAUIjZlQ1sD2a3A4LZ7YVgBgAAALsUyu8xAwAAAAAQzAAAAADAdgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALCZrcFs1KhRcjgcTo+qVata/efOnVN8fLyKFSsmX19fdejQQcnJyU7rOHz4sFq3bq0iRYooODhYQ4YMUUZGxq3eFQAAAAC4bm52F1C9enV9++231nM3t/+VNHDgQP33v//V3LlzFRAQoP79+6t9+/ZavXq1JCkzM1OtW7dWaGio1qxZo6NHj6p79+5yd3fXuHHjbvm+AAAAAMD1sD2Yubm5KTQ0NEd7amqqpk+frtmzZ+uBBx6QJCUkJKhatWpat26d6tevr2+++UaJiYn69ttvFRISoqioKI0dO1YvvPCCRo0aJQ8Pj1u9OwAAAACQb7ZfY7Z3716VKlVKFSpUUNeuXXX48GFJ0ubNm3XhwgU1a9bMGlu1alWVLVtWa9eulSStXbtWkZGRCgkJscbExsYqLS1NO3bsuOI209PTlZaW5vQAAAAAALvYGszq1aunGTNm6KuvvtKUKVN04MABNW7cWKdOnVJSUpI8PDwUGBjotExISIiSkpIkSUlJSU6hLLs/u+9Kxo8fr4CAAOsRFhZWsDsGAAAAAPlg66mMrVq1sv5do0YN1atXT+XKldMnn3wib2/vm7bdYcOGadCgQdbztLQ0whkAAAAA29h+KuOlAgMDVaVKFe3bt0+hoaE6f/68Tp486TQmOTnZuiYtNDQ0x10as5/ndt1aNk9PT/n7+zs9AAAAAMAut1UwO336tPbv36+SJUuqdu3acnd319KlS63+3bt36/Dhw4qOjpYkRUdHa/v27Tp27Jg1ZsmSJfL391dERMQtrx8AAAAAroetpzIOHjxYbdu2Vbly5fTbb79p5MiRcnV1VZcuXRQQEKDevXtr0KBBCgoKkr+/v5555hlFR0erfv36kqQWLVooIiJC3bp102uvvaakpCS9+OKLio+Pl6enp527BgAAAAB5Zmsw++WXX9SlSxcdP35cJUqUUKNGjbRu3TqVKFFCkjRhwgS5uLioQ4cOSk9PV2xsrCZPnmwt7+rqqkWLFqlfv36Kjo6Wj4+P4uLiNGbMGLt2CQAAAADyzWGMMXYXYbe0tDQFBAQoNTX1trjezDHaYXcJtjIjC/1HEgAAADaxKxvcVteYAQAAAEBhRDADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZrdNMHvllVfkcDg0YMAAq+3cuXOKj49XsWLF5Ovrqw4dOig5OdlpucOHD6t169YqUqSIgoODNWTIEGVkZNzi6gEAAADg+t0WwWzjxo169913VaNGDaf2gQMH6osvvtDcuXO1YsUK/fbbb2rfvr3Vn5mZqdatW+v8+fNas2aNZs6cqRkzZmjEiBG3ehcAAAAA4LrZHsx
Onz6trl27atq0aSpatKjVnpqaqunTp+uNN97QAw88oNq1ayshIUFr1qzRunXrJEnffPONEhMT9eGHHyoqKkqtWrXS2LFjNWnSJJ0/f96uXQIAAACAfLE9mMXHx6t169Zq1qyZU/vmzZt14cIFp/aqVauqbNmyWrt2rSRp7dq1ioyMVEhIiDUmNjZWaWlp2rFjxxW3mZ6errS0NKcHAAAAANjFzc6Nz5kzR1u2bNHGjRtz9CUlJcnDw0OBgYFO7SEhIUpKSrLGXBrKsvuz+65k/PjxGj169A1WDwAAAAAFw7YjZkeOHNFzzz2nWbNmycvL65Zue9iwYUpNTbUeR44cuaXbBwAAAIBL2RbMNm/erGPHjumee+6Rm5ub3NzctGLFCr311ltyc3NTSEiIzp8/r5MnTzotl5ycrNDQUElSaGhojrs0Zj/PHpMbT09P+fv7Oz0AAAAAwC62BbOmTZtq+/bt2rZtm/WoU6eOunbtav3b3d1dS5cutZbZvXu3Dh8+rOjoaElSdHS0tm/frmPHjlljlixZIn9/f0VERNzyfQIAAACA62HbNWZ+fn66++67ndp8fHxUrFgxq713794aNGiQgoKC5O/vr2eeeUbR0dGqX7++JKlFixaKiIhQt27d9NprrykpKUkvvvii4uPj5enpecv3CQAAAACuh603/7iWCRMmyMXFRR06dFB6erpiY2M1efJkq9/V1VWLFi1Sv379FB0dLR8fH8XFxWnMmDE2Vg0AAAAA+eMwxhi7i7BbWlqaAgIClJqaeltcb+YY7bC7BFuZkYX+IwkAAACb2JUNbP8eMwAAAAAo7K7rVMa9e/dq+fLlOnbsmLKyspz6RowYUSCFAQAAAEBhke9gNm3aNPXr10/FixdXaGioHI7/nXbncDgIZgBuO5wezOnBKHjMK+YVgIKV72D20ksv6eWXX9YLL7xwM+oBAAAAgEIn39eYnThxQh07drwZtQAAAABAoZTvYNaxY0d98803N6MWAAAAACiU8n0qY6VKlTR8+HCtW7dOkZGRcnd3d+p/9tlnC6w4AAAAACgM8h3Mpk6dKl9fX61YsUIrVqxw6nM4HAQzAAAAAMinfAezAwcO3Iw6AAAAAKDQuqEvmDbGyBhuFwsAAAAAN+K6gtkHH3ygyMhIeXt7y9vbWzVq1NB//vOfgq4NAAAAAAqFfJ/K+MYbb2j48OHq37+/GjZsKEn6/vvv9dRTT+mPP/7QwIEDC7xIAAAAALiT5TuYvf3225oyZYq6d+9utT300EOqXr26Ro0aRTADAAAAgHzK96mMR48eVYMGDXK0N2jQQEePHi2QogAAAACgMMl3MKtUqZI++eSTHO0ff/yxKleuXCBFAQAAAEBhku9TGUePHq3HHntMK1eutK4xW716tZYuXZprYAMAAAAAXF2+j5h16NBB69evV/HixbVw4UItXLhQxYsX14YNG/TII4/cjBoBAAAA4I6W7yNmklS7dm19+OGHBV0LAAAAABRKeQpmaWlp8vf3t/59NdnjAAAAAAB5k6dgVrRoUR09elTBwcEKDAyUw+HIMcYYI4fDoczMzAIvEgAAAADuZHkKZsuWLVNQUJAkafny5Te1IAAAAAAobPIUzGJiYqx/h4eHKywsLMdRM2OMjhw5UrDVAQAAAEAhkO+7MoaHh+v333/P0Z6SkqLw8PACKQoAAAAACpN8B7Psa8kud/r0aXl5eRVIUQAAAABQmOT5dvmDBg2SJDkcDg0fPlxFihSx+jIzM7V+/XpFRUUVeIEAAAAAcKfLczDbunWrpItHzLZv3y4PDw+rz8PDQzVr1tTgwYMLvkIAAAAAuMPlOZhl342xZ8+eevPNN/m+MgAAAAAoIHkOZtkSEhJuRh0AAAAAUGjlO5hJ0qZNm/TJJ5/o8OHDOn/+vFPfggULCqQwAAAAoLBwjM55c73CxIw0dpdgu3zflXHOnDlq0KCBdu7cqU8//VQXLlzQjh07tGzZMgUEBNyMGgEAAADgjpbvYDZu3DhNmDBBX3zxhTw8PPTmm29q165d6tSpk8qWLXszagQAAACAO1q+g9n+/fvVunVrSRfvxnjmzBk5HA4NHDhQU6dOLfACAQAAAOBOl+9gVrRoUZ06dUqSVLp0af3000+SpJMnT+rs2bMFWx0AAAAAFAL5vvnHfffdpyVLligyMlIdO3bUc889p2XLlmnJkiVq2rTpzagRAAAAAO5o+Q5m//73v3Xu3DlJ0j/+8Q+5u7trzZo16tChg1588cUCLxAAAAAA7nT5DmZBQUHWv11cXDR06NACLQgAAAAACpt8X2O2ZcsWbd++3Xr+2Wef6eGHH9b//d//5fhOMwAAAADAteU7mPXt21d79uyRJP3888967LHHVKRIEc2dO1fPP/98gRcIAAAAAHe6fAezPXv2KCoqSpI0d+5cxcTEaPbs2ZoxY4bmz59f0PUBAAAAwB0v38HMGKOsrCxJ0rfffqsHH3xQkhQWFqY//vijYKsDAAAAgEIg38GsTp06eumll/Sf//xHK1assL5s+sCBAwoJCSnwAgEAAADgTpfvYDZx4kRt2bJF/fv31z/+8Q9VqlRJkjRv3jw1aNCgwAsEAAAAgDtdvm+XX6NGDae7Mmb75z//KVdX1wIpCgAAAAAKk3wHsyvx8vIqqFUBAAAAQKGSp2AWFBSkPXv2qHjx4ipatKgcDscVx6akpBRYcQAAAABQGOQpmE2YMEF+fn6SLl5jBgAAAAAoOHkKZnFxcbn+GwAAAABw4/IUzNLS0vK8Qn9//+suBgAAAAAKozwFs8DAwKteVyZd/OJph8OhzMzMAikMAAAAAAqLPAWz5cuX3+w6AAAAAKDQylMwi4mJudl1AAAAAECh5ZKXQT/++KOysrKsf1/tkR9TpkxRjRo15O/vL39/f0VHR2vx4sVW/7lz5xQfH69ixYrJ19dXHTp0UHJystM6Dh8+rNatW6tIkSIKDg7WkCFDlJGRka86AAAAAMBOeTpiFhUVpaSkJAUHBysqKkoOh0PGmBzj8nuNWZkyZfTKK6+ocuXKMsZo5syZateunbZu3arq1atr4MCB+u9//6u5c+cqICBA/fv3V/v27bV69WpJUmZmplq3bq3Q0FCtWbNGR48eVffu3eXu7q5x48bluQ4AAAAAsFOegtmBAwdUokQJ698FpW3btk7PX375ZU2ZMkXr1q1TmTJlNH36dM2ePVsPPPCAJCkhIUHVqlXTunXrVL9+fX3zzTdKTEzUt99+q5CQEEVFRWns2LF64YUXNGrUKHl4eBRYrQAAAABws+TpVMZy5cpZd2U8dOiQSpcurXLlyjk9SpcurUOHDl13IZmZmZozZ47OnDmj6Ohobd68WRcuXFCzZs2sMVWrVlXZsmW1du1aSdLatWsVGRmpkJAQa0xsbKzS0tK0Y8eOK24rPT1daWlpTg8AAAAAsEuegtml7r//fqWkpORoT01N1f3335/vArZv3y5fX195enrqqaee0qeffqqIiAglJSXJw8NDgYGBTuNDQkKUlJQkSUpKSnIKZdn92X1XMn78eA
UEBFiPsLCwfNcNAAAAAAUl38Es+/vKLnf8+HH5+Pjku4C77rpL27Zt0/r169WvXz/FxcUpMTEx3+vJj2HDhik1NdV6HDly5KZuDwAAAACuJk/XmElS+/btJV28wUePHj3k6elp9WVmZurHH39UgwYN8l2Ah4eHKlWqJEmqXbu2Nm7cqDfffFOPPfaYzp8/r5MnTzodNUtOTlZoaKgkKTQ0VBs2bHBaX/ZdG7PH5MbT09OpfgAAAACwU56PmGWf9meMkZ+fn9OpgKGhoXryySf14Ycf3nBBWVlZSk9PV+3ateXu7q6lS5dafbt379bhw4cVHR0tSYqOjtb27dt17Ngxa8ySJUvk7++viIiIG64FAAAAAG6FPB8xS0hIkCSVL19egwcPvq7TFi83bNgwtWrVSmXLltWpU6c0e/Zsfffdd/r6668VEBCg3r17a9CgQQoKCpK/v7+eeeYZRUdHq379+pKkFi1aKCIiQt26ddNrr72mpKQkvfjii4qPj+eIGAAAAIC/jDwHs2wjR44ssI0fO3ZM3bt319GjRxUQEKAaNWro66+/VvPmzSVJEyZMkIuLizp06KD09HTFxsZq8uTJ1vKurq5atGiR+vXrp+joaPn4+CguLk5jxowpsBoBAAAA4GbLczArWrRorjf9CAgIUJUqVTR48GArUOXV9OnTr9rv5eWlSZMmadKkSVccU65cOX355Zf52i4AAAAA3E7yHMwmTpyYa/vJkye1efNmtWnTRvPmzcvxpdEAAAAAgKvLczCLi4u7an9UVJTGjx9PMAMAAACAfMr395hdSZs2bbRr166CWh0AAAAAFBoFFszS09Pl4eFRUKsDAAAAgEKjwILZ9OnTFRUVVVCrAwAAAIBCI8/XmA0aNCjX9tTUVG3ZskV79uzRypUrC6wwAAAAACgs8hzMtm7dmmu7v7+/mjdvrgULFig8PLzACgMAAACAwiLPwWz58uU3sw4AAAAAKLQK7BozAAAAAMD1IZgBAAAAgM0IZgAAAABgM4IZAAAAANgsz8GsV69eOnXq1M2sBQAAAAAKpTwHs5kzZ+rPP/+8mbUAAAAAQKGU52BmjLmZdQAAAABAoZXn7zGTpFOnTsnLy+uqY/z9/W+oIAAAAAAobPIVzKpUqXLFPmOMHA6HMjMzb7goAAAAAChM8hXM5s2bp6CgoJtVCwAAAAAUSvkKZg0bNlRwcPDNqgUAAAAACiW+xwwAAAAAbJbnYFauXDm5urrezFoAAAAAoFDK86mMBw4cuJl1AAAAAEChledgVrRoUTkcjhztAQEBqlKligYPHqzmzZsXaHEAAAAAUBjkOZhNmDAh12B28uRJbd68WW3atNG8efPUtm3bAi0QAAAAAO50eQ5mPXr0uGp/VFSUxo8fTzADAAAAgHwqsLsytmnTRrt27Sqo1QEAAABAoVFgwSw9PV0eHh4FtToAAAAAKDQKLJhNnz5dUVFRBbU6AAAAACg08nyN2aBBg3JtT01N1ZYtW7Rnzx6tXLmywAoDAAAAgMIiz8Fs69atubb7+/urefPmWrBggcLDwwusMAAAAAAoLPIczJYvX37V/l9++UVPPvmkpk6desNFAQAAAEBhUmDXmB0/flzTp08vqNUBAAAAQKFRYMEMAAAAAHB9CGYAAAAAYDOCGQAAAADYLM83/2jfvv1V+0+ePHmjtQAAAABAoZTnYBYQEHDN/u7du99wQQAAAABQ2OQ5mCUkJNzMOgAAAACg0OIaMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwma3BbPz48br33nvl5+en4OBgPfzww9q9e7fTmHPnzik+Pl7FihWTr6+vOnTooOTkZKcxhw8fVuvWrVWkSBEFBwdryJAhysjIuJW7AgAAAADXzdZgtmLFCsXHx2vdunVasmSJLly4oBYtWujMmTPWmIEDB+qLL77Q3LlztWLFCv32229q37691Z+ZmanWrVvr/PnzWrNmjWbOnKkZM2ZoxIgRduwSAAAAAOSbm50b/+qrr5yez5gxQ8HBwdq8ebPuu+8+paamavr06Zo9e7YeeOABSVJCQoKqVaumdevWqX79+vrmm2+UmJiob7/9ViEhIYqKitLYsWP1wgsvaNSoUfLw8LBj1wAAAAAgz26ra8xSU1MlSUFBQZKkzZs368KFC2rWrJk1pmrVqipbtqzWrl0rSVq7dq0iIyMVEhJijYmNjVVaWpp27NiR63bS09OVlpbm9AAAAAAAu9w2wSwrK0sDBgxQw4YNdffdd0uSkpKS5OHhocDAQKexISEhSkpKssZcGsqy+7P7cjN+/HgFBARYj7CwsALeGwAAAADIu9smmMXHx+unn37SnDlzbvq2hg0bptTUVOtx5MiRm75NAAAAALgSW68xy9a/f38tWrRIK1euVJkyZaz20NBQnT9/XidPnnQ6apacnKzQ0FBrzIYNG5zWl33Xxuwxl/P09JSnp2cB7wUAAAAAXB9bj5gZY9S/f399+umnWrZsmcLDw536a9euLXd3dy1dutRq2717tw4fPqzo6GhJUnR0tLZv365jx45ZY5YsWSJ/f39FRETcmh0BAAAAgBtg6xGz+Ph4zZ49W5999pn8/Pysa8ICAgLk7e2tgIAA9e7dW4MGDVJQUJD8/f31zDPPKDo6WvXr15cktWjRQhEREerWrZtee+01JSUl6cUXX1R8fDxHxQAAAAD8JdgazKZMmSJJatKkiVN7QkKCevToIUmaMGGCXFxc1KFDB6Wnpys2NlaTJ0+2xrq6umrRokXq16+foqOj5ePjo7i4OI0ZM+ZW7QYAAAAA3BBbg5kx5ppjvLy8NGnSJE2aNOmKY8qVK6cvv/yyIEsDAAAAgFvmtrkrIwAAAAAUVgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbGZrMFu5cqXatm2rUqVKyeFwaOHChU79xhiNGDFCJUuWlLe3t5o1a6a9e/c6jUlJSVHXr
l3l7++vwMBA9e7dW6dPn76FewEAAAAAN8bWYHbmzBnVrFlTkyZNyrX/tdde01tvvaV33nlH69evl4+Pj2JjY3Xu3DlrTNeuXbVjxw4tWbJEixYt0sqVK/Xkk0/eql0AAAAAgBvmZufGW7VqpVatWuXaZ4zRxIkT9eKLL6pdu3aSpA8++EAhISFauHChOnfurJ07d+qrr77Sxo0bVadOHUnS22+/rQcffFD/+te/VKpUqVu2LwAAAABwvW7ba8wOHDigpKQkNWvWzGoLCAhQvXr1tHbtWknS2rVrFRgYaIUySWrWrJlcXFy0fv36K647PT1daWlpTg8AAAAAsMttG8ySkpIkSSEhIU7tISEhVl9SUpKCg4Od+t3c3BQUFGSNyc348eMVEBBgPcLCwgq4egAAAADIu9s2mN1Mw4YNU2pqqvU4cuSI3SUBAAAAKMRu22AWGhoqSUpOTnZqT05OtvpCQ0N17Ngxp/6MjAylpKRYY3Lj6ekpf39/pwcAAAAA2OW2DWbh4eEKDQ3V0qVLrba0tDStX79e0dHRkqTo6GidPHlSmzdvtsYsW7ZMWVlZqlev3i2vGQAAAACuh613ZTx9+rT27dtnPT9w4IC2bdumoKAglS1bVgMGDNBLL72kypUrKzw8XMOHD1epUqX08MMPS5KqVaumli1b6oknntA777yjCxcuqH///urcuTN3ZAQAAADwl2FrMNu0aZPuv/9+6/mgQYMkSXFxcZoxY4aef/55nTlzRk8++aROnjypRo0a6auvvpKXl5e1zKxZs9S/f381bdpULi4u6tChg956661bvi8AAAAAcL1sDWZNmjSRMeaK/Q6HQ2PGjNGYMWOuOCYoKEizZ8++GeUBAAAAwC1x215jBgAAAACFBcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm90xwWzSpEkqX768vLy8VK9ePW3YsMHukgAAAAAgT+6IYPbxxx9r0KBBGjlypLZs2aKaNWsqNjZWx44ds7s0AAAAALimOyKYvfHGG3riiSfUs2dPRURE6J133lGRIkX0/vvv210aAAAAAFyTm90F3Kjz589r8+bNGjZsmNXm4uKiZs2aae3atbkuk56ervT0dOt5amqqJCktLe3mFptX5+wuwF63zfuAOwdzyu4ScCdiXtldAu40zCm7S7Bk12KMuaXb/csHsz/++EOZmZkKCQlxag8JCdGuXbtyXWb8+PEaPXp0jvawsLCbUiPyJ+CVALtLAO4ozCmg4DGvgIJ1O86pU6dOKSDg1tX1lw9m12PYsGEaNGiQ9TwrK0spKSkqVqyYHA6HjZXZLy0tTWFhYTpy5Ij8/f3tLgf4y2NOAQWPeQUULOaUM2OMTp06pVKlSt3S7f7lg1nx4sXl6uqq5ORkp/bk5GSFhobmuoynp6c8PT2d2gIDA29WiX9J/v7+TEygADGngILHvAIKFnPqf27lkbJsf/mbf3h4eKh27dpaunSp1ZaVlaWlS5cqOjraxsoAAAAAIG/+8kfMJGnQoEGKi4tTnTp1VLduXU2cOFFnzpxRz5497S4NAAAAAK7pjghmjz32mH7//XeNGDFCSUlJioqK0ldffZXjhiC4Nk9PT40cOTLHqZ4Arg9zCih4zCugYDGnbg8Oc6vvAwkAAAAAcPKXv8YMAAAAAP7qCGYAAAAAYDOCGQAAAADYjGB2ncqXL6+JEyfaXcZfzsGDB+VwOLRt27abvi3eo78e3rPrw7zClfB+XR/mFK6G9+z6MK/ywPyFxcXFGUmmb9++OfqefvppI8nExcXlaV0HDhwwkszWrVvzNP7YsWPmzJkzeRrbpk0bExsbm2vfypUrjSTzww8/5GldV7J8+XIjyZw4ceKG1nO5s2fPmqJFi5pixYqZc+fO5WvZuLg4065dO6e2jIwMc/ToUXPhwoUCqzEhIcEEBATkaM/Pe1RQ/v3vf5ty5coZT09PU7duXbN+/fpbuv2CwLz6H+ZVQI72Wz2vVqxYYdq0aWNKlixpJJlPP/30lm27oDCn/oc5FZCj/VbPqXHjxpk6deoYX19fU6JECdOuXTuza9euW7b9gsK8+h/mVUCO9ls9ryZPnmwiIyONn5+f8fPzM/Xr1zdffvllvtfzlz9iFhYWpjlz5ujPP/+02s6dO6fZs2erbNmyBb698+fPS5JKlCihIkWK5GmZ3r17a8mSJfrll19y9CUkJKhOnTqqUaNGgdZ5vYwxysjIsJ7Pnz9f1atXV9WqVbVw4cIbXr+rq6tCQ0Pl5nbzv6khP+9RQfj44481aNAgjRw5Ulu2bFHNmjUVGxurY8eO3bIaCgrzqmAxr67fmTNnVLNmTU2aNOmWbfNmYE4VLObU9VuxYoXi4+O1bt06LVmyRBcuXFCLFi105syZW1ZDQWFeFSzm1fUrU6aMXnnlFW3evFmbNm3SAw88oHbt2mnHjh35W1EBB8ZbKjuN33333ebDDz+02mfNmmVq1Khh2rVrZ/21ZPHixaZhw4YmICDABAUFmdatW5t9+/ZZy0hyesTExDht46WXXjIlS5Y05cuXN8YYU65cOTNhwgRjzMW/VLi7u5uVK1da63v11VdNiRIlTFJSkrlw4YIJCQkxY8eOdar/1KlTxtfX10yZMsUYY8yqVatMo0aNjJeXlylTpox55plnzOnTp63x586dM88//7wpU6aM8fDwMBUrVjTvvfee9ZeeSx/Z+33u3DnzzDPPmBIlShhPT0/TsGFDs2HDBmud2X9l+fLLL80999xj3N3dzfLly63+Jk2amHfeecdMmTLFNG/ePMd78NNPP5nWrVsbPz8/4+vraxo1amT27dtnRo4cmaOm5cuXO/1VKjMz05QuXdpMnjzZaZ1btmwxDofDHDx40BhjzOuvv27uvvtuU6RIEVOmTBnTr18/c+rUKaf6L32MHDkyx3tkjDGHDh0yDz30kPHx8TF+fn6mY8eOJikpyeofOXKkqVmzpvnggw9MuXLljL+/v3nsscdMWlpajv3OTd26dU18fLz1PDMz05QqVcqMHz8+T8vfLphXzKvbaV5dSn/hI2bMKebU7TinjLl4ZEGS
WbFixXUtbxfmFfPqdp5XxhhTtGhR89577+VrmTsimL3xxhumadOmVnvTpk3NhAkTnCblvHnzzPz5883evXvN1q1bTdu2bU1kZKTJzMw0xhizYcMGI8l8++235ujRo+b48ePWNnx9fU23bt3MTz/9ZH766SdjTM43fMiQIaZcuXLm5MmTZsuWLcbDw8N89tlnTv0VK1Y0WVlZVtv7779vvL29zcmTJ82+ffuMj4+PmTBhgtmzZ49ZvXq1qVWrlunRo4c1vlOnTiYsLMwsWLDA7N+/33z77bdmzpw5JiMjw8yfP99IMrt37zZHjx41J0+eNMYY8+yzz5pSpUqZL7/80uzYscPExcWZokWLWvuX/aGuUaOG+eabb8y+ffusvn379hlPT0+TkpJijh8/bry8vKyJYowxv/zyiwkKCjLt27c3GzduNLt37zbvv/++2bVrlzl16pTp1KmTadmypTl69Kg5evSoSU9Pz3G6wODBg02jRo2c3te///3vTm0TJkwwy5YtMwcOHDBLly41d911l+nXr58xxpj09HQzceJE4+/vb20ne8Je+h5lZmaaqKgo06hRI7Np0yazbt06U7t2beuHrzEXJ6Wvr69p37692b59u1m5cqUJDQ01//d//3fFz2C29PR04+rqmuOXxu7du5uHHnromsvfTphXzKvbZV5d7q8ezJhTzKnbbU4ZY8zevXuNJLN9+/brWt4uzCvm1e06rzIyMsxHH31kPDw8zI4dO/K17B0RzI4dO2Y8PT3NwYMHzcGDB42Xl5f5/fffnSbl5X7//XenH0RXOr84Li7OhISEmPT0dKf2yydlenq6iYqKMp06dTIRERHmiSeecBq/c+dO6y8G2Ro3bmwef/xxY4wxvXv3Nk8++aTTMqtWrTIuLi7mzz//NLt37zaSzJIlS3Ldn9zOLz59+rRxd3c3s2bNstrOnz9vSpUqZV577TWn5RYuXJhjnf/3f/9nHn74Yet5u3btrL9EGGPMsGHDTHh4uDl//nyuNeV2fvHlr/PWrVuNw+Ewhw4dMsYY6y8o2X9Bys3cuXNNsWLFrOdXOr/40vfom2++Ma6urubw4cNW/44dO4wk669HI0eONEWKFHH668iQIUNMvXr1rlhLtl9//dVIMmvWrHFqHzJkiKlbt+41l7+dMK/+h3kVkGPcrZxXl/urBzPmFHPqdptTmZmZpnXr1qZhw4b5XtZuzKv/YV4F5Bhnx7z68ccfjY+Pj3F1dTUBAQHmv//9b56XzfaXv8ZMungeaevWrTVjxgwlJCSodevWKl68uNOYvXv3qkuXLqpQoYL8/f1Vvnx5SdLhw4evuf7IyEh5eHhcdYyHh4dmzZql+fPn69y5c5owYYJTf9WqVdWgQQO9//77kqR9+/Zp1apV6t27tyTphx9+0IwZM+Tr62s9YmNjlZWVpQMHDmjbtm1ydXVVTExMXl8W7d+/XxcuXFDDhg2tNnd3d9WtW1c7d+50GlunTh2n55mZmZo5c6Yef/xxq+3xxx/XjBkzlJWVJUnatm2bGjduLHd39zzXdLmoqChVq1ZNs2fPlnTx3Pdjx46pY8eO1phvv/1WTZs2VenSpeXn56du3brp+PHjOnv2bJ63s3PnToWFhSksLMxqi4iIUGBgoNNrUb58efn5+VnPS5Ys+Ze8RqwgMK9yx7z6H+ZV/jCncsec+p9bPafi4+P1008/ac6cOfle9nbBvMod8+p/btW8uuuuu7Rt2zatX79e/fr1U1xcnBITE/O8vHQH3S6/V69emjFjhmbOnKlevXrl6G/btq1SUlI0bdo0rV+/XuvXr5f0vws5r8bHxydPNaxZs0aSlJKSopSUlBz9vXv31vz583Xq1CklJCSoYsWK1iQ7ffq0+vbtq23btlmPH374QXv37lXFihXl7e2dpxqu1+X7+PXXX+vXX3/VY489Jjc3N7m5ualz5846dOiQli5dKkkFVlPXrl2tSTl79my1bNlSxYoVk3Tx1qpt2rRRjRo1NH/+fG3evNm6CUBe3rv8uvwHjMPhsH4IXU3x4sXl6uqq5ORkp/bk5GSFhoYWaI23EvPqxjCvLrreeXUnYk7dGObURQUxp/r3769FixZp+fLlKlOmTEGWd8sxr24M8+qiG51XHh4eqlSpkmrXrq3x48erZs2aevPNN/NVwx0TzFq2bKnz58/rwoULio2Ndeo7fvy4du/erRdffFFNmzZVtWrVdOLECacx2X8NyczMvK7t79+/XwMHDtS0adNUr149xcXF5XgzO3XqJBcXF82ePVsffPCBevXqJYfDIUm65557lJiYqEqVKuV4eHh4KDIyUllZWVqxYkWu28+t/ooVK8rDw0OrV6+22i5cuKCNGzcqIiLiqvszffp0de7c2emHxLZt29S5c2dNnz5dklSjRg2tWrVKFy5cuGJNeXk9//a3v+mnn37S5s2bNW/ePHXt2tXq27x5s7KysvT666+rfv36qlKlin777bd8b6datWo6cuSIjhw5YrUlJibq5MmT13wt8sLDw0O1a9e2fmBJUlZWlpYuXaro6OgbXr9dmFfMq6u52fPqTsScYk5dza2YU8YY9e/fX59++qmWLVum8PDwAlmvnZhXzKursev/VVlZWUpPT8/fQvk++fE2cvn5q6mpqSY1NdV6nn1+cWZmpilWrJh5/PHHzd69e83SpUvNvffe63S9woULF4y3t7d56aWXTFJSknXhZG7nyBrjfO5qRkaGqV+/vunQoYMxxpjffvvNFCtWzDqH91K9e/c2RYsWNa6urubXX3+12n/44Qfj7e1t4uPjzdatW82ePXvMwoULne7y16NHDxMWFmY+/fRT8/PPP5vly5ebjz/+2Bhz8SJMh8NhZsyYYY4dO2Zd/Pjcc8+ZUqVKmcWLFztd+JmSkmKMyf285GPHjhl3d3ezePHiHPV/+eWXxtPT0xw/ftz88ccfplixYtaFn3v27DEffPCB9X0oL7/8silbtqzZtWuX+f3338358+eveB53w4YNTc2aNY2fn585e/as1b5t2zYjyUycONHs37/ffPDBB6Z06dJONa9evdq6aPf333+3vrfi0vcoKyvLREVFmcaNG5vNmzeb9evX53rhZ82aNZ3qmjBhgilXrlyO1yE3c+bMMZ6enmbGjBkmMTHRPPnkkyYwMNDprj9/Bcwr5pUxt8+8OnXqlNm6davZunWrkWTeeOMNs3XrVuuahL8C5hRzypjbZ07169fPBAQEmO+++866YcLRo0ed9uevgHnFvDLm9plXQ4cONStWrDAHDhwwP/74oxk6dKhxOBzmm2++ydPy2e6oYHa5Sy/8XLJkialWrZrx9PQ0NWrUMN99912OC8mnTZtmwsLCjIuLS45bpV7u0jd89OjRpmTJkuaPP/6w+ufPn288PDzMtm3bnJZbs2aNkWQefPDBHOvcsGGDad68ufH19TU+Pj6mRo0a5uWXX7b6//zzTzNw4EBTsmRJ4+HhYSpVqmTef/99q3/MmDEmNDTUOBwOa7///PNP88wzz5jixYtf9Vapl07Kf/3rXyYwMDD
XCzrT09NNYGCgefPNN40xF3+YtGjRwhQpUsT4+fmZxo0bm/379xtjLk7u7P1RLrdKvdTkyZONJNO9e/cc23zjjTdMyZIljbe3t4mNjTUffPBBjpqfeuopU6xYsQK5Veql8jMpjTHm7bffNmXLljUeHh6mbt26Zt26dXle9nbBvGJeZbsd5lVut0OW8v7FsbcD5hRzKtvtMKdym0+STEJCQp6Wv10wr5hX2W6HedWrVy9Trlw54+HhYUqUKGGaNm2a71BmjDEOY4zJ3zE2AAAAAEBBumOuMQMAAACAvyqCGZAHhw8fdrqN7eWPvNxyF4Az5hVQsJhTQMG7lfOKUxmBPMjIyNDBgwev2F++fHm5ubnduoKAOwDzCihYzCmg4N3KeUUwAwAAAACbcSojAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQCgUGvSpIkGDBhgdxkAgEKOYAYAuC49evSQw+HQK6+84tS+cOFCORyOfK2rfPnymjhxYgFWd/McPHhQDodD27Zts7sUAMAdhGAGALhuXl5eevXVV3XixAm7S8m38+fP211Cgbpw4YLdJQAAbgDBDABw3Zo1a6bQ0FCNHz/+quO+//57NW7cWN7e3goLC9Ozzz6rM2fOSLp4KuGhQ4c0cOBAORwOORwOGWNUokQJzZs3z1pHVFSUSpYs6bROT09PnT17VpJ0+PBhtWvXTr6+vvL391enTp2UnJxsjR81apSioqL03nvvKTw8XF5eXrnW+t///lcBAQGaNWvWdb0m+/fvV7t27RQSEiJfX1/de++9+vbbb63+MWPG6O67786xXFRUlIYPH249f++991StWjV5eXmpatWqmjx5stWXfdTu448/VkxMjLy8vDRr1iwdOnRIbdu2VdGiReXj46Pq1avryy+/vK79AADcWgQzAMB1c3V11bhx4/T222/rl19+yXXM/v371bJlS3Xo0EE//vijPv74Y33//ffq37+/JGnBggUqU6aMxowZo6NHj+ro0aNyOBy677779N1330mSTpw4oZ07d+rPP//Url27JEkrVqzQvffeqyJFiigrK0vt2rVTSkqKVqxYoSVLlujnn3/WY4895lTLvn37NH/+fC1YsCDXUxFnz56tLl26aNasWeratet1vSanT5/Wgw8+qKVLl2rr1q1q2bKl2rZtq8OHD0uSevXqpZ07d2rjxo3WMlu3btWPP/6onj17SpJmzZqlESNG6OWXX9bOnTs1btw4DR8+XDNnznTa1tChQ/Xcc89p586dio2NVXx8vNLT07Vy5Upt375dr776qnx9fa9rPwAAt5ab3QUAAP7aHnnkEUVFRWnkyJGaPn16jv7x48era9eu1g02KleurLfeeksxMTGaMmWKgoKC5OrqKj8/P4WGhlrLNWnSRO+++64kaeXKlapVq5ZCQ0P13XffqWrVqvruu+8UExMjSVq6dKm2b9+uAwcOKCwsTJL0wQcfqHr16tq4caPuvfdeSRdPX/zggw9UokSJHHVOmjRJ//jHP/TFF19Y670eNWvWVM2aNa3nY8eO1aeffqrPP/9c/fv3V5kyZRQbG6uEhASrroSEBMXExKhChQqSpJEjR+r1119X+/btJUnh4eFKTEzUu+++q7i4OGvdAwYMsMZIF48adujQQZGRkZJkrQ8AcPvjiBkA4Ia9+uqrmjlzpnbu3Jmj74cfftCMGTPk6+trPWJjY5WVlaUDBw5ccZ0xMTFKTEzU77//rhUrVqhJkyZq0qSJvvvuO124cEFr1qxRkyZNJEk7d+5UWFiYFcokKSIiQoGBgU41lStXLtdQNm/ePA0cOFBLliy5oVAmXTxiNnjwYFWrVk2BgYHy9fXVzp07rSNmkvTEE0/oo48+0rlz53T+/HnNnj1bvXr1kiSdOXNG+/fvV+/evZ1es5deekn79+932ladOnWcnj/77LN66aWX1LBhQ40cOVI//vjjDe0LAODWIZgBAG7Yfffdp9jYWA0bNixH3+nTp9W3b19t27bNevzwww/au3evKlaseMV1RkZGKigoSCtWrHAKZitWrNDGjRt14cIFNWjQIF91+vj45Npeq1YtlShRQu+//76MMfla5+UGDx6sTz/9VOPGjdOqVau0bds2RUZGOt1spG3btvL09NSnn36qL774QhcuXNCjjz4q6eLrJUnTpk1zes1++uknrVu37qr706dPH/3888/q1q2btm/frjp16ujtt9++of0BANwanMoIACgQr7zyiqKionTXXXc5td9zzz1KTExUpUqVrrish4eHMjMzndocDocaN26szz77TDt27FCjRo1UpEgRpaen691331WdOnWsYFKtWjUdOXJER44csY6aJSYm6uTJk4qIiLhm7RUrVtTrr7+uJk2ayNXVVf/+97/zu/uW1atXq0ePHnrkkUckXQxaBw8edBrj5uamuLg4JSQkyMPDQ507d5a3t7ckKSQkRKVKldLPP/98Xde5hYWF6amnntJTTz2lYcOGadq0aXrmmWeue38AALcGwQwAUCAiIyPVtWtXvfXWW07tL7zwgurXr6/+/furT58+8vHxUWJiopYsWWIFoPLly2vlypXq3LmzPD09Vbx4cUkXrzP7+9//rjp16lg3sbjvvvs0a9YsDRkyxNpGs2bNrO1PnDhRGRkZevrppxUTE5PjdL8rqVKlipYvX64mTZrIzc3tmt+rtnv37hxt1atXV+XKlbVgwQK1bdtWDodDw4cPV1ZWVo6xffr0UbVq1SRdDHOXGj16tJ599lkFBASoZcuWSk9P16ZNm3TixAkNGjToijUNGDBArVq1UpUqVXTixAktX77c2gYA4PbGqYwAgAIzZsyYHCGkRo0aWrFihfbs2aPGjRurVq1aGjFihEqVKuW03MGDB1WxYkWna8BiYmKUmZlpXUsmXQxrl7c5HA599tlnKlq0qO677z41a9ZMFSpU0Mcff5yv+u+66y4tW7ZMH330kf7+979fdWznzp1Vq1Ytp0dycrLeeOMNFS1aVA0aNFDbtm0VGxure+65J8fylStXVoMGDVS1alXVq1fPqa9Pnz567733lJCQoMjISMXExGjGjBkKDw+/ak2ZmZmKj49XtWrV1LJlS1WpUsXpNvsAgNuXw9zoyfQAACDfjDGqXLmynn766aseBQMAFA6cyggAwC32+++/a86cOUpKSrK+uwwAULgRzAAAuMWCg4NVvHhxTZ06VUWLFrW7HADAbYBgBgDALcZVBACAy3HzDwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZv8Ps17pkj9surgAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layers_updated = list(res_dict_updated.keys())\n", + "utilisation_updated = list(res_dict_updated.values())\n", + "lut_values_updated = [] #Initializing a list to store LUT values.\n", + "for i in range(len(layers_updated)):\n", + " x = list(utilisation_updated[i].values()) #Extracting the resource utilisation for each layer.\n", + " lut_values_updated.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", + "\n", + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(layers_updated, lut_values_updated, color ='green', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"LUT Utilisation\")\n", + "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From these numbers, we see that the first layer has been removed as the bottleneck and that the entire network can now perform one inference in ~4096 clock cycles (when the pipeline is full) as compared to the earlier configuration where it took ~38400 execution cycles.\n", + "\n", + "This decrease in execution latency of the network though comes at a cost of a 45% increase in LUT resource utilization for layer 1 of the network.\n", + "\n", + "We now observe the `instream_width` and `outstream_width` of our network with the updated folding parameters and then apply the `InsertDWC()` transform to it in case there is a mismatch in these widths due to the updates. " + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Instream Width = 5 Outstream Width = 4\n", + "Instream Width = 2 Outstream Width = 2\n", + "Instream Width = 2 Outstream Width = 2\n", + "Instream Width = 2 Outstream Width = 1\n" + ] + } + ], + "source": [ + "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "for fcl in fc_layers:\n", + " fcl_inst = getCustomOp(fcl)\n", + " print('Instream Width =',(fcl_inst.get_instream_width()),'Outstream Width =',int(fcl_inst.get_outstream_width()))" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "model = model.transform(InsertDWC())" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Stopping http://0.0.0.0:5901\n", + "Serving './cybsec_DWC_inserted.onnx' at http://0.0.0.0:5901\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.save(\"./cybsec_DWC_inserted.onnx\")\n", + "showInNetron(\"./cybsec_DWC_inserted.onnx\",localhost_url='xirxlabs53')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Because there is a mismatch in the `outstream_width` (4) of layer 1 and the `inputstream_width` (2) of layer 2 the FINN compiler inserts the `StreamingDataWidthConverter` layer to remedy this when we call that transformation for our network above.\n", + "\n", + "On expanding this layer in the netron we see that the `inWidth` of this layer is 4 and the `outWidth` is 2.\n", + "\n", + "Note, we do not see this insertion 
where these widths match. They are only mismatched for the first two layers and hence we see that the data width converter is being inserted there." + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "res_dict_DWC = []\n", + "res_dict_DWC = res_estimation(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['MatrixVectorActivation_0', '', 'MatrixVectorActivation_1', 'MatrixVectorActivation_2', 'MatrixVectorActivation_3']\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA/wAAAHWCAYAAADKCYKCAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABc+UlEQVR4nO3deZxPdf//8efH7GY1mBnLYGwxGkaEsY2yDCFFxCXGVhIKUXyvkBaq6yraVFToipStxZVKCNnXkp2sMUPGzFgyZnn//vCbw8cMZhgz07ke99vtc8vnfbbXOZ/Pe07Pz9kcxhgjAAAAAABgK0UKugAAAAAAAJD3CPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAkI+aNWumZs2aFXQZeergwYNyOByaPn16QZdSoNgOOTd9+nQ5HA4dPHjwhuN+9913ioyMlKenpxwOhxITE297ffnN4XBo0KBBBV1GoZb5ndm4cWOup/3pp5/kcDj0008/5X1hAFDIEfgBQJf/Z/Jar7Vr1+Z4Xjt27NDzzz+fozCTnyZPnlygYTTzf7rnzp17zXGuF3zmzp1r/U975rxy8sLf16lTp9SlSxd5eXnp3Xff1X/+8x95e3sXdFm2t3r1aj3//PO2/HEFAP7XuBZ0AQBQmLzwwgsKCwvL0l65cuUcz2PHjh0aN26cmjVrpgoVKjgN++GHH261xJs2efJklShRQr169SqwGvJK9erV9Z///MepbdSoUfLx8dE///nPAqoKeW3Dhg06c+aMXnzxRbVo0aKgy/mfsXr1ao0bN069evVSQEBAQZcDALgFBH4AuEKbNm1Ut27d2zZ/d3f32zbv/yXBwcF65JFHnNpeeeUVlShRIks7/r5OnDghSXkaOs+dO8dZAn8jFy5csP3fTb6TAG4nTukHgFyaPXu26tSpI19fX/n5+SkiIkJvvvmmpEuXBnTu3FmSdM8991inlWdeO3r1NfyZp6Z/8cUXGjdunMqUKSNfX1899NBDSkpKUkpKioYMGaKgoCD5+Piod+/eSklJcapn2rRpuvfeexUUFCQPDw+Fh4frvffecxqnQoUK2r59u5YvX27VdGUdiYmJGjJkiEJDQ+Xh4aHKlSvr1VdfVUZGhtN8EhMT1atXL/n7+ysgIECxsbF/y9N+4+Pj5erqqnHjxmUZtnv3bjkcDr3zzjuSpISEBA0fPlwRERHy8fGRn5+f2rRpo19++eWGy7nWPRt69eqV5eyPjIwMTZo0STVq1JCnp6eCg4PVv39/nT592mm8jRs3KiYmRiVKlJCXl5fCwsLUp0+fG9bicDj0/PPPZ2mvUKGC01kfqampGjdunKpUqSJPT08VL15cjRs31uLFi52m27Vrlx566CEFBgbK09NTdevW1ddff51l/tu3b9e9994rLy8vlS1bVi+99FKW71V2mjVrptjYWEnS3XffLYfD4VTnnDlzVKdOHXl5eVk/9Pzxxx9O8+jVq5d8fHy0f/9+3XffffL19VX37t2vu9w//vhDffr0UXBwsDw8PFSjRg19/PHHTuNcvHhRY8aMUZ06deTv7y9vb281adJEy5YtyzK/jIwMvfnmm4qIiJCnp6dKliyp1q1bZ3st+pdffqk777zTWu533313w+105d+Ql19+WWXLlpWnp6eaN2+uffv2ZRl/3bp1at26tfz9/VW0aFFFR0dr1apV1vDnn39eI0aMkCSFhYVZfy8OHjyojh076q677nKaX/v27eVwOJw++3Xr1snhcGjRokVW2++//67OnTsrMDBQRYsWVYMGDfTf//4323WZPXu2nnvuOZUpU0ZFixZVcnJytut++vRp1atXT2XLltXu3btvuK2utHLlSnXu3FnlypWTh4eHQkNDNXToUP3111/WONOmTZPD4dCWLVuyTD9+/Hi5uLg4fedutG2lS9vX4XBox44d+sc//qFixYqpcePGuaodAHKDI/wAcIWkpCT9+eefTm0Oh0PFixeXJC1evFjdunVT8+bN9eqrr0qSdu7cqVWrVumpp55S06ZN9eSTT+qtt97S//3f/6l69eqSZP33WiZMmCAvLy+NHDlS+/bt09tvvy03NzcVKVJEp0+f1vPPP6+1a9dq+vTpCgsL05gxY6xp33vvPdWoUUP333+/XF1d9c033+iJJ55QRkaGBg4cKEmaNGmSBg8e7HTKe3BwsCTp/Pnzio6O1h9//KH+/furXLlyWr16tUaNGqXjx49r0qRJkiRjjDp06KCff/5Zjz/+uKpXr64FCxZYoezvJDg4WNHR0friiy80duxYp2Gff/65XFxcrB9ufv/9d3355Zfq3LmzwsLCFB8frw8++EDR0dHasWOHSpcunSc19e/fX9OnT1fv3r315JNP6sCBA3rnnXe0ZcsWrVq1Sm5ubjpx4oRatWqlkiVLauTIkQoICNDBgwc1f/78PKlBuhRIJkyYoH79+qlevXpKTk7Wxo0btXnzZrVs2VLSpRDfqFEjlSlTRiNHjpS3t7e++OILPfDAA5o3b54efPBBSVJcXJzuuecepaWlWeNNmTJFXl5eN6zjn//8p+644w5NmTLFutSmUqVKkmRtp7vvvlsTJkxQfHy83nzzTa1atUpbtmxxOiMgLS1NMTExaty4sf7973+raNGi11xmfHy8GjRoYN1LomTJklq0aJH69u2r5ORkDRkyRJKUnJysDz/8UN26ddOjjz6qM2fO6KOPPlJMTIzWr1+vyMhIa559+/bV9OnT1aZNG/Xr109paWlauXKl1q5d63Q20c8//6z58+friSeekK+vr9566y116tRJhw8ftv7+XM8rr7yiIkWKaPjw4UpKStJrr72m7t27a
926ddY4S5cuVZs2bVSnTh2NHTtWRYoUsX4wXLlyperVq6eOHTtqz549+uyzzzRx4kSVKFFCklSyZEk1adJEX331lZKTk+Xn5ydjjFatWqUiRYpo5cqVuv/++yVdCtNFihRRo0aNrO3asGFDnT9/Xk8++aSKFy+uGTNm6P7779fcuXOt70umF198Ue7u7ho+fLhSUlKyPcL/559/qmXLlkpISNDy5cut70ZOzZkzR+fPn9eAAQNUvHhxrV+/Xm+//baOHj2qOXPmSJIeeughDRw4UDNnzlTt2rWdpp85c6aaNWumMmXK5HjbXqlz586qUqWKxo8fL2NMrmoHgFwxAAAzbdo0Iynbl4eHhzXeU089Zfz8/ExaWto15zVnzhwjySxbtizLsOjoaBMdHW29X7ZsmZFk7rzzTnPx4kWrvVu3bsbhcJg2bdo4TR8VFWXKly/v1Hb+/Pksy4mJiTEVK1Z0aqtRo4bTsjO9+OKLxtvb2+zZs8epfeTIkcbFxcUcPnzYGGPMl19+aSSZ1157zRonLS3NNGnSxEgy06ZNyzLvK2Wu65w5c645jiQzcODAbIddb7teb/2u5YMPPjCSzLZt25zaw8PDzb333mu9v3DhgklPT3ca58CBA8bDw8O88MILTm1Xb4erP+9MsbGxTp/jypUrjSQzc+ZMp/G+++47p/YFCxYYSWbDhg05Xs9MkszYsWOztJcvX97ExsZa72vVqmXatm173Xk1b97cREREmAsXLlhtGRkZpmHDhqZKlSpW25AhQ4wks27dOqvtxIkTxt/f30gyBw4cuO5yMvvllet78eJFExQUZO68807z119/We0LFy40ksyYMWOsttjYWCPJjBw58rrLydS3b19TqlQp8+effzq1d+3a1fj7+1t9LS0tzaSkpDiNc/r0aRMcHGz69OljtS1dutRIMk8++WSWZWVkZFj/lmTc3d3Nvn37rLZffvnFSDJvv/32dWvO7FfVq1d3qunNN990+n5nZGSYKlWqmJiYGKdlnz9/3oSFhZmWLVtabf/617+y/Xw2bNhgJJlvv/3WGGPMr7/+aiSZzp07m/r161vj3X///aZ27drW+8zvwcqVK622M2fOmLCwMFOhQgWrf2WuS8WKFbP8Xbvyu3D8+HFTo0YNU7FiRXPw4MHrbp8r53vl347s/m5OmDDBOBwOc+jQIautW7dupnTp0k5/AzZv3uzU13OzbceOHWskmW7dut2wbgDIC5zSDwBXePfdd7V48WKn15WnpQYEBOjcuXNZTm++VT179pSbm5v1vn79+jLGZDlVu379+jpy5IjS0tKstiuPlmaeoRAdHa3ff/9dSUlJN1z2nDlz1KRJExUrVkx//vmn9WrRooXS09O1YsUKSdK3334rV1dXDRgwwJrWxcVFgwcPvun1LkgdO3aUq6urPv/8c6vtt99+044dO/Twww9bbR4eHipS5NLuMj09XadOnZKPj4/uuOMObd68OU9qmTNnjvz9/dWyZUunz6BOnTry8fGxThXPPHK9cOFCpaam5smyrxYQEKDt27dr79692Q5PSEjQ0qVL1aVLF505c8aq9dSpU4qJidHevXut05y//fZbNWjQwOnoZsmSJW94Wv31bNy4USdOnNATTzwhT09Pq71t27aqVq1altPEJTl9Z6/FGKN58+apffv2MsY4fQ4xMTFKSkqyPm8XFxfrqHNGRoYSEhKUlpamunXrOn0n5s2bJ4fDkeUsEklZniDRokULp6PUNWvWlJ+fn37//fcb1i5JvXv3djoS3qRJE0mypt+6dav27t2rf/zjHzp16pS1bufOnVPz5s21YsWKG15qUbt2bfn4+Fh/E1auXKmyZcuqZ8+e2rx5s86fPy9jjH7++Wdr+dKl70G9evWcTl338fHRY489poMHD2rHjh1Oy4mNjb3mWSBHjx5VdHS0UlNTtWLFCpUvXz5H2+dqV87/3Llz+vPPP9WwYUMZY5xO4e/Zs6eOHTvmdLnGzJkz5eXlpU6dOkm6uW37+OOP31TdAJBbnNIPAFeoV6/edW/a98QTT+iLL75QmzZtVKZMGbVq1UpdunRR69atb2m55cqVc3rv7+8vSQoNDc3SnpGRoaSkJOs031WrVmns2LFas2aNzp8/7zR+UlKSNa9r2bt3r3799VeVLFky2+GZN047dOiQSpUqJR8fH6fhd9xxxw3WLm/l1aP2SpQooebNm+uLL77Qiy++KOnS6fyurq7q2LGjNV7mNdiTJ0/WgQMHlJ6ebg3LyanWObF3714lJSUpKCgo2+GZn0F0dLQ6deqkcePGaeLEiWrWrJkeeOAB/eMf/5CHh0ee1PLCCy+oQ4cOqlq1qu688061bt1aPXr0UM2aNSVJ+/btkzFGo0eP1ujRo69Zb5kyZXTo0CHVr18/y/Bb+c4cOnTomvOoVq2afv75Z6c2V1dXlS1b9obzPXnypBITEzVlyhRNmTIl23EyPwdJmjFjhl5//XXt2rXL6ceXK5/ysX//fpUuXVqBgYE3XP7VfwMkqVixYlnu4ZDT6YsVKyZJ1vSZP+Bc7xKcpKQka7rsuLi4KCoqSitXrpR0KfA3adJEjRs3Vnp6utauXavg4GAlJCQ4Bf5rfQ8yL3U6dOiQ7rzzTqs9uyelZOrRo4dcXV21c+dOhYSEXHO8Gzl8+LDGjBmjr7/+Oss2vvKH0pYtW6pUqVKaOXOmmjdvroyMDH322Wfq0KGDfH19Jd3ctr3eOgJAXiLwA0AuBAUFaevWrfr++++1aNEiLVq0SNOmTVPPnj01Y8aMm56vi4tLrtrN/7/mc//+/WrevLmqVaumN954Q6GhoXJ3d9e3336riRMn5ujmaBkZGWrZsqWeeeaZbIdXrVo1h2tx6zw8PJxumnWlzB8zrjyqe6u6du2q3r17a+vWrYqMjNQXX3yh5s2bW9ctS5duzjV69Gj16dNHL774ogIDA1WkSBENGTLkhtvX4XBke33ulT8aSJc+g6CgIM2cOTPb+WT+GONwODR37lytXbtW33zzjb7//nv16dNHr7/+utauXZvlx5icuLqWpk2bav/+/frqq6/0ww8/6MMPP9TEiRP1/vvvq1+/ftY6Dx8+XDExMdnOMzePsbzdrjxD43oy1+uRRx65ZnDL/NHj008/Va9evfTAAw9oxIgRCgoKkouLiyZMmKD9+/ffVJ036uu3On3m+v3rX/9yusfAlXLy/WncuLFefvllXbhwQStXrtQ///lPBQQE6M4779TKlSute4NcGfhz63r3eOjYsaM++eQTvfnmm5owYcJNzT89Pd26/v/ZZ59VtWrV5O3trT/++EO9evVy6tcuLi76xz/+oalTp2ry5MlatWqVjh075vQ0kJvZtjm5jwUA5AUCPwDkkru7u9q3b6/27dsrIyNDTzzxhD744AONHj1alStXzrMj0DnxzTffKCUlRV9//bXTEb7s7hZ+rboqVaqks2fP3vA55+XLl9eSJUt09uxZp/95ze3dsW+0jGvNL7P9Zk/hzc4DDzyg/v37W6f179mzR6NGjXIaZ+7cubrnnnv0
0UcfObUnJiY6/TCQnWLFimV7SnbmUepMlSpV0o8//qhGjRrlKAg0aNBADRo00Msvv6xZs2ape/fumj17tvr163fdWq5+osLFixd1/PjxLOMGBgaqd+/e6t27t86ePaumTZvq+eefV79+/VSxYkVJkpubW46+M9ldGnAr35nMz3/37t269957s8z3Zr8fJUuWlK+vr9LT02+4XnPnzlXFihU1f/58p3519an7lSpV0vfff6+EhIQcHeW/nTIvF/Dz87vh+l3vb1iTJk108eJFffbZZ/rjjz+sYN+0aVMr8FetWtUK/tK1+/WuXbus4Tk1ePBgVa5cWWPGjJG/v79GjhyZ42kzbdu2TXv27NGMGTPUs2dPq/1al2r17NlTr7/+ur755hstWrRIJUuWdPqxKzfbFgDyG9fwA0AunDp1yul9kSJFrKN+mY/Ly3yecn48ri7zqN6VRwGTkpI0bdq0LON6e3tnW1OXLl20Zs0aff/991mGJSYmWvcLuO+++5SWlub0yL/09HS9/fbbt7oalvvuu09r167Vpk2bstQxc+ZMRUZG3tJpvFcLCAhQTEyMvvjiC82ePVvu7u564IEHnMZxcXHJcpR1zpw5WR4Bl51KlSpp165dOnnypNX2yy+/ZHlUV5cuXZSenm5dWnCltLQ063M7ffp0lloyjyhe/bjG7GrJvPY605QpU7Ic4b/6O+7j46PKlStb8w8KClKzZs30wQcfZPtjwZXrmvl5rl+/3mn4tc5kyIm6desqKChI77//vtM6L1q0SDt37lTbtm1var4uLi7q1KmT5s2bp99++y3L8CvXK7t+t27dOq1Zs8Zpmk6dOskYk+3jH3N65D6v1KlTR5UqVdK///1vnT17NsvwK9fven/D6tevLzc3N7366qsKDAxUjRo1JF36IWDt2rVavnx5lqP79913n9avX++0fc6dO6cpU6aoQoUKCg8Pz9W6jB49WsOHD9eoUaOyPII0J7L7/Iwx1uNVr1azZk3VrFlTH374oebNm6euXbvK1fXyMbPcbFsAyG8c4QeAKyxatMg66nSlhg0bqmLFiurXr58SEhJ07733qmzZsjp06JDefvttRUZGWtejRkZGysXFRa+++qqSkpLk4eGhe++995rXZ9+KVq1aWWcc9O/fX2fPntXUqVMVFBSUJYzVqVNH7733nl566SVVrlxZQUFBuvfeezVixAh9/fXXateunXr16qU6dero3Llz2rZtm+bOnauDBw+qRIkSat++vRo1aqSRI0fq4MGDCg8P1/z583N0Y8ArzZs3L9ttHBsbq5EjR2rOnDlq2rSp+vfvr2rVqunYsWOaPn26jh8/nu0PGbfq4Ycf1iOPPKLJkycrJibG6ZFuktSuXTu98MIL6t27txo2bKht27Zp5syZ1pHu6+nTp4/eeOMNxcTEqG/fvjpx4oTef/991ahRw+nZ4tHR0erfv78mTJigrVu3qlWrVnJzc9PevXs1Z84cvfnmm3rooYc0Y8YMTZ48WQ8++KAqVaqkM2fOaOrUqfLz89N999133Vr69eunxx9/XJ06dVLLli31yy+/6Pvvv89ylkJ4eLiaNWumOnXqKDAwUBs3btTcuXM1aNAga5x3331XjRs3VkREhB599FFVrFhR8fHxWrNmjY4ePapffvlFkvTMM8/oP//5j1q3bq2nnnrKeixf+fLl9euvv95w+2UnM2z27t1b0dHR6tatm/VYvgoVKmjo0KE3NV/p0qPtli1bpvr16+vRRx9VeHi4EhIStHnzZv34449KSEiQdOk7MX/+fD344INq27atDhw4oPfff1/h4eFOge+ee+5Rjx499NZbb2nv3r1q3bq1MjIytHLlSt1zzz1O2/R2K1KkiD788EO1adNGNWrUUO/evVWmTBn98ccfWrZsmfz8/PTNN99IuvS3Qrr0aMSuXbvKzc1N7du3l7e3t4oWLao6depo7dq1at++vXU2QNOmTXXu3DmdO3cuS+AfOXKkPvvsM7Vp00ZPPvmkAgMDNWPGDB04cEDz5s3L0SUXV/vXv/6lpKQkDRw4UL6+vk6n2N9ItWrVVKlSJQ0fPlx//PGH/Pz8NG/evOveL6Fnz54aPny4JGVZVm62LQDku/x+LAAAFEbXeyyfrnj80ty5c02rVq1MUFCQcXd3N+XKlTP9+/c3x48fd5rf1KlTTcWKFY2Li4vT46Cu9Vi+qx9Vl93jyIy5/EinkydPWm1ff/21qVmzpvH09DQVKlQwr776qvn444+zPFYrLi7OtG3b1vj6+hpJTnWcOXPGjBo1ylSuXNm4u7ubEiVKmIYNG5p///vfTo8LPHXqlOnRo4fx8/Mz/v7+pkePHmbLli25eizftV6Zj+w6evSo6devnylTpoxxdXU1gYGBpl27dmbt2rXXnX9uH8uXKTk52Xh5eRlJ5tNPP80y/MKFC+bpp582pUqVMl5eXqZRo0ZmzZo1WT7L7B7LZ4wxn376qalYsaJxd3c3kZGR5vvvv8/yWL5MU6ZMMXXq1DFeXl7G19fXREREmGeeecYcO3bMGHPpcWDdunUz5cqVMx4eHiYoKMi0a9fObNy48YbrmZ6ebp599llTokQJU7RoURMTE2P27duX5bF8L730kqlXr54JCAgwXl5eplq1aubll192+h4YY8z+/ftNz549TUhIiHFzczNlypQx7dq1M3PnznUa79dffzXR0dHG09PTlClTxrz44ovmo48+uunH8mX6/PPPTe3atY2Hh4cJDAw03bt3N0ePHnUaJzY21nh7e99w21wpPj7eDBw40ISGhho3NzcTEhJimjdvbqZMmWKNk5GRYcaPH2/Kly9vPDw8TO3atc3ChQuz/VzT0tLMv/71L1OtWjXj7u5uSpYsadq0aWM2bdpkjaNrPI7y6s8mO9f6G3Kt7+OWLVtMx44dTfHixY2Hh4cpX7686dKli1myZInTeC+++KIpU6aMKVKkSJbPasSIEUaSefXVV52mqVy5spFk9u/fn6XO/fv3m4ceesgEBAQYT09PU69ePbNw4cIcrYsx2X8X0tPTTbdu3Yyrq6v58ssvb7iNrnws344dO0yLFi2Mj4+PKVGihHn00UetRyFm97fs+PHjxsXFxVStWvWay8nJts3ubzgA3E4OY/L5nDIAAADgb+TPP/9UqVKlNGbMmGs+nQIACiOu4QcAAACuY/r06UpPT1ePHj0KuhQAyBWu4QcAAACysXTpUu3YsUMvv/yyHnjgAVWoUKGgSwKAXOGUfgAAACAbzZo10+rVq9WoUSN9+umnKlOmTEGXBAC5QuAHAAAAAMCGuIYfAAAAAAAbIvADAAAAAGBD3LRPUkZGho4dOyZfX185HI6CLgcAAAAAYHPGGJ05c0alS5dWkSK351g8gV/SsWPHFBoaWtBlAAAAAAD+xxw5ckRly5a9LfMm8Evy9fWVdGlD+/n5FXA1AAAAAAC7S05OVmhoqJVHbwcCv2Sdxu/n50fgBwAAAADkm9t5WTk37QMAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAA
AAABsiMAPAAAAAIANFXjg/+OPP/TII4+oePHi8vLyUkREhDZu3GgNN8ZozJgxKlWqlLy8vNSiRQvt3bvXaR4JCQnq3r27/Pz8FBAQoL59++rs2bP5vSoAAAAAABQaBRr4T58+rUaNGsnNzU2LFi3Sjh079Prrr6tYsWLWOK+99preeustvf/++1q3bp28vb0VExOjCxcuWON0795d27dv1+LFi7Vw4UKtWLFCjz32WEGsEgAAAAAAhYLDGGMKauEjR47UqlWrtHLlymyHG2NUunRpPf300xo+fLgkKSkpScHBwZo+fbq6du2qnTt3Kjw8XBs2bFDdunUlSd99953uu+8+HT16VKVLl75hHcnJyfL391dSUpL8/PzybgUBAAAAAMhGfuTQAj3C//XXX6tu3brq3LmzgoKCVLt2bU2dOtUafuDAAcXFxalFixZWm7+/v+rXr681a9ZIktasWaOAgAAr7EtSixYtVKRIEa1bty7b5aakpCg5OdnpBQAAAACAnRRo4P/999/13nvvqUqVKvr+++81YMAAPfnkk5oxY4YkKS4uTpIUHBzsNF1wcLA1LC4uTkFBQU7DXV1dFRgYaI1ztQkTJsjf3996hYaG5vWqAQAAAABQoAo08GdkZOiuu+7S+PHjVbt2bT322GN69NFH9f7779/W5Y4aNUpJSUnW68iRI7d1eQAAAAAA5LcCDfylSpVSeHi4U1v16tV1+PBhSVJISIgkKT4+3mmc+Ph4a1hISIhOnDjhNDwtLU0JCQnWOFfz8PCQn5+f0wsAAAAAADsp0MDfqFEj7d6926ltz549Kl++vCQpLCxMISEhWrJkiTU8OTlZ69atU1RUlCQpKipKiYmJ2rRpkzXO0qVLlZGRofr16+fDWgAAAAAAUPi4FuTChw4dqoYNG2r8+PHq0qWL1q9frylTpmjKlCmSJIfDoSFDhuill15SlSpVFBYWptGjR6t06dJ64IEHJF06I6B169bWpQCpqakaNGiQunbtmqM79AMAAAAAYEcF+lg+SVq4cKFGjRqlvXv3KiwsTMOGDdOjjz5qDTfGaOzYsZoyZYoSExPVuHFjTZ48WVWrVrXGSUhI0KBBg/TNN9+oSJEi6tSpk9566y35+PjkqIa/1WP5HI6CriD/FOxXEwAAAABum/zIoQUe+AsDAn8hxVcTAAAAgE3lRw4t0Gv4AQAAAADA7UHgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYUIEG/ueff14Oh8PpVa1aNWv4hQsXNHDgQBUvXlw+Pj7q1KmT4uPjneZx+PBhtW3bVkWLFlVQUJBGjBihtLS0/F4VAAAAAAAKFdeCLqBGjRr68ccfrfeurpdLGjp0qP773/9qzpw58vf316BBg9SxY0etWrVKkpSenq62bdsqJCREq1ev1vHjx9WzZ0+5ublp/Pjx+b4uAAAAAAAUFgUe+F1dXRUSEpKlPSkpSR999JFmzZqle++9V5I0bdo0Va9eXWvXrlWDBg30ww8/aMeOHfrxxx8VHBysyMhIvfjii3r22Wf1/PPPy93dPb9XBwAAAACAQqHAr+Hfu3evSpcurYoVK6p79+46fPiwJGnTpk1KTU1VixYtrHGrVaumcuXKac2aNZKkNWvWKCIiQsHBwdY4MTExSk5O1vbt26+5zJSUFCUnJzu9AAAAAACwkwIN/PXr19f06dP13Xff6b333tOBAwfUpEkTnTlzRnFxcXJ3d1dAQIDTNMHBwYqLi5MkxcXFOYX9zOGZw65lwoQJ8vf3t16hoaF5u2IAAAAAABSwAj2lv02bNta/a9asqfr166t8+fL64osv5OXldduWO2rUKA0bNsx6n5ycTOgHAAAAANhKgZ/Sf6WAgABVrVpV+/btU0hIiC5evKjExESnceLj461r/kNCQrLctT/zfXb3Bcjk4eEhPz8/pxcAAAAAAHZSqAL/2bNntX//fpUqVUp16tSRm5ublixZYg3fvXu3Dh8+rKioKElSVFSUtm3bphMnTljjLF68WH5+fgoPD8/3+gEAAAAAKCwK9JT+4cOHq3379ipfvryOHTumsWPHysXFRd26dZO/v7/69u2rYcOGKTAwUH5+fho8eLCioqLUoEEDSVKrVq0UHh6uHj166LXXXlNcXJyee+45DRw4UB4eHgW5agAAAAAAFKgCDfxHjx5Vt27ddOrUKZUsWVKNGzfW2rVrVbJkSUnSxIkTVaRIEXXq1EkpKSmKiYnR5MmTreldXFy0cOFCDRgwQFFRUfL29lZsbKxeeOGFglolAAAAAAAKBYcxxhR0EQUtOTlZ/v7+SkpKKvzX8zscBV1B/uGrCQAAAMCm8iOHFqpr+AEAAAAAQN4g8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbKjQBP5XXnlFDodDQ4YMsdouXLiggQMHqnjx4vLx8VGnTp0UHx/vNN3hw4fVtm1bFS1aVEFBQRoxYoTS0t
LyuXoAAAAAAAqXQhH4N2zYoA8++EA1a9Z0ah86dKi++eYbzZkzR8uXL9exY8fUsWNHa3h6erratm2rixcvavXq1ZoxY4amT5+uMWPG5PcqAAAAAABQqBR44D979qy6d++uqVOnqlixYlZ7UlKSPvroI73xxhu69957VadOHU2bNk2rV6/W2rVrJUk//PCDduzYoU8//VSRkZFq06aNXnzxRb377ru6ePFiQa0SAAAAAAAFrsAD/8CBA9W2bVu1aNHCqX3Tpk1KTU11aq9WrZrKlSunNWvWSJLWrFmjiIgIBQcHW+PExMQoOTlZ27dvv+YyU1JSlJyc7PQCAAAAAMBOXAty4bNnz9bmzZu1YcOGLMPi4uLk7u6ugIAAp/bg4GDFxcVZ41wZ9jOHZw67lgkTJmjcuHG3WD0AAAAAAIVXgR3hP3LkiJ566inNnDlTnp6e+brsUaNGKSkpyXodOXIkX5cPAAAAAMDtVmCBf9OmTTpx4oTuuusuubq6ytXVVcuXL9dbb70lV1dXBQcH6+LFi0pMTHSaLj4+XiEhIZKkkJCQLHftz3yfOU52PDw85Ofn5/QCAAAAAMBOCizwN2/eXNu2bdPWrVutV926ddW9e3fr325ublqyZIk1ze7du3X48GFFRUVJkqKiorRt2zadOHHCGmfx4sXy8/NTeHh4vq8TAAAAAACFRYFdw+/r66s777zTqc3b21vFixe32vv27athw4YpMDBQfn5+Gjx4sKKiotSgQQNJUqtWrRQeHq4ePXrotddeU1xcnJ577jkNHDhQHh4e+b5OAAAAAAAUFgV6074bmThxoooUKaJOnTopJSVFMTExmjx5sjXcxcVFCxcu1IABAxQVFSVvb2/FxsbqhRdeKMCqAQAAAAAoeA5jjCnoIgpacnKy/P39lZSUVPiv53c4CrqC/MNXEwAAAIBN5UcOLbBr+AEAAAAAwO1zU6f07927V8uWLdOJEyeUkZHhNGzMmDF5UhgAAAAAALh5uQ78U6dO1YABA1SiRAmFhITIccUp5g6Hg8APAEBB4bIvwBl9AsD/uFwH/pdeekkvv/yynn322dtRDwAAAAAAyAO5vob/9OnT6ty58+2oBQAAAAAA5JFcB/7OnTvrhx9+uB21AAAAAACAPJLrU/orV66s0aNHa+3atYqIiJCbm5vT8CeffDLPigMAAAAAADfHYUzu7vARFhZ27Zk5HPr9999vuaj8lh/PP8wz3HwGAHAt7CMAZ/QJAIVYfuTQXB/hP3DgwO2oAwAAAAAA5KFcX8N/JWOMcnmCAAAAAAAAyAc3Ffg/+eQTRUREyMvLS15eXqpZs6b+85//5HVtAAAAAADgJuX6lP433nhDo0eP1qBBg9SoUSNJ0s8//6zHH39cf/75p4YOHZrnRQIAAAAAgNy5qZv2jRs3Tj179nRqnzFjhp5//vm/5TX+3LSvkOJyEQDIHfYRgDP6BIBCLD9yaK5P6T9+/LgaNmyYpb1hw4Y6fvx4nhQFAAAAAABuTa4Df+XKlfXFF19kaf/8889VpUqVPCkKAAAAAADcmlxfwz9u3Dg9/PDDWrFihXUN/6pVq7RkyZJsfwgAAAAAAAD5L9dH+Dt16qR169apRIkS+vLLL/Xll1+qRIkSWr9+vR588MHbUSMAAAAAAMilXN+0z464aV8hxVcTAHKHfQTgjD4BoBDLjxyao1P6k5OTrQKSk5OvO26hD8wAAAAAAPwPyFHgL1asmI4fP66goCAFBATIkc2vpcYYORwOpaen53mRAAAAAAAgd3IU+JcuXarAwEBJ0rJly25rQQAAAAAA4NblKPBHR0db/w4LC1NoaGiWo/zGGB05ciRvqwMAAAAAADcl13fpDwsL08mTJ7O0JyQkKCwsLE+KAgAAAAAAtybXgT/zWv2rnT17Vp6ennlSFAAAAAAAuDU5OqVfkoYNGyZJcjgcGj16tIoWLWoNS09P17p16xQZGZnnBQIAAAAAgNzLceDfsmWLpEtH+Ldt2yZ3d3drmLu7u2rVqqXhw4fnfYUAAAAAACDXchz4M+/O37t3b7355pvy8/O7bUUBAAAAAIBbk+PAn2natGm3ow4AAAAAAJCHch34JWnjxo364osvdPjwYV28eNFp2Pz58/OkMAAAAABAHsnmxuu2ZUxBV1Bo5Pou/bNnz1bDhg21c+dOLViwQKmpqdq+fbuWLl0qf3//21EjAAAAAADIpVwH/vHjx2vixIn65ptv5O7urjfffFO7du1Sly5dVK5cudtRIwAAAAAAyKVcB/79+/erbdu2ki7dnf/cuXNyOBwaOnSopkyZkucFAgAAAACA3Mt14C9WrJjOnDkjSSpTpox+++03SVJiYqLOnz+ft9UBAAAAAICbkuub9jVt2lSLFy9WRESEOnfurKeeekpLly7V4sWL1bx589tRIwAAAAAAyKVcB/533nlHFy5ckCT985//lJubm1avXq1OnTrpueeey/MCAQAAAABA7jmM4ZkFycnJ8vf3V1JSkvz8/Aq6nOvjcRoAgGthHwE4o08Al9EfCp38yKG5voZ/8+bN2rZtm/X+q6++0gMPPKD/+7//08WLF/O0OAAAAAAAcHNyHfj79++vPXv2SJJ+//13PfzwwypatKjmzJmjZ555Js8LBAAAAAAAuZfrwL9nzx5FRkZKkubMmaPo6GjNmjVL06dP17x58/K6PgAAAAAAcBNyHfiNMcrIyJAk/fjjj7rvvvskSaGhofrzzz/ztjoAAAAAAHBTch3469atq5deekn/+c9/tHz5crVt21aSdODAAQUHB+d5gQAAAAAAIPdyHfgnTZqkzZs3a9CgQfrnP/+pypUrS5Lmzp2rhg0b5nmBAAAAAAAg9/LssXwXLlyQi4uL3Nzc8mJ2+YrH8hVSf5PHaQBAocE+AnBGnwAuoz8UOvmRQ13zakaenp55NSsAAAAAAHCLchT4AwMDtWfPHpUoUULFihWT4zq/DiUkJORZcQAAAAAA4ObkKPBPnDhRvr6+ki5dww8AAAAAAAq3PLuG/++Ma/gLKb6aAJA77CMAZ/QJ4DL6Q6FTaK7hT05OzvEMC31gBgAAAADgf0COAn9AQMB1r9uXJGOMHA6H0tPT86QwAAAAAABw83IU+JctW3a76wAAAAAAAHkoR4E/Ojr6dtcBAAAAAADyUJGcjPTrr78qIyPD+vf1Xrnx3nvvqWbNmvLz85Ofn5+ioqK0aNEia/iFCxc0cOBAFS9eXD4+PurUqZPi4+Od5nH48GG1bdtWRYsWVVBQkEaMGKG0tLRc1QEAAAAAgN3k6Ah/ZGSk4uLiFBQUpMjISDkcDmV3c//cXsNftmxZvfLKK6pSpYqMMZoxY4Y6dOigLVu2qEaNGho6dKj++9//as6cOfL399egQYPUsWNHrVq1SpKUnp6utm3bKiQkRKtXr9bx48fVs2dPubm5afz48TmuAwAAAAAAu8nRY/kOHTqkcuXKyeFw6NChQ9cdt3z58rdUUGBgoP71r3/poYceUsmSJTVr1iw99NBDkqRdu3apevXqWrNmjRo0aKBFixapX
bt2OnbsmIKDgyVJ77//vp599lmdPHlS7u7uOVomj+UrpP4mj9MAgEKDfQTgjD4BXEZ/KHTyI4fm6JT+8uXLW3fpP3TokMqUKaPy5cs7vcqUKXPDHwOuJz09XbNnz9a5c+cUFRWlTZs2KTU1VS1atLDGqVatmsqVK6c1a9ZIktasWaOIiAgr7EtSTEyMkpOTtX379msuKyUlRcnJyU4vAAAAAADsJEeB/0r33HOPEhISsrQnJSXpnnvuyXUB27Ztk4+Pjzw8PPT4449rwYIFCg8PV1xcnNzd3RUQEOA0fnBwsOLi4iRJcXFxTmE/c3jmsGuZMGGC/P39rVdoaGiu6wYAAAAAoDDLdeA3xlhH+6906tQpeXt757qAO+64Q1u3btW6des0YMAAxcbGaseOHbmeT26MGjVKSUlJ1uvIkSO3dXkAAAAAAOS3HN20T5I6duwo6dKN+Xr16iUPDw9rWHp6un799Vc1bNgw1wW4u7urcuXKkqQ6depow4YNevPNN/Xwww/r4sWLSkxMdDrKHx8fr5CQEElSSEiI1q9f7zS/zLv4Z46THQ8PD6f6AQAAAACwmxwf4c88/d0YI19fX6dT4kNCQvTYY4/p008/veWCMjIylJKSojp16sjNzU1Lliyxhu3evVuHDx9WVFSUJCkqKkrbtm3TiRMnrHEWL14sPz8/hYeH33ItAAAAAAD8XeX4CP+0adMkSRUqVNDw4cNv6vT9q40aNUpt2rRRuXLldObMGc2aNUs//fSTvv/+e/n7+6tv374aNmyYAgMD5efnp8GDBysqKkoNGjSQJLVq1Urh4eHq0aOHXnvtNcXFxem5557TwIEDOYIPAAAAAPifluPAn2ns2LF5tvATJ06oZ8+eOn78uPz9/VWzZk19//33atmypSRp4sSJKlKkiDp16qSUlBTFxMRo8uTJ1vQuLi5auHChBgwYoKioKHl7eys2NlYvvPBCntUIAAAAAMDfkcOYnD2ksFixYtnerM/f319Vq1bV8OHDraD+d5Mfzz/MMzw/EwBwLewjAGf0CeAy+kOhkx85NMdH+CdNmpRte2JiojZt2qR27dpp7ty5at++fV7VBgAAAAAAblKOA39sbOx1h0dGRmrChAkEfgAAAAAACoEc36X/Rtq1a6ddu3bl1ewAAAAAAMAtyLPAn5KSInd397yaHQAAAAAAuAV5Fvg/+ugjRUZG5tXsAAAAAADALcjxNfzDhg3Ltj0pKUmbN2/Wnj17tGLFijwrDAAAAAAA3LwcB/4tW7Zk2+7n56eWLVtq/vz5CgsLy7PCAAAAAADAzctx4F+2bNntrAMAAAAAAOShPLuGHwAAAAAAFB4EfgAAAAAAbIjADwAAAACADRH4AQAAAACwoRwH/j59+ujMmTO3sxYAAAAAAJBHchz4Z8yYob/++ut21gIAAAAAAPJIjgO/MeZ21gEAAAAAAPKQa25GPnPmjDw9Pa87jp+f3y0VBAAAAAAAbl2uAn/VqlWvOcwYI4fDofT09FsuCgAAAAAA3JpcBf65c+cqMDDwdtUCAAAAAADySK4Cf6NGjRQUFHS7agEAAAAAAHkkxzftAwAAAAAAfx85Dvzly5eXi4vL7awFAAAAAADkkRyf0n/gwIHbWQcAAAAAAMhDOQ78xYoVk8PhyNLu7++vqlWravjw4WrZsmWeFgcAAAAAAG5OjgP/xIkTsw38iYmJ2rRpk9q1a6e5c+eqffv2eVogAAAAAADIvRwH/l69el13eGRkpCZMmEDgBwAAAACgEMizu/S3a9dOu3btyqvZAQAAAACAW5BngT8lJUXu7u55NTsAAAAAAHAL8izwf/TRR4qMjMyr2QEAAAAAgFuQ42v4hw0blm17UlKSNm/erD179mjFihV5VhgAAAAAALh5OQ78W7Zsybbdz89PLVu21Pz58xUWFpZnhQEAAAAAgJuX48C/bNmy6w4/evSoHnvsMU2ZMuWWiwIAAAAAALcmz67hP3XqlD766KO8mh0AAAAAALgFeRb4AQAAAABA4UHgBwAAAADAhgj8AAAAAADYUI5v2texY8frDk9MTLzVWgAAAAAAQB7JceD39/e/4fCePXveckEAAAAAAODW5TjwT5s27XbWAQAAAAAA8hDX8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2VKCBf8KECbr77rvl6+uroKAgPfDAA9q9e7fTOBcuXNDAgQNVvHhx+fj4qFOnToqPj3ca5/Dhw2rbtq2KFi2qoKAgjRgxQmlpafm5KgAAAAAAFCoFGviXL1+ugQMHau3atVq8eLFSU1PVqlUrnTt3zhpn6NCh+uabbzRnzhwtX75cx44dU8eOHa3h6enpatu2rS5evKjVq1drxowZmj59usaMGVMQqwQAAAAAQKHgMMaYgi4i08mTJxUUFKTly5eradOmSkpKUsmSJTVr1iw99NBDkqRdu3apevXqWrNmjRo0aKBFixapXbt2OnbsmIKDgyVJ77//vp599lmdPHlS7u7uN1xucnKy/P39lZSUJD8/v9u6jrfM4SjoCvJP4flqAsDfA/sIwBl9AriM/lDo5EcOLVTX8CclJUmSAgMDJUmbNm1SamqqWrRoYY1TrVo1lStXTmvWrJEkrVmzRhEREVbYl6SYmBglJydr+/bt2S4nJSVFycnJTi8AAAAAAOyk0AT+jIwMDRkyRI0aNdKdd94pSYqLi5O7u7sCAgKcxg0ODlZcXJw1zpVhP3N45rDsTJgwQf7+/tYrNDQ0j9cGAAAAAICCVWgC/8CBA/Xbb79p9uzZt31Zo0aNUlJSkvU6cuTIbV8mAAAAAAD5ybWgC5CkQYMGaeHChVqxYoXKli1rtYeEhOjixYtKTEx0OsofHx+vkJAQa5z169c7zS/zLv6Z41zNw8NDHh4eebwWAAAAAAAUHgV6hN8Yo0GDBmnBggVaunSpwsLCnIbXqVNHbm5uWrJkidW2e/duHT58WFFRUZKkqKgobdu2TSdOnLDGWbx4sfz8/BQeHp4/KwIAAAAAQCFToEf4Bw4cqFmzZumrr76Sr6+vdc29v7+/vLy85O/vr759+2rYsGEKDAyUn5+fBg8erKioKDVo0ECS1KpVK4WHh6tHjx567bXXFBcXp+eee04DBw7kKD4AAAAA4H9WgT6Wz3GNR0NMmzZNvXr1kiRduHBBTz/9tD777DOlpKQoJiZGkydPdjpd/9ChQxowYIB++ukneXt7KzY2Vq+88opcXXP2ewaP5Suk/iaP0wCAQoN9BOCMPgFcRn8odPIjhxZo4C8sCPyFFF9NAMgd9hGAM/oEcBn9odDJjxxaaO7SDwAAAAAA8g6BHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAA
AABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANEfgBAAAAALAhAj8AAAAAADZE4AcAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwUa+FesWKH27durdOnScjgc+vLLL52GG2M0ZswYlSpVSl5eXmrRooX27t3rNE5CQoK6d+8uPz8/BQQEqG/fvjp79mw+rgUAAAAAAIVPgQb+c+fOqVatWnr33XezHf7aa6/prbfe0vvvv69169bJ29tbMTExunDhgjVO9+7dtX37di1evFgLFy7UihUr9Nhjj+XXKgAAAAAAUCg5jDGmoIuQJIfDoQULFuiBBx6QdOnofunSpfX0009r+PDhkqSkpCQFBwdr+vTp6tq1q3bu3Knw8HBt2LBBdevWlSR99913uu+++3T06FGVLl06R8tOTk6Wv7+/kpKS5Ofnd1vWL884HAVdQf4pHF9NAPj7YB8BOKNPAJfRHwqd/MihhfYa/gMHDiguLk4tWrSw2vz9/VW/fn2tWbNGkrRmzRoFBARYYV+SWrRooSJFimjdunXXnHdKSoqSk5OdXgAAAAAA2EmhDfxxcXGSpODgYKf24OBga1hcXJyCgoKchru6uiowMNAaJzsTJkyQv7+/9QoNDc3j6gEAAAAAKFiFNvDfTqNGjVJSUpL1OnLkSEGXBAAAAABAniq0gT8kJESSFB8f79QeHx9vDQsJCdGJEyechqelpSkhIcEaJzseHh7y8/NzegEAAAAAYCeFNvCHhYUpJCRES5YssdqSk5O1bt06RUVFSZKioqKUmJioTZs2WeMsXbpUGRkZql+/fr7XDAAAAABAYeFakAs/e/as9u3bZ70/cOCAtm7dqsDAQJUrV05DhgzRSy+9pCpVqigsLEyjR49W6dKlrTv5V69eXa1bt9ajjz6q999/X6mpqRo0aJC6du2a4zv0AwAAAABgRwUa+Ddu3Kh77rnHej9s2DBJUmxsrKZPn65nnnlG586d02OPPabExEQ1btxY3333nTw9Pa1pZs6cqUGDBql58+YqUqSIOnXqpLfeeivf1wUAAAAAgMLEYczf5CGFt1F+PP8wz/D8TADAtbCPAJzRJ4DL6A+FTn7k0EJ7DT8AAAAAALh5BH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA3ZJvC/++67qlChgjw9PVW/fn2tX7++oEsCAAAAAKDA2CLwf/755xo2bJjGjh2rzZs3q1atWoqJidGJEycKujQAAAAAAAqEwxhjCrqIW1W/fn3dfffdeueddyRJGRkZCg0N1eDBgzVy5MgbTp+cnCx/f38lJSXJz8/vdpd7axyOgq4g//z9v5rID/QJ4DL6A+CMPgFcRn8odPIjh7relrnmo4sXL2rTpk0aNWqU1VakSBG1aNFCa9asyXaalJQUpaSkWO+TkpIkXdrgKET4PABn9AngMvoD4Iw+AVz2N+kPmfnzdh6D/9sH/j///FPp6ekKDg52ag8ODtauXbuynWbChAkaN25clvbQ0NDbUiNukr9/QVcAFC70CeAy+gPgjD4BXPY36w9nzpyR/22q+W8f+G/GqFGjNGzYMOt9RkaGEhISVLx4cTn+l051yYHk5GSFhobqyJEjhf9yByAf0CcAZ/QJ4DL6A+CMPnF9xhidOXNGpUuXvm3L+NsH/hIlSsjFxUXx8fFO7fHx8QoJCcl2Gg8PD3l4eDi1BQQE3K4SbcHPz49OClyBPgE4o08Al9EfAGf0iWu7XUf2M/3t79Lv7u6uOnXqaMmSJVZbRkaGlixZoqioqAKsDAAAAACAgvO3P8IvScOGDVNsbKzq1q2revXqadKkSTp37px69+5d0KUBAAAAAFAgbBH4H374YZ08eVJjxoxRXFycIiMj9d1332W5kR9yz8PDQ2PHjs1yCQTwv4o+ATijTwCX0R8AZ/SJgucwt/MZAAAAAAAAoED87a/hBwAAAAAAWRH4AQAAAACwIQI/AAAAAAA2RODPQxUqVNCkSZMKuoy/nYMHD8rhcGjr1q23fVl8RgAKCn9/bg77CPtie98c+oQ9sa1vDv0hB4zNxMbGGkmmf//+WYY98cQTRpKJjY3N0bwOHDhgJJktW7bkaPwTJ06Yc+fO5Wjcdu3amZiYmGyHrVixwkgyv/zyS47mdS3Lli0zkszp06dvaT5XO3/+vClWrJgpXry4uXDhQq6mjY2NNR06dHBqS0tLM8ePHzepqal5VuO0adOMv79/lvbcfEZ55Z133jHly5c3Hh4epl69embdunX5unwAl7GPuIx9hH+W9vzeRyxfvty0a9fOlCpVykgyCxYsyLdlZ6JPXEaf8M/Snt99Yvz48aZu3brGx8fHlCxZ0nTo0MHs2rUr35ZPf7iM/uCfpT2/+8PkyZNNRESE8fX1Nb6+vqZBgwbm22+/zfV8bHmEPzQ0VLNnz9Zff/1ltV24cEGzZs1SuXLl8nx5Fy9elCSVLFlSRYsWzdE0ffv21eLFi3X06NEsw6ZNm6a6deuqZs2aeVrnzTLGKC0tzXo/b9481ahRQ9WqVdOXX355y/N3cXFRSEiIXF1v/1Mic/MZ5YXPP/9cw4YN09ixY7V582bVqlVLMTExOnHiRL7VAMAZ+4i8xT7i5p07d061atXSu+++m2/LzA59Im/RJ27
e8uXLNXDgQK1du1aLFy9WamqqWrVqpXPnzuVbDfSHvEV/uHlly5bVK6+8ok2bNmnjxo2699571aFDB23fvj13M8rjHyIKXOYvP3feeaf59NNPrfaZM2eamjVrmg4dOli/zC1atMg0atTI+Pv7m8DAQNO2bVuzb98+axpJTq/o6GinZbz00kumVKlSpkKFCsYYY8qXL28mTpxojLn0q5ibm5tZsWKFNb9XX33VlCxZ0sTFxZnU1FQTHBxsXnzxRaf6z5w5Y3x8fMx7771njDFm5cqVpnHjxsbT09OULVvWDB482Jw9e9Ya/8KFC+aZZ54xZcuWNe7u7qZSpUrmww8/tH5VvPKVud4XLlwwgwcPNiVLljQeHh6mUaNGZv369dY8M3/R+/bbb81dd91l3NzczLJly6zhzZo1M++//7557733TMuWLbN8Br/99ptp27at8fX1NT4+PqZx48Zm3759ZuzYsVlqWrZsmdMvoOnp6aZMmTJm8uTJTvPcvHmzcTgc5uDBg8YYY15//XVz5513mqJFi5qyZcuaAQMGmDNnzjjVf+Vr7NixWT4jY4w5dOiQuf/++423t7fx9fU1nTt3NnFxcdbwsWPHmlq1aplPPvnElC9f3vj5+ZmHH37YJCcnZ1nv7NSrV88MHDjQep+enm5Kly5tJkyYkKPpAeQt9hHsIwrTPuJKKsAj/PQJ+kRh7BPGXDqiKsksX778pqbPLfoD/aEw9wdjjClWrJj58MMPczWNbQP/G2+8YZo3b261N2/e3EycONGpo86dO9fMmzfP7N2712zZssW0b9/eREREmPT0dGOMMevXrzeSzI8//miOHz9uTp06ZS3Dx8fH9OjRw/z222/mt99+M8Zk/RKMGDHClC9f3iQmJprNmzcbd3d389VXXzkNr1SpksnIyLDaPv74Y+Pl5WUSExPNvn37jLe3t5k4caLZs2ePWbVqlaldu7bp1auXNX6XLl1MaGiomT9/vtm/f7/58ccfzezZs01aWpqZN2+ekWR2795tjh8/bhITE40xxjz55JOmdOnS5ttvvzXbt283sbGxplixYtb6ZX7Ra9asaX744Qezb98+a9i+ffuMh4eHSUhIMKdOnTKenp5W5zHGmKNHj5rAwEDTsWNHs2HDBrN7927z8ccfm127dpkzZ86YLl26mNatW5vjx4+b48ePm5SUlCynPA0fPtw0btzY6XN9+umnndomTpxoli5dag4cOGCWLFli7rjjDjNgwABjjDEpKSlm0qRJxs/Pz1pOZie+8jNKT083kZGRpnHjxmbjxo1m7dq1pk6dOtYfZGMudVQfHx/TsWNHs23bNrNixQoTEhJi/u///u+a38FMKSkpxsXFJcv/wPXs2dPcf//9N5weQN5jH8E+orDsI65W0IGfPkGfKGx9whhj9u7daySZbdu23dT0uUV/oD8U1v6QlpZmPvvsM+Pu7m62b9+eq2ltG/hPnDhhPDw8zMGDB83BgweNp6enOXnypFNHvdrJkyed/qhc69qb2NhYExwcbFJSUpzar+6oKSkpJjIy0nTp0sWEh4ebRx991Gn8nTt3Wr9OZWrSpIl55JFHjDHG9O3b1zz22GNO06xcudIUKVLE/PXXX2b37t1Gklm8eHG265PdtTdnz541bm5uZubMmVbbxYsXTenSpc1rr73mNN2XX36ZZZ7/93//Zx544AHrfYcOHaxfvYwxZtSoUSYsLMxcvHgx25qyu/bm6u28ZcsW43A4zKFDh4wxxvq1LvPXyuzMmTPHFC9e3Hp/rWtvrvyMfvjhB+Pi4mIOHz5sDd++fbuRZP1SOXbsWFO0aFGnX+JGjBhh6tevf81aMv3xxx9Gklm9erVT+4gRI0y9evVuOD2AvMc+4jL2Ef5ZxsvPfcTVCjrw0yfoE4WtT6Snp5u2bduaRo0a5Xram0V/uIz+4J9lvILoD7/++qvx9vY2Li4uxt/f3/z3v//N8bSZbHkNv3TpGou2bdtq+vTpmjZtmtq2basSJUo4jbN3715169ZNFStWlJ+fnypUqCBJOnz48A3nHxERIXd39+uO4+7urpkzZ2revHm6cOGCJk6c6DS8WrVqatiwoT7++GNJ0r59+7Ry5Ur17dtXkvTLL79o+vTp8vHxsV4xMTHKyMjQgQMHtHXrVrm4uCg6Ojqnm0X79+9XamqqGjVqZLW5ubmpXr162rlzp9O4devWdXqfnp6uGTNm6JFHHrHaHnnkEU2fPl0ZGRmSpK1bt6pJkyZyc3PLcU1Xi4yMVPXq1TVr1ixJl67nOnHihDp37myN8+OPP6p58+YqU6aMfH191aNHD506dUrnz5/P8XJ27typ0NBQhYaGWm3h4eEKCAhw2hYVKlSQr6+v9b5UqVJcgw/8zbGPyB77iMv+1/YR9Ins0Scuy+8+MXDgQP3222+aPXt2rqe9VfSH7NEfLsuv/nDHHXdo69atWrdunQYMGKDY2Fjt2LEjx9NLNn8sX58+fTR9+nTNmDFDffr0yTK8ffv2SkhI0NSpU7Vu3TqtW7dO0uWbZ1yPt7d3jmpYvXq1JCkhIUEJCQlZhvft21fz5s3TmTNnNG3aNFWqVMnqeGfPnlX//v21detW6/XLL79o7969qlSpkry8vHJUw826eh2///57/fHHH3r44Yfl6uoqV1dXde3aVYcOHdKSJUskKc9q6t69u9VRZ82apdatW6t48eKSLj1+o127dqpZs6bmzZunTZs2WTc8yslnl1tX/9FxOBzWH6brKVGihFxcXBQfH+/UHh8fr5CQkDytEUDusY+4NewjLrnZfURhRJ+4NfSJS/KiTwwaNEgLFy7UsmXLVLZs2bwsL8foD7eG/nDJrfYHd3d3Va5cWXXq1NGECRNUq1Ytvfnmm7mqwdaBv3Xr1rp48aJSU1MVExPjNOzUqVPavXu3nnvuOTVv3lzVq1fX6dOnncbJ/OUtPT39ppa/f/9+DR06VFOnTlX9+vUVGxub5QPu0qWLihQpolmzZumTTz5Rnz595HA4JEl33XWXduzYocqVK2d5ubu7KyIiQhkZGVq+fHm2y8+u/kqVKsnd3V2rVq2y2lJTU7VhwwaFh4dfd30++ugjde3a1ekPx9atW9W1a1d99NFHkqSaNWtq5cqVSk1NvWZNOdme//jHP/Tbb79p06ZNmjt3rrp3724N27RpkzIyMvT666+rQYMGqlq1qo4dO5br5VSvXl1HjhzRkSNHrLYdO3YoMTHxhtsiJ9zd3VWnTh3rj5gkZWRkaMmSJYqKirrl+QO4Newj2Edcz+3eRxRG9An6xPXkR58wxmjQoEFasGCBli5dqrCwsDyZ782gP9Afrqeg9hEZGRlKSUnJ3US5vgigkLv62o6kpCSTlJRkvc+89iY9Pd0UL17cPPLII2bv3r1myZIl5u6773a6hi41NdV4eXmZl156ycTFxVk3q8ju+hFjnK/rSEtLMw0aNDCdOnUyxhhz7NgxU7x4cev6liv17dvXFCtWzLi4uJg//vjDav/ll1+Ml5eXGThwoNmyZYvZs2eP+fLLL53u+t6rVy8TGh
[Figure: bar chart "Estimated LUT values used for each network layer", x-axis "Network Layers", y-axis "LUT Utilisation"; base64 PNG data omitted]",
      "text/plain": [
       "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layers_DWC = list(res_dict_DWC.keys())\n", + "print(layers_DWC)\n", + "utilisation_DWC = list(res_dict_DWC.values())\n", + "lut_values_DWC = [] #Initializing a list to store LUT values.\n", + "for i in range(len(layers_DWC)):\n", + " x = list(utilisation_DWC[i].values()) #Extracting the resource utilisation for each layer.\n", + " lut_values_DWC.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", + "\n", + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", + "fig = plt.figure(figsize = (12, 5))\n", + "plt.bar(layers_DWC, lut_values_DWC, color ='red', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"LUT Utilisation\")\n", + "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `StreamingDataWidthConverter` layer does not consume a large number of LUT resources as shown in the above graph." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "Question: The name of the 'StreamingDataWidthConverter' layer is not coming in the graph.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Additional Information : Constraints table\n", + "\n", + "The below table exposes the constraints associated with each layer. A developer working with these layers has to be mindful of not violating them when setting the PE & SIMD values manually." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "+------------------------------------+------------+----------------------------------------------------------------+\n", + "| Layers | Attributes | Assertions |\n", + "+====================================+============+================================================================+\n", + "| addstreams_batch | PE | inp_channels % PE == 0 |\n", + "| channelwise_op_batch | PE | channels % PE == 0 |\n", + "| checksum | ~ | ~ |\n", + "| concat | ~ | ~ |\n", + "| convolutioninputgenerator | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| convolutioninputgenerator1d | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| convolutioninputgenerator_rtl | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| downsampler | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| duplicatestreams_batch | PE | channels % PE == 0 |\n", + "| eltwise | PE | inp_channels % PE == 0 |\n", + "| fmpadding_batch | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| fmpadding_rtl | SIMD | inp_feature_map_channels % SIMD == 0 |\n", + "| globalaccpool_batch | PE | channels % PE == 0 |\n", + "| hlscustomop | ~ | ~ |\n", + "| iodma | ~ | ~ |\n", + "| labelselect_batch | PE | num_labels % PE == 0 |\n", + "| lookup | ~ | ~ |\n", + "| matrixvectoractivation | PE & SIMD | matrix_height % PE == 0 & matrix_width % SIMD == 0 |\n", + "| pool_batch | PE | input_feature_map_channels % PE == 0 |\n", + "| streamingdataflowpartition | ~ | ~ |\n", + "| streamingdatawidthconverter_batch | ~ | ~ |\n", + "| streamingfifo | ~ | ~ |\n", + "| streamingmaxpool_batch | ~ | ~ |\n", + "| templates | ~ | ~ |\n", + "| thresholding_batch | PE | matrix_height % PE == 0 |\n", + "| tlastmarker | ~ | ~ |\n", + "| upsampler | ~ | ~ |\n", + "| vectorvectoractivation | PE & SIMD | kernel_height * kernel_width % SIMD == 0 & channels % PE == 0 |\n", + "+------------------------------------+------------+----------------------------------------------------------------+" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/advanced/finn-hw-arch.png b/notebooks/advanced/finn-hw-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..e5631ab97d0d6bdce91aea4b916a7c7a2780560d GIT binary patch literal 110452 zcmdqJ^;?_G^9EYn-5rV*Z*jNa?(W6iDeh3*-HW?xAXq65#ogWA?WCXgd;W)WogZ># zpXA9ryEFIf&d$!A2qgtc6huPAPoF-aNK1*SeEI}s@aYreCwNHkCnVDJso*yV7Zpj- zPc;+7N8k%+3lVvdPoL`JkY0^pz}E7!U$OhoO6!D$YR z7PdTDkI-Jwd1AZ@1~it)sr^6?t_ZanvKllLI*yo{a1js#LZk=?jI)o&MM~NK(Y*wu z6gRws5l`Q^@%J%x@NjqcaDQlRWNZ9*frAVbCP#;dhNSp3hyH(UQOF7k=>OUw&}sni zm;bv5p)Bm*jsM!1N+=Zn*QUxR6#buoKw&9ij*$O^1;z(h!~eg6A^!g(cxW={LyPob z^|9lcWFzxFS8!Tv2)gvePPyph`>qP^6(;8osJz{2a zUN7nK-&}^m`{b%Q=dY#S`x8b+shtE%f0yMm;R9d!z#tKV0^Q62OIE|P@P~%~j-Kx~ 
zy<2dcmeH5n_5*r}HNE(BHaNs8qJj&<)~t2d)~y#8o?6JCixtJ}chX2-+o~&SZ*E>v zA$Qi;%BBtnxG?>tQTQrkD?zvS?_#9AEjWM3SLQD>+U!DniQE8*8IYlC*G<=e7c!N8 zP|YNn4r1>4R?$E{o_?Io`>QVpm96LGr0r93L178kN|ysoBE_s%sakW z^ni==)6;j9?I}`6cT@q~8yoAN{wK9w!=TgegOfPhwXGgfH!^iy_Qyx()tbbi8 z={F2avu2#FgR*FfafmV=J`h?GwuuaIXjvJR34NEhegqyO1KHyOiC<)`7ZWT#Q@5RA zoUi%gQu#Cn-y#!^G(bYXN6LI2_HlI&x-Pr0hnZW{u+SS7wk8E$fjf)Qd|HBvHzvR>4Tyu2eRf9 z2gmPjMc+=v_yn)Jx7BY-{kVg{SO65tnz0Wqj^F-LY{|GLzqE_9)*$B`@r@SnNwNp! zfX4>Gn7b+hZ^hB<>A)cHQ;&Vl^rm5DJvdQ95%i62De)y5L0GLc7CA$qwn5-nval}nKloS;Im7B@K9*x_SD;3 zyCta=Gl6h&p#L`5TJ-xnIYZncMQKfghL=4ovr4 z8fikE4z;)6sONxjcQr?#GAqg@K9E@%;r51UCsi?0UA+^0PZ#aE=xdgokjXJ;3y5HR zQFBZ;ET~ORsZ+nC z+t86Lpjy7OUJ3`7Q4CsI|8!uAbDjoP{LssEG)3W8JqgD}^YQl8h|f6}*LG`7OpU}R zk{_UiGgY%odSP3ii@#qze9qW;c?jxqx|_O*4yz4=wygZ=-s%4|i_v%f^@p*aHS?Rt zEV`{{&s)DqHoIlr-irfK24-vL2YK+R1fP%+wYjG9+NP)Vnr5*l`{l(AWc9gTGGC|1 z-VQen&sLcXiI9m_JDHL@=IxfP+RkUQ{DpnB0eA2Y+mnAP7eTf|{pYkY*d zR-l@np${!L&hqxXOCDN|Ai4Pc*qb$58B1l!VOnBAR`1<=_}RylwOBDZ1cKe$OgUmI zF5Hy)G4Q46ac9Wjn%vG|NB01BriMg{XhS0p$F|m6`Mkq>3fOYrLDdRJ?;MVrtMm~;tKd;#q^7-4XRZ5288T$#{93ca(#_|{8VJL{x#2}v}fUNO(mx>kJ%p2 z);t~;yz`ALF_*M8#p->;l2@9gtC&yC@2hZ7xp=Ym`!gKg#P0r97G;a^h2Rk-S0_C$ zyvHxDtGK=UK)v6T?T&2?R;WV*yy_bxExGISwt2LKtiNrmy2!|^9Zk+uBFs!&mX0hf zGafzoWyqYRp^xyRR+NqbQJJVK ztEUd5m2$pCt!GPg>nzx`bLF6tN3hQQrFT?9QM`4uvpeW|$K~nCgl}@q?{;>zFY)K_ zoxtbSQ$|J~-E-Pb3VgJ8`Jk=xw^FyY?@rX7Q?V7g`_7IN1|9>zWXBbe!)i2OtD(u# z`bkSchPbDbeQWW+lh&7^5|p#BX+T)h=x_{&(7%YqCTc)b;YsMG{(&4I?L1xL0BZUrv}YFPt%^O z$n5N{j4t>1CG!W}ZSA@$a(X^fjD6C7T8|9mb`H*SGqo&&8N5>i`Xu^&SD)u7Ghz}G zM{&nZ3X2O$zV9dSpJYCeVsD<`$Hgq6MkYdLSFMKIR<2V@mbLDD*H67^$7$r&<|$%g z=xMs2{^DT+^|Z&RW_f$s)k;bftiO{cuLSUsomL$HT=Oz3FAaEpn>gkZYil>JRlfQ{ zg&l>lse5Cfy_%rQmzER}=AMx_e^h;<@zar2Nwu+RD%S25nz)I2SnQ~Bme~_KpffSi zQsHaDM#EiEtc{wzwv|Fr@bkHNqj|%(uZ#wYBLUSHc}t9a1?gv{QfnPnK?ZKBwGCPR}E?Qmmqo*{`o_I{Pe%IrA4U2=yMfLY0 z^3pty`BG3}W#gHuP14E~LGqWeYj1)(CNdq}mH12%pADFG@4z=4VqVJt>;F76am?*sVaZO zPBYUD%=|mr(q3QeW2W@RZ*l)c%|}?BxGeiZDR& zUs34%66lIewGik^2s=Hs))Q1zxT_zvihFfY*vyGIrdLX!jF1YkboBg6b8|#Ws>2(9 zaDel>1JB1IVhh*2&cj3J&c=ugRQbn*i-DRDIDUF(5#-k#TFJbxSDB--vAnhreqsNj zw9UxSuxEVEHG7!{%55nb%3?^T8~XcMvv8dps{VE+!%<)E@~8TGR*Uigde!kV=7ihw zaEL+NP=$>Cu2}2wK_?vR-K=3=|HJ=H5&FVZ(bU7lJLA<#8Zj zL>6p}i9sca#vKL^`=d450M3az|IKlrHrBoR@|(QvZ1QbiWbRzZ`1w#PZ%2UlmeoPk zG8&11QEE%;Q!3n()wh}OJEe2`>H%sA+I@{~qBWb|iVoo;8$X&+K-7TLx*fNFRUtEcqoJ=MSvPC!J9l?LoEYVHP9w7jE+Ox9ED{R?|`wK zYL_1i>xguA63>tUxcsjgj2#7#yHL*8oW9@dheyDM>iBt;(c*-wGJ5AMNh z#=bvrN$&Zqf7(8kn#U!*-tI1%Wg?`+Ttw)uhku}Zd#vbznz@2yGhrKS+>JP~piUrR zTDQ?Q2-z_WPbwSCW7`sK6qtT|fetO;khY;F;^4U>hM8O-OYjLwzOaL%+kmIHe-6ra z($CXv7(>NWN%dfYypO$jegzee?SEwvz#V8mexx=+<1tg`P;bx0ORZNjE(>iX;qkjq zun-mTbdwhks=u}MLabPlVX+o+cW?If(Z%zThfKE7DQK3gT@>37f?=3mRo8G*G5*T# zVoHp-L9s`+p>5ufm4qwXKHl_7as?mQ32I5$FX=boB1(Zw`Fx&vP-^~7#faK|AeD<8 zSc0|0!C`Q)VBfKuE516usbtE(+AFUp&f>aU?V22pw%gW%opF9pGN@3xy-~iGW1we{ z9Yef8<0$wLG&^qx+D+9KU~N_{nq78gGW#vgsqY^FrMrk!^L
  • aPJ;Fl>y0GbIRO zh}zg&u2^`Tu2ql;W<*lY!K3<2!~oX$lm<#EsE57i!sq*I&K)uP;k(t3B2E8<40bdq zo}m>~`P!z>@vYD%>t5EK%vEsOqn|8L>1^WMuf8e;SG8kzzrnXutlr}<3VfNskz`qQ zL!M-=hK3u~HG|Q^&fufg`3wlyzxmskIRt5W_m77L4tfOZ<AaM|pv|?5pUcS~%5*7i19p!qwLI;QH`6 z?cf}8hh&xAm56jNc!k+%%K!by6VJ)*8woCe){hM~y-(=FRa`%7GGG9{Tnu#PFLB{3 zf7-!Nfh(IHnvSGUKe{DvC&h{2O0}jVGc@qc!$f@$uNEtN^GN-1b@Z3SWn0YHf~p-X zrl_jDr;b90C#_8_^ic>0sw^CVC~VP=#=`uX`lT!F-yRgSo3K?AuXV`^<`Oth124*s z(9QHmSOzhu@G^?c;2RN6Bd<5>pRS2!gk}P|=0p2cmEznG=NP zQMH>8^@u2~pFoSTW9qtjX+N9QS4yuhlxEw8lH*qXbdzwMMxp0My8Sh~D~YyE#NovB z5lio-4C*!nY}D@ePew`;2#o=Djcby&8jt!Mr_Agt3_U${3M@#E9LA)=lP_P~5aLO5 z(!IFT2^+TPvji_CI}DEb~7qSUoR=pO4TlU|XE81OD~du$2gNdtl3i^WMMji~}(b zi^4Qi;?4{6PfA|%D0rXANz_+jgt%Z7)N-i!67bKlM?`vb?8LETG5|`IFde;A*!kx)%G<#@r=cWsN-zS~1 zt?PcaIC^SChqEg` zRaxgLwaz0awy6oh7|p#Slw8{5&!OS~zZ5O&1>TSfkJSH#xZ4rXh)|>?xfPGD2ReK;hEoh`V zI}JPHleM93tOU6~0yI<{;6Y75 zrzOsLzRJ`b9x}U6?Nc=-7k!#Vx&>bg->m_tTXd!wNr;NLs>i_fPLERZm--hO@r}ZS^E%$t!wy()h5-VnMWh+prF-*>LyI?XHS-JBQNrt6h<{E zUSL>BzoML^(96z&>)MwF}cA~6JN{r+CvO^N4(MmaIP3mPsnzlJSH8i#E^tIN4t z1nG;6b2_~J^=%f9&dY&0t&H&^xDdk2kRC58-sC$|<(gID6=gvT2EI&s00`t!UU=72 z&CGVO=m9|8>)>kkSVm(?$$}^BH>o(`fQN~29`GQ5jbyf)MKlMhm(@h{i@tf^XMcmE zyCHGraCQvSMm8x=W=bCOa&tsw*PpHyYlh(E^EBqH|32)Kv4d?}-hE4T<(LTzivY|9 z-NiyRK;tilyEODr?_`CsN6Wi}J)yJU6J9ew6>3Qx4^B~j_eL#<2$tF{K6Wt+)0UkL zl~=m!SeJUK>~jZtN4c#P0e$zF?Nf0KLey zXxjZm=)~nQhZ1*|@+05#8GRpQ_Dady;3H&p?N1?=A%v{aO8s$@AeDaqbzq=X|6CoL zn3*6&eOp1TUT?xOe-ebL-N|K@Qypt?)tdOa>BkI?zsnLbq4RFNk8rYxLI6lv$=;LI zv^pnY;rC+sFrHpi)eC&AxyM956F>PGNB=Y~ZAsU;y(m;-+(bUoFI%<6Vy>NI*oHY$ zS1ZAE8b1b*|MCJj9irQm2@OYb7CkV{MY7WipduxH53_cBBXpJzeP#cbav%R||K&|* z7NhKU9RA80@$Rba9XDriqKXP-OIc^fK1W36@yJMIWaLQ4Lk_g;qYjF09@$PlWYhq* zpZB*qXH)Ufb8|E=x;EjtrE}v#;et^0@8MjZj=gujj7vo1Dm3^1f*nF@ORzOB7FMmE z8$@AzpWj30!wK_@ljoU=)yX3}$S+Cm#Cgln`^1ra0KrId`~4h9Yu3rhszYxz8YGfg&Ei9YZ=KU%?n;H zom`TG;(jAs16se3qCCU%0{%=w$Y(EazARS$a|W3UA-A3Kv@TY33&y-YbdJ|e=Uu}& zbi)frM;azrrXoxQ_8z}yo$NTr8RHUFdk#9vMKy~{Pm27oZJlx^!0ctxg0!HQb;OBr zJ2l`zLj7a?@XYtbNi={oO1q4dZ%p2=DExZC`lo4HIBFDb2lg`erWw316SJh}VLdre zQFa||NtWuene(OHiq1Uume4%;<95qiS`pz9R&rP1o7ee3yvPpV(8fss)-PS5eW2}; zM8#qj$6r4>1(Dn35uL+l=Lh^%`&kK2u;2oYuN{J8@RKHG1lxTw`fU7No$6dZA*3NW zJ{5s`3TnafW!l{34ARq*T+{YKP(x$|h!?wgv-6ClxBm#ai=C`&Yg!kfA3UbGK4SBD z9i#ojayKsE=!D5H({%?ns;q#Cy3y{kutK>LWNIqdCbu#|ziVY|Xl+%LF-Bqz8z~MS z%|0eElmmUlnzz5=I;b1lKG@3r)qkjiZO$UwnZE^R>fJhPG5rc7KWk@n-L8NjC@hw@ zl!0o%aZSh%Bt!^aIz|R|q~{O<#ROVnZ{2@jbDY{R+^KAQ5mpoz9~@z7Fi=>b-n%Px zdKW#w{`7B|BG5;fB>zL>2ltKBr*Yh##`)T_qW+@$hHjx2d`@S}Fd!zPWt+Z^614q(F3fDmx`PPdP`#I5gp=s<+gCXA+IQsJ zQon3JNQbR6*AhUE=bfIQhi)k1A3Qa8N&G8JHo_WydTCTt`rC!|?TYa>m#{;@yABN^ z)ns75E8H&6lj3yuub(=_F7uVi9K8$^xYWyo9II&WZn75XoJpqWJt{!of3g2|7 zFxE_1i+~Z{B1kRoruX02I3r#O1>q|K4rP)wLd8(QMv-5ftynXtpB}h9MX#{XQ?qNZvMI z20nyw=kAe~rK`f^yd?F8?h=R1O3=*wrCFHy4kQdZwKp$?SW>^NCfol(4#eC4rW|m3 ze1HLM&)qc;rfc!n=+Hw~19_eh6>8T73GUloM?rI#)Cit z-#Q4lPnbMdRr5<`PU}8%x)bNK-FA{Q!49~uHbz_m3=du~CmUtWBNz=jyaqvCD+@JR zU8+;$bw+iQ;wO!TY-RC!rZK5eG)ACJ|Bsq5@|^F)YFfwY4{YIG&l2QCGpaVtS)rK2 zjWG3j1D|q?@%E>Um2C(43F8+!gv!DKK)LsE4#$X;b~W|@XVHNUIi{wJzjatu&aND< za2tf)-w7zGVIDBT8y<=|;c!pchP{RA)fB zVRlLOYV!U&?4PSiS@qbpj#GG#qzqGyH+2cKc!5YLoDdvT-+6Xb_-+%0N~|C0o(g99 z6#}~?r01mig*Ne)Ix79eQLgSdU$`y5&6KxhU^2kL9U^xlucC~*|32d|UhpZLziekj zBjLG=^rkbk!+B>~BzinIDXi`h7}i4NRmbA>j(;4`>4#l;%XTFP8mgnZi|c9k$RRYG z@Y-@vUEa;V#9v%rF+3uBSI7LFTKqe`wlZTx8{<0oi4G5~I-9zvGajbMXEF4V)oW}H z{lSrvn=0Dd4u16AtGZx;4njiacyAe@pO_eeIC4MM!N<(Wl>@pwCwBl^1t{Rd3Q5si zW3rR8>Z%pV;kDHNXZ4+=Ic)v9F{4~~{#o-I{JR~N6Tn{?C86mcpozM98@@I+oHhR5 
zUl|~5s3>V8>#H2S6C?0>ELn>_wU{215?oC~A%vm^7*f2suwKmZ)D1$FmEgHp9bFJ( zqX_gT`&GWRv>3Q*`M+9y|x-O(0jB-R_#2uF@K^qv-D7 zIcVEkGv3(Sh!#^g(?x+AC6Av+v$wAs5`QH|j#Okmrz*a=*}$)9FOT96i7-z*tN9aT zAjwa{7O{=Rl~G7ZCe{n(6nuuBZjULt;nCMk420a(o&Y+fPx@V@W=R7kI#(J(Eax3a zikHg1V)HQ`!*awIh3U#0V#(JsaZz-Iato4!UPfO!)NY_4+3^vLQ@*MgraF{^mvs_x z8en_bFoDK~3lM1h?&Mwc!FbfFOweAzeiLj2qzhpV1wDzjmBp2WJw9ALv|OrdzOqat zWbVwp+?cym?SWCol^KbNi+V0aF54Q-Q|-gOrwfQ* z<2aoq2eO{`<6K77_SwS9?B|qt_7B90Nv_@%Z*IZ%`~IczUz`zhC_d3|+-=PR@-@27 z*U#i9Bv(olRdI(WBzlG;(CvLxgpI+ShL;;#6dixYrC(O>u=*X#aiJFniBhy^t#=<= z6bNPAHe9#MXNTf)UJDnVsv@=CWw}VREhKihepm6tz3c~3Nv4kibh+jHlmT1bj)T#m z@DTigUhX|v`3wk7>(g^2tvjzDaUe*%*0hFiIp8LPf*DAqr8X?5_|oG5x@CFk2g9$j zH34aO*^0}g_S{c+{`};JdY420M+2ezf#Bkhcz8j!_j2>*+0UlC_@;!@_Xfh2y;4(- zRt#qCAxDHe0#4c*#x45bDzmWf93PZ)$(W;^I2je z#eEly^$^^wlAd=P;e9i>;AH~BUNt*B=LN1p%o1nn5@t5#jaF3_Rh_)Nrh| z*V@CI-t0ZwTTk=IYhr#}oLk9t=nRfR9Bx#7Lf<%=0JUZ(nBNZ{q)^FPKO`MMA+StI z9o4GZu=p|y<)N~X0N3lHjl3Ss@yx_&`(7+Q3tV(6ApBLD&_JaD6%KUQZ-kd^rGjIV zv6bDzFUaS;*`2FFJsj_Q^2iPJ)TPtCYa2Hcx_+EYd#5geXmmN{De}nidPmg?A@@65uf(t}Y71_27I7geLFGk81t1CY8AjC=^1theq@fq^-678zYI-0-& z?BNTxhbpGBk##Rrn3G!(TXCh}{#E^3YfwVwZ&0jgqwlM$7IJu#d@uUWr-nMzBE}sZ zIhl3_g*V9+Ld;nu*ET|vvKD^2&z%`G(>c|xxk<6#OPy%S!+&`9@KBf|k`si~!(i}^ zbWJkBl+wCa`>pFH%DBPO2e*F(>>t}q&aOZwBgpf4x3>6y%@_KBHG4@JZpjnf57WyS3U=L~1_tnXz|%!&j}|UpC5j)xY27l+OzpArvT13A?h*Rs0Q<^dh=ax%{fbWNXk)!XO(; zxzlDm07)1Td7XhaJH( zX|)C_i``ziL#R225{U$^)MZTh1MyQb2EMqs+clMDS#Rx?UH5Z)#bdt*#O6uVh#Rz^ zt|o4lUZ81l2nSrA_b!)3$iGCyxnC-o9N}eOF;aTsgXk_3@k@VJ=`&xUYzaL!(CTV^ zsbfEV4eGmr#lX<-w7!68T5drQZ&f-R5m1%o0pd8 z1HXY?)R3&IHqZIO^lfr;Ug_DDfrvWHIW;*|RZ4G+;UudRn*&>O%0&5aNye{McfHyD zj&u2RNB6T$!u&=Xy5aY(v?K+k^%oB1Wbe^v&Ojmxa20@$N$8x?WnN;l2CJ&cSC`37 z!g;74?3eN_FHdPhnWgvsb1)yv=45zpt1|{1zs0ZAOICU(daF9)UWCh@53BDr4E~6O zmgqYV1AiD*ej?ekFDcWnf;es>u!T^< zkO4wAUE7oobs-_>2i}N82dqXzLs~_Qh?$vzsSqm;!}s+*w|{48Z*<836w@iLs?iaU z;tWp1JYR?O6<0V5-{)YIFZD#fx$LNC1(X@#t+7cyRPPoG7nY?~wgat@RFgggmC+D9@7vyl z@buOt*#~tc%0>>*2D%UmtgOzkHJ(NedZPD#bf%tGEqbD z3mL9+$qs~R21 z1AEdBH90d8E!eR&*;I`GHY#|ha?6~yU5u$^QS_-SJ%`7P$&D78-9FknEZyADj$(7EGf_;N12}6+5_PQXePe;cfDNm zevwS~z5F(1Wz%PnlK*0w8NxoFAOTYW?fZ zMNzyw)E}`#obOXpwYJUJla^nq%zyp)1*71Fy+FAFHNoc7ya&qIT#{Ddi{Xw&-i+EH zpm2O`H(MPEufU?Pu?aUj`?cNchJN=TCil2lwkX5a&4$ z&$zg6jOS#byRRmhud;WmMB8g(zxbmu+~Z6oi{3UaLS_ViAihTMW7CEt4yget$B~&> z=L+#j8eFsP7q&iP`7$_C$vr$=B43KV>asS(({gO*R~9v#60?e3W;HWxanWwIo%Ga# zw6U_Dc;@bw^a;h9mmo7cGAt}r+Nd9Ap{jR9&d{c6A5p^@J2lp~WGFUEkh;1j=5olS zA`8ZH3V;hdO&oC+(z-S9IwRC7E2 z7~sN?#3$q3Ds9*jQ47n3F|9{ZOp2E_`Dux)*A+W8(`;RO!8u53i%+LvO`U6!kO}YO zR%4)DpsUUrmS@pGrnieLx9yL57X!QtM^{5cI3|ZLZ+nkIGc73{if}gkk=_a`KMBr3 zbl5vyMF{^1nf_Ss2!qpQ1&AQ=z?Xb--bsBZCg>|#ekp~LF9#fTYGQ6maFoXs>G(kg z%>IiO-8aj97;m-PykgFl1?>yt;^OZa&DWP^B;Q?BNs6mw)5xYh)g!59lyUc7rR)*n zXRT(e@QAzYwo~T0JCVrJ2dqEj{uD2K4oB{LLADAQWxvVWa)m*9ybjnLRZVV+jq(L*L^oH@&cI?5~_I zX^|D0ieJ@0wn3vbbUZf7ndtVHXg1JHPI)bdf;Us!PSQ++)8tCYdB#UgA;-$;rXF}N zCOg{K+h}yaQTpgQ3h-&tVnuX!;z>$hi19R@h*OAy@)vJ8i{Dr6UVYFv9QmbB`rvT+ z+MoGnQk5vacJwarG>ktLH#GQa~h);cd`P5aYcQG^6rL5qGBy{hxfzOgP)r_FVw`*qijV$kTWV4 zQl8DG@YU`_N-0$m$Z_u**$vFif(0yJA9y zY&BJTWM?DO++CN7$IXW*D=iNIOm^Ka_HU|C$!`;r?_DcFX;bYLPt!lhhZVi1{soX2 z<{jIWx5x+J^7#^`dW=&0p8QRmUpT5gCCg4u7m3K_%<-H07DGF!dc1)r-XYl4;f9O8 zZIzxu#KP|=?Xxy-AoZg_PQZKV*ljw^HCIQMz$#m-RZHc$#lyav{_`z+n5FG*adCc; zMFzzE+Fb^>){iW=j9y0!Tw)cz*JdoZlD`j(bX|VZT+qkh}Mpl6sqOecx++ zm3j<1_v#sKVELv+nj>V(6ZEU_^Plm@X$e6cWM@6`dp*D1oF!395x}RSuonYs-yDa= zY?hj>0&3b)bEBMIYI48L7RjJ~D`WE_Oy@cZBgOA!`{shYGBUi|_p6T56V2RXqr!pa zm`mkVJBeB=*?3##`EA)rgr}l}2~b?oJ`Kl+pY>wYIj<>!guC)Gdj*<*h(w_Xphf5H 
z&vFOFVhP7G8{|dr2JL+ud^6+b?nqn?dpV#-8=r!5)upDw7mgrepG&EG2}=}94S(#G zY&GsKdx$HamsG4Ad<`J@wn~(Im%U>H`Xmoy<-L}eIVdMbgpEz}&M`8NpgtsZ)Wsp4 zi>t|bJ#Ad7Seo5+6A1#SX{-9}y2#$04I!2$-2|#^B`VzzjJ?oZy=y>tF>;IQ_35gu zHDn4d@R9CWn3?Vy()b#z^vsGqtWNAzqFQ=xgSdtucMbn-?g*YcK%EX&Wy}mrbaZsp zKh|#V`_pRHCnm|(IW^MINKBZLB$Du*-7-8AoYIqN&gG4X)lD<~7@ zYik{0)P%eq!N*J7B{*`j{s$3x9%CJz^!tQ#GZo(Q!eT&B`oSU|s>Z%lZZM_`gLU`; zLBPci&7}L0`ORJ^tk%oyV=hClaOq3qc(&(f{=Jm**LeXg4FaqqXQEotJCXi zwETrt?@&%3buS^2zx-zK9Q#kNVVzw zrhstL&WSkry`#X`KWMIZ@_PLZm6<}ga60Mjp=Q{ca~$2YJ`07_tkb>12lsaX*&ps1 z=~yo8VVRW{$7=Xm=}q1r`nFW`lDc+JfL|33F6lh)R%4xxQC`!1=zdF z{5|6LiBT7?8y*iz;!vu5l4M@sY{?S!ZA9Inu$)H>(99pQZtr|Nb#?I{`j(XxH8*CN z7%P84JVnACX+nl1BY`rUJ-knVe=-qGFv_iLJwMv9HrC#c-m*FA6+l3WCiEDB96_#b0tsxQ5^1DPH| zGpaq*+^smcL(#v9xsQi8Kr6CA)!AUm62iKpY$+AN@MH;!p$^=Xtn!cY5LeO|5G;CU)l7_Ms|F#( zDYGCOq89!5JlF~fQ@x`TaZtp=)%TOM*eSu;xK&6UA&NY{&+_C~EK$CAJ|2U3b&OIi zzLzt@VjYNQ3RV1%+|5pxvQPnnJ`ReSgK2*(Xww+FKf5oJUmH4qOQrxHI>8`s@p+!Z zQ9+tpy{#&AWP>ZxL>uOWTnsfOB^4b!Ecg;9B)_h-P7~nx?6C3FztOaj9cI4XRV@2i ztA9W?)C&4r7$)3PHw11Tfb-pol$Y5UQI{Fd3cJ&k<`}##!VHdA)rF+psLy&)u zO!3(ee;V*)$KYg$fw*hQK%PosLeqQkTh(&cPuzXH)VLHkArNr&q)U4>c?r&T0(&3` zxmUW+!$dd3TMkMIR!~SL(u+`-Qvq@4ZNzS$(+Xo*iV8OnsvVKR#JvpGO&k5a5N+=J z#pIajsb7^NH_@|uns0VTrMO+ixZY_a+^@O|yWKFaz>t?|pgpu&%sm3yP|-F~@PZsO zIB$tQ)CNA9U^P3W`_Yse3JDnP)=(IorsZucD!Gy$%?cZN-4zC|Gyu_TUU1RuHo1n6 z?8kiej$iqys@te0#LV|%`4Zd!ndWcRn+cjd53~0C_X@QnKgi%SI&ZNB*ZZV{%>*~U zX3TxvO@wElc2Cf8g(??fg=PhTnGAvjhqLHAwD+nCoh4QFRLntsH-{$?>_z>=MOda1-;Gwp^$JYof@2Hr zE&8}$yxR^GmVo>J4wH6>Z0GIefkC4xUC94^rPV@|M8O7*yv(mL2E@1W&*$R{Ez%QLn3hL|Y7j67|{J#S> zFr)bW(+_?Fad8;@t&jHyaE0pq(M%~4JUkT*&He3d{C>c{u0Z_P6{t-|Zc@@*0ygtc zblAAK8cIs}B_*6(To;Fj--SpJdutgWq&Pfh|c++AIJu58HBhsgdjpPX0#H~0ztGGg%1 z*ID3L2>g`p;PZ2P3yXs|GNE^=Ir4N#RaI3vxzqQz7kPR4I!+9*ayb7}PMfNM0W%BB zj~LE;ZgzN^c@#DLLP-Bx=PL_}*_+taf%>bH5ccyT|!cS_0Frk0jO8eeg+ z7ETswMXV1da=_RLZ3eT%P4)h79HK5RHz41as;Vk_dU}xh93BzTXityhW>4Tn83hLU zPx4+2=c9uIkGtbJJQjma{|{1nN@z#|tp8Mp|95^~!_ZJjZ7ra3ZhqeH{q>1&8W$T| zOi_`AmiFOjhIYCrZ)>((m64HAPfrgCmyz51zN)sC#r2=1{!f?PsHAa|6BF}OQ`+?= z17O8{G+%==O$G+8uC5~^BR%BONq%9|(<(?vQC;2-tY5$Wou0-dxw=1F5t>FI1cOro z=6_E7irC!Ttf8R+bU(`!@aDwA!h(Sr0FP}Q=iLNrK@fg!&(ltyb68@R)RdPfWxPn23vu*BW+{l91@DtKU&V z%^(kqjYVf>K2PNE+1c3@Nq7Gapfoo#!@n#TWJLiAhq`AAh6MnBT=+aSA`l?n0c9Q@MPB5Lv z)%7(^hI=zvcxt`Vlg)2K26hQHN+%wV$syWL@U%rpNCLif1fvzsm;yN4}>CngsBzRpfEd2Eg zvgB}koAp=6?cK=&pppnYYAUFIz{$|@dAg=%VjBPbd-rgv5Dd~bFa`GKdN0_R>gwt} zgr&2xbKpeZ#KgqI#ReEVX(=gtyStq}PnhZ3KP6Hx*1L4;O+rILLZYL`z%w{_4vUJ4 z>J2K&13zK|?Vpuc)xUCb3jVm<{3RY`KAt)7=Z}Q6G|%@x(O}Pg>xKBWv%L)o1tsMB zd~5jJ5Df=a!u>0#uCvqUY`M9qsYy*uZES38b#*l;CS4>FH^9c6KmaPUfqDUU&3$8Q|a4TD;_(uC&JG{XfLLby$|&vo@@BmvkxJ z%}s|$cS=b}H_{!_Eh!z+ARS66jUe6K-QD>up69o}ckg4r$M^5&pXz;G*P1nR&N;JY zW@WW}|85CRuj^hsYA7qmW@G>-;`#LGO+b*6FPp^9#^&zniC$(5`=p_w0>wWwKQF+^ ziH$(DKLwDI>7T8P_gh?CtgdE%{rWY~`Q;DKlCoQA53Lvy7ZZCS-?m~09LcA}UpLWf zgOP}>B|SYo02?21A(T)K$d`+)b6dZD{Q`K5wg9v2&tW#0S6FBUuxVo>gWt70IT@da zxkB%^|JMkN9+-b{{bp|jV8RbeoMUubTU%;sY6l00kCdRKj$#~Rx?gNhxXSo1*0BYR9UdN@o#hh{Ain0?Ux4zH zgZW2#EzsXE0cym?#@gH4QxLxs5%J5+^jT>SVrOSZe=UT9hW2O0?&D^d@O(emy3d4M zTwHOFnPTVX=Pw|iEG*m~Zk;VGENpF=k6ThxQ!z8NffktdC$|ABc%)!v_E~OyxvBxb zf^3ekuK159mD352i17CEA{B76J6-?#l|f@LjT@b(bMd$5hYug59Wh>zC#R%nsi>%E zXk4vygtQc$TD1AYjuk3?va}?B5G;P#ZPJPVR=?S+s;*8$Rdue#SD4jv`6?kH0R;sm zI5?Q1G0M}^vjuEF4Glc;Qp?^3Kg{XWzwbr}9hj1szrex9zP`E9($p*}D*DWcfQ)=J zQzNOb@7syD=;7h8z^v!#+JdRZ&v4Z*O3)M=j+_>BKN_a6+D!*5*SfJyXjkZc13YySu<8 zA|W9Gv-%80NJJzlE*@0`nXfc}Mn@mopQ!;#pU9%`bA4<8U`JnHzdnR4=3hcWfal@q zSyEEc(cUg6FE1!4C^_E_9D*Nacvu*TpvO6IUBJDABn{wns$~W6w6dO_cX_poC(!%l 
z0A+xQe3^D$egYZ~R`ubhbqc>LGXw&u+E{IH+-mL5OF?)+0WZ$X#f6JK(o$qX4|D4I z&p4Mqall8LTUtV`#3v+BUJ@FZnJs}|gnpr*qM~YOXvojc2g?3wsoBQLD((7r7}={V zpyU8E)YR2+ad5yV!otIU(9!}}7KtyXtgW4zl%(sc* z^o+wigXfI}SSs)*B(Gk@r=-k`jftkpypSSeU|;~RL`5CAzd36Yyxts01Ei9|5;Ys) zoey+pV#IQrk=Sp>%D2n@K+}=&m?_cfr6eU?w}-OsXZ%vlR9EG!HP5(g#^6w%(EjkO23){3J6 zKoax7qKk`*8yg$*@+fI&Xuz)0e#^>Q3Z)JMU;;47%gd|1y}gqSZ(?F1BO^l+r4-sv zj{P4cG61Ij`Gcvh?m}p|AlNYgL_qPtwrFc>D=RCrc1=&abuOHrZ4E6q&rM8R)G#A# zezLZP*~?Hd27gRVq5P;H1R*c9xw5}UiKM_Q@_fb&nw*-#tN&J8dk!!K?vKtqSjscs zcf_EjrG0^}uv|)@9DwF`pZ|=y1)R%z2(}ggp??1S$qihx*EMUEgp#bRl~@x4&|j;m z@|5Ic7+BbsX>|1TP{?0ac-Vmi3=Iv*RU>&}qB^Wd}Lk6UUv-KL$wLs9g${QmvBww9B}%G|;NNVd%#X0TwQL<72vazNz8{mk(2 z3oBf*SFb^^(31x{ANc+IcXA2}8*A%wpl;j4WQd4}K<6-jLx2&rMIG!;lmL3s*4EZZ z*tYc4dvv_g@fOt?^$6xq3ySn<2%FYg^NB8@b}BIV=q4CG!lOoJ}%uvhi)jg%@WYtq@^gqU^uD)g-61RWzf<v4K?uRa%WF1}3Y$6knIsNGRn%~RHO91j1zQi)6d1Pv z;1GaL11=@nJ@q2E&S68wj$yXRwE{?|p@FwbqVllhk?IvpQLri{bseEk2+$a892`H# z;iaV~5VIvQ>&^m1I9zD@psfu+5CI7ZYUtB)VAPi{!Or0^!cf4@)>)wOvlA010d-Df z)FP*(?Cj_ONcK)%9`*i{nDa+zX=#Ag&d!jksAZVf$)E`fJB+R*7~09%+57&w3Rqfr zYv^RivRI}Fz>beVOVYSMHQTND2L*M7R)~wfS$hszjs2kxU$-9IKRn3F%7#o5pI=^v zhlT?4t*owQ)^FBPP&fp!8i)b_3-fh`g;wjJ14FG^6ajSsY%Vo*7Wf=MrNph_$*HN~ z)Z#edUw#Y+#_Rk22Hx1=InYX>1=O@KKfj^A9%$H0#WlaYi{QQkjZBY=4pb~NQ z^z{L~57?6+B7RTG?(%G{k0=1`bvr@?q4BN&s{%BcgN;oY5OY@6(0{Riqc4eK5jKFA zW(fmd3AGT6I${|GVWhb6%Zf_gaOb|MC7Fu<*>{sF-A|tbKrL) z{{pkWukY;qe0*di_Ul(&z&%4kx|Oz|lOBPY1LP$FRUdG0z#6cGIj=5wEaS1B)!`fA z7ewHwc6N3EsR!{ipw1BRu}n+{KW*kDoGks``hea=5M4rNUJs@7)_C1G7#J8BR$u}z zcwGMk>?0{EMZ^LlVF3h&4Erf>^8?8pr4=V03mrN8R+#7O87$Q zg!&->FE8YCuc@x?GE;5x?%g}UGzgSQ@CgVIbt>!Y@%vE@Lo2OTrrtu*$zM(!yFz8E zk&uuKW$?p~#{g{1$RN1~aWa3FkT*HVR(Qca8^qMJ&AQeHA>$z-AXHXW0pK4Q8v__I z|L2csC(+8G@YUDY7TABK{HPH6BMii3H#awcE^iHGWdU~U=l7DB3iFiY8BT>^NTXTk z>4C)(-Xh`O`-0D0Rsrg#$Hc_M!cqYo4xmhRpO)TX-fMj$REYg-d0n`@y=`u8{^H9t z=RNWc(DFLl1*oJ7y3sY%(~4)kYnB97^zkFev}`6aeGn59YlX$u(UWe$d@-f&k zixOYw7#)mu3J2;PCZVQ=Ei5byXw2*ntJi`aYG6HcbF#On>{QQbfygfb0nhoEBt|VL z=2Vb$T3=rW4h}>giRxUSMO@m0EmO=)sM-t0O6m!B#^L&$L-D8R!MO&h)usHBz%@!r@J<241@a@VxA;8ArN{tQ;Ca@WXfl0n)6Pm}7v}*@}%IoOb8>P(;9Kcpw+j95mbWAU7G>1^ zIV_+x&ZJV5g%RfH)um8-yu&@9OX&CvRCHb;}gbD>*#?HY( zP{;5yqk6V_;2V~K)!df>>YFa$uA-{?%BRNI;RRS@r+!&52yPX#geFEup)^Pl0z>`$ zL;%k+-c0}kKGgv5{KKRVmyj?Ocz|Etceu^`nJ~|fJ<%vJkdg}h_U#P^3_A?SYiejr zU0%AV(B0etqq@0syE)AVLGCaD;5s~gPlI z!;SG-v6<6zati!`CEv3FrrO!rsYE*tcn<*Y70>m*|2Vk#x2TF4@dKq&1Xa4M1*SU9 z_OZcPcPTum6Vk+ac~wuLaDr**#c|539w;|kd6o41NQ8HkP^X&9Uyj3^2*v;2S_J_ zU<&T$W$O>O*;)$#VtFlOPjIx_AdLWGMlziGmjBp9d$Jr*3U<@v)KvebZ+W2O?m6_o`_^`{f1 zD43O66|6auKnc|#QvZ4Nv4Cv;D{~__@V+%5L<6~X5a<18iO&zd28+di1(N*#wSwm# z`aJ)O7~228kGv+z`Nr7UcTv6zu}Nwlzu$~)X~*vXUr)dRz81gbc^%~NY8@w0n|Ytj z_`X+{h{5t|O>>!3b#KYJK^XfzZH;oB6->{9BsTbJsS>9zn#Z)RA~L* zPqP2A-+w>+MluDKF~K<5Wa`x#bu%Gui-KG~Vr>KEAB7OA-1Of^*-?rP=3W*a2Cr;8 z(_gXvvxh)Ze^{Pn*aHXwx0v$a!GKO}a*G%$V(f;G2^lhkz$H7~MtfEN9E%wEqyFb_ zzX$3b8Ahyx>MM$kvwOH+(Df*8Ilb)bNfe`1`z^;^O-ubGRG-G~f9>hYr_h|7yQcb` zM{)>b@8p7D1ZhO7`>+se*(Hlxv;YRMvH!W>*F=U+5NkP<%gH8p`NR6sbCCuknL67K z-JR=PZ%Ea3Zb$#l4EOA2lVL?i?pp`Qup`I7+FKyHY+gpUIZ+VFw)mg>Q@>NWsCpc7 z8m~y_q4reTybt0pUqpn=bbfDMovIS?37*}hc~1fz$B{*_NB=id$bP1mqRKaT1S-jx zMKCRlxa-H6fr=NiJ&v#R|7}6aSYm-J$k1 z4?q86!U82DtLvlbL}B*E4uSN~XlG-t2t>PTRz_auo%P2H23!ZWk+A_y-@k&w`5&dw zLc|?&e&n#eq6#8x1RmU^Ui&{ zXj{X)fkxergPFU6vFCmtG=E@Q4r2E8=aG?!uxIIiU^iKT~jW?e$LdHy>-+9UFiL?1JaZm*u3#g zubRoq*_D13b$ZSPOOlGt-IkB7soDR4(El*u7P4mbj{9XtLk{!gItw`5S0x+lf)z}L z*?*2RIiskZ?^-t~&26&8Wfd4aCi%&0np@iR%dRe%kgo{bsV3fxCgOP;o#~8-PWsUO zpbTLscj2o;p`}V)`=+O9A#sxC6cG;;QGsP6e-Of_6NaDRQ;zG`N2NqCfdK! 
zQ3W_HSXdboJf~BLr2iMqT(yEQmZP{I8th__q^M?nP{Q3d&2@2tg{Ok+avx{?&KSRL zgF2!fPTsGk2E`uzPKG0t#l_K|xm?Gmhn0*pg!5%T;<#>~{1A1u^VZ`UJX*$^z<#c< z8Ag;QFGwpk#m$|$%NEc>`AyA?+G{AGXg zR6F+1Iq^6;WP2r;wR}6SFY=0_&?1rhY$+kdDV+ZWiY;Uk7FGze_Y`(R?|s|p`L#4 z$nxU8;=g@6SmgDK1636(m-Hsv{W}3lnm=p7JVdC8YsX5Eeui>X4?C&PnQ^v{;t|{7 zr8(prT7Tbf2?UJoAsi*8t&z$&jw&OjA0zbPyR8YZenKF~^aXNlanaPsI~e zpV&hGjEa%c@2xbGu(J-082KZR1WA62a6EcaV9N#egt=e5-P=4Fh~V7@6RQlj9(uzR zJK{mk^^5L$USX5AK!ev}58cDXlHK=78j0?PraNB2Xhivvv~LqD&v@xo7CfUo3$Jl} z_DR49mmqWEMX$uA?10JsbL-RD-%J|38Wok310RgnM7B)7jqj-H(v92Sa=nR*ias4y zRbu(QlB&eJDlj0=5ojs!Y7-7kftrCTpF>@Fp3b6S3w`Dib~kPbc< z$@C{Xu_QR=9_6%{D>=nRSgM(N=Z==i*1igXDM$m&8Z^fx23Fy+Boi$2PTf?B>qna+&lDkE5%m@7 zP?w2 zFf;nlNIiQhYdOTYbS{zl9ycRegaLo|6^&Hu*xZJ{*J zzGii(Cxm*6sGjFeiuDiv#C5dKbd_W10OyIi#tuEmwD#?t5rH_`_t`?~jaLViT=RNH zN7;AfCtsIu(Z`iyhEX@*$18gkVp**R2z8J0FgqqW|1>TvHf~hAclPYdm;Qs#)LDeo zL}j@&Pif;ATl53cK@y^nbXK_@yxs=3TdNoYUg~;6uIK7peS6KD=B9X!ygL2Bi2OxA z@%?pLc85!6`a|)ZAd1bW+<#qo@E-{ULKKgce7jXU)?ybK#}wMXv}QI+Tt9p^txCN& zrZeBqtKqZLBf3*~>Qb~+ra!iszs1I#%hDQ@2@-ca8bhQdcYaSTVK2Y@Dk#W0ftC|> zE`1{nElb!{TK;ibPr7QLFtf93gUf#_hBG4m9tE3rP&B$dw#phV>4DxMUMHIAWBwb8 zHNKCp+1KyfcvDi!CAeK!Qhpo<$_~SP`i$^QH|0z*wA5$#-a&^b|b=nn&T-?(<~Z!rGtvh;T+wkR}&T0Z~5j6X-FRny0e=8phQrr!f#C~9$Trhp)B;G z;5Fiu5i;t=twej}+(O0N3|~3%?kEkiqFT@@-!EbHnrRXc^z@e zj?%;$KNRG8L`%zsV>i`&f6EK>`-fORj0MwDkU4O0kR0~Po{r}JRnoMClb|+DOj%W# zn)iygaG}0Osl5yR`T3JP#TLeg zmb;?!>{syUfA?eKv?=JQdph1plX;B`FtL4tYLSQg$G6J8g!G49)K`i+Jn~U7MI
    |E!^TU!U)bRxihCMkPT+ca%)?hBaRDtPiP)3JflSNf=9HvQ1#5|Kn-lN^j5P-HkQH!YH{8zhHCKA#p3swXV@`!KfJ2w-cSpAqs)a22llCr%gNj`n z!W@l`m-h#rlPhF96#PsZ)N_Y;dQ6|<6M3}xTT0KZ_0jP%_ACn(Gx)uCnwxFR-#Vb4 za_Xs(yavr{7rl!+L1nqo@vb@_y|gyX6?fjm$Bq;W={*MW>QD3icJd)zJ;b@~XXqU0 zSZYgQ9pPn6W+fa9HK9@ehZV-_qwZxDm#^eeT9q>Jd zAlz~-zy=*@rEEcrk1B54?B^2_&}61Z-d8z0g3NZ=l9a~;M#TU zvoS+XP0L9tJf>IQu~x_Q8@`88L>D>L)ai}!{oAfn4!a%b^%_jUJvR)8b5FC{6 zgJx>E^);I0tDxI{FP;8ht@VMF zI;vo2lbOxjzv0i|*?oy2Bw`nl;oY9X>13&A{}q%jS!-estP&j(I78!*rZm?jB9YX; z4(2o)WGKE+`!GaDX@;e`@U%ajzU!qPL&!nGp*kC>RF-VS(j^f_zBXr+jFB}J$Y9yi zI`Po-0c11G@0lz$ldZfjK40eybk`Ce%zMip(wAYU3rgxJaDRiMle&Rr=`a7J#gh~r z60M=5cjWE?MH?TOSIy3D!vYMbaidnmT#V~995o3yJC~Bd@uIl6rsY6&A-6>AdNcHs zvO8|NZW8f+nX9Trhd(cIznrY`55lE1sYRq?B7l? zPWO&jVQ9x7sz(`mbw*`;V80`#+F!jipb&rhEtSGBBo}JL6X~dsHiu6)%1-! zJO)H3=RQu!MpW)b9~yw%8jErVV>ClfM!(5f{q*BcRMuvYR{_z3no@_qPWK!Vk@}Hf{=xco8gVSPov9EL zp`c?jVIrqlpui$e0F*pS_P$=+`*Cb5Y6iyQ-2@ohz#f!(%G?W-b^JEuLH&z3A{2kZ zoNPigkYuZhsF=V=O4&5tG2RI2qqXi;khSOh8yRjNLgehtvaRC+JT7mSF@CbqTuOpXzDrd`Td#%e zBcgjHnv>ZLN%3<*8Vdv>Hxre+HwVyXW~UX+>nH40a2C{>81}YpQhm$vN68n92JeDIA^CF@5IKuVRrgJZ zgLhKy?liVTX3I83Z;3`MK-`hoj#RB9uAp;k7ppX3eSZ#eFB4nm(6#aN6iZgIF~}H8 zzN+{7#cS03Z3Ac)$vh)!I-Pla-o{(rTVqN*&J`I;rY{3jLoWiiVl>8uSRDYxt9NuV{hBdYIztEtES|hj-h|jj(#{&P}81qvRw57SA z4ytTw{O?cU9ENO+L=|aEx*V=sWiJeWAd#K=<7|$6>@gR|$&|!^Qzg;qy%@xa@b8Bj zMK&o44ge_(lKk9n;q8Xep`I=vYg_v7m*$b&ax zTb%3EO~hLD5`XDvzD_fg)JvHW@#6%=l&bnzzO;y~fGS!^2RDCAJxcV><*8l#Zfm5T zE-^NIP6IsWR6*$CWtl9d)^Q;Q>FgpZHEE5@#)HwEc<9y{EsMI6K$UIBfw zV>5%IUkX-mGCS%zI4hrEiLtsi(2$TVhK)6G*>>h0p#ygB zipfr_Xg|^cI12!9rt1Cr2q&$pX)`rU-`3KAqdc!o)?{YNGFf93!zh>XoubHJS2^KQ z>c%Z9degPQNdg8|eA_gRgecT!_vURP5`EVt0l>jm2Lc&2!jc5ml8<~cQ(5y$Iw zJglsBbGF;KNg6Busp-z=7CUWJjt{W7`npKm`elcS;qQ#!g0;#=Ip~zfieX*M@#}+m zQiWoKsedOxi*jX1+~=gtP%sqw-;&im{n7NKBD3T~>R>TR|0+?cPA2ZCT{+X-a9*@y zPKXw3!mr5ieV)&Eb9~YS%W8-R-UQQw&1~+-wD4{032`O;8FG^v@un&X-2Ku=UeBbqIAA?_ajGg~Y*j<)O!N(F{bSpWwgTfFuT4~n4Q#d@L*}aE z`thqR#_;i8PfdV0V2Gi%rS?lkI*NGLAVa;9*Bz|9>`AsxCK zOdNDdV+95?T_!Ah?ssMopPCLNHixl{fl}M7xqi_vsX|c_O1r_E6s|*Jt9bX+;%_0u zP1{d|z1nevxLXSg?FvC~jkxbdf3&}=StEDSL)T#O|}=iYZk->p{_>#a-rLN2WWv6{tYEvZ*ei6=b33nk+3W+cE5^2|D-fR7K7o3c?G4w_G-33!rlUu`)$YXq zBlWW2gNDTJF4NJRLNb~Ose){a1}bbFS$>~p51(-MVvs0>@G`VcEe*Ey&9=?`rJ=*W zVOLT0co(_hje4VMhNG61>{igZF)8kztfSQEwbAByq}l$BL-Z9L{qS5{-F3Qtzj+7M z@~N!oy`E4*VX>RcPN4rSD)i7+PmIGGSmO!53jckQw(q~Kbws^8&f=b&bspQlPbWvd z5Uxz%?x_sW(0MCn3uT)Fj_-m1V7tT;71xEQAO-0atm>zJnf{Q^s}e6MK~;I@pB-IN z68}dlXra$EF1M$svqFfC+jBiJO=ulCs^RY<4IX=JF5A2c?n}`>&hUPH(mr->FDVjH z#5?F{8?KI1_L7*3i5IF&|29TQv~?P+gau5+*pu3!yr#>b$l)4Su$nT|Go92T$FkBW zU9615FPRLDU`mIWUCFm#z1yUb_y+(DWlC1IDoem2=9nL>f~aaue>#rHZvc)hvejxH|tPQ=W7Kj3ErF`m8?86b~DZd-b91Xa*;u?qTcbY(yx*drm;q#rWje*tbH(j!Ib>eXeR0 z`nx~hf(OD5XXo(j0*niyVBGBr^>gyE)`l|L9ulUY8q8F}!&1gjC%LX+_RGw^ z=K+!LidtHn93tfY#2jU0j-vWvr7!Xp(Fyv_^B)w7R!0>_XEXrcp$Z&g3 zj>eo)uXl(3)xr7su1F_Dy$v(sD=Tc}%q-S1y>2aLFNk(@|UjH0f zJXu@bqDRIz&_3PXks$I93`?gy)0NG$y~WK(e_d%I1L~phmm)VQ@ygDE=}u;Zd9=r1 zLW>tr^O=_RCfnXKTy+JdT0AKY67Z723rP!=x3z1bXtb(=;LPh< zeLWw9Q@;+i-PEYnI)r<{EojYy8|UPDaE!drSLyCK6e%>VgRQ+Za<|9^nagsxBMc*1 zAlOC&L~e7x)W0&cHfFj#0g<2UNFOBPpNz-v<`eZeHIzOcw!_M*mkH*XV5y-9^K&si zpZh_=8N4nOSu#ah)(xd7!Tb86pZ(d0hvoSB(*sNqFa1asppc+?_@4vMCu2^kYi(Tt zd*iy=do3Hu!f*Grh!e2Jyt5>L37LJb3KJ*I4Py)jK+VR2H)pCYj2b^dU*er zzxSqDt8^qvsNJmLC3-0fF^eU3H+~TrbI7@A>{yOl1Ny#?BPFALN3jJVP5MHVKR)P5 zE^X!qltrcXC&K1n?QLY%Q(-i_W+koMuV=58Huoq#*g)>EK zrs}TswttDn7s6HX3hzTFcR)&Pl!nukcT(r>e!BpAL}3z|Z8A-3SEr-Y+~ivY%RYyb zF0XyLk0OgV{0aS7uk54h=SM?_Tlv}dwgys|Xl)4N)S(lY8KAFv*NXd7#T~D=WEK>o 
zhAaj{y)L$Y-nw8?J79-@W$k+D}TRa#86Pq++fk<8Gm+?mTPf z(~~guL3Ho_d_Fth-kaP*iTA^X!@SaXzXUzAA=_ZtbPIRc+}*d170hDPGoIR^aNy>Q z6xd&Y-j6IVT`eFh#Eh-#QD~p&?9THLOnhaUC#F2~!Ew2NqTANx@Z}}7Yo3_a-w|xN zvyIn53FKL1Or@vFOFa?+WcvAgSvvAX2UdfBrJEbVz*JwY0@BxM3khkeSR`2mO&Xsw ziJ81cFWl`+#dMTqL|&EC4xQ$V-{zt->ia{uj5n}hr*#uQ=H?oF@M!0R(0zhZ`>S~H zGkAc^mIaAu>4$&R(@P<6(*()p>IW20T3_h@ehzL|O3rSG&Yo#MCI&75UUW$T@qaSF3;-yr`)&5Q-Ff1PVPD|Sgdu`p>s3BdF9%?kT6Zs(8uNnt9 zreMqBNM$8E>eB{g9iQSS)u0}b)UvqYhJiDI%_lUonmi_l9rZkbh+LLU3q|?Mi)+#zW z^49+8!BeX2sE;B!e@tI$I}GYZk73ybcxT&L44H^&(JHC6Bk9GaTzr&CoD;`1rAK+P zI7>cdRAC4$K*(4K9V)B8W4CDCvb0eMXID1d?7b5+<@`)HL)KX0M>zUv3W-_f=#!9N zAA(RF%JiHQm}BS{`Wqs?cW@(v+UNTgmFT|qr^A?IsI?HI_xX6NQ6P`U+jpNc{1OxG zXHcgCdxd?=lC>>EpOo+qS9WnUPEv|INgzA11_s$nZ-%?|ba#JA?!{nMkS#rznVf(c zELLgG&%$d0hCI|9YScQ{Xb$P1EyRY`YRhoH@B1 z4;vP$mW`aris&%)>55|)tuTuBO`n!NIvHME*?&22;DhfH?@ySXamZXD2vv<6kFmd6 zI9>%wc!u&y9E9zUULj&vzO9x4ok# zgtoOYEo>XMb;D-WwBliOc!&8(H2!3t`>#wt5ud+GR8S-IzsMv zcX}i>L}#{^R6F9}K21zmkDHGh{xFmN%G**J+|}iw^6NaoF?!AzRdo&#M-|+7>OXX> z93y|X^i-f6YEkIDvvcQ1+B>c5a?}JOT9A_SWoKVwCwEvm4iU!vPZ?P^=qUy)G*1R4$Iypx3{x6XEevyzAi6(6hGUkwR^=J&|pM|lLpU=}!p;ZudgS)j+ zy~n3s+T?3pE#s&#-1Pkj#(^7!I>)Y+t$>j*Av>xF5mQx>kv1KHsi%{pg6z|A&UL}g z+Yxq^#*uoxO00b8!)0xl_1y7A@huHH6@w0}P zWTC^_byS&DqZ7WDrhlt3v{s{KAAI;pwj-*~Ht=bap*l{HC2tl_*^M%@ zbb2Mi+QDTnIPv0Y9oonQWUNUs{-0p7qmk`;jjya$_c_fOOSYRmxmGsvvVWdASWkR; z{R;;p1u}eaJJdD48+@v$f0c9v?sB@qcxrD!biQJ;eLQZkQ4^};GHDW0+}{rB!2d_T zcUKy+WT(`rPRtH+bRHIGjuKov$l}wn z(JqQ!zC58Sz8R=r<-^;^FI6*d%*P|0y-VHOCHhS-Js{=bCKve?DyU#3waIHKwuW1+ zT<7adjk4G8Q_+_8^LE7JV~(zIJ09|!biIajvEn}y}P*Vc=;t>(WL(9xq4L+CCoKty@z1G!zvI&%wzhPer z;F1zJvm|~E($QZ0;QE!9=cJ=|kmxSQxKh&Hup>Q*Ui(#nE|bSoatWFHI(ypxbQlke znfjhN5iQ41gyw0Q@1u89zaMm_{UtN3lR;dyHo99?)w83}C0H>-huq4swR4qS*ldfY z2b*71_35LIk*&0&WkPgtKcAjQzf;1pEox239~VU(RrqFs#0+esa@Vc&U#nHGwg!uY zf1oiB-1NCEjL)ZcHfPhMexZU9iz4?1AXt*j{I2?u#f$e1}Et4^~? 
zXlh?;wm3Od{%+Jr_Z=LK+R}k?3{!M|^BuZ>!u6Aon|9yi>_z^~+8n$F6E@bId+!@Q z#2Z6Xo0y0$dGTWL8n@(>#7yUCPmi0P#;+x~NDdssHT}syR*%>6@(_j??zoxaHL2UV z1N`jahE_XV_56?r($ecOC^GguHA0X3KW_4x@8r3f_I;l)JTt2rbJGKJNWK5)7I{C4ff@TGxZ$>R%S{||LR z+A8C|lQ}dlW$9fVpxx_3e_1wl5hD;`)?3JW1t}lY1<$w?`;MMLR%W2Pj$|&0CRU zguslX$ENQZd~#nK?Rr&h22v=3_EBg<_>Z+(ngj&c);HH}?|<;`&i?UGOlQ>X(4+cn zg}tJpbNYX`xC>Q8LkUU7HnQ%;iRz%Zg5an zJ)gVwtU!$&!ZFeypxZszArPQGFKbKPWnvq;SYWs=vUIhAS>H*etckgavP0#7o0)>W|N4%C|w7 z;ibZmvKl=8#ztD3k-#HBLdRL%-pG=jnY`Z|nS&SUw`Lm^8!aC{c>_|iiEVn?eIshf zVe`3cQ9pgDqPz?zZ$luQ>;SXR41KH;Jg-v-2Kk(%=Y*Vg=`le`5Bo7?Y9^MWH#9$Q zqI;D3J0#Yh{--^aZu}cK%Og&N8OMnniv8(cjG3v69OtKK3g`B}8nM?c2_MpJl8&_R zre~)2cD3dL6AIjy^twbtaMY_Q^9hhg^WK>0<)*|$dbJg^QBXau7+?~fJB>3^&E=(% z{W+V}GI%+~$CQ-ZTXpn-}2ny6`U9eP4w z(#A?cJ6l5pUM-2hs+9zG-u%9`t7C2n9&=Amrl9vqX1T64Bkgej$M~NR7=eiqH6`cs zq3?Y%vX!G_Dgwn$r~ZraNjHHsi3NEvcpltpZ4J3ariWOn(3!6hkhv%&B{c<`eGmQr z)|^ijv$esc?$T@|DCmxJc=%B?v-inIR{yl)Bv%tAYAVd%;P$WKp}Wp6CaBt7Xk#lS zOrNdamkaTfu`=Z_`m1!`bU5#6ZDf8rDqNCTD~ufOWi3C!tQW(ZP3x*~mucfg>_Clm zc1&bMUXl&ER6%1_a`JR6g(MiS09uDUJVO>HsM86_{TRG5($n!U(}?2X!rPR{h(b4s zb=-BxnaP=1xXO0*7t8NK>9!JDU$iV!Xyibc9HTVJO>j8=fRiSi_o7trQ#srl8F=t)^&7MKts6LJ}P4~9KxHvB7)`qrKBYjK$eT!WPHlb zjoMHLF)QfYksitylb5VH7(uL%@G3$mtO_QTv8{eOcf%xsZ#ggv(=)C#id0^B`US z87p_Qn_Y*14>R>*Zli+Yx{Hk-b8~WZ6e43!os!H9AF8|4UnVr<%NB3zvg34KM|3R- z<4OOJlG1Zx;VYVMb>?J>H?>CwE)IH*5`jz5s1s%HmSfQ%j$<1o+!wYB_NOwE4xzFU zqpFEX@4XGJkBltbu6n-T)C8*U*WU_IHa!ijeToPx4F>J1-!S}DWE2$8h%t5kg09M~ zogFYrM6UE8Gsf~7e(iZYT7Gh`<@D4#S%+*8iqo~tBOwIz^o>kS`Lbqyw(705=(xni zjo4UQ`_SO90eak8gEm1U@LGQGkMdsG?aKbNi#z^OKU(?=SC?UKkIjrwFJcaww`RNy zys2x`W9zeISIY%%$OL-Jwra8of($YIhKmtK_iD7Y-_k$ad>&i>vt9OFm1sR4PDCt2 z-JzYBh82aY?42oXJwCr&*oaa@*5Pti)VBFXKJ0MIFG4;`$SB&v0FHH>JbiPvVw`os z_!Tto?wzG3Mj`#*cY{Bj?p9PKP1d6^SBu7xNI|)f<>toJ8p1(E7y}84jz?TsSX2r+ zTBwnWc6e^=paGkU9sCC0DWgylr=faqJ_^k=hVsN#-sS9rpX1lfoOii+85aceIBz5h zK0CZ?pSy>5sL8&uPU|JEuXoG4nrUeYc2=D?Uilk*_p9mn<;V$)67R}epQFuN* zAASbcy1AyfpRZ&0BV!BXAfjW{_gAKQc3vl?k9uY%ezxivsE9;!5N?W#|KdJ*%3<3R zAYx$R0M`bT88aCnX^PPz5Z;d?W%c-pIiC?Ju3$=%}c$05J9@)b>k97WcJI>?bTg zpmCq=JSZ-3cYcn>zE1AcInS&i(ij?B`}5 zgECDDt=p708vZ=OM!0eNO$kxYo{U0LS)^yN23FS&lZN4q+}8p966NTe%8`v@OvQ%r zKQMVI)Nd|CJPh*9UygLy`B-f4Sg)US@HegD8#}XaZ%ms~_9iXXyC2MOZ|YD&l^s5p za--Fw6=lZJ<0Mvp9;P$)12+(^AVBLxZ#S7C1krhWpOe1`jELC$fNGA%u6UawlEtSQ zU;q3h%%BRZKeVXmwm(|sUY`g}E`VozsI02OK;D??=<9o*=5XVKuu_=O79; zFgTkk=p&H0eO+v5lQ`%8(p`vq{RBz*B^@INzd!_0h}p6+VU$_`oyB{1N72YAo7Gtj zrP)W@FCmtla+_}~`unw}3;5_r!(T)jIwYru6Li-rbW%`~B3m5$(^1!OHz%eMh>>|O zooLw;WRemmq?A?!{Xmza$s|gN4B_iBdG|Hw^=>wHf(J13>Cc|ku^D%RH-I!26_aUh z)qtNKcmc5q9-nO&4$H$;75R|s&dXiE0x;!e3*Fo!6A%EwIo6rK%;!^&mk-D}0NjLaaF51}-kc_?@>*7&iNbZjS|R_aHRJjZ#g+T0)QPEI%Ok$Wyc z#{Ca^E$au3dqjxpr=rY#mnr75PY)&3Aa@@3{@)wVS@8ecBNsAa(fRhsAWL9;@w3hS z-@~(OP^z$jI1X33j&O*O1B!-7&Yc4$#PJyp{wpCMGQRL~+>tz|u*nY_d7PjK{`oCB zq}ER53oeeqSwt^n3uvsu)+^kI=F}%+`Y`nYv%};1C`x)`1_ltYECoja)&dPRwPb#~ zUE8IqxOmUhl+$25!%EK0AvAH<$qNw~Yl5BW>uFX#%G{9I$`X=EzFNM(`6Z34y*|>jOGx}mXp_Nn8`O(8Nc>FCi z5U&(&ist-S4CZKR`fZ2+>U}uuMI)q|swC$1R`iXCO9N8+^;_qY%e*$sN}zU(Gk{eg zl?#=v_qyjOoO$E}U<*W-cyNGQ^>}v)faHK-d|<9w5hS!>RzE+pSYJZ^+iSXK_?@XL z@&3H1*!I)a6`~JcAhgco+6F)$D1Ur#=t1sDFFcqNZ5R zgRWzz*PTEVMVUD{CIHo>P&Aw{EG%qg;Z_S^UEt#4N^ZY?sjmL_^*dBeO-)g8@%Xqn zCuirUq#8I-dTN|4EI+tE$by5?=VO0rNLam(kf~m17yBVTEMR63sm;v%dYp7^#LQXx zg1yKoB1k)F)7_hq|E<}TR%4D3By?)NTiE|bcPGRB@9fWHiFau>@~xr$;sc0Mprot4 zZgrFbpaIu^H;TJ@X1A}+ez$GOTMJYiigq@k9856jQtPx|X6mzbymHae(?0)~4H9)B zu3PPMXP3vR7VpXyf|6~;0iL2J=@NQfLE-I3;=N|%FB=RMi2x12tyqa zBTV(~o}71Ys9KPuFJ6C)CcxB5)#rNIqI;bJOezCNE`XYpLwd9Adbx+?I)eb6nVb}5 
zO*2^20qQV>hgPN*h5~&tywWMyfu(|l7GK4aS>LFTS=WQi`30V^m}0+!M-o|#GfwYa z%b`=W8E{Q{X8`915Do!&2Ed^bc~^-i2n-Kaa?!qcK;@M=k{a57(g#a!ba53d0jbij zvJNWZ4bhROgf%ooSL59ChMc_I!On-5pM*K1o=e~;yuaXuhD*qipOS?Sn$Sqv*RmS% z@ybm$ERO(9p8eKqVQwZ#{X_&g@z#}4^NkfdzE8<9i%z9lSZ1g>9Jp#VNXU2bpCde@@j!YfF3(x?Jt86!mzmaaWn-hTlSzi7*cdVf7Gk z9-IP_C-LTM`GgXB?c?~zv1ctvs>8sw2lRb#%mAx65+<}??ietZ-C{iR=87us`+36p zo{58l7C@$>Y5bxPU4XU+0OJ4*O;u&(Kd2+~8u|hJ70@G7U_%YykO&9}*7M;%KSI$R zZrt49tTAz5|2(r*(ns#tBwZ`^$;QK{@EUll5U3p@CL@D<RRQ_9!k#;bi zPMCq|$GNu6+t4g{&>ILjAep%xt|~I%gz&Nc2ZUwzWwH^FWaL&ovbv;h=;-3r3S5&TNW|qeqR8r} z2{t1>CTYUvRTYj(+K-~Jv@3B+6FnSKso`Hp$Z`=wxx(2qXk2Jt2?~F$K3Jw@7nAgL zet^)AG*$)S<{MasgC31iO5D8Ww^w@>P^lO0@~4dMMO{` z9H6b$1N_jkGF#xT08p-0qQw)mDKOm976BTFtynILJ4;8+mdjj`LBqQ>#)C~e$v*en zWg_MrF5on&Jvfrv9X)0XL8M@x#&>{tW;XZ&Iz3h2HGwJ}RP#wOI0P(1m5DvJ4Djl!PJrP7EGsH1YJYz}aPbUkHv|6D{X_#faCHjCxPK(-C%d(Dr+x;C z7EDa<-fiiK+`sB^I05Ur-&`_0#F|Wup;X0N@#ngV$!tDo75+vyQf?^imqgoaTAL=> z!ig_8Vkx1EwW^X5X~x9iP1Wh?X?zUU(MPDjmk6gDzxdGYGg)EP7GR%sss1g6#O!c5 zv7KmOSDlKB<`1>Lo2Bx)STr~qCcXPcEz*px4-%)>)kW(z$NKZ+2rFq=LM>8fdrWckQWqC(EobjHrzqa> zEwTj=-7o2w$-Vv|t}ZtVvG*QjPg4qq9EUUC&94A@P-2 zv2Qrzt#~dg_s#ysBaEK0BBxBqqE@M7^w%jQQlJ*w4G%g;zH*oD@n|dhQvUsu3Q`;RoK~sPCLXh#(X;D`tv77#W^P>TQ%~2^g|H}R zSm2~`QaCyvDLjK1*><)VY6}j=ie-A~C@aAL6%~N<`8Iae3WNaIlb*mtd#Gf1W=5X* zmxq@-7VUBg1VyV6Y-vD;wl49uZCR5w5t-SYBMRs>GA}Y+#7UlvJChyx_Ul@|0|Kg_ zY;?D?T%s3pKxbnQ$e#R|+pm*%S^}MCYiLJB)j73yMB;B&75ME5i zeQi8&N*MR7`JOHICP!dw^#0o`9M$K%WS z_%Yb6TwRth+w5BuT+AsH5~U#i{L4-wD|+A^PYEZ@H#RXfH^k9w(wHap zJ2~Z1z7xr&0aV=VQKebgK`Ctyw}aO|C+*o=eO2$G#)h=r4qDVcm&EL4u| z4u)Io^HAo0GT0~vojZ=#*Ae3VH3vJxH+GBf1YT-|`SFNF=7A+KAdz{L(UdYi$?=Ap zovbRlt8UhWY`_1Igd9{W>AOYf0i~{ok57N<;B_X~WWpDktU$l03Xgz4dCDl0tFr+t zAzDS!jIgw>MfbLUuxL1rBW2an**l6ooysq4WSkf2~74|FZZhAEtS#n3` z5jrCZl#m8}N=Nd&Ia`zSgjHK~f!jn43k(v6@ z-_fi`GGt|BOLU_NefL{U?H@rzSNf;lD{}GPVvR*frP-umf64ltUu9sgxwpSyr2LEI z(OpgjL*}PP1iQ_?8aiIQl3+=Mq@hJdz~V6S0IPih<9Q<1EX``;kX--A7PLvmpIlytTNr|^Cgaz0Ft=_Yfu#uBWiAFfpM%&vh?fgz`OS`g z;2TEH>51_b-=;O#Eq*fPlXuWm670!^DNWF8w(Ua9j1_e@a=SEbtM4y=)oI{W1l^#p zI|+LprCHP2DcJ!Mr$fN!r@Tk$fHH(Q}!IlDJ?V>hgff*FOh zx&#u3Pi^)F>$Xc;*9ha~l#3lxVmuu;3X<&25aV7$aZAH46ni9>K)5A)9@cK2sO7au z5(`;Xd+nJ2th99l4weG=TQkY{-Gh^|$Y@#F${+eY2yw63f&X0CjQosr_wen~#M#QW=k( zuGad1+7yNdM{t>-?T7bGS1IPhb^k@;qz#S<}f^X?L5%EbOHr41Xg zkf4F04)Qh@vbxTiR^A=8(6_<(6BEc}##No`$mo*G@9|=A4>Ac-w6N17F|cEPcoJCL z1Vl(G=_aJc;sawy113spR=PstbM1Dm{Y`rOT*B}x7qt%_J!@aJ% zNspGt=zfDc)nBtWA15PI&D;0*CCb2j%r(trFj(V8#?JWbogoX^sjdLq(LVcOw?O9;60AR>4(D`hC*h^R^Nx8jM07Fs5&k57v6R z6w!YFh)$g>*ri4SQ*x-Qqo#GgQ>9;9gL~~C*Z%LPdYYJDd~|p{cH!i5{QxrJa+*9r z>&XD4?q~A-CZig#+V1$t`3U!9v`qyzOF_cHT{*h=cj27drZKBDQW0E1H@lN9z^An} zgS11iP1fMLvZ$t^rXnpWyIzL+?4?vzE!H&9^ypgek)+d@r0vGjEQ=ytFf_st6ewxu za>ZP0&-VKYb}q3uS;cxTB0~D}AU#Q|K54f33}qyA)Ov`t29mSvx>^{P))KX4{irjC za4qMVCJ~SNykKpt>>4?CxSzifyHSSA2U7^`|K&KL#t%?Gf<(TVE2_U*eM!T~wm7y@ zx>o35Zlg8(b|atckn}U^{u&vnRXSf&f#X)ekQT*dKIJI7@jF5F3;h4$mQ-Xw61nGc zc&HkurIwnn^*mMFU8TKx4U`NeUlgs@N&Gm*Ld^IVedO1Hw}7^4od!+55ackV=@`A% zvmE1371!5w0N%|2Nq2QWhg{cHR^Czjl9M(%#le2iXx*X17{+3u)|q=K*!=};@twMb#L&5q6AtIJ>yg?I zy5)U7h4jz=fYLoC!9Wje^QDrwH1D(|qk+R`&Pr3sIHTnR(qmo?M}4*GXA0-{Q+7?+ z87I4$TCoMY8!|S03KWo-igVyEEesyl5mH{Fill>gU>MKCHE zg2D=6RTOBkoya@He;!@Z`lfc+{BkCv5%jqm6$Pcd@x$xJ*J2fK53o^$l^db}6!Os# zW+bChK|l;m(x3jzqar5HIM|LSNvc{86T#-Bs{acbHbs$W??W zlFb{J*c1iNFMLly(amNzWZx(4V#VCbuih?HVbN&lRuB{|qjlm+f|MTQPJv{qL}|yC zmX2);q}=%wRf^X7Ngt&#d0oHMJo5{$kv=~-P5;N_hwf!U;P)z#_onRc$pjfJG^>0a ztRXW$qG^Vppc36Fb&3F0zyN;M^M#>{^rd<){f+U$bIvkB0m|Eg%J)2n|qa71hSfdSh?P 
z^M?lP6maAij7=dTx&`>nY$5Lzx|i*L#_dCGa!YM;O3ZT31h`Bz5yT$7A10uh1x;SV z&EG1w^gH8u@f{w2ch`=Z;0lIA^VP-Xsky8J|93SxIByU{8{PdrDpY%j*1a?BHv2%F zdh3Fq+_@Syq?%2&%~68McdDq<*(beQKZCIJ7Wr>*3v2`)j3c14L*GD(;&{ySIQ!o zJ}xMlAb|U(r>mRl{T8770U1}~X0?Nan}B1;L+z`=)pGHys-bZ119k_oSQr`yZA2K7 zH+^5{=gyj=@3x9YZbrJ5jVX`6oiHr=ds&8-oQ{$*^QsJ3GE+~RzWdQ#LGmAYK;LR)TX12QKi29f2BQ`oJqe7j{57z|oMZ9g6_ z;4NSz+Sw|1HVe7YKc4XlU?u8&kf8i`oEKS`A%2f@01p+wRN_1!F+Z>OMgZjCz}yD} zNzilDXDC-m)E6(HNR058p;BI{`=(bSHd$}oJU=(pxnqbk{FAlVarmfF9_2b_EGLLw za_BBGQI{Y+oHvFr+sOt2*|Xv}NKM$$-PGP)$_nZxjUDS|JNrhQdV-}gP;<-arEro! z#SJSP@{!lmsGgXgYR`P%7))TDp4YbA$FE*`=*jsxU8+ zt?x36GvE5c>+w?Vuxz7mtp7fn8;UFlU;+Z%`!60#^~WbmWcha(bn3sT$jC4-F#*^i zdHfmvC&+`uf4-o!RnGV1v}8F?M{O}>tCFOKha`xK1x+6nG3!Yi&=?HZ&N( z|}puy35(;`(pI5Y@X6g!;0s8x6BJ4Dx+lAr$d+ay1G(h3Pwk0WA{M z><*Rp$M=jiO|{nDN>2BZzB%C|H?>nAzS`0UUr+jY3rT*tB1b?{?5bGr+oc-Ts=q$N zpZHcCJa(ojk^`rHI>QB~(&OVhSZoKX@#51A88iE@o+>y5lsE*U1cNxCaQ5nIAeHfb z5deh`PL~-3nS$Quz@H_7liS$P0HlY3PwKLV^D-U3gz(A0uFTNcRCzAorhr*5NrKe8 z%)q0--dvCg1Ja%IdU;au{@@ihIQZ6RKfQ*|=$X}(4y~uLCc#Yi zJe-ORp=RNQH$760)iE{wnqrvk9BJsso~FZhD$vzTQ5Axpr=!Ir;o%B#w=`oU-az8> zs^&_*G|iiYrOMJHAGBMbOTzk0G>O64;5d^hh{nL{G1a*Lt zFeNp$D5;F<%+rGf&MPBQpIv^|SZ6iA&mpLJ~nLB3V6gdk>nR?W?hZC;hi zqCFUte%^fZ;`6gE-f?apgF-!Du27NCs#|*v_8->%nW{AxXKA# zBHlDIH8T-BYjI}h>p$^`1ZUkeu3t}`93gYlLX#KYMHHuS?PxY!QaK;-%E}96{`$2z zHz!ShiX#NQiRiK|BI)hw_da!ly}hJ`{~aITIbqwPf+_4V5b~4Z;NTqC1G#hn(+;(? z^fCYZSfJ5Kx{hy2IWb0Gzjc)G+11l50=$Q&3JM9eE#1t?*R!7*W=cD;>L7wl-phlk z=*SMzer=oOq?K>h1Yd;bdK_y~O5{eInKzL^9>jky5h0wxjHzArr>aRH1TX#>sd`*` zNE6I}mbR8BvySFd#Rt?L!H9_j8=x7Xe{c}^MH;HAZC97Y@4ozJzk|Wa^#}~w1A?Y= zC`aiCRM*u2>R6<8ZFMyX5VL4fbs+c?f|#F z2~f)bipsv9o}vj!0kdSJIH{;K>iazkZ|zUvdA?}bHI>;&lAt%=EKmXR-u@u4H&y<5 zAB73|(zQYB-ji2pT46Tg2VEeRrru%Ei z3Xy)NlkWEm1R?5kdL}vu-Xi@fzBK9#E5y!JJpT%Xz3@G2Lq+V5a9^W=a1EjMeyqg_ z32VSo0`L0rn?jOcL7X4O@Ln-S6-7ngE4dUF%e#{L#~@%^V`5^c0EHmur;cBJ|K1B+ zOo(T(^*~!+wcED7N2_MpPRWyB;>~NY_CJjwz)?$lWa&B>6DJZ!vcE%Q!)DNLejXAb zRFe{tM;JK0U{fcKQ)Vq!kMbeq3P`el&!=m#KbI1LWovFYQ`_Cxftj3{tid&3~MoCPQ3oXt5XrvVF&r9k=TeH2J3sq~Z zK^PK%1^WM2<$;z?N|pZs!>7P<)8^~;V!CFXQ0IHs6}|P;)pICpVzL2Tr3<_trSNfS9=2`9#4c`AbgF9w z->4wdgE|<{nF(kKspQK8&xVbpWM7#;sKZ4uiKw);kfdamf}J?Iq>j28R9#C|w9?1K zTX?HY!tbGA=tkc(PgLipPV+t-CM-HfP5ez5c!rk0qN+8LIPb2-xn%CFLSDc6&k+K= zT}sxo1=-8Ag?fk8j!m{Kbk6jzU{}*?g7+%Qta|ivIme4`1?eA%*79jDch8+xsun)h6s{*d_oz1A1b6F!g3nG&R#8Zs}Z6b*W|b_GPvOCcG0&?4>q>bl`3Q*8Owd&@=}4@~vaDs!E!X=?02pljnd0(?G;; z!XKy6kAfr1neZgyn6SeOb~#nZa`w%<@VqQPEgL!V^|jk`B``2lW5;^x&8`o4dh={=T zo|~No%x*9>+NJ%}-`^~-uuJ<%jK!MuYGuVf1Lhb0A0$DDj!vG|k7xC{n;3{;Q}vAF zNczKv*{hgqz&1E9#fKy?rb1jOgIk%*MpK;5pMJ+ia}7x!n*$f`JZk5{eGmn^s2*ps ziFKWAz?kgyk?m$quG#VlIW>JQDGNvcRJXVl1s!CTtEb*$M38fGoHNQN_e6ZUw|&ZB zdymKL#@C|VS&7?}zt|;sV04q7kW6Nsz`*ur5Z(Dt4Ksq$p|*GPbq#|LiOEU?2Sr|f z3I6tcvlkQb-iYLANb;w3)h0deFy4rn%8e)=$}P0QRg75|CGM%j(wppWf;m}P;!rO3 zC?ZNP$Mr0?rY4!`5?w0SmH0F{%~J#Wfe&lM8l9y*vMyof)1ySZbw0;w{2tCey`_Zs zI10R0s+_cGK94R1nZaqVC@9`?bJzd*ZhViLE_^!uhWL5!biKdkXf9O~H%x2y5qKng z1kmXzT(!Px)zCEbek6WtPFj-~6zscQE%$|VRmo0_HXwgoB@%{2qYcNVGWC*`Weis6 z{fQW^B-k&fAl38x*NJ>cX)G#w^IqdwIT8$Yx zNSrK+d0nmCv_E(mf-2yGN3qTy1%9%ON;18?dV;OK_6T39JO4XJxl?NcC4ikO6dP{8 ztL^oOKP{h72`A>wv!%YqoeT?Rq45a-(|#RLR#di{t)E^J{7XTO*KL4dJBb1mh&hfI zxagshqkWDi8@IU+mOU|1B#;MKPM6;zD4;kpXVP`si&-ol{*YOGK041CI3E&HqFbi_~ zZw06azAId1V%CJZiXW)U>NA*?1sJIY6v}*HAYA-YYzXx51Fe%l-B&_HMnXskF@;^Z zGhOVT-&|L~{F5_6bY!vaNpf?frQR`iBL=PYSV`npOx0(QTbVoj%cSCQhofSrSS>?~ zMyJ4F&U5Kd(d5v0||3$iKg5vvBKD#mLg!qFV-0w4BtZjpRv_QOP zSe)ZgF>IrYo+~#=RokM>sk~mQb}ZN4B921!t^YiUY+Odjo3QiCO9&??CmS2D*B$$X zLJnLo|FRt#*~i0g 
zQK}U1{iw`Mwl_A6`uayC?bOB*aS0VFZ*e9+gs*8F^YJ_Gb+pH9-xHTNvO8adFhSd5 z;YQTPb(0@<_V(_AC8VSfIbuv-S#Gc|otMorr-h;vu3mp!&vN!0=1cp{iFJTZIw6>6 zk}w}!_MpEGbVL~IPKB3sCs^Pw>uu4h|JU(t;{6TW5bquse?+2dcQ)PMkj6LU=u0vz zV52tZ`^BJ`7>A)mmUGtx+07(wZ$3OAD(obBzG#N)fr1*gIgUK=6wh+EVUXo9r419} zC%i&K!P^Pw4t6|GP)tg2@>YT`9DX&Fn=-b;zER%8Tx5s_D24z6eX**O6XwIFUrfNf z&h*Ni$60+lw1V$IkY%~$qlC6607H(ahmrVMq|>F7Dv;|LZIZc>b>VKc2(elf!-emU zYqNFKAHDfd&1jscunXLBaEAg84NW0GAh-oQh5gYK0JD9%X*YGp@KGzy5N~tfPTfBF*oUax<1J)Er($oI_q7`Xh92E@qNn4X z9j+010SUDpaE4Z$KXUm2|A`Dj9#-OciBYvf(>pQB@2NA~JwBkUy>|C~N~N&BcLH%i zWnxUFLOw`_NtK^3EX_A`zwiDyO(;{FD z{kaB=O>LV;h95v2{P6nE`%Qx{z}hZ*ekpOQWq6&t;wYp;uD4l&90mXma@J9h#nq+zovv?Y-gq_iOh@m4XPZt+nwf;i-ySb3e?9g_Gtz1}0B#uCN6H zV_gmUFH=#fFNsfV7cftZVAJpEJWF*FWTvW663DJd;yXC$!s#4Krm&0#K7>oEwn4$A?CFo6(bu{RrvD=n>@ zmD{j@6?NV_ve;7g#T-ZZ1OCRvLr+}Qv}r7Twr_Uy;$qW9by(aKO5D^i=uo&RTLyMa z(@*{W0`!}iRYi$YRh?~|`4ooo$?cFewvfaH%V4Wn-_*z;JT-}=)#r#+N#^+GSY9gJ z7l>y3I?G#ITglP@v0Xr$f&{RM^rsQUnR`juWwM023wK>p{WobVZ!sLp41NBsyBP*B zkuSEq@-yB{mrp)*&B@KL{wXamXVHb??owBI%$If|DXrcL3R=%%;^7t0KRwKnch=!I_;?fyS_$_uHWJbbmT2x(%GsJt3lsKMaL6k5*!nz z5+&Vzq%zksjarrR{@8k%7x0vjgl_7V>~jyQiru*wV*?yE;M3*sdEEh-l%t?fsENFq zFigc~tuHA=Bm>T~wTB*IvsCAT)m;!Wuh7Nck$PXnbk`e@NgmLnR&&YEgSP`^Km-lm zonDg#`KW->o2LR93S1&2eF{s?g@w+$=hM4`)%JVChx}ryQ_6Z1shAJQTz%}W$8nRDYh|zMc35U`rMpk*5Lq! z0$_|Tg~M7}MutqDG_$B^FF_vfRkj=`03vWlo32~49%m{F8!zyv?E=v2^-Uq1Hp|Uo zcD6pgmPlv^YflAWVNPt#!O@=fo{k=0qsh1TZ=CykIoFY&p3%PfXoH^j@8pqUPor!O9M{#mf@nPM@CxJJN|@|QTJSxW&)9g|2B|ET4+j$6Lw4osD*UJ8 zx0`-zc{h^X5ZOsVrA|Nc{6^Lm!|aXJHA$6D;;6{Tw!XePK(7x_e_hL2268daL1&5z zIM#YPDz&b*63xgdwLjGn@lF5P%pE8&2+C_dLICJ0{XKpefX|8Q;_~_-fGg+?5fRZ& zXR9MG{U0BQES*d$vqcBcX~!rd&~Pt|+D(llv`RKP$=*9c6x5H{S@L)3)VA1=lW;gU z$h)RTyU5nmbOUqhf`LC98SKp%MivIR7t^*_euSIqB&#Y}(+UPnW9i>o{hL$FRS_g{ z^Wvk>X|#1T3zB?t;N4j2;~g^cI&U^ET)rKi^z0~$2nh)RnsykwhD@ks2JAj#Sy=dg ztrnb4;#>4YcXxPA|E)w*Pw(*f_;?A~w+EP}SX@|01y&&_*b0d3d<@y)Y%PMz4wcBa z2LX1TT3o3~^{=6k8MpHlvvQ~RpG%r)6}2_Ll{-^pvs2mCRCWv2=}Elv_>v`WRpBYN zMhjJ4*&e_RS)CiMOL@PHH|s8L>Mmw}NtHL8$nSihc^!E1oDcs^f;g+Ml%!Z`ZY8B^ zX#I7xw?Otew83v}YbNy7@ech%FTzdWeer{`h6|e0YfGobugeELw%hNK1?uYS@!rOD zV|f*`b8z=hj?0TJ%TGffTaKK(izZY^{qzCSFxd?IYDA2e4Tfln%E&n}#(oOftUM3;IN@ z*+}qJKscJda)7$5aTYG8^U!rDg?Va&>P?cw=}z6$hr(4?nTYZy!8o(k=*{(>u={7I zrtXnZrX%z^doENA{wOI))H9UHCOj16;kDjZDnkp5nw+q3 z^xStFC*^~&kp{{lj^|o#t5cm#oOEH$W%-Zi_`TC7Ov?-@GyEvIOVje37h@qBOSpYB zgK2Y(9=%E|ANv!~ndnQ{_SX@i)HzT3I(JbL@-{yH9YLWhh>(?^Zfy*T zi*`O`AReu5@QdE6D~YA@_)Dk^x*94_=#z=Z;P=w+pP=L$WuhEAnbl`hPurY6@Uq|Q zSc{P(vN6uf(e>nH#bmNR4U)&<97_7F(ay|_%z1w9rIMIf?=o00-+Q+Sp` z%Om95z>aYYoPPmqH$u=iQaN=oDEf-!}N4DUNCa~!^r5&3Y`tG`hWiy}&G`CU7`FWBAu%HH)2tdjJBELCYLy){G1A1tewplt-(KC? 
z*4G4_hbkA{ac4Hi6OZE^+X6RJB8In{)gi~xMeEYZ*|LoG6Ux^GhOwKMco*!*wgDId zJz}l%ZQ^pR>VHY9uwAt9%HSxL#QSft#c?1MCK9@v<~hmkt9q=Z1vjE$a`fy3@0RY( z*N-7X6-%T1u%Lo^5vZ3ZOvSOFoiMI0@cBG2l_YEoXx#>Z0BNXlDyXe3mHZulB=`xa z=|+CY+%?rzaq9uHcGf*5Ne)}rREb)ehPF-6$N09KOhTNS2;g{m7a2;l1h2%u6TmYC z{c46gIQ_2Wn2yhz4MZg1c$k@EIV+<9Qq{)LnbgQ54?`M^PEx7GjpUo?Jm&fN2i+1( zD_)`-^)774-PfQ)Z%o|4svd-VoD{9R_j=J2-nq~du&~t8xIbMlQz1BiswaV6LdPIQ zUgBI1#K5SKots9pg(b_?oM7Ezjt+WsciS!@g5(&YkDrsFh|n;`qnl(J@!s%c($>_1 zvB~)WVUIY6)1n{I*N(6FCXkwp1j7rq=R&a|1djiA(ck;U4w!+AesBZ!9Y@+@(6%iE z7dI)M-T*IBGmr+<{RznOJ{1|3cGRfVJ`U>mIC8E|vj!lCpzUbXkU$P_M6%%+R1V+G zSC(vq#Rb@*iJA{%4@cT&;!81S&8KU#L?;yJ44no}Rw;i(CYaVkdV z?r7;_?P`wanK3)wZ&qqFY858>=dKqUg?JPi!hWSEOf8-ZBj?$}MzAzp4rX=I(b-W< z$-hmW5C@T{bl%;$1H)E8?1T1~AFVB;c=aO5;bHAucwAt?o6_NQi+>E?#RpwtL^mfd z^CxrAdF+nUjqCY$%ma@5RyK*TQNWBSKn|de80$Zq&7R-(=i4q7LBH)`ASk;MgqJWNEqa8%^Z4wUq8eT`rgAYr+1!eLLN)N?VwGo^PYOo++ZXR36D7tCH5Qkm40L_o*UG)57(POH*>JJj2DDK^51U_>`^E=5mK&Hy(Si8u zvL&ba(8O;VB3F++UAE`O7l)NbNMMYRqK6-zX+)_qgH5|Nw%mo8sJ8#RL96L2XoZ=v zS=e8fm7gHd3i^Vcxs&OdRWSUwZ*y<56GYg^DaO?{Ptv>sOnGh4_7qW$uXP8kw)>7K>Wby^`1#stW@eC$t|w`q#* z$_n;jx{cG(3SFn>f?{G(OWgp&X0FEGk0u zAQcPqv7(by(Onc+oC?Pui#;@T5RwCI<1+NU{fu@T(N_|W$F^j*nBD$fUy^V_8F5a- zzXQz)1}QiBYTkGZqfJjZlacK2I~BK>^xc8Xqts2v;6Gf!x{=(~P-=x72hSTHrZ2+! z4?nC`w$gr+d?kj}WGU+mH>Q|XjWff}PZ$lV9fup%nJ^z^jDo?wKOUF#^}9uddRlIS ze2t-2hQ`9ahe^3HxSHHl4howaY48x5iTq&$vYVMsHa^wIDJ3GX{Q!4~{v$Ad= zCoj}qYcv(bhO@eU=`s2`I{22OYSa-V%v2iv)RnO2Qmmf z<5xZ#_ul9c(~^1z9vy8zvU56Sc9YG-^&mPI4P+ch_5%NQu>WqzKxsb zj9}FBlAxnq9P88*n-^Vbo1o>QBESD(>zdP&nbZ{jJ53|l$4o>RmfQHNov^=ABX=1e z?in~7q8vZvNs>?U%ji?F>7DxW6l*2Hb8xI}5XctTR#|y@GZT}A3gdU~r6j=yNI7u7 zn<#LwpgZD(#qN&9*IjRPOw8C5Ug9STPm}e;F9$m~fO=JNvj|opLSue;<&z>Ph17HM zy0noTL!BUK(oPYN5^|7PoP9W$P@{C}x3B+T3Eja+$u6*DEViM~wqbW^H#%t&EQM#d z;GH;}%s-rJnP$4xW%lx`1S;tgM@1iF#Na31t`lc>=jJiVBS~>Jm|mpLTLzDoA%(H> zl3w(2HE%r|L{k@`$_*h8m2@}%EUV+;tD>Z>>Jns}=WXHQi`ek@AR~(e1C`@O{OhwBj0D+nL#i53eooT-A7HfRY%T)PVNx|P16p}LD*ER^YXDj*i4ab(Y| zs-~>UrYr~F%KhvK-a#Fv`om3&^(@s*TpPH#-&8p1-VK&LAK&Oi}~nfw_8hZ>%V<#@{UNvr}#bzgh0!LB6)D2 z6blr~|4Cm1q4aJo9xKH2R}pd4B}-{Wi(rM!4K)RY8WY3{3GLe5`|u`@f!B|GNvBH{ zE+$bVqc_`P-P=H>urx#A|E+j)YxXNYmVo{KcQ;{4eGx0&HUOkOIw`wMJD7@%J?DP+ zTbmizq&YoeaB$w7vlRP-J05nVn5&Cwb9zktPSRk4!CsS6Jg>{w=Tc*%ZE^!?$s{du zjqbd)DZ6_e>&xkFc>-;^a?e%i)e!g7x_1+1`FnJNivkHr!7xAoi}iZbszjM~;TL8% z`KCcZYeJllleBAZn)#DA-ZJ;mhj+?=htb*jq1aI213a@tyH#tatpe!mxTeBQ()#D% zra}{mueD9iQI52b)ejfkcNSJ1j3g9(VhJ$7RF(j^(ergrf)b7-2q*+CvY0?M)zlzB zASxDut){My4g{jWW>)U?jMYOD1#guOxE+0?i_jpU!+jOXACm@!d!0V_GG@|JHgNb~xv&bi(3L4)gp zmI!vwEx_}dikz-hQg5PZ>mR7s8=+q{1`whe8PC6drCt!smx4V~ZgUV!{P-6dOQ@f#%b}}tznI6BV^hIKU-;8k_r}&aHY`KT`S?+&M98e8C-Jbe@b4o+BeM?or z81*NVwwFJEL~wU?q%d^BgUB6I9Gim*8^=;#&|``ZLSDu)C*r&wt+%aqa7pm6%=;GC zVp)4YUV{I!MAy~A5#~_cJ?v%`irhHGJa@E|g(!<$>gJZeF3{8c^*FU>sBW*`!c&Z3 zGDJfpFD{z)Nb9427$W!BS_&6l-p_bsruHyi8d`6!FJO%AcAR#xIuDQof&9?6mb3aY zX(>->9SzUWMy9r^;+EYKd(gj2bHc0(-9eAeDY4BhuzZrqG(Y^P!8PQp`XVMZEgq|5-E3zbHW0#@>8F)VU=FzFUXi!uC*Vlin*oT(MWtYM}1r*)&xj zQ-+c?U=)n6w{)m8|01Pgq`H zvpM+bMXa@hD>nWFm?p#zX<>MI`3O?d_49#_(apgQJmJ@@Abi3YsMP73lOSYLWC*MT?dglt+fZ;AGLJng3HoJD!ssInO|bB-PvUWQxTAJ0l~ z!Sfp>93@Ny3~wYuAN77g}I#?BiNESP| z8GgcCfy_@@_emTSvj5;^MI4C8l*SV0!T3B!7Gbh*MknqUSa9;QmKknqI?uJOFCkW#6WxB~LMw(eHp72fce@oIFrzbs1dX1K_VOo!v*AJA zaYyP*wPkZH$K#zk5xHHAdOU6lqts3Jzm=OO6`$Utk){0U%xIbXprNfNsajo8u@#rS zpJ#73yhM_VAHu69)n~Z#KRaJO8JS>P4=>Cv{mmQPN@>IIiRf_~RR;fos@&;A4wnUG zJ^=dK?~#!^ZE=BQsTnZcPj9zoQVMj_*(v-S>0vPhdg**5K|=Tchqbqgs-s)CMgKqu z1cC+k0Kr`*F2Nmw2M_M<9tcd_-CZZ{6Ad0DxVyW%b1G}Cz0Td|`L@ObJWy4mYK%{N 
z@1OPx@s564{r+&tv4IK7fGR2DsJgww@La2FH;de2kMz>AA4P=EL6O_HITK1<~mlrro&j#P0 z_sMI-K#@blRNr*^FhyQgSJ^h5Bon9wGg)u}@D*S$`DKG0cSf;&?KW>P&ff%#`E;He zZPHNF>@sa^>ePGy+TeESKYsR$~kJsZ=V;$?al2}aFahz%uW8DIsdbn)=#{^Q>41NYRLoz&Qhl; zYFAU--QH~w&ZwrtnO}bVAO@bQmsu;v2dB~E2qqhO$%#c!V4!hY4aT}|cP#6CH(z)e z?S!+m^?nRV^wkDfyMat9*f?_TLHB81rph|0{#%_Y_b$+uE3o;~p;V%Dp zTboQtiGhZf6S~8I;wNyx=l(EDld2j=`Hj>*Za5t6rlh2b75mk38fsaslOsNC{8Ekn z6N~#M%6zRwJVLEOaXEgOv%o+NCsixO^6M}imP_Sa~d1s&(9qLkhr48hM zWo-w&vt0c{d2I8jHj2c*kFnR?nUJ!wvaYT!ir*dVmsvz)WNV-#<DbfmUC6S>dCUhEf?#gp&*yIYkQ4-lR2&(n_AVSOZj}Eirh!-#qvU;-*xdESp zYAZ2ln`1*4TRK@u1us<*WA%~dd+&8j$-Mvz_nUo?pcmB3J|*y|_xYomOUU%{?u{K2 z6LMG$kM~-L%~6m~_$Tvld8R8F+WxjO)Z2@RgW?ZQLo%D8T@D286x_0ZJvJ$fA1(qO z9-gf1UlgX5nc1iR`hqG~Q4{r)=6CxVEr*&=Tl6IvAt~-X3$A2fM0kuZ?dVKDB4dhv z6X&N*%D#NceOf-~=fmv;mUJdoym$b{F@@&$8Ts-}Jn zM-2pAa~WA61RYF6)NL0V$^m@z-!K?R2xvSaQo$xbOLa)S9OGMUwV;rk2(QU7E`#Il zT#jOQY;*J(n)R`3qm4E%P|qGmYGf4n>Q%C9L-fq)Mhf(K|8sLw)1UYiT8%blQ_R%k zuDhAXc4gWj2AF*S6KF}8kd4mH1-L)pt`J%tG-^$rcl2Z9NxzDyk}rY?8W}YEI?VP% zU;3LFcy&aSH^PGRWWk?4B0~H(*_MT4(?up*KMn=CuKiwn_doXokP#mW@Kbs4#@WWA z@YqZ`V~xX-Dn(gB{nOK7T;+#U7|QA2sBHrkE@fqmqXW~CbnfOUX7;w>VbI*~&e`9c zMX&n%c-V1KE8!rn|84;&6q*eN8g)eYpsCua{JbJ9$||*?rQW%5)L!aP;7R?6+S$HJ zKQre8uM!{^jRO5QCji8#kD3pbL`FyG`BGe5jE05feZJ8<#@7nY1_LNBl+m(ZNBxT7 zXIvgPA1Sb|#~|xP3R!E++};mP0HoiC=5MYh`Uwpdz*{rW(Ln$SuiDx;uKim8E8N&; zS3>$^=GXlh6?OD))ly@Uf@Ia>;-OPvBj-xTlMLU1LU4#+c5Yz`a|j!CL2?}- zrY|F~vR5=YIXNH$RDI5FZRG=^J@~&@h8Khlrn`7m1StRCZl|9pC6VD#{^u=irFH=w z&VAf?v}6^d9sGRj{Zixgi?}gn$J{*A@A}4*jpub29~y6?A{cN}i{8GroG1N#AcMbd z5BQvD9jJt}Bm!Q=`T6YM(k6aPM~9}qYaQdIG(BWo--gUE>D8Z5RaeoBS#QD@8RM0hvU^6 z-AgOsJV||3p8WMx#Izjz`$1B3=d0)nd`Hj4oKJ@;JqN!ab1i8*E&v$=4(eM2v%mKE zUyr8%KxWw3E<>^C#DnoIw3;Ty*?!<0d}~hOg1@4Y*K2-1=x~(&?d~f?KVd#CjQ3VM z;pPQRC;0y0k-Fl-!PV{hYJ_w>k8>D_{6b>8bX`pPt1)R&^)!ptO)NC-I0Y%_v0U!~oJ-TOlhu-=SD;JI8 z13-F${(wNQq4mwsX@YPfuT5|d$|mat9~n{?5gzAU$}qrLms|^gG1L!dv31(|Fdun4ZPy@i2@mkZ2XlZwd}&2z|cy03q_ zzX5-Gjeq_apOo|y|Kj{S;tMF=?E(O!nT&Q~oDx0L^LIaB0EaF%PMI%G{k0l@228p0 z&|~B&Vd~kRAdVI56+Ut^S}aFx6FEdrCj0((aZprK#~_pA?Zq@l^{hki`M?85Q&ZP{ z+f_v%2IY|wE2`wReCaPaCVI!)vnjXJ;zP{sHKP4=Y0V+#9%d8~z&hL&{ok$V?0gMI zCIIZq3LrF;e&zY08DE;u2P;8Yj^|QA_^MBbmi8bEXz@TH5f6bPU1<$j&_aU!B(3#h zW8c%Rb%w=TLa*-+0)(y*Jr(VMrwqr(%94Y?zm2y3UijawDqa;fl$BRI@n@MCyI6JI zbcUCg)(qE`SxYUkuTvNusEPre$ViuR@h2oLXIn9$IaiM7{GJNLP^Z^EwtIMR(2R#suwXM4oUho3oxkJ8d}y z#Qg4P966`l^rR(0^u?=chy(?2l|5rWrtH>$IN*~{RzpL>*w`506K`%@fH=so2coZI zrt{AdkKcgd+o+cS*9D5i?({>k04M-vL(9i0zPbcrtOYS-EnoT>bC~d zA1zxI-1PKp4NIJ}7sHXXRy`2X$yQi})87Q`(;#oMn#7;x3p2AZo!k-CH0BL1qiVI| z)ixeE`XokJN@yn~P1b+*9~)SR?dKVZ6LSd~!{%`UaJ)L`MF745dT`F0|2hHjX8es@ zma&a@Oh}8lTA4ocP2;pIeh!vY%(B=MRSJ+0CUbd7myg_6UwKQ(8k)ElL`?Wk_rm+5YP4;rQAW;#U~&)!dEZkzR+SwE|t>P5F(l6>4; z+fuQN6;-0EU~OokSvnurBzBRG zB)a93$qmJSzOqFWJi8evRiHL^a&NOF8?Csk1nHIHx4N62iA$+nE%kPjAwEc{R`=FF zB;l}3cq@w#DeAskfGeWEd|z}+c0J<2!i>ZhiSkkiKGIq-0g88@KOdeX_xz&G-%kNM z3WekMy|X|VS>MuZRlX~X&2<#F({5H5e?AkS^TY+HL_$Kky6sSC${pVS$NZ8$?zQZhDw--*^F(EDmd-)^%J zzT8NJl!s`~cxR1^s2||ayj}q$U%b;xlck;zePN%>z>ze=#YB6@(PX-+-7{FnwER?G z7hGw86bJZcQ&XyQHkfnupZ_@@;-6sEx&hpk%|f;3eW~+WuW^9+#sg!AfAbpL<9YToJ4Ms_#Kh=Oj-^4z!@}qvbaZH_qevHa$`fE*#hQT-EvG z64wYgu!rfJ3yA|w=(J5CaL`l-Ft+)$ecKeXz`$-yw(5Sq z72x-7pshKGL~AzT+M_@rWnnS50Pkq0xbdwvF_p^?QXv}&!ydPaBc&)J1c z$0aB4qG*adOIA>iSW)2FWklAXN0weI}Nq; zXRv#i2M=d7NZ)048FEMLUfg|US92TjUmwbPN6k|p)*t`j8FkDM@`OG4i zO-%k>-?S38y}P`weddEJ3g7Mda9OvvYVy6?<5q~)_TNzE73M#2+s3V*4k{4S#8SwB zZ)0Zpft0EXWW4YPeuAJ8DJD+&i3Ma88OAct={jRl^8Gda-I9CDQd1DG=`g?XMtURx zt899CagX@(NFkKaiC)>1QMX>@dp9st%a{)-_d>9#Su?C6MN*E%{n%PLZ|tHeSKOEU 
zoI<;-EPb-6=ufh6@c!jaCAa+|&!6d`AMJm+&< z!<>zzq)dS911Hg=Kge@bPX*CVS&-kdv*9`R6jOGY9i)4*-1-=I`cFP>>VsQO+3tGntG#W2{nG9I`N)L z&VZLm4m*13j&(I+&yRP?C&ip1;rF$-SxrsgB-HUYl$Ddyh3pNcyqlN64k|Kgacib+ zo%AdLX6Z%~Y5oJ}<5WKp(?sKQULD!stJsuupT|jyx?&~`!5=Wk9l(QcDx7()C68y) zX?6k%mYoN=2t9yc!@skJe=STzNrt3Nt)2c}Qg&l~@H%ba_rPm5FpOKQAV~}hbe&Ef zKKg)`3ZFQ`CgO@YEAe^PL(KTG$aU3zj_%`xa8{-OIY8NcOMym-&Dmp=f^6dNs5Oh#J7M_)wC&b`V@J4scYeii81jRm}DZEbBpOm7eRzk)5aDLWva zBNs&`C99=_jSB;QDkKGRa|>VMfI0xWzO(Ci{(KtAf@ZIGI2~oOwzXZ=W7y&?$>Po$ zN#N!3Qy%006VKDq(E;Gro45M7gHK{!1{B9pEzX+ zwz!iPivlOw0D*(2#KaW*0Ja?|j8i$)ZaB8urorSoH7Rz}H4|hLE92pQ%WMyLm$k!oR=)oQM|Mm0S zK2N~ZiP-Hh&eORj<6TNwdX4sAz_^D5Ix~2+=^y|QTWpRE`BUWp(sDBX)dp5(5qG+a2uAXx8Z9pmY>&1~VJmXc}i! z6C)0zcB@-8pd}BeGTJYbqXl;Yl=gpR&rbqXa3v5(hwp29eSHF8o_~02eE*(3ih6c& zQBz%A4UkX1gbSCIm+K>coGplo0(1-A-MKyLSzuV2{+sm_wYBjBr8hvJH81Zcke+<` zgw_o}bpDl90>2~l=m8L~|Ns6v*Hf2i?97}lf4=Py{;mj0;hzEXf{_RJV%kX=}3 z9D*U1_YXM7xGxsb_$Y)x>h!wc_(7g2i{HDGw0^5_yteX%e<9RRTh6$Wc&^o8WjLe8 z{#q|Miri|Hg>OjE^uhn<=|=+1*IWtAhF$y&jAUNJPNC_$OylRgx?TLRvUGT?(w=E= zVt(ZJ=2?0m-#&lX!DAFbp z9&D_wR2V<*unUlJF;Q-FZYu&Rw4otd#4?$(X@**d%Wwhosa!{^sAG=3LLzxiz8d zlB%XKW*e4Uc|z@e-SE)NGeN{tPRJmj{LTWq^n6Q(L~P1sQN~oxzkVXDsD=`Kb>#C+ zT+9@Tb%HTThf{1i40yYSlzuj7^w{@PzW}XlonoRtVz-Q-FA;wR>_mfVj1T3bKv|tJ zX2CMN^|ZT`KF5Y7m)XF8Iv>jG9UIcmf(GSYCg*`7!R9IJ>HFyTo`v^LTU$dUfeD-u zh@dB>lsL39&&;R@aJWO7fw(H$|6RZvr+N+*Rgf3n@9C|IK07S;e$*_9O|e9ipi90F zn3(w%!IoitesyB%h5-j!{S)Z+A!QJnjSQHr3F%ZMdnixy%dgM_i%_qky>AoY>Uqn7DfCmAuL{T=4A*E&Uni!`n0V3>QVphxNi#>tj-Mx!X=3mrDgUzv zR&&e8b|x9?@ES&NWmEOE zL|>c|XU?yfIY>8CK$3&o92=auX*gilyL%M)Sx((9&w6A1C(>&*>& zK&i;o6<`s|dVl>gtDA;(^u727P-IYdD1C>be~+x!atJma3OjF@P+~`9w0%k5xYt#c zf0xM6QvEDrEtFmv(GXjqcb;@}171SYA}Zjt-I^ZuVzWH6o}E`gTQ@4Eebf}6{{`AN ztRx^mugCX>fGXRI)X2)Q1_ac21|2H(251Sm0^2&c($;xFpCdDS>ctEX8@ts^4SfQ_ zLcqBnY^(<$1l-_TdbB!`x`HdImjQMoOe8VqV{JaW==35myLq)8jm z9=euD*R?GBT<$I_V(FBwiT#5_p3GIV+7Zr@{$EYC}{p z_Vyq8NU<377E33e%j>X=jpDD;H-@+A zgMzleqgk}Et;3*|c0Zzo67|7;pRJjBZNfw2kkzpV$w4hI`$qf*mBI(jkFgHn49_p( z{UhRYKU2-uJ-FMak3}30JyKCu-2|HYkRHauEnB}3=ul{w@lo$A)}Zgy%yf=UFQ~=M z?`n%K96EHRuWmM6s5{U;xOC2gKJz!$_LVi_3%^D=a{84&f+|o`7qvu4=wDwN5PO*Y z|CRqekvn-0UaPb@KEG&r%fL&}W$aRM((AQI$j(+PzQp2VQ1@UmQ4+W*b7i?3~n647?#GVgoDknpGFjf2khUb3#9&Q_Z{hNJXkVS?8y1+E|?xcNf?fwlzS70i=7 z@HT_Bjt;~9y1e*Au0Nf7k^QMsctm|%hr7e;h;vwxqr<2srBgp8K?m%s60X6+&B%$n z)7Iv1QxdPj;@rZD?f5B#pxFvAJA5FO!s>zM+`N`qTT|6f-`+xzkIm~?wWA&>GV}x%ApZ!qtDU{yh-|H;T-oUDjl($^ z*iY6-I~-i)_jH@|G6~Jpr`AE97eet+7_q`g7EE{y79f;H@SVG7(LNoiUY4){B-m=| zr;^YYdzq^4=a7>4ym;%d!KpM$%WPT1h^fwN`&!0k@csGk(z(7!M@lQ$)65G41VUbW znvlbQPWz6>IscqmR^ihV`(o*pU$I{k{*h9B+bpo{4q5GR*VCQtTIrO&?M?afbm*d| zbT;yOb?vBF4Bc36yTr4D9{CFYrl@YGB}zQpPdTpAR*u|DU~h17u2(C($6pbwdEXrV z%R~|Dw8voM6kO4VYTk}WwwNti6u`>tEh+W762c!HE5JxGKP21~pUJz+A+p%GK?~&7 zZ^$Vsy83$%4{0ZV5?*IzF1I^$PSFH8hvBzh(VrycemM?OuMkvYt*U8cn~cR?Y;-?- z8Da&5nW&bWyx+llzaC83>=7%!-R>qo#{4W#f~MVJ^bY5?tf1w4Ag4fEVdF$v_yQZo^}z zOkRaB`^Ww&2Yg)MGq{mNp=a3|)Yf#~LPaJF!G1r~??cF6B>d{~pHSh)Na!*eLdeTu%Ct@Os#9^sQUR5J3f3zd?Y~a`NcDqR zLs{fL*KgRMTX*C2@&7f@g4m~q{m#OHl@{A?T#?kFBax zxgXOyYNFio!!T9ln@~#mM{#u-cX6xi6Ak7x`*{CU5&=K_2GyOOz=fr){`Y<9&$n69 z9n;J;HMNZ@0v+jru?}4`jnz&4&2_U0J4RHNgz1tZ^3_ij5>^i4CTjM}vm=p%*^Q>C z!GpY_yR$wo3z~YE)(+-07|28EZ8e<+tk)S@oTvJ!uIN?ss9+&9vaKUjtJ4zyuq#D##l>xmdP{k;wo}cab z03jz97yrzQ+`Ex50R=k8?dNQm?Y`W zD`OE=ln$RD-DFI!Ti`U72|f&xHXuA*S{k+=Z;K62T`4!VA(q+zQ!sPglfyFo4N~%D z*jbWxYKG@NJzpZ_tbRRFAy3v+x75iTy|hlR+uBSHWUs-Ow285L?^&06nlCOY#rAnV zv%=G1;B9lD0f#w>;HFY&Jk~~T(Kb(hobGEuT-8~sMAJDIj|9aAomC*>W_DNau*xQl zYb)KJOIgK^N}Lw&-Ar(?XOSAaul1|87Md|*b!M3LV5p((vwczcLa75rmlcIttx{af 
z=!)lV^^_JeKtAJ@NeH9kHY?`h7z$8tT#ITce}8&&Spl`cQlGf0&nrI{2|Wzcscmcv zft*~TGw4`5-?uCAHtqlJXFFCJ`qVb|!u@o;o1DMUx2ai&ZJqOQdj3|%@`KD$`MaezT7^1A1gEM?bL(s5 zF?{B9s8!JMQ5k7$f*vFJC{oPUOvc8Iu8`mF^=dhb$Y_0=l7Uc8DA`+i5NY1O5|dAA z5aqbLKlbHKG$_J3SCO03S9LVRM1p4*1?4BxxNUZ_xi6VmSw5%*_`;}E z-6QAtS0sj(HGPLw6i{1IqT8LSb8-eH7WO{0@p5OSgoaD4HT4jcxJIXzLm#~Lb=52o zANF}Op*S2bUr=~0$?7YvK2M`aack@K$@eoG4}0uiEK0(EU;XAct)HZJ7Z#XD0Ofo7Hsf7sC zdzUap);3jpkdYsj=}fHj%2_w%1N{KY*go`jXLVU}Bz+MpOIjIcU#RQLje5*>1R#%! z@UL1*cjvBS%}k7Y*(KxLt}oD*P!EjwZS~H`CAXGC+hgc6En?&`Z3yzJ z%c3=2sjv)+c|o-7ftW;f^C!aa5R2ffscAEJgO4ZK&f3$U!lw7GVzm%%T@U_~@1$j@ z5*%GtSwx~uW&<=c1J_HIbBQIcPMsT$iO@67yZ5ioAh+dT8t;$&wTKhkC@dQIhvX!L zIIgqBjpkj4Zsz5zC}H95>&ORB#^LTP6LrdsN7S)dlwzx`qM2``X@jtPjbjU|9AS{T zyXCJ5h1R4RXlz{(R%OqmA*E!`+&A(GG}O7OJ$B5Ebhobg%gKu<80fKLy2|I1 zKAxJGKHTVu zyvTYT#iZ>p!q-x?-Gbk0yKWU?@*>yhaEJUQSsGELK0PAQJ4@b%yt*{{(aVl2m1$p3 ztoT5t^duw@V<+sJu`|x%_4vsK3^pxd1t8US246iYpUW2>A8)zp7-W-msuBooRix zoG~kdQ9cN9w)N#iw>$DwbbB&o1LZ&e-X+BTk9*R$!D9c zLSm$>MwxO@)vG4Ij7Yj%=D=nZlT)7tewh#dkg_|~F@ zY|1gCd>1nUx&JhHFXd7WTK#k}T*yng_O)4liY@d7+g|wBnwWz`t4~XvnrZ@mg8eV! zHAvetny2j^03mSw`#a0Ej0k(%Y#8Ys?*;C9QPbDTT2WI$PI7vXF7kQYg7S8wd|{Ch zT2tm1`)U`03I|Yzx+d{i5&*%SS@HYl=T>M@o`|?DgO~NI7KUoR);KB(5;f zIUpaw;y3xzAP+l9$5ckdIV&$0UUBlRh4FOV+MqR(Z-WUd`6F?CoyF7ct7dm*S*oN2 z=oBnr4(oF|Ic?;sk4*=<9~QZ2Snuh3jQ`vi7ZwvMCCShhdS=RuW5dNhOkqlp7zNB$ z@vNwv@7qnRd5W`Jre;@t+6OeUU=tWb2#UMvh`>aeu* zFqGxWL`{(;5dUo)go+q+O~~PqAANpq*ed+7tqiUnS5jg@>C#g?DGRTjaWmb^TAdi5 zU-^c4XmjQqI(e{|-6mV-#vjl1ILG;*u_%?1)Kjs3*5 zqt1RuBpFD^1>ZqaC_*?l^-b1t<*-S1zVhU-Axc4AU*g9!B&6gL z?x-{@XDx=vQJW-FZ{JGNy1?`Gkh*+}ISo9^yaJQGBqVDDOS+NEXYx+-?|&wlTCUS) z_sfB&OZ6;bmS6O`M>m&l*;V*el%G7YE4edL%B3Z%+TwirZ@8{Hs!Zw=5T9_t(l zJQi}F4b$5qLrd+y3#^wq0>rx^XQPiqy{-U}Q3862C165Jd-9WK`MvYZ-O6Hv1kV}l zxhZnKQBk?gUeYAsrQ51;l{{K4d5GY8omg--LP|)A_0|78=B2NFSM}Is0vO&gyG39x`d1f1BZsUTM(J3lc{a`E%l7DGF$Y2^Z z)K6_z+bWRyf~~8~Lzf4;X{q_hY6C;G%{ZG_J@M6%WK5dO#-yt1w(&Fpg_?r#{g|qy z!-qOQQDk1o#<4TI9cOpLy5^^qp{;=vyfJTgMs9Dj&-=3ZPFr}ySwnA!EOhUz#e@_S zA`Ot%C2%L3lLC}o^t3C7-;Ce=@2Q}fjd%}EU9l`8g%7a4I5)t*8GTUlJ-!p9r>;QWaYarioQVWra*JRzk3lJ*{? 
z!LZXi@4a)AZwK7bd-(fvgCzR|*wwNBq5Eql&#*syCQy zW_SE@nY__dW*jps@E+iJk$X#b6}ROAjDJFRHg}py-X^RUU8O?E)lbi?8ukgqPWfZV zY)Y95dT$w1hx>m2YM0~KA!p26v6x$P@jic>NCO#dXTBKvk@b`j?Idd_JiU98g%`XI zXs`oq46ZpRHC&=}NjYw@Dd>*OEeeu^gym3@EqwM3lb*n=3A{#WyB945B=`B?j2@qr z`JYi+i#X`u#f2*Io%*DIaX{d)$dC#y?1n4%=bE60 z+n`_V?dl??is?sXJ{eEtsZ1wH_Wh!7N&;-?jtjt9+Klo=MH+6vAo0GH&Y0k~h`^MR zk`kyV+4{|^+U*b<-r4r|wa@X>WZLe8h!i9^=)rifKKsH-rO; zdbUx#y03-&3gP7Ez*BmX2UKcGk$h@=$dKV4DN}-Jm++QHlv-rc zB3r{Kpj0_j6!o(K@sT6289UIuCr8jz<;oEk&You@zKiN&=&(s{ezN1_Bl&tNY0QYh zv3L3v=4Q(6ArPDu(XGq|I%h?lj zXr&ciyw>BsjFu_Mt?bp9D$a$|rjh2iIZk~Ndo9sj_?Drxs8aDwK__Kf7yn0IP*43A zy1Fkm!4VIKyGk{Knw61%OmlP7DG%<*K}V$xEzIyoE+fdJrwa5g*oTB`CIQA+Grc&g z2QJQx-xdaO##iZatx!0N-R?ILp0YP?_((+lxDHBxF4mM#_cn(x^MZbcfFHwE8)?z+ zDwwX8fO@1bbo!FA0_KG(0P$=pEh`mqNVF6@55Ob19x~llP*t`6`KR)ml&FwO1uE*u zfct!Lzp`ztOvTI9a$i_p?ZnfSEVkweTjFXMIVH_T8#tdlo>cLvc)MR8tH-=Y4YhvS zO~mYavz2}-esW~dnaLLNcn$Z-8)lTO`Sq=O-P?-2ZB&sK=j!RrcTDiZ*H`_H-WN^K z;GcfC`MEi8`wbRSb84G(XPc@XdS6e95uyd5Rv-B{^W7nVBP+`6sJ|<0UX~sv1%h#v z{++%5l#`fQl#`9$I_e)5yzv&ni@X`+vP?PL6+In1(DW2H!h{!GH+>8HtCUns&tmW8 zC)1CL9XB{3M*P76-h@rNSwl;2+e%&6{=@!m;@~TFWd4fbw~Fh&0MziPl!NEDkjNkS zitFe5U#Diy9T!Lz-;6&E3oXhZQ=D1234YjhYSN?{Q@Z5;9Bg3AbD`2u`_q%j{YO?u z5J?H?Nf%uReuH}7;@~nSdK+Dy;Wm1VF6NUJ7U8IGDOhK^FNkV<{7c|nt@d+a8*8W9 z%g%k6t8ra9UzfdVxyjE`#*b4~#3^)HY^th;bUkxGI~p%00IA!Ix%v%9G06`D~aRw;shU~o#B>#`r@ zV#Hj~SI1FZre;up_?>iLLF2MaYDdEkZsro1LHdKYy7gW;_*gaM25|$2am72&c%L{^ z4fo05`7KYWIgU>;mw{4*`=l7R1VwP2dVxa25RR$;(p>{FKLa@AD{cx5B(`@)3;KlVEr>v2KQ z)2ZbXBm$thQ_o2DtF9}YdP_(|9@sFxqiy*4;LeW&@tiEee^|4&p6?zzF+Wz7LJ5t{ zO*(0vsk1|pVfc^;4lQX{?j~rP9+Lo)cw#tVT#?SI|rqmQY6nCi#`R&{DLc< z0O%{^ghyf2S`Q(OhwawCzDwPRC>GK|5*>vr{@*Q`bKqh^%Q{Gb+I`S$OL$aR z-cXp62Z2y@AB?ONTsBH3gK^k5Y%(pQd`W+a-alF(fG{=FGbAJi>W(T*=ZjeThhVg_ zf>hAKVy^+6%H2G*c^Kw2^DdSqSNPp?^WW4JaNn~5>QSaHRv(Ed(xTt3`-5oiG#4kg zIe{uc#4dIM4{2 zR2;Jik8-Bf30)Hswton{c{#A`m_n1-EeZ`saK&&x|4G-hN4*bKOP>iYec2z)Zh)y} zyG$uDGs$2ZEe%#dTOmD(4$}~9n>uq$T+S1LL26uLM;NZ?Eiv`nNQ=M#a9+Kdk6d`v zeYFUOJLo6(hlSJX4lGZS`ty}ZVHg!Y#p|<1mpHdQm3rjsg|X`9m%GB{-r659&>b5vP+*B7x-=u^JMH?b_vkK|zDe<3Z_zfVqYd6-iY-E%z@H1ZF%$JLA= z+Uh{!6D$AZC>>HNPL?G~|1e~DNh1bb-RXdOmXWW%V-aBe3Eu?!c=RVB4fIbLz}0j9 zECj#SgIBb=M8yR3hYr6leZqT^b!$%DM85WdoZtEb)S(TFZNt71_aD`~TcB$MO{Iln;}ss*a!Obw_%0x*9$YP+LO<1s($(<`S9WdJ>@P~_ z`&;ThJ(ccP9@-_FwReejCkd(@?NVncBm)_)uilYBE=tRPJXjP%q7_Wz49S^f(*62^ z>E_B!@W<@}I?F%UKGfE`eL)Cl$U9f&A<)i|fUFpXhpq2IZD()#Vn6G7+kcb7l!dGX zjVbhByLdTu)ca?ELmi{S7p~=6GVc?=*CRr}S>#3y=gbooULSQM396B=igUVAnW%jc zn;ZQz5vEWyOaBbYnFFKkom)zk6-ngduNFp4t$fR zUe&utV;p!*S;yb=Au?K%HFz}QFQ7Ewzs%yFJN^0WffyMv+KSjt;I@2CGcYw**^1Yg zbWjd17t?S@w!VAxTqKZ*d;VZ+n*j^}sk;by?ayq%NAE`b7cRnXY@fcCo0otuz@l`` zakuQ)60=}aaRR!hRzJk0UyzNv@^By+K3{d7Q&O}_CLfe++qPH)ZA~IaQ zPd2!% zwA+$)mne6pP2m`M*j?1J#gZCvq5XBPfg7?9O!4}MuOcrv=Kvx0V-Xn1bJUzN@TqYhnUC8fOZkOT^X zOsco}X;cF&bvyEL!5!78BC`^C-EsD+QdA!cDr#pQXOv86Luqgb1vjiqYlr3IlR_-` zrsIjCjwjD{tA5<1VlM9Dkj!mus88bnVeXpiyVu%eTN606X*&rfAe4j%JoOez)nU@t z?l57-M|}<_qZ+#&zoW)^nx-vrGtiHYeH;9_H~tc3JD|wp>9$wZiV_1$1vjDutrEQ0 z5|raK{Z3$>IVCY2e4FY>dahvav{i~IZ?c80a)75>Z4`lc%c4Gz_HT{k!Zf*3DfFGMfhdor+@ZAQI7 z?Dr&0Wl<{`-)wNS-q>V>wo_G^I?<(+$-!?`j17~Wl@c_erjB!@bKUOU@zsivn_~Sr z&a}g@5eN8ciZXVic;yEED*1zU_xd)DY2AYhkwf|}!wk)~vs*02Q)Oh4j)9NBzgo9V zMe9m3w0M3T%Y@JjcMn%T#JIk{Df^T;;?Pg$)kJC?9j~f%dyObLDD;5HNb@u^eh}zO zFzCuOc&ONowp3Rk9j82*gNncx@p)%XW7pGlT2xc|q*hJL`7P*a!-t6TSHA5&(urC( zlc?X*fVNJ(z^$4>E_qQ#^=71^sMKwATSHV#^G9{8p9rw8EvsMvvzr&o{Gnh*RlbkE zWA_#SRAsJ&aML?YiX8)S;`>RoZU9mQAieC)_RBu_w1vTo2BbRn`dKo?*IzqL8J%XW zwrt*6uA=Jk@DLD=DCAL)j5ZuJSGa0hq<0jQPEA`+7XR*JJM|(!+d9BoU0FHT3;T({ 
z$-q5-bhuLGOMxA}8>Dx2R^Ou=kApI(!WR@#7Pu zh7$!N?r(m#U>6}x?ot27SwY_SN&l{uy#*sKT_e@9`q0w@H=pdj>s=b^;p!xS(d$sSl9VzVg6D13~)4UI&uVF_|ED-Tf> z92t_8+#e>AltNW%_B0{MkC*&v^x^AtfkyaUX&s+I>?Tavs+8U;3#=N)r3)U3Z*ggP zc{#0$%c_-l==oG!(rL?+tfCCg!^}^lF5ak)3tF=9uQo`{Edhb4Ub)d_>XHf*nrQG z+|YJ@(_J*Rg6-w{o=&AnW`>RSox1)Bj&>l4Qvg*bYRGlXa;4gd#YkOiePmc-qQqXk z0_i#FO4SbL?MO=Pv!5Zyd4QaH+xC7*)6f%YcYDU0r=!pok{AQO01{oGbcRo?SN$3& z5=G?d!!F4G(;1)RRbcNSy~6(cJ8}x{U4e)m+YW^0{io~}U)HP6YgU>&q(U8JM)fb! z1K;j@ruim5?O+cc*iqPv8W~c*e()Y4u2_`B|9VGUN{NX>E68@YZyO(90%H)Xj$0 z#((YQHphW;i&(@?jW{ulC+WTkU$twy#a1 zB5z8};n>K6Rl0#2(s(N1^-|HAGoQzBv_?%h6uF}4EJq)DY5$eCHrdZ$>07gVkS;El zIM#-q*Q{FR)^4#+!z~UjkEKXf`EGWa<49rltQ0!Ep7h5jE4|Y+ArS!^zTFX1O2%<9 zIb#|>##8y>ygXdaOv7#>*!~}NgN3m@UpO<6WIEgXSSs6dHgVH+ZtH6ywZkVD25Qm{ zGtfgRZ4_>hM?BjIg0(>i#Iht&URT-HdN91(Z}1iw_BBvYL`M3oA*KBl_IV zS<)EK8TCx%Kl+IJn}lV%8L1)ItoIKrBdt-If6nT=99%%gEdya?8Nv$k1AGy-%QwrO zIqSxsT^%OAMzb(-U-$i{co7lNZ|ztn&|f6cyeZGM`}%bOY{kRS{2%Jx z@~`SA+yWJ(yHUD3rMpX78lOd7C%4B zxBRy;;_aXLX3th#Yr#(M`)NB=!{sd+C*@o@bTh?0%7^TQBl_b+TVOTk+hfP6qKXKHQC~veiWg2iJVZKWeGUyt7{_na zS3eo-Lmk+!P(XH_#nBms3;9~rQf0rL_1?fR69*c%;#Zf3zm5ES6k!C`OX3gcKYJT) ze%Ya)6H}aJbhx=<&cs26h8|L~ZFE~bJo(cYmicQu?{ut|f@4K6BwnN>p1!nwm)~4y zBu_>4(HGVpk({1O{{1_txWvb84ea%+Hwj%DA2V_fT9dJ22LwddOSLT!Z5Kj71@Q`; zZ}Ok0W94&H>z<_@oRfQCB4xzsZM-ZBZE$$&DdFTCR{V%7&VFxFrukT!79LPVU~%Kf z+|_s)8rqqfpGxUup z{Pew!7=0DN=>F6=GD^5qzj2I5Jty@C>jqT*W$zZ!=f?EFL|J9| zmvm|}W6R7;!cD(j^qq7os-wy6K)bsjznF{HrUaqp!aTipy6qNtz zjQLIFl>~1>v0E*oQ5VTT9%enaTPsQW*PXSGHA~3n_PzoBEY$w%m*A%J3*F$<%(wRx zu3SEYVd!02xv01>eN_0fK6VUaN)e7tU9`mx`y_ZN8jqz7MQmLCV@E>v(!{1-PpPhh zFwc8Rkw?1K;+0h^4Oo`tqX_kyDe?ZtW8o{Fm4Aqm@U0wo`yaHdj}O~>)!q{5mLa_> z5M(|@Mzh02vOE1!S6)s(x3(8uxTmwcIO!V3WP6?X=TdWXk~&-qZq=#}*^_wY2U;0A z>$8H0`&)=&+QSmIo zeRp}Nnkh;4<%Mb568=~=F0NnR28OD?yxb`_J9STahJbZE@!ChNSE|#?kFZ*K_+Q2A zjv4NC6Nq;{l$@M*PiY}pw}$o~|Fmrni{%-ZnN>+A*|o5#i2Y+uqa?;&N`1jR6S98_ zg?8*T<+>^W*I(#od8>u3Ir?uCrDhy0a< zCPLXhOuV?mdp9YWSsVFuKt)1Mm&ukc7>Vd*ethI_VGUe!Di}Ad7j|hnkELJA*q9fG z7ZG?+o2I`mBsH5;8K{`AxGBp^=*z3=>px`G2G%y>zGB+=xhPUp-a<9nE4lyUXLcd9 z*q4*T^Q}T6MHEOpgU#dOL?ZqN26wFkqWW8xk8L&IT8jjglf1P0s0u#{#Sb&&Dqp+f zZ{fVkdiQ?roqjN5f1*lMke{pWk57U19)5RM&GaJK<@K17uio6Dbm~@e8LW0NIp5X~ z&fL{PPKIkf*HkV3k)JgER(bjdDV>g&`{k^>j6_&K1y%?85CfeUsdsUi&0)&B)ju;M zVa9-~5^OeI@9y0$s_P_Xq^0R=Pt1qD{=7J&s~4Y14k(F>eCr(=$IYk6GIEqVZTaHhg{Q{q~Y<71S(NBEb z#Q2W52=#yB-kgZ!J3pjW&9=TBIu3y3v@kCTTI#$Ud^@|~D@r=yAbI6H9QQt6 z*?Vx9rvAlItfQU-Nxi5@BWt`FGwFKmqllvh&hEx7wClDn>(oa@095-Hwpd9i<*sf-K_C)BW4UepK&}a`Q6>tT4?R*%>3BsK0`*4qV~M9 z)*Bbo!-073w5*xB>6^PDtsr-JWsThiR$q3K!s+9{CC9b z5x3srwIS|ejoZVej||{0bbI`2<&FE}rv(BG^eLn3^zE`ix0r-dn-c&1%hP}ugucE1 z$VaZV##MKUy&tYuSMHSGdlnhv8_WmWL&N~b%BH*y@Iy)W(UfiO_G`ZC@4hcnbC&PW zQhvb<`;7Q6M?N{uEpI3y<6>c9Z?74X*&)Y3kbJt<)l1BNMTq-ajEsSXybk3`kZ^Mj zp^xr=4`hPVR4|*Y|5;+#^VzEZ&pw&*&eW8bBU@KiR0Ka^&~DtE@2g2kNx8UioVxI6 zXlSqzbGX>pFyc(k&&%E~E85vHk1VgQD&0rz{+X+0bJ5o)E{Y2cg-gA8CR!Qu;1>`u zHZQ^9;GlxzkcfZ970hSsJ2Ij|qhM=o-J?)lS{fkE&)?yt=Afsim$N@JH`hB-QwV;y z?qIz0`}c3*DH|&*a^EAk#TIupi8B?1dqi`7oFxGvaW#oIscGcKsjuT{efwWBqjU7? 
zc8#DYLUEX;6YK<}dGT;@o+(cT;Y!{|eHx#xVPNOLhCo+9HZ25$&mlZqu2iY7r>29m z$1T))U1hO2+Q+g*Ev@M~$IfpLO4>8h$5rnZuyad|`OjfST*Z~U>Pg7kecjZ>mG@Sr zgJTvgl>MFAI3(1%TuV}1`R_@cgW&oK;pL$?p3IfWT-6nn{bN;3k7M4-qDN37}^ zX zFaxy8b`~3KQmL=b&jmft3hU}PXwaRUfGTIKDZkG9_sjcJ#TOSB?DCI0KQpjtm4KRV z9E+!u6Z;4gBV*+LECd2kG4=Kqw2B_LwiwG!N=)o41oB?4zo+S0Sy|r#p&hPokP8MT zw4#KNPoE^Bi;9X;&keqLK<$gUBow72B)p`iqo+5v1_#VPolj0q%1#AkL+uNBUt_2h zL`S0*;$vX=m#V6(p95Kf)Y+f~GNEg1g&@(daU|+u_ffQ5T)5WLw|k zr)&XtG(7AC5>D7c_@CnGB==AEH-7JKZ6BE-?TLA({R^klazC|jc6@}CcvF&$Thn(L(Jh1PF zhlh(P`}^RXa%6XPiOP;GE)vvrkcjxAD584FO@apzg^V+^|B;uHf}I>58R+gS!8D8>52$`fR{mRG5;J zqnu814l~aQzCW9imq%WRkBu#nPBQXxUkqGxcXJbZzoo6sbTUXEPvMi0FnFr4Fa&MG z(T-8`kZYn5(OlS`g*qB=oXD%u`F|M0V%|T5Xw#XJD8d~T% z6B84dMuLO(f3E`CLX0PaI-vHuo1RBUj4$<^iJ6&PXfFV~SGB+W3nN|hWN7e>Xpm~^ zj6-sH0V?MDkO^5bDnM#tLt@givZ5DgyNaM}pb|(b=;t$ym6aA%^F#+xXIYCbj2vFv z3C(jDxy9EE2buwFYDb>rc=M*`KpRk*k+lZ~LjN1ilx}k%H~VaV-QcmjN63nD?R{bNb$2WWJz93rXCoJut7nwBF4FtV@{Ud@>Uz4o;u#uNt+`^YyM!)o0#1CWCLUrp9Yx{ees;9nlz>P*h7#49Q#nN}MS& zF#93r`T6-^u9M*Ri3L3{60*z67z*JDQHrFYpaPN@wM=x;&4b_qM7lW@J5hp!V>tMuEz zjWPUp#35rjB4GB*2`786-)d^k0+uD?u}QCL_&>vAW3lzeVp71@eR|7I2|8OUg#JSK0TD}~C~XO4?XM=V&jwVZn|C3K!M~4?+2uPE_z-bz!n$igG$68 zHGilS(I3&V}_s z4&=K)g9m}85#lwHAZWJ(ruyu6_7KOHHsS!x0gi|`lK%I30F>O1mb>do z?(;*>^9u@|FI5A%u--!iuAxuPW3|HvIRoweIQ66IhWomi9>$|Sa8SQ!mD3Duw~KF3<`;H83O+U zbkfT>l6rFUrf+7$cyvU!=%*Yc*VJ(P0fFE$T=QNju+A^30aBa`&C7QwVcjAY(N-9C zk_>@`(~hQ#2qCkAj33lU0Hm=Ja@3SXpT@3R12|c~`U#!%8ENlCAx6>QJ=S4~xyZ?( zrocpLBZKG62svk4MTZrK089gL(hhZSB|68pf4EQwuL%W4SHFh3CsslMLiS-?mE9sF zm$qvt1lWS}#yormAJI2i0fvLtynTov{_Nyr;5nerV9($?sLjnI6MDxL{5(`{5q#F? zpP&p>Lf5ow{!;7PzVMyxg?j1sxljn*-^|U;jYdD@1rr}md}d}8JQZ_K6Knv)9RHjv9fc?+#pZH6* z4VbD(6l~*DcCH>ggneVTj&bZV#l$KSfk|7ed$3o z_@65f>7GV7gW5Naafy2|9mb~$)rGE^`uei?pJVFy#3z6tswN7SUEzNB6FE;*98^sU zFy<|i1d@Z(JleP;Bq(j4IFg+%^sZP&S@f8SXFp(!rvD<37Vxbl6v69=LB_G!p1s`p z+1VKzR}Twv5pcBNTLF0{)-YS2&PAdAZ%u;plki|!=={6+v-O&XN)Q)94LE~XR3C37 z_EZWWxq-AaoVgCLDl(yw0GQ|HE&|6k46Oz}vLHjC)d8wZkpb5lM{=*!e-5)(^1t_o zT)}8t|G&I&A^a#37%iP~V6yYT1Ix?F1)L8J4YB%VyoJq31?9{`+2R zu;~Q1h+r*&o6*68U+~MKKd-HmPvfXBz|`iD3u#mQLoEW}bQj?#gDT+?NNjc3IO>`I zg->|E@1_S9g&wAG3ejOW2a%Gep68;2IEfVeBeNz7e0q+Wrobl_RhscHr;IIABKQ!+-4TBC4je^L|#PM2* zJ~NQ~`^&3~|DnkE_wReN|9^jyM9&fSVPIf*tE?Og;%h|2)3Y;YCnqOI$C`$Q20#?j z2+p2HsVc+{}5D7Heue5^T2E-;6 zfOPia_Y4RTgQ-B!&_6cT>2|mXq6Y*ZdI5Y?BZy6~u&}BP+9xL_pw^;eVnR*f;-z3V zt6~0^Yemf;aeI4vVPWCN@bFse$3N4hY69*@Fi${ll7N_)7@6=cY5~ZyR%(BRJOGN? 
zCL}~;B+>n$`rm0J(#a2N3g8%sw!JsoDAU%fr*t%G!Fh#bviI9DBVW*9YjSX7M^vpfWQf zGv1x+xZGT8RqJ+|0p&KrMkK(8_RJzm?1d&eDh?lWyzGupRI%D@cQe!wY`Z|Kke`=l zW^NuD0=u@-;d2LaJ+IoIqoa?v7mQyaFwpEn|4Ys3N8m{57z2{Gva&Le+w}MMvl@1w zoc>@asmjgm>h9k7kw6RFB1r7m2Z@-8g@6>MJnZbjyvxb-_dkI1=Pl!Y$t!J}jJ8*c zo*6B0OCBQu*8#y|6DbsSWOhl@9goeif!h(1l29 z3YjlaY}*kQI(#io_8QJE?#7hE4b~c-2HylUB(!vzwfQ|gg4~gifIwPCrgw-Sb%FvT z3={f4`jvoJI9*g-_rQR>eS8iN52K@^fFPq`hxZNOrLD;XT*3O^JWdZ5>cEs?8B0;#mn@Rl` z1j2Spjf#LD8?fb4QkvGxLIcWdlmE(p2z_09PGS}o79T%;j1y!N5fkg?vYGV{2r%w4 z0Ld}pGD2|B=d=)0z~Wyjh=EPxR9oZ3#`b1na-vu3#RxDwxW8jr0`4Hv296L=BX@v> z4W3#|1+a0Mn3&{N8467x2uQL`s}(CT>$fHXcuMeEEF$(|kr#}gd-L@&0zr{gna;k) zaw=Eyy>XDAcn2_nTQB%<+G_U$KHfANBmgT+1eRZcY3nvAP0XOK?wJmb4SmH$`RnE5szk`FGFN|cU<5K|J+C5Ht z1_p{-T3%;KN|H$fxs-q0!TC@Aq=`vMo*o{SmX?AZC+lF~*8>6V?AFc>9BgLak9Lo= z7khKr60|WnZQdKVD;eltj^uubB&+)5!41@O(;(Q$8#*$HSI0@ML%9zbJm zO6TOiN!0vL zF!E;8QLaoYqMez6SnZ~@fC27qi-*kmE9}S5d>Z6dq|20(%tF1n7@o{(7z2OYbBOU< zP_|jIwR`*7KZk@Q#m1svx`IrU3plv&@O>Y7B5xE##1=q!2AmGiahkPVbUP0DmS!`a zBOo(Bh9JwpS##lg{niTLwkt{$09+xbO(0Dx!pwXa_$2@ZzhApvgqp}D40?=Si8`?C*~fp6K-^GUQJDO=9?kwRv2GF^G|Lvn;Z{Gwfxk#SJdK3C%3xX`uG^Yz*3j?3a5j2hY4p_~=2R-h^$ibKd zV-pco6ciLZIyj#FlAG6{;35L&Z!yD!>FMbw=H@^F=d4IE!-&w>#CY4VZX;@**~*Mq zZCA82I8X%g@_i0wx`Fv2ZeF1U`RR2MAiqSu(Hx)PaCCA)L_!kwx*QoA`VG7vkY6%4 z+v0V4jHpWTlAImrGZs>t7a)3x4F+4-7>l;=tBc8Tlx>ij{HH5db3bSp)tT{~)Il>% zH?%lsIk_JoknxRaktUNyIProLv|6mUs+qdRC!?eU@?#@kM|k7G>bfwGazYv+SLTTC{9y#4GhRg$hSE^>;Uj! z66!JW+fwzDz&6rn#o(X10t)s04Wir5kPz5ZYFo6(6vX#!cO!${JPb_)lrWSWwm$hF2r&O(^7-93 z!PnwVQPc}NK?mWuvoKjwef-aUI7(vS@XBHS#LwZdyLiDn3`$baiVp;NpxFn(vZA7; z72xM*pa)daPFw5UL48^|6O*;IP9y1UqDF@_-9Dd`-#0@ZR{zRWCw)nB{l%rY`g#G7 z#kaekHJu^3qbN8c%DExSe?(UP8#ZzhHFOs7<1GB=X~d{As-hD@Slwj=n+xij+N+5M zXDyD#1PvZviaVwIiQ&I<_N4qDeKW=%am(kFH`2r;@?Xp`y-APPY0!+-3`TC9~)w z=}OBei?|#GMlXdZ88p+J{%T0fZ;S|N$ZL;nH>_}Yzq+1%8o^wGuEcy1Cfjrx16&BC z-0pV$%Vx+QcQ;bO0FLig93@tk-6|j|WTc|%>*?v~8mxbs*TMLy3N$HZS#L*Obb!Fe zb^e4;k{f4eatjCTm3sF6)P$K?-{3O^>N5f6Jsw~~-q_dxfR@6dzxd6QqZyx>_~BcK zeB-e&nX|^(1|w)~7`xwmIw&8zjRMN^arNk6iUE_pLxaL`N1O$=EUI%VpfmaVXH@?)kV{x-ut@>X6CJH+WMpJ+PII6- zL2ceA`@Ea^kC8JDrLR8FB<+Pt&B=Kh&y(2P+ys&I>gp;G6>zkr zvFAhh*9d9!Tt>J>mNj;DJqIHKBGd1ZpW{8k8BIs*}dTRswTC zTsH=^H9%Hv&|p0S@EZv51_lPEXJkxloA-@_Zp{B4Ldw`U4}=<{SpsQkX)Q$xOFk#p zfRJ^^p1fc?fc1&z9vjKEav2Q#B1QAqDC^F#$0|~U2o88EtqOyuoh-32l`>#H{(a)| zu`1L;!@%=_mELMU9D|s^yRg8=ZNKaRP#9>EY;Kxd`$#S4V}yyIgTx33rUYDzaB^~L z>*{6;dSbQy?ECQ^BxN0{9Ii-ji4M`(k2pX!;W%c&B3g{1dd~#Hw~bFQ_e;BEPS^Sy zggF*F1m|xxj(>%eBi|%XO-_K4MK-UKxSSk44GmnoGAuigf+BkL9E6Zg&ddOl0J&g? 
zQ8zR{Bow4PUvL@#!|Q~P2c60}hxe9q!${`yOV*q6Fy6uN%7P-d1W;c4cF1#s2Mi+A>82E@@)3206Our)m;BFrAus`}0{B+Lv+*n2?P65z>ZCr{gcC4>M0AH= z_rP&9tq;e?nvY};^!K~1cR>LTSCF1#@QA>eoPh3#e+nUMs+yW^`;*lC{D3eL)_~43 zq%coLv_O>o8cHuYzUn(jGk{VEzz}QTy;)c&;fVhS^-rVI(+NO@=%mMtajqR|r>}(AV_AymfU`JO`f_JZna`QNffZ^i$3928U*fpVdff4ka z%%37#&l|jUC?NB+zH0sAQl#|3EWfzG)W|%8({8o)qHSzhvtGH(o|Kx919*tg%gak( zFJN1eNYGQ5ses63Ts#2!J4UdVo3mo5&U|Vmy!?_ec>8jD#%eO*gMe{1O}{1g`?2|1jJigj7&_Dv}l-} z&kJk9kn5g^xy}=d+GAt>>xXFtqD&xBk|_lAixxPFmA5}w6cUKIF~QCm{5&5cE-*5c zwhrrZJX+=hDkP@^)%ky|*#;PJ=u}X2fNGW9;@6Cbh{egtT0kM(>_dU^5*EzIUNM0i zUg@zB4XtTr-Qn_z)=>Iwzj?AIH;|*4)x<06honcmYl=Z~R1~&0xcG=8I89PY9+=qL z%1=Tzxt)lt-B7=KA z-vx$&X$I5eOFBaZQL6smFIO5{F@Q1>FCTh9L3XC5uE6v|hky^*BOiz*b+ECa^p(C} z^6>JSnw+d!s28I6L%BmY^CUYs`tclPdaAWEG%_F}FpwzFIlqhgy9E;eMN!;9!QWg5A@U{}Q|n zq?%4`pgPC37igKZYjVrVBAUa2-jA%keH8#<5)yzgG&Ds`4r@J6U?i=tc(5ZLxN7W)8oTHZYJ-iz=7nXq{BC;l9196>*| z$Lg}z3roBa-mlA)8Ehrlsx+saRRQT z-l?ge-9`|Zbs<{VlMD=${0og5_2nY3Q z6q|KeEWbm-ANlKJ0yI%nmnqI8Bt&gCxRH)lgTx#U-yi9e zyZL5xc+KsYO{J`&S%W3=YG2eZL?|px?}RK$5snLIPXe{TC#ycN+%z&B`;AX^PD0&a zN%+A1#u%l@*snueUzWwwUyLlVu3YK>zBb({I%2{Xw^3nMh(}M~ zabv1fp>9wS*CW49^j{j>a%Zn>d^vu#OGr2+G%-BR$WdFvF3h2!wX<`pGxBSzKjyWF z@^xipazk-|&0AY?_NCy=_k=dXbIwCohGlR6G%r!B4`4e;R{)FSsXt^{BkxE5{gbi2 z>5bQ^^!67yS0nqDPfdnnC~%|U0XJM?-NHl1(H~JS`5=I?@kmP{YP!{(7=F%j>f};{CC>7=B>UrpF{e7n{1k{3Ai>@R#xkKE4n(z zfBn2sr{!=wyhpb{3%@Hds))(HvB=M_Y``Fx=vRHHhq3aoyE5?#?{!&0`BmrT=*#S~ zOZ0VpuVWReiR4fUN^%@>-N|?KQku0Emm~`dl;nDstA7`97dQtE>`ouYTCN!Aj}0B< zFT6!Mm2)z^MqRR9j#_rT$2b%#vo7quRotr`YhF_1MHvedkBj$K6c#rhvB zTiXvNCgNnmF-`DkCZIQ{M9~066cx%(0Mwe}2v(Jbo>Z;pb?xXznemq|thUcSn0?$= zv}t&N4$Kt1Z}Yh3gJs=ct{SN-b>)a#qjZ%V?V1z9JqyjIQ#PfK!S$FgAtahTQ@o3n zClaEr#8RI{YKp2TW2u>Gr=pX%!b&bJHFS$KWwb|uJr_#aNGw$w1h7>fUqYt`b%60J{ zx3bmBx(EBT{XTxLqxh!xYH!~PI7gPe!?=g~-xA@rV@8muF|(E+0Z+87X%}wpxg;E{ zRrk$SG0TtSJ4Sg^8HLr3VeVrF)B7`jOUnM%&Ef2>v|3ZYoEja*Q~lb{;Oun`tKzx7 zn^fm2XIJV!jq&aVVa(;--Ayc^cE2#4o+zq~UIug~3R>jR0)0`1`F>Hkn{#Xz`F{B* zNlPsI=-k9>!Xt?8`wu(}4?@4$_hCzmhHBs2GMzJOH)WwKL9UZRrsOFzy^rJK)rdrV zya@kY7U!mAGDL3awvQG3@})bgsT)PR5F zM=>fxB9uJdUOzi8@BGvhB^6b0bhO4JZgSR(}=}`Ke;f|ad&6fa+0l}KuM9AB`Vv&vHn3J7t z7)~$NPcyi}zY%7Zl;tJgHtGG~-hG{!ki9|l6v_1AdXKnb+2=mBEc$cwwlSKn_x0e3 zW%GyF%x}&UUElk}qdx__^8NF@XtgvUX^lT(XhTv?)BoEz~;1bEa-rgi?xgsm=9FBqY8@ zM(ZF7X7AK^u`aT)yDI~5>Pb*#8SIg652vJ#y?7eRUn02dZ}Y@&6g(pICn`o*w8R2EB*bd@knSBXAL5#1(F}J>{Ri z!Ov(7yyucsu@}m7c|5>GGe;sKg0p%Wo5qOAOMH=J$9ZCOHjL3{qYNuU<(b1@W|BUw z$zIG|(tE_iYgBa0qodm^*X}o8`B;*_nR11vF5`HEH}$zI=FRj3b8F?sVAh}asZksD zaW%tAew><1@x{&9E|-pJ=r%Ib`0i1Z$cG-5Du@)aH+BRODu}aWTcgJYa$(USox_~@ zPA}ruha^{+ae}qyIWuotU@INnDL#}Z+Qc-&r(9`Wp#)nU^rsTBl!sZp+YI`g zKFUuFr7J#-7IxJ_BlIdz9;Pk*Jx+6SZ~EIB{iga{T>hr5g_t+4aY&S5b3~^xzWyw6 zgwb3$xjjTI90SH&c1;*+Kh@e6@}|mA}-`NrEmsXp7;vf;?hMNZn|N%9XGe?*}>I$LrL3 zHmtFX1Hs%5oAX}G+?AfcB&42>u>!uPoil2ILN~}Z0>!9atjU6657=j_saYpoA4*~b zZK~)jWm2`;T$!E^2d97iGN?Qu^Bp75}Dp&nI2Lqms#hYRxafm{#B z#9~Q=hQ`LmMn^lWXBgfgyg5sXD%Y8FSgbb_U|}IaW!AUX-~DG`wAQjeOZO~|%qbXa zl3&HD?hIS=>f9k%xp`0`vUvaPDOzBLXkS*ioPN61arM?gZCg5wt)>j+mQ;!4pEa8m zq6b|kd2R5Tq%ibXs2`jJs5lFp;O<|&?9Ja{XBn{}EH9IRgOT!}5@HG{fAi^ORgD-o zqEAdNpRMYo)(nS^+EGvJgn+kwU4Oh=JprE8NhMX_2iQpKY7Nvx-Zuj#_`0YmpOFO` za<%m831XxzT}Uh}3bV$?pTO8jaAxzx zj>6{A#Gp?Ee}b}Ip;p(IoN+YXs3UL|QAh1JV`C`FqY_3U#pusHiE;$h4>Xkiv%UHJ zxhdg~c+y)@FznW0Qa|ldQ>OSeXHF!adzqr4^Trl2a z`^4CjazrJRsV4F$7xkr;$WRe!7g3DdPV*5(m@gJy=oe2Mrl@C6K8|ygb#Uz0syba4 zR%z6)bXQhX^(pSCySwJi@O>dk!a++mzVQd|VtekCCDDe~u`#rDbih_D^j{9R7mti_ z2*4z;v9QEN;=pF6gfh$sy#Lccpf#`1f?7h3+Wh<-(qWLpot|}hdHH9TTu#}PColJ|mw1A0xLv-WQLo(OW9xDNKl!aiac>-%w@3|k}Iy!EZ1KYp7i 
z#MIkwoY+9`8B@k`|H@Fa_tN!7@q=RkPM~XK#n;x)mC2>iXNUnGZRXyvS8q)WN#ip8 zRF!xiGfbazW&c%#Oc$*R0U_zA+`=Ggrc+dnDDwHtJ&+m&Z|u(TK)5e34&OijODRG z1gL!SQm6GptB$j=8-7a71&?c7BJ?OmjDBy|AV#-UB5rqWLrm(qzu4H@|0vK#qW?Ok zVY6OBgVo!a1pUTfF>pboGShCGfZ@EyI^6fG8=RxA_Hpu;=Uq zDH_2I@y?Hz_i5jJ?iy=rLE-TNbQX_Kg@A^E_y}`&{OVs52`SUAQMSv=f%(UUIMgZ2 ztd`#IA4;JYo8d7qF^_+(CK#^iUm5NW1#LyHsI>CG9}CMw+t1eM4UcFY&94fN-3}y1 z&m41#E6~NJkZl^eolfyxv2~4J~=SO6M7TK*| z@s)=TBX}?osD9!Gw?%uEn2()_-z!(;oT?eyTpnyEUpJXajkbbNlKljI!rr>1(6`Ti zXZW!jcj1%n-vR=bUi?{R2=uW{3;`AbA8o;~j$g08!4H%QeXTcdjE#-;fk2t1B?4ri z_r3v&erCbLW!qepk-E}xnS;H z`v(TV83eEi0Jg?+@g)g#AuPfHYDh!U+&n&%YVk;ahIMbB^skYQMKA?i6*LXL?mZ@W zlKSX-tD=&|?k>NXh&h(xdJKa3V-;sA>hbp^6FNJ5)AHxfUvysDp%OnWHa`(%3aAW2 zTfd@dfvyaVdu5i-!(%+!XZR4^#MDQ zFi9}0IJECh=$)T37n9etgURvNbTel9*L){fc#yueUiq+++3={3GQXx1){hVH~A1*0L%bMIFoOZ0GP|Apo$N2b8 z^T)L@Ln0&e7)N}*b(#}*CYp?UTUvSOJLFPhedS|2A!CTEh@&`-xtrBPl=9iV4hmfP zk+>eu`4?VOhD3P#Z1e92euo;>zlEvlciwXep6Hbs8~9vtNyvi|C;x3y#P8OW>iue+ zrq9XAO8WZk4Gk9$UMG@J-i+VBe+L~lV1xYoJ*qYga>>R>q|A>h$WR1>^s1+Rb+N^I zw@snD97DqddZzf&r>50Y5*=g<>-cp1&k^Hn=sAIz42b{OE zl(N`s(VY^OWdq7`rQ+;0KGI90t3b9jSqSfzATXS=srjc~TI zD*<#uq}5Bk1Xa`V|D;drAU^RC6pNIHQoZAO)JN44w?>$Ao>gxhRqeH|Czzf6p1B&z za4SB3*m+Y^XbPQX9JrtNElPX6fM7I>uKJO1dK|AHR8fp{kzllJL(n%jfs(F? zL2AvW$Mt)qUDYYV$gmx8$m6{vD)FNfs+XNeJ=cfc5@S2lh#nUH<{-9Z)Gvq)hmswP zMRb^c?fLSrTd5p4M;$d$<){?c9;7-jl5Px4Dt?u);%7YOYK_p6Jb5bMNLt$!huqzh z(R~;hQu%S|swc;hf2lm^%uNC7^B`(H7^^Cg|8UfqLLiPsN|f{Qc2k2um*0H$VzJ2W z=3<3fvE413T!7beaPY@iCZC#y1}q#MoHr%dVS&+_n5#A7N%S)}Ko{BHC-#YS%iHgO zxzeLPY!9L=Ff{s5Qq4U4LtuCNPNW9T=y!Kzg23E(wf1mz2LB0MRDT^zYIe4gyu5(V zEhj4KPX_bksTB2Z_Ov*werJqdFo+NNkPi0Z1qXFYU=9g{-MIFC50AWUoK9X6ml<&c ztd2#(PWcsd(o(uR6%L9{cUx@w8`~vNuqhiAzh>k{{k`gXmNbJI*$ajvYf}3T>W=$@ z3GNcr-J)$mn<)OOEVE?DQ2W>w?jyPk1OX=UN6Qs@{^BsN8z$ z^^%i@pzd|sU~tkN#MKaJ;x9Ux79v@cYs!%ex( zsmp9F60mM;QF==h_Kt z59J(U)2d+aKr*;K|SZLslV8o7y+RcHm5{d(|-a>*~h*x^G%+KcQm zOS-I(_(<|(o5iQPr0ymW6GX5ma@6?OeQB6%*x^TKkKD_{9LwlLb63W1FuHXGk|8#H zkf&=eVyt8Idlu`|5L>Q#=Z%H7*tjrD+A9)dtr?gu9w&um=CTrIyB_*_Q_Kl2ZK?DsE_5TVYGxh3{7i}iw7({kUy`;RI!R#{ds0OmS+9#}hB(`>&5eV`{l%wo{Qkz`M}%*B zY^|JA^<^Y3vr)%9k(k21?$#=7(TrEMgd@m+ycc4#fm!=FJ9c;-HN+y+}+(Bio3fMm*Vbh z+}+*v%zHk5bIt6@B$?#NTKDo87B;_e7FFR$p;4qrB-@XMW(@UkeZMl z^vo0&xXx9rq8P`2_EhgnC(ByNsV!oOn}i~zf6gIhoRN&qS`etsGb?T^}O%t_H zIFCLYwKbD!m&$1S>U!zhLV$c}`dQ^WnxB{903~QsbiG zu#_^#z{UoicorI{h_o>#pLhAux5~;|+lSZb7hxqVqN=T3lUzDWQK|9<(qg!ro8GeBt*L2F}uhLk!Lrrv9O}Evw!|o8LtQwTNrjMP?O=$L4Rp%4kT9&DG+T8^f0NUPsX#^+Ey~0+~WH}?`CxK$N zwQlG0BNx;nGM+QsWTD)x`crC_AY zHz4JyehkeC>uL(Am@6nqfFZ|52;JE33rbrnnoA1mItz<2CGli^Zox3kb*-DX-6IGL*?3%y8SHDQR zUaeKn2Fdu?Vr9;#+|U({_O1%?$uf5~x;X!u78LAzdpesdlQWebtM3UeKhR%oH8{?n z5OUw0UMNJ@ekt&fQBjjP|L&%&D7z7G_l=6~ z*@7g=i!ZMBz~u3WaBe%gO0Li|HSKD%n-*xK9z?=do4OgX%Ect3C?62(chq8U)yv=WfcddtRPB!URI2wqMd>DVI-aQ>@0$^ z%TAK!WV;wygR}hSl2Of4wJR<75?l{94NKC-V9S9-8GB%Z zCL$~h3kyqMcJzrJ>Izp_tfli&R1yLrT-)87MWz#4QVjxE>RlHmk&oSfr(OuvUNZf; zv#h48)k0cjFfsJcE?^bX%VYkuc~B$Hl40$FLekw&Y`9M5x|tKW>-nQk#BF|N^fk3U zx4pcjHsU*`g(_k$S@mqv9hnYUH3Pp!(akfa@+?obx6U;=6Z8fajI+{`_XQ$;(+7(?PYGt%B3Ycx zmpstiNy{2ZO!^%4Kp3#vpKz+x zS7>&6x_NkT9hM<{dNspP>^(ZTTcR3%;En&jSN5ZixI_;fD?L#CS4dy8{7wDyRf#N` z56(tTFkz$XO9e@f>Rsr+^7$0)4&mLbr%lJqXt2y7ac?CmEH` z+C1B@p|$C0F7A#hBxdXJ+gbipblbm-pW8%DE-WH(JC8h8AKb6Ksji=PXVW~gIK`TJ z-wcog1t(~$*WdiA=l1vaA0Ho|F(y>-NpA?Si1>XUaeMwPC;bUj1~iP329W=2*oBAh zSn|w-KePDiLYVs6K(m>)Y~P`tYFC78HnG!ninRrtgu0h<;zAUAA!Hmig`k596vV$2 zAJaDLvk1o$B84>m!46aNT@XXQT=^A=u=>DT7mm#NchMq|SuZZbZ5peLu2HFnyE-4J z?U{W)_h1Tf8TS@bCrT3DtRGq|S|xO4xw|%3(Gs_x?bGXX`toqS&Nf?}y|CWWkc5R9 
za_(w2dGZDH>SKX`fI!6ZtI`ECzi2AFRA_k%zEi`n0pi~WR|9UJJ# z7>m5MLj?Oi)NMiDI=UKDAvo?OS_5Em&X>w-qdgt;LEL6=^Y)A@{`Z~d_7qgjdN$Y? zOFH?H`=&feS#@JIue79_t=<>iE2VMYSy)}^n18x^JH?a)>LC)~m^naUAAqMu4s15o zU6#e9_=oB~%HmoIlL~`^3X>}H(`{Tetmw;jZI9KndQ;aB`q+O9OScauIZ4x=Wwh^r zpf_ZMGmSA7>)<4RmQaSNu8EA+$uT!tW&M691_Wzp$jBLL^HAs`MEtLrb}u>vU&#I@ zCeB@6GVFK$_q{{-TBcuDR#Q>+Hz1fjVvRTfJ?A*4*d4fOUBCb4UMpsQ-NF!^I1Y z;P@O}22ZuYp*=)4!tu zW|tqt#7o`%UqycbZ}K9Niz?{5@*JRY0n{%4KZ4lrh{&o;9IrYgO>|iM$w+iR!gvz9 zzAEi>;9F&Sp&fFZugmdXm|1ynzv9rLXJFIX#p=jO4YnF{t%fHU4&GO$s~<_&*3=xy zirsZ}53P@-#}(t#(pvFACe$mPj!SH25m8In8{@=8Kl`PZ*p**h$Da2K_;$d3didAd9Jz0_ffyMA?Zs$pd8 zp+WjCAM@e1&gET(4Nd){C`gCnP!n>(1*1XHz>r6GcNkGjZZkSL1d++kHiO-6Pxk&j&G zSJPynCvZB=Y+8HH4lPJ`u_~x49PS&H7a7{CtUQ)Q2Y7Tl7(KK*d0XEi7!^=xX#Ob| zeDV`yBdV}7tIZd3aU7r^U((Rf`v#yG3%oDeqJJW09%EI~TyKZuDB+_&=sjL!9G$0x zB!|%gI>6lAt@!g`rkic`GyJz+!S;+c_Mir|>?og$;_bQoDH{KDHO7j)HjEXHXB;vU zW_pcTEpRk^IL%Ue=5mMr+q@S%wU~3I7yy#-Q$I@ogKc5ucKwXMI!7=e_$MA27cH?> z)bVA=MI688_P0DDpP_pR?}UUVV7MI>ZL%2LZx0=doLhIYgQH2$q{q5ck#-5u)hjl{ z=(1x&ddey6$j|>>lf|Zd?4gRNTRm9BZn4YZ5&WRqTlC3ImR!>Q`Ajkni$2{u z%iLa%7O3x6tE=bU*fpa6ii8joUYPZ%n`vTTo<}aTN_e#V?!}-a<9Fav9jw%aI?akO zcNt>@>?fF2_w3RFFbKc;@yDJySi7@`shf~%?%YLZH;kVJ!E8f%lG$V@i>|lp8*SOs zP@c*f87a@Ll?+{O`qu2W9@zYQ${1a1b4mU4N0o_5oqeMWM40uk`#5UwYeK_>g=3cn z7D5!e_taEHmr4j-KSeEJyVnp|uaXmQN@4zK6vl`kYWkpmS-2wR_`lmOw-9 zB~Lzd6p*GCZ`A7Gh6R*OReQjW!ASO1bNOBxfF(y&|*GTvklKk=t-85sq^;)?`)t_IFGvu+>u zml7+z&p1XZ=ygT4E#*F_Ym_JqDS#!rxwTa=1}6Cq*M{BoZBJW~dUVF-uq>Os*cnVr zyElEfFmJCaT3$`sJE$QDVox@plf2UfGxUo3r@=l9mJmCZF^}C<4GR=6^Rl!&l^>t` z$n+g7oNn)9BJ)tX2uLifPO~wQ0g9o<7q+Y-x!J`uU}pf(3xFeqKP%medi&$H5533N z@iQaO_kJlC)796&dBt4B`veRNI$3|=5DX7#Hn+e=)IB$Ua*7HYPr575dL=IZEfiTKNTH4shkfQFeo9zsRWva$=viIO z09@ndh~@D``WkMt*$R}`8+omJcCu#);Z+~@(A4=X$r(5rW67_yj4Xk9t?0bqJ#Dlu z5w^tU_G6(s(?-h;5djey8KKecevjJ*|EiV%8)ZoyZ8fV&oBhyXvM4|!Ff%n}WNZvT zp1wKIKZ3{FPpRQue*LOz+cPY+mpspCbrSApZVo+nV97{5rHs6_M+%J$gLiIr-v6^I zqWzE<(rfnF7Jh@X)9-z<#2yN*%1)W7PEcEkiqd>2$r+^%=rjFW=z+ z1sR)%%|)oX*aToq8CXu(R}l?}5ZTU$mxGQrnp1y!bse5t9`p7?CLGQUYSsl*{8G8Y z^9=B^a$i|0S;UNk_12bCBmI=tmF&PuBtc$n$@r>Yz6 zGkq75fW8dik2v1wb`Le!gO4w8VGoxFyQwHC|71bi&`6~L1fz5|8^A3Ke3M6phCKrN zl6l;DFVwGZ@*7)@tPNyswN#O*1?{=Y2>$l3M%CEcvf07iwLj#P+|+yb@jxyuVOwYM zk~f~St>#1Av{IWCFIAQatji9GQlNiN!PVtboMp~PN!hZW9i4mA(dwOT7;BGR^7_)R zX3`V`zUP+9Ze#eII{VJe#WmhPKDxNR!264D?{nhNUGS8$Y?f=|?b{d1 zwMBD!OP`n?Ak7Qxr`w z&u9sPUT>LgjNjP2BCWKd@Ln^{zZbJbO$^Vdm2L?eds*3v{|V|y50h%Pk$vGA4N(3H zoTXWKSXxGA)`!GZHaYQ&YMl|e7bk%opB0tmP;SMnnJ0Lag6ZiMNJm#ZBA3(;H5x=H-42TG{MaX>-L{aaV^a7zBSCJ0fA$ zQB*xV+p#b>(Lcw=RGmx4bGeYc+iY@yD>K85oGiaWh+wO+=Wr7UOK_n_zq3EyYrt!J ziq38^+w!C-t$BAjI*K{PQczwX`$57BSPM{()47>deKe))M~~M=a)Xj2I6V%k>xr$u=F%qgq(K`uytjnW}Jie%1?$Bv&$8zT$%wt|#2lhJ0am*`YfQ zv_eAQ2#gclGDLamVqnr6%`=gTH(2+?F*FQd96C)f)mx5lwTBkFNrjlAoI9Apj<9cw$<$G*;M&{ z5;ugU#$}n4woRFfq-YIM^DgU)4dN5ha8NO#xRK&B&+g=TxEaqOlHf{lZJ}nCrP<3# z8S4}4TT`-l!xgTa4KuF8hXk=QRp*)RaXFN8Ug=bv2MKxH!=DAKBDbGwaQU8kXJw)0 zlhOBw=umo&4~Yb@%i?LZBeEV`$@M$Paa+@rl%vk(OvxHt-jaAfl~XNM3jSI7FA zc&ge3rZzsbM=-VreYQ7!rTe^VGrVQ(fzRG)ifIl2qq-RfJDp8Te;;YHCD8L~+rSwvi9dMfMM!9Ydl4(-y21 zo@e*=f6{?T@u8ys@*bad=R8?5Kv#(-x&HgrahxznOdORAXxJGK2N#xzjldf@}C>Ry+O)%7psTd5s=-!aY8H zsr=CsssUFS4BXrNnmHc1hO*Bwx}AXj7pAt<7vqKY5}R$Oh!ljcWQkm1=`3&bI{Ri0 zIrdH3Y!6G1T!;J~o-tKLePTqH4{f|X4Zu&dMb;LHmc%a%R>F@J*}FM5mK+PqH=cwI>lrITWF-Umhf^k#f2%^y671jlNJ#P0`RUdFffdaaFaUv zEIiCzLy37XU1e);!p3)*q!tpk`YRg2_2H&n6~V1R3Bmrbq^mJl5r0G!Y3mB{v;*g7 zvk!~u>)beaS-K2(LT}1ZU7aL9&N-M()85LW%Qnd2?3S*j+_~h50az6(oax zK9;VUf2AVQm)9n(h`WWnqABup*I}L{tvuRhOA-4HgV?24$R;ypUFMjZN7~Y$U@WjP 
zhB*b+b|aG&8nV2Vg`gPg|CK7GIlBj^upi3byl*mxEvjNP<(K1AjC>`n61U7J(dty9 zrMr5+$#6v@&>%4g?l83hCpz<=NGpT;GglGTU^mxLFPIrrd4{_(Q@>+Q(9nh03GeVr z$WEzuBklsaPCUsQjbM0(cBCr5gRH*XW#SWcT?h(@GqYOI@0Xm#+h6oNDBmy`2hBgy z{Ka@AP^#=brJ<$-1jiJ5myI%(7f!xadDMu=y&AE(H)8R$+>SOKzE(I5!=_|S9i#>q6#;0H8I?A= zpaCAds-yG!Ho7r>f3Xn@oN!N7E$t&iFY}l1JFz!SDNgU9FOklDCj+b`s7wvFG-hT; z5871 zztKNU3jOrD*OvMOdVDue>o->*IDb2K!ST>2_ms6MAn~Pp|2rZPBev0b0ngj=`5D(w zsMh(yJV3KlM1)0~expOz?1@ha2~T8t{=o+H_|J|{bX||dbA!;s?ZlphvW?R%_EKO; z2GKUCe&ykMZEn_al(}Rzh|N)`g{wX;9C;#BQE~DxJ(a$E3$hrpeCWyqYCZI6cMZ?YRUM8@W)EH zHStSyUVP)|2!nPisqm0HdMWtc6Y}g_+WzIW@TV%`N2=R2o`t6o7wzqro~9wWZW5F2 zu3c^3M?()vNEWCu0B`*Ykag6mb+o&@c>w7g4CcmB1T`yxK-kmYt{mjr{M%zX_y$2e zMscke^I&1*Flo-V00<P3+@Vk*flrQ#igNTw02IWeL)w}q~;XcL3~im;mcO(Z5xA1^2w z;MQ|i`Xru)Kz3OFHR{rmiIy`C%qB&Cx5DivcrgxnZPO%ZBo=b|u~$^g%Ac#+=mig! zzq_nr+%4Tl`3vHZGa&pO*IHI=i4DY3;W5!cUO$6pr#WJ#Hs(mcwm*9>m~dG<347La z&ec|sMzt`1jA%+Q-rox#4%XXPTxduHMu{}bwAJLG*JbA-|IM6w>dwPxO%B8fDEhms z45D4Gsl|%c*wr8SR7Q7)a=>MpdIxoP@VmVhncR?YuBMnmTsJcAJEmX#ouHPQ`ht;q z!2tPVYAb@r9T}SuVI9nJ+cygCF0yXbO8-|3*NfcaHcHssj<28}&M8es+N;o+zo6c6 zhG_`guwz6m2*8UO7#v%(sx$A98&+jpVup3p(q}xzvP0E6^~wJ6l8-qJsR=I5PAiHh z#i*$|B$V(A2mquU@JrGbs%~C@D!-`3ZqxNHl_bAATI!q(P6C3qc2~vpbY$nz!O`KK zRSl~T&|^qQuR9q^ay8qT-7^fEU>Db6ceqSr@I|EC>f6@B1*X=!?%btS<~T2y{LvbM8K-I&D!A-khAfKcw~f4J{>%~p$ER4;8zM?Ir> z^mGoB>8;VYyLHRl@Qp)S&Q~p*)HlE67)5A86pWQ}rBHDZ;`k}vR*Qzg0~O{g#0?TM zs_Lhc6jV0&{rxq$=_DAm+-5*RiU;AKm?jExA3&7}qJ*6yxf4JXGrYam^F+~=FaKnv zhE}*G`STD${AcSM-Gh`ukfH-kBGMX&J^;hH2`7SX&=F<*Byr{!l&0mlRq9_`I6{BL zeNh36^?+iLC^8Ut%_6aQ&|e36I&j$BX1?gbmk331($e}o3wC<0{{91N@nL48hf91f zph2gpQfnMmdvkHjymTTZhjCo;!CSSIV)ksj8ezyT&mdYhsjz6rfW2W&JWHUW6n z?9*F`v65OEXp+%{F=T7Vx$nw|7AJ;#jq=^aF=kx?a2ep`XT1Cfv8Vgc(=Hr%Leho@ zrSvVd$Q;y0p`!c;Ya&=eBLBpdg7!`C*I}nveW9>zB`XX*{qHy$?>{ee4#y)qN3xj) zj6h35xDQ0b`4pukDb)rh9Nzcw3ul33Oky0j0e4;LL0nrb%7~iLo$vYE62z@=13t7Arq^it2C-iGh2#J`E3PUWi+R_swH+ za6QK!=8?rS3a6(X*ScIh0{+srp|)AyzISzbn)K2dy6z;zf_MxQ%)s4pIl>Ykb=@T~Tn3Uf=V z6BH;OOVqC*sq|;>M5+k|AqbSoF)^$i2_}>b+ToR-gg4E37L=PzxRTb$KkyFbBF`Mh z@g1Ee>?(B!8{Ef1-aDHw0pRQs;&YQIf5G4C`{(B$rNp_-66o$*MozhB*3^!QfCAE7 z#1?M8-I}t5U}WvA=ml_N^crUR0@hWmsJjv{@{cwn$I6D>inLUM{gttH6T}@H;yrfd zr4_(Ap})|v#=uaV5B(jje+eZ`67$n`LdX|B~C8#nZCBR}FE(9s9T?mNVTq z-&a7izsn)sL#BfKn+EC0DcR+I8NY}kkYeF?aQ#tYV@C{F<^xIpEIr-txRMDleLPyF zOC6{&c>^|wdABnmC#FDqeh=<;WEH;`pSjo-Cct}^7^P^#p<%$yUfpaNPflP`MOY8o zbEar8sCk}M6!Q0~DlOR_;7|`T$ZPMUV!g_$kGN||peWp%bYMow1INnG?VQZja7f#e zflT6!VX2S){ge*dNtwIcNozid)<|c6@)~;@|5O}U1g{*E-8vXx$2wCkEyNbF0<57a3hOl#8pFXS5Bdwd;poP= zZ9Sqs(G}vv@8hj+X{<+UA6CEeKJbPnrZ<~UZ$=i()Bqf5WlKeFw#IYLKBfxY0h?nw zr@NKOhJb`fVMT=}-~}suGT{X~sf9&k!NT~7jOU}u&oMqf--(-wm?)eqrk9q!e9NoB zAsPC%c8ulC!jqQQnVF2mL`IOXomGq!u)9=}+XR0K#xx*$N+XD1Q@Ei%#`1Y7>*AG6 z^59^6zZTMY2+=|CaeEqNyK}bPW5{TgMuP4 zLpPLu9Im?F;g2;z*A(tUoUt}NF;bB?KfMq@g~KJ^vcpPu>~_s?h{x$7jhWXby2&*D zUc+252Ni1U+HQi59}^3vHLTVJuS75iZo{!*1X{#yJp3@bWQ^b3cECn|h?bK<+E7{K z7YdYW6XPp%K#<%q?lD$o`<`L7%u$2G+D0H;N*uR1zV9uLIEX6E(hk2em?LD>Ts|Nr z=HI0?vUBxs#$4l>?V1p5%l>+*F@Sz6ZQ2t8AaS?8UWu@96FDJ##nsffk4z4$yhm)i zzy+Wu0KwVw?il^Q5ikZ2pFyBw0LKTA-sAGipqd>=>B$Q2ApRRme(5c2~n`6vPG9%LR!>u_+>sYDPMEpaVnr8a+Je7M+O(gueUD}dBaePEZEq29szvd1 zgZ8*X+LbG|F=I@+!U`{0)@y%Y;85mmzmb?$RXf$E^+bacDLn>ADgfK;+S-~_lf>j0 zGSlI*0PEp{zf_0FtA9~kP-eLr>}?s-oXVb$_t&yS0-rW}SM`Hk`BpkMNVo#yhIUC= zeMW(Wpg-bUvRHVSHt3FBSP@FwXlhJ1QKK~UQM0Ie>qI$t@JN0)BwJrcVPIP0-mfkH z%q8_maSp(=9V^~`Ih4qu!8aYZTbMf@OvvXE_gTE0qFJLm`s2yQqKscb)e?j;L6`pc zWKq9_@5Yl+Iyfp0e-M2Tu0%=gVvER{$d&zCHQY2?ZNY8aB(S1Cdo!fVRu>caS~GUB 
zih<*Jtkxr68Z#eqg1_F~?4B)aCaT}KAu<8BXo|B^pfl?WS#B{3qq*G2TFAdRA3-KqR-d$E+3zR34DN^V}Kg}rYN@NpGUI{&F2|2 z^)Wk=-V=YV^-6+ZWnFfZN>ySuJVHaj?P*XQ%3cth(eB}5dsTO_x!Tuhi1u<%yW#>$ zUG}=u@&tUMSJJqQk-aFkJO2ce*#SHtNr9g{uUMThC{+=;1yMv1S zlmlf4oYr!3zMO=Jt%|9Zd z*@Gmf3L7$_Y}rl^zNZ3KvZ?8Tnbp;?9xC>Z_u+`9$@23>GaYV^^Kc+T)LlqTyFEX8 zdx;sddTE`?=X%?C9N!d)uj^k8*A_8>YSDb>5z^*#(TAJbOVHj0dCnXpLn#+@@d8gO z)^XWnlx=-|9RnM4(_I*i`z3;6@A4dfF$YaIsix2pZMZ$NLYM3Qcj`7?xFL#!sq3T9 zMuAcLg&ZvoGcBI?kr31_m+&?Om?29b0qo8k&~ZALiF=NR?XID9uw+j6K5@`p6DPIp z8Rls*6>H^*Ts6P=Fdh5kXV`t6*xuwa*SDp+W=5VlFouIS6OMd4zBum~(sDqosn~x^NB{8W9`K1> z%g~1hBY)i=&6QMTWx_a(@A0va+I*dH!#es`u}bR*;rFmn5tEhg$27-)Zywbx^Xr-b zmQQ}G+?k%!q_A}k1kEZR#0+#9ei>Y83qt{4ju~?g%XfJloe|8 zA*iMF<-|;DX1E-Z7ne&D!7e%JQ{$<(PHHJH9M1N0579ZBoye;UT%wc0=K6Ua_NF45`B7C~{A7;26eDpW z6T$ZBC0n;OYP$B-=PzU+SP`p9VR6rI)xNMOjA0e8dt+tEglp$DIk|(hhv#7qYN`ws z{>}D#|0Nz>%m$l+w%uG-^5-XXTyH^Rb@U7@(+f+`)q$YIk9QFtHBHS)B2ZatDkdpu zH83&pTsL%l(YQBozHbgn_CZrZd*iX9EjgVyBKWjdRqiEw4$Zs&4Y8r5xDqj|>rR*X z>K%MJ8=Wjho7s_X!=Us=3xAs{D-g2$8UOfGL7J{d36P5bu=)Xd>}Xdoz~o|QXP1`t z6^+vURI=6eMZR~#e{5{*Y^5ndNm0ASBvyChW)x?7QbCB@`A&0hQs3$r#FU^H__e_H zu*H$dULhfLZmyX6D?@HX*dawHZYTkR87<)^0T*aiy5(`du%|#?ggHi ziZ=riDl?F+EW~~RQdKTYhk|Tq<4xH*4~<@DRCrJ+$B}!&o?YrEoi@%agno~zcFGv z2tZ{IjGUs$@|8t?V%3p>F$=e}3ztQ8m+3UAeSy^GKs6+Uee|lLC(lxHY$80*eg0qPD3sIH%?3c%ucT_zBh?r0NtrYUz}|VE zc>&*ljbz4?+tCdh_DUR!RSKsCJvX>MSdBos#fq*!hyW8OJux+1_qgD;l;(PY2a-n5 zsl2T8c0T3bFLABl4#|;uvfSFoUS$uSH*9meW0*hkDmk`Mm%P29D|>I?H3x9)TWnad zwtU@3W{wnuG}L2(qb3p!3^?CJGnKOXr*lhdNc^V`o@o=euFd%9zP4zX-;6@ehSL${ z&_K3#-ZN7Qx!KS)BM`av50E-HUX&D^2BD=yNgJk(=FTmz81~RYO;%FTM7!c~4LC_*gWGYxAvM)e@2l)rP@4h%ctO+2@Z@f-cpVVrimk$5oo zgd7gL8ENs_8q1`Iua2owwKdmxT!km+SXuj+Mih-%!O=1rd-srY{M&>~_g^8NQPvRJ z+f2@&kM|t2^x+&Xnl=dJw~zM5>E7dNzFYeh+5!kifRGQwnc8gR?e73cE7S&@>#u}E zC7#NzA9mt`(YngVT_NGoRJYfpR8*;PaRBmker~S8vFJbt{xt$9(h_(-8*TPuh7_uA zgRqhli;Y3$=9X_!egP3uqM{}i7AL=8fMkk8L8$6RDZkWIY%VS?2ndLGF9Y?VM%|+z z`LRlQ45o)$q`xi?bBZlQbfi8SgT1u*_4ZYVj(2@QFFH!9v9Yn=k&}^m?2r5<^z_W> zftShBzIUEeW?WKWZYwgod5p*xGPZg{EigL0Z5L5On3l4k%3zD98 zK+4GcjTssm(fY**M?ET{qLL4uS{oMqlF*skY9;LIW@;c^laK=cWK|13c^;Hae(UK$^s;=?2 zC~A6mM=VH~OgN^rWNj?+mL-|P?)|avsQ_M-1hnHC!b{)=5VUe#y z5q*ZnL;1t)`cGX;m=7_x>)q-8VFyhH4@aAej7-z*QUWuwS!wy%VU-1Zj7~vezT0(P zXO@k^J%!U!*H2LEz^|x^uuzHn@X!NgA1|9XdJzdF8(YuDtROT;q>UOk8aOyA4R`># zNlrnL;yNn$PZ7E@_@(dL9@xb#gd$v$U~7ry^`h2plvPWM8yXs#m^k)) zy6oj2pul6+a#?tP-jf0)Gd@u8+8}dg={qYc3cgN7MTV7T7l}ux^^z7(2+>0c_w{XH zO`&1<@$+B&bs#1q>#*JGNoUjJpU|BIk`Cspb>&1lR@s>L7Ub?(x+=H$&7FXJICDpD zC4rR`m~tUr%Z?N!oH#{P%~gdNj;nE5hOSC|H;0#w)0vWrw1(jBN#nzlyn42BSnMQ? z@2P8lW`AkCIF&}FfaeB{lz%Aa!QX6>^#<(?l{C~n#z=KCwZ7a)mR}Sl4J0|MIm#)6 z`NS!$C)_@sMBY2sebXLKemiDjq7Bob>|kdH zU`qCY1#?nT(tk(*Mc7BWF_r)7b;5l=DCjAlM2iA{?EWNow!Plm+y<00{;1jiJwP>c zlDavT`X9CWQ*OfL`{%c@vC;eZhWN%|dR6*-SMb`?>7e`eLYcng@Y{t>?;q85VSaJg zIoq&LJSIR|#(1U;F;|zVL&*nLrZIH}P;J0;{vZQk6*6;$F!?nz$_pWHU;t`ZruwSl zoV-r8xl5mm!X&GJ(n6$woqGKCUSN2{WU6lNqUUIK^UKNt{9|Wzb#(yTo|u>jXd$O? 
zU@pC$v=DPc1l>;fWv#^YEQ4)~uDf8mYOn}+JA;BmfCvX*^9m$Oo4GXm;&g`dbx8=X za4``5s%=F&m*jo>^;-9@Ft(~id;NfkS^exN^L^XGy~2kD5c1#rXTuyA=uB5&eOUQW zQJBT@FyP@{V}POy*~$#gUY~3@seNgYIYdDN#C6&dM>;tP^nlL`k@0B9)%8)fbL%Gf zN!U4hf5C+UV=jnnuLW8d`*)j;GMLx#6#?i~$xgUo65?u=27qs%)$^&Yqr(db3sO=- zE2)HhCBje?;`vIE)zQP%?E1lDmOLxaQ{#4H0!RtHz2AY6;NI1u}v-{Of`r0J|*&vkDX zM6DbgTywxEpF7XES{m(btuR%YU`Ij)ydA!F^@~m3+$J7cP>496FC!Z3ozT@L%)3C= zY0$KKcl-)?^b$G<<^e?t_51!o_-He?7MM<^>s)3#rL#C3&FmtfBORroZ1uCEA-e;( zxz|8|)cH!&1rRC)ybvIf4M=7L4(W2t`8LJsV{zcmQ~ve$2JFi5D+}6o?Hc&|#=^#j zOiaj1&-l>%ha)k|Wkdp!Gq7<0c{wVCB@`hZlR%} z{2;72jn`2rM2qxv5`%azYYO)bS$>i2%C@CiK6S)dChN7QKc6o`8#XfOI*BXR zUM0kbRT=BAPtn)=$GgJ3xO&ULQ>aY#*Whxs|6)Vz1=Q7-fLtsNPEJu#Q4B!T>*&be zBsLX|4ETw~5g%aB2K)Q#TaS={Xp+i*k+Fu;!V&adH>cv?V?9X}$bqLBDE-NtnX`2;>XvbvNQ zz%_4(_~jeNP4wF8DtC78czvd3SF_8D^Z4})#+%-yOzRg&L ze|`WYrhqz~cQvdHc2V%M1QIQDwfN;LZ zlh42F{{O&Hpv}(q_DEJ+hB_GH6d5Y?0^M1s`-jo) z_V)IFe#8yjPc^sv<-|r#fN;OglpX~Y1(M}e^nW+d$a;S#h7-SS4xW6UY!0RewYM|> zYYz6+}+>r$5wACS~jofF!UASWP% zM9B;Iow|ZFK zE?W21(tkvoVXDZeC<=0NhAz5Kn=br7CS+owWA-Akl@izdT{_VuB0ne`K_6K zRdnZ+IIg3;#13)TZYZ_sVjc1A!=ag-b>{8T4v~hM`u|!w?|3TPKaL+HNu-pSab#44 z5~qx;)JbJz$2lYsMay_1E2D&Xvf_ydg{;hMg(5^mMy0F{*<@w>KAz`y|HG?r?)$p0 z>$|S+^?9R5g2*ho`?<*TstvQ+g#C2(n^f1&&yYw(%I8NYaE$6g>~J=Mk78qi z5(BzLL0nv%xJ~jNSC)OZ+s1c;!awCT33T)|UsCM_U8X_@IChANi5+zwF)%aZ-uUpQP3MJZIq0XEBy411q9k zI@K*KJe&dIT${-3Sf>fmby#*+J9B*Q-Q#v!2-MnjG#h)?zs5P@cZ5@7%z73nZik|a zW;N)7Icg?$VpA*ORR%V-P%S!F?pA=5$<>v#M|#6D-7c`X&sO)_N>gNWOB}889(j8* zZjwac5@dw^RSVX_&pT~XMBXBr;*XBd^S=FQ-s*=>j@My~_!<{YG&e167^o4@)q#Zs zA6gN(PKa6;#+4G!L@0V`ngS8p(bOgV21imP_o~P<#Tc#1R3?@trxoQd-a^Q6@eh0M z&E)h8uSP%4lr3eQw*FQAG=SE4KPOqKPTJuML&4Qfib1=})<7Sle_Wfq7kY$c#QFn# zxt#nZD>C|wHMNF47$mqHS3MAXFE20SR91C$b%F=+d$TH9YC$`ENGD8N5)(5$t*Ss* zE~MJV73WgZ7vJ46 z(lEWHDM9k$3srT>urUR|ZaoN?0znQrs-NEmELf08fFh%mx}dE4}NvM5YWF((ACJ0TVg4UP8pcF6T2H^ z1>j6YrLv;psBYjE0-*(se74kY^D0fpANZ;Y8vNiovrXT^OxcQISA&+BSdh4uV zNw4#mmh5a%Y~Bojsk^&7E%r%l?3-oWGr@Aoexal_*lVnC<;MNkB{6_CI%IG{mve}8 zdW&;5McK<~!tMA`iGc>M;pfvA9-$(!YuD)74jf2q>0qDq{cC=la{Ba^SrdP1XyHU( zhn#C;s{P`)G4H+J16r|ei~h9$%sD9CN$ncf1D|WYX^byN!qtZr9+Z_8 zRCL+m)|2+)g|C!N<3(27Ri5No*5`umr>_5}Av6B16OC zDieIvx{_8G%IuqS{_;5W6MAQVF>0npx2XZZo1U7w@aGRnPR{3{I)RYGj?U=${cOVO zr1XGqQlGd7S1h9xex6^eAXVn%O`pJW^Q1={fipA+D4?LlC!hC3zhvm9cp6Mb$vai{ zwYTIwsb6y)#0UIQaend0bYX*LXuOf`#{&mkD+X`=q?$_|tgQG0Mg8HVpDkljhlrnR zRa)yh#vaB#cr-hOjkUk^bX(in5Uznb^}4-XPDMqq@^3ZAr4yZ9k!|n3?GFrSgjrN!qU|-)(*tIb^)LIQ z1}nKZUPivHeeq9AnZ!?0NbLA^MEmpi`JyaC?~Cq4#p!~rR9`;_8k1-vBkKa#sDEW- zWWXQHO7Iko^&P!NqooBIM-~LLW|&`5zEfV!Q60`AWN~`GJN9HuTR$1EWOtaf$r*iA zFj=1D$)AP+t;^p|bHC1dFSlek`@+yQJ;OgOQ?dwEf5|&0g6)-%0NI!5hzL+?op0Ve zrK95ohfKQ&;Rw!ps~+Bpi3evp)wRjm&1LZpeD9>B90jPf^RH~T3O0{ZdOAA}=(0a_ zD2|;R7|Z5)ZFBUJ+@X9I=dBN}Zw*{|K$j92beHK9yG-=wYzq|GF5B4wIt?AvnbDmc zGC8ne5&At-rN##|INvl?PSmBb)VBn2=2SD%w-&G%XNQM91op|u*tqi6%(J{a#1!D> z)_nQ0JU!TeEKkv8)Y8gISX5N>d4hRuV{W8xxP{4RRpNz%;sb+HdnmdUS+~E^ViVh) zIn2iR=EBB?O%IQlDf}`!qT!_;-Dc>(q3Qe;VpiJm#t#+Tw60awXpbxZBIRXUTMLU3 zQxZ*qnYBT4_LK#2n!WLQ((<51(Nh^kHhq0+XStTO14F>rV$AWO~XOzv^j zy$Nmu&t26`322%mv&JBt#-8m=#{^G{GCztz!@hJ?gc)yvYBVmuK7wf|LKOX4c(drs zq6BGI*_&{d_kzFIo)t8n)cjpKzX<`dpo;t6mkFE^{GI}gA)(pkvO>F`cNSd!jDv)5 zvO&c{x~euEy}er(g`FI*mf&Qe)`^n(uA{e2balgk^9ElQ8IOcfh7eqUu$Y*7NGZ!2 z3q`|vAdLKURYYv}?xe)T^Olz0M&Ak@_y9Rg{s)%*P8Nwmp%@!~266_VI_PoN*Pl%A zB@n)wndI}T&%fe|yX9jO%{Jvg)ztK3*lNmfgh(e^ecUmS`(WO1R z)%T829u2t+@8e&*rKh0<1$y=+$h!ApgX;LpO>0+wUTu)NMIk3{ky7};G(m}>t z4SIE9yDr%SU6H4}0z#kg@EC?>+5e-fdsd?aJkFV+CcZ}J9GMmcB)=gcA;^LeJRu1I zrkqNpvazv2u^Gh|_-1r9tox-s!NI{8h|#gJtKZ6o`}-Nbet{Xeaq}ke>y@il1$lYt 
z%Y*3%Rr7-qpV)1dYWrgw$riS!cJrTcc0S|4aP!23qFr6Qg-pVaO1iHSiA6u71T|1U zhK}$7cX~oa4Olpc90BSqD_efRuXyiX;Ah89$y>7#QEB|v=Z1GSRVMe3c(GNMrn&pg z^>eV_<5w8s+?97M;Nl{b)MU5Cn_rv0vz<}2ozrCwMts67e4X^Eth}d*}BYXZ0xXb=7i0`()&0<*{MdW%N=0a5FagBjiXH`j`!C$clFJ$5 zjf5)%Wr%<%#}ykFaswNgUzzB&I4$SFFYA8OR#3e6+Qeisx^T!mXamVj?}6ma44>P5 z0j2#8;}Tc-Q%AT1Bps;A(g==|(?~^y9Ni+s$~nj5rY2VLUIyv5mmY^t@ROYqYNXSK zp;)SsUWBU4{uM3kzSS`nIrNC(jZP2LqvmqU%?*n}ofkREZyUlmlON?04z(xd->{ zdyl4B?9gayk5~CHK;H`v%fP?@rO<|9qnwPMSCPI!#;VqppFZvuZ*GV$y{)CT82ohP zNXq5S`?~M*x@mo8HFP@NKHG@$Wd3OfXU9$p+5EEfix~+|88ew3ZcI)FdRW~>$ly9Y zbfM?{>k`j4t>a>crv@8HNW7un4b7K*C};8Ux!<~lkWFo<`y+UPgAL7$G&G1ISOqRM zAti-UZ2$XjeXc9YzFuuooofOyhWjCQC!KD8wL3R1BKS0tvDdE?)6$ro*s<2U_Q8F7 zr_^Fmo0*y6@lbdplkb6TiSTnEXnJ}YNe(9`Cs2SOaLxdDf#};Po7x1aIjh>u0y77fe88emGL-(u(%``Moy_B>%pC*90z~x zb%rwPm_n3z(hVYFy8mWmltS2xkkC*S&*cTc!r&}nBf;Z^)Hci{#s)DR@+2q`_=&Uv z(%aqLja~binhn$3ust|irlZpg8-6Jql3MFfVt@R=iBJJGW|&vc{!MYZ-@?cu`T9IO zJg~Hzwd_%1#VgIC)0@A~8&+wi5*a%V(Jq_bx*gEcK+6+WYD;S?*0s7zd{oq2UoDHS zFT�zYy9Hyst7&H^5ZG18Hj3#wogPhv0N?qm=V#L`g+)cyAmn`yEM#E!nPgE zq{4?{W&S2rJ_yj#&wHqdu5pKwwdOf~*rD%8=+bFLLBh4s5U+F1peydP8?8 z_-SlFVoG57-TusLy>$ck^7|qlGGTY%_5~eVGO0n}x2;hD4vB4iYyE2DF({&$lM3~8 zKR#4&vF9SG45T52MuP^9d;p<{caF(oXlTg6!BMpdpKrki3qY8xj){*SA0L+j!izqhxuqig$u1zg4S@05Ci1xZHc3WPRS15cpidcy$7p$#pD3vh|J=$zi-{QWwA9k7X*dj&UQ#d}* z{pup0NVJ4;=Ch}XqGCkTKz#tqc!ix2CwHB*4!Ps}Uv^0}vbz%ZhB=HM`UiyFUkB`9I3zQFW#@Bmw z#s>zW3_=xo+wlX03ZC_O{@=BZAp8Hf-0K7U-zD?!D*N|)%XL@%-&IBUv_1gqmv`S2 buT`dm;Z>s*_pG<;|8i1O`&gE`<(>Zlk_qm? literal 0 HcmV?d00001 From 19097bdcf2ba5e8d02271e011dc1d676e9409486 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 29 May 2023 15:53:28 +0100 Subject: [PATCH 496/628] [TlastMarker] Update interface naming for code gen --- .../custom_op/fpgadataflow/tlastmarker.py | 45 ++++++++++++------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/tlastmarker.py b/src/finn/custom_op/fpgadataflow/tlastmarker.py index 895a2eedab..6eaf03ab16 100644 --- a/src/finn/custom_op/fpgadataflow/tlastmarker.py +++ b/src/finn/custom_op/fpgadataflow/tlastmarker.py @@ -130,9 +130,11 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ "for(unsigned int i=0; i &in0, - hls::stream &out, unsigned int numIters)""" - % self.onnx_node.name + """void %s(hls::stream &in0_%s, + hls::stream &out_%s, unsigned int numIters)""" + % (self.onnx_node.name, self.hls_sname(), self.hls_sname()) ] else: self.code_gen_dict["$BLACKBOXFUNCTION$"] = [ - """void %s(hls::stream &in0, hls::stream &out)""" - % self.onnx_node.name + """void %s(hls::stream &in0_%s, + hls::stream &out_%s)""" + % (self.onnx_node.name, self.hls_sname(), self.hls_sname()) ] def pragmas(self): self.code_gen_dict["$PRAGMAS$"] = [ - "#pragma HLS INTERFACE axis port=in0 name=in0_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=in0_" + self.hls_sname() ] self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname() + "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) dyn_iters = self.get_nodeattr("DynIters") @@ -239,10 +248,12 @@ def get_outstream_width(self, ind=0): def strm_decl(self): self.code_gen_dict["$STREAMDECLARATIONS$"] = [] self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream in0 ("in0");' + 'hls::stream in0_%s ("in0_%s");' + % (self.hls_sname(), self.hls_sname()) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream out ("out");' + 'hls::stream out_%s ("out_%s");' + % (self.hls_sname(), self.hls_sname()) ) def 
get_verilog_top_module_intf_names(self): From aae59b1e6448c3274c36daa5667f943ee0d56a9a Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 1 Jun 2023 11:18:40 +0200 Subject: [PATCH 497/628] [Zynq build] update PS IP version --- src/finn/transformation/fpgadataflow/templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index f52bad0ffb..bc34f61a8b 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -135,7 +135,7 @@ create_bd_design "top" if {$ZYNQ_TYPE == "zynq_us+"} { - create_bd_cell -type ip -vlnv xilinx.com:ip:zynq_ultra_ps_e:3.4 zynq_ps + create_bd_cell -type ip -vlnv xilinx.com:ip:zynq_ultra_ps_e:3.5 zynq_ps apply_bd_automation -rule xilinx.com:bd_rule:zynq_ultra_ps_e -config {apply_board_preset "1" } [get_bd_cells zynq_ps] #activate one slave port, deactivate the second master port set_property -dict [list CONFIG.PSU__USE__S_AXI_GP2 {1}] [get_bd_cells zynq_ps] From 1679e01ee4526a220664b04a8c77cbf9ef13e3a1 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 1 Jun 2023 13:51:12 +0200 Subject: [PATCH 498/628] Minor fixes and workarounds for version updates --- src/finn/analysis/fpgadataflow/post_synth_res.py | 2 +- tests/end2end/test_end2end_bnn_pynq.py | 6 +++--- tests/fpgadataflow/test_convert_to_hls_layers_cnv.py | 3 ++- tests/fpgadataflow/test_convert_to_hls_layers_fc.py | 4 +++- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/finn/analysis/fpgadataflow/post_synth_res.py b/src/finn/analysis/fpgadataflow/post_synth_res.py index 8b9c5d2a04..1202120529 100644 --- a/src/finn/analysis/fpgadataflow/post_synth_res.py +++ b/src/finn/analysis/fpgadataflow/post_synth_res.py @@ -85,7 +85,7 @@ def get_instance_stats(inst_name): row = root.findall(".//*[@contents='%s']/.." 
% inst_name) if row != []: node_dict = {} - row = row[0].getchildren() + row = list(row[0]) for (restype, ind) in restype_to_ind.items(): node_dict[restype] = int(row[ind].attrib["contents"]) return node_dict diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 62b76d2f13..4c68a018db 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -328,13 +328,13 @@ def test_export(self, topology, wbits, abits, QONNX_export): (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "export") if QONNX_export: - export_qonnx(model, torch.randn(ishape), chkpt_name) + export_qonnx(model, torch.randn(ishape), chkpt_name, opset_version=13) qonnx_cleanup(chkpt_name, out_file=chkpt_name) model = ModelWrapper(chkpt_name) model = model.transform(ConvertQONNXtoFINN()) model.save(chkpt_name) else: - export_finn_onnx(model, torch.randn(ishape), chkpt_name) + export_finn_onnx(model, torch.randn(ishape), chkpt_name, opset_version=13) nname = "%s_w%da%d" % (topology, wbits, abits) update_dashboard_data(topology, wbits, abits, "network", nname) dtstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S") @@ -374,7 +374,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): chkpt_preproc_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "preproc" ) - export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name) + export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name, opset_version=13) assert os.path.isfile(chkpt_preproc_name) # join preprocessing and core model pre_model = ModelWrapper(chkpt_preproc_name) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 73721b6cc5..001c353c8e 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -38,7 +38,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames +from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul @@ -67,6 +67,7 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) model = model.transform(LowerConvsToMatMul()) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py index 5a45638ba1..0fa7155ac5 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py @@ -39,7 +39,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import 
GiveReadableTensorNames, GiveUniqueNodeNames +from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors from qonnx.transformation.infer_shapes import InferShapes import finn.core.onnx_exec as oxe @@ -64,6 +64,7 @@ def test_convert_to_hls_layers_tfc_w1a1(): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) model = model.transform(ConvertBipolarMatMulToXnorPopcount()) @@ -135,6 +136,7 @@ def test_convert_to_hls_layers_tfc_w1a2(): model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) + model = model.transform(GiveUniqueParameterTensors()) model = model.transform(GiveReadableTensorNames()) model = model.transform(Streamline()) from finn.transformation.fpgadataflow.convert_to_hls_layers import ( From 3d0b918195b5b5f04b38e2b067ddd797b317a8d7 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 7 Jun 2023 16:22:43 +0100 Subject: [PATCH 499/628] [CI] Update tool version in Jenkinsfile --- docker/jenkins/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2954877c2a..d8fea0124c 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -5,8 +5,8 @@ node { checkout scm } withEnv([ - "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.2_1014_8888/installs/lin64", - "FINN_XILINX_VERSION=2022.2", + "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2023.1_0507_1903/installs/lin64", + "FINN_XILINX_VERSION=2023.1", "FINN_DOCKER_TAG=xilinx/finn:jenkins", "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" From 1c36cdbbe7ac9c8d18414cacb27149f45c03c890 Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Thu, 8 Jun 2023 13:13:04 +0200 Subject: [PATCH 500/628] [Zynq build] Retrieve auxiliary IP versions from catalog --- src/finn/transformation/fpgadataflow/templates.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index bc34f61a8b..5ffb5e4f46 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -135,7 +135,8 @@ create_bd_design "top" if {$ZYNQ_TYPE == "zynq_us+"} { - create_bd_cell -type ip -vlnv xilinx.com:ip:zynq_ultra_ps_e:3.5 zynq_ps + set zynq_ps_vlnv [get_property VLNV [get_ipdefs "xilinx.com:ip:zynq_ultra_ps_e:*"]] + create_bd_cell -type ip -vlnv $zynq_ps_vlnv zynq_ps apply_bd_automation -rule xilinx.com:bd_rule:zynq_ultra_ps_e -config {apply_board_preset "1" } [get_bd_cells zynq_ps] #activate one slave port, deactivate the second master port set_property -dict [list CONFIG.PSU__USE__S_AXI_GP2 {1}] [get_bd_cells zynq_ps] @@ -144,7 +145,8 @@ set_property -dict [list CONFIG.PSU__OVERRIDE__BASIC_CLOCK {0}] [get_bd_cells zynq_ps] set_property -dict [list CONFIG.PSU__CRL_APB__PL0_REF_CTRL__FREQMHZ [expr int($FREQ_MHZ)]] [get_bd_cells zynq_ps] } elseif {$ZYNQ_TYPE == "zynq_7000"} { - create_bd_cell -type ip -vlnv xilinx.com:ip:processing_system7:5.5 zynq_ps + set zynq_ps_vlnv [get_property VLNV [get_ipdefs "xilinx.com:ip:processing_system7:*"]] + create_bd_cell -type ip -vlnv $zynq_ps_vlnv zynq_ps apply_bd_automation -rule 
xilinx.com:bd_rule:processing_system7 -config {make_external "FIXED_IO, DDR" apply_board_preset "1" Master "Disable" Slave "Disable" } [get_bd_cells zynq_ps] set_property -dict [list CONFIG.PCW_USE_S_AXI_HP0 {1}] [get_bd_cells zynq_ps] set_property -dict [list CONFIG.PCW_FPGA0_PERIPHERAL_FREQMHZ [expr int($FREQ_MHZ)]] [get_bd_cells zynq_ps] @@ -153,8 +155,10 @@ } #instantiate axi interconnect, axi smartconnect -create_bd_cell -type ip -vlnv xilinx.com:ip:axi_interconnect:2.1 axi_interconnect_0 -create_bd_cell -type ip -vlnv xilinx.com:ip:smartconnect:1.0 smartconnect_0 +set interconnect_vlnv [get_property VLNV [get_ipdefs -all "xilinx.com:ip:axi_interconnect:*" -filter design_tool_contexts=~*IPI*]] +set smartconnect_vlnv [get_property VLNV [get_ipdefs "xilinx.com:ip:smartconnect:*"]] +create_bd_cell -type ip -vlnv $interconnect_vlnv axi_interconnect_0 +create_bd_cell -type ip -vlnv $smartconnect_vlnv smartconnect_0 #set number of axilite interfaces, and number of axi master interfaces set_property -dict [list CONFIG.NUM_SI $NUM_AXIMM] [get_bd_cells smartconnect_0] set_property -dict [list CONFIG.NUM_MI $NUM_AXILITE] [get_bd_cells axi_interconnect_0] From 8cb04e65ed38b64f968cc28f44d95f12af346df9 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 9 Jun 2023 18:18:40 +0100 Subject: [PATCH 501/628] [Tests] Add node naming to cppsim tests --- tests/fpgadataflow/test_fpgadataflow_concat.py | 1 + tests/fpgadataflow/test_fpgadataflow_lookup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/fpgadataflow/test_fpgadataflow_concat.py b/tests/fpgadataflow/test_fpgadataflow_concat.py index 5fff286e54..2b2069a72b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_concat.py +++ b/tests/fpgadataflow/test_fpgadataflow_concat.py @@ -95,6 +95,7 @@ def test_fpgadataflow_concat(exec_mode, idt): assert model.graph.node[0].op_type == "StreamingConcat" assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow" if exec_mode == "cppsim": + model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareCppSim()) model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) diff --git a/tests/fpgadataflow/test_fpgadataflow_lookup.py b/tests/fpgadataflow/test_fpgadataflow_lookup.py index da4204c81a..3164f2b4a6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_lookup.py +++ b/tests/fpgadataflow/test_fpgadataflow_lookup.py @@ -122,6 +122,7 @@ def test_fpgadataflow_lookup(edt, embedding_cfg, exec_mode): assert model.graph.node[0].input[1] == ename assert model.graph.node[0].output[0] == oname if exec_mode == "cppsim": + model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareCppSim()) model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) From 488dc9fd6ce39bd50bb8053cef7baba67fddf2a7 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:22:28 +0100 Subject: [PATCH 502/628] remove mention of remote_exec and remote_pynq options Signed-off-by: Fionn O'Donohoe --- src/finn/core/onnx_exec.py | 11 +- src/finn/core/remote_exec.py | 119 ------------------ .../fpgadataflow/make_deployment.py | 1 - .../fpgadataflow/template_driver.py | 2 +- .../test_fpgadataflow_ipstitch.py | 1 - 5 files changed, 3 insertions(+), 131 deletions(-) delete mode 100644 src/finn/core/remote_exec.py diff --git a/src/finn/core/onnx_exec.py b/src/finn/core/onnx_exec.py index 2695113661..daecb59743 100644 --- a/src/finn/core/onnx_exec.py +++ b/src/finn/core/onnx_exec.py @@ -31,7 +31,6 @@ import qonnx.analysis.topology as ta 
from qonnx.core.onnx_exec import execute_onnx as execute_onnx_base -from finn.core.remote_exec import remote_exec from finn.core.rtlsim_exec import rtlsim_exec @@ -51,7 +50,6 @@ def execute_onnx( # check if model has an execution mode set # if None, execute model node using the QONNX-provided execute_onnx impl - # if set to "remote_pynq" execute model on PYNQ board # if set to "rtlsim" execute model using pyverilator model_exec_mode = model.get_metadata_prop("exec_mode") if (model_exec_mode is None) or (model_exec_mode == ""): @@ -91,22 +89,17 @@ def execute_onnx( # check if model has an execution mode set # if None, execute model node by node using execute_node() - # if set to "remote_pynq" execute model on PYNQ board # if set to "rtlsim" execute model using pyverilator model_exec_mode = model.get_metadata_prop("exec_mode") if (model_exec_mode is None) or (model_exec_mode == ""): return execute_onnx_base() - elif model_exec_mode == "remote_pynq": - # use remote exec metadata built into model to execute on a remote PYNQ - remote_exec(model, execution_context) elif model_exec_mode == "rtlsim": # use stitched IP for rtlsim rtlsim_exec(model, execution_context) else: raise Exception( - """Metadata property "exec_mode" is set to an unknown value. - Can be left unset or has to be set to "remote_pynq" for remote execution - on PYNQ board or "rtlsim" for execution using pyverilator!""" + """Metadata property "exec_mode" is set to an unknown value. Can be left + unset or has to be set to "rtlsim" for execution using pyverilator!""" ) if return_full_exec_context: diff --git a/src/finn/core/remote_exec.py b/src/finn/core/remote_exec.py deleted file mode 100644 index f487b48f86..0000000000 --- a/src/finn/core/remote_exec.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) 2020 Xilinx, Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of Xilinx nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import numpy as np -import os -import subprocess -import warnings - - -def remote_exec(model, execution_context): - """Executes the given model remotely on the pynq board. 
The metadata properties - related to the pynq board have to be set. The execution context contains the - input values.""" - # TODO fix for multi input-output - pynq_ip = model.get_metadata_prop("pynq_ip") - pynq_port = int(model.get_metadata_prop("pynq_port")) - pynq_username = model.get_metadata_prop("pynq_username") - pynq_password = model.get_metadata_prop("pynq_password") - pynq_target_dir = model.get_metadata_prop("pynq_target_dir") - deployment_dir = model.get_metadata_prop("pynq_deploy_dir") - platform = model.get_metadata_prop("platform") - assert platform in ["alveo", "zynq-iodma"] - bitfile = model.get_metadata_prop("bitfile") - bitfile = os.path.basename(bitfile) - if pynq_password == "": - if "zynq" in platform: - raise Exception("PYNQ board remote exec needs password for sudo") - else: - local_prefix = "" # assume we are using an ssh key - warnings.warn("Empty password, make sure you've set up an ssh key") - else: - local_prefix = "sshpass -p %s " % pynq_password - - if platform == "alveo": - # Alveo can run without sudo - remote_prefix = "" - elif "zynq" in platform: - # PYNQ Zynq boards need to execute with sudo - remote_prefix = "echo %s | sudo -S " % pynq_password - - inp = execution_context[model.graph.input[0].name] - # make copy of array before saving it - inp = inp.copy() - batchsize = inp.shape[0] - np.save(os.path.join(deployment_dir, "input.npy"), inp) - # extracting last folder of absolute path (deployment_dir) - deployment_folder = os.path.basename(os.path.normpath(deployment_dir)) - # copy input to PYNQ board - cmd = local_prefix + "scp -P{} -r {}/input.npy {}@{}:{}/{}".format( - pynq_port, - deployment_dir, - pynq_username, - pynq_ip, - pynq_target_dir, - deployment_folder, - ) - bash_command = ["/bin/bash", "-c", cmd] - process_scp_in = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_scp_in.communicate() - - # use platform attribute for correct remote execution - if platform == "alveo": - remote_cmd = "bash -ic 'bash alveo_run.sh execute %d' \"" % batchsize - else: - remote_cmd = ( - "python3.6 driver.py --exec_mode=execute --batchsize={} " - "--bitfile={} --inputfile=input.npy --outputfile=output.npy " - '--platform={} "' - ).format(batchsize, bitfile, platform) - cmd = ( - local_prefix + 'ssh {}@{} -p {} "cd {}/{}; ' + remote_prefix + remote_cmd - ).format(pynq_username, pynq_ip, pynq_port, pynq_target_dir, deployment_folder) - bash_command = ["/bin/bash", "-c", cmd] - process_exec_accel = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_exec_accel.communicate() - # remove stale output file from local dir, if any - try: - os.remove("{}/output.npy".format(deployment_dir)) - except FileNotFoundError: - pass - # copy generated output to local - cmd = local_prefix + "scp -P{} {}@{}:{}/{}/output.npy {}".format( - pynq_port, - pynq_username, - pynq_ip, - pynq_target_dir, - deployment_folder, - deployment_dir, - ) - bash_command = ["/bin/bash", "-c", cmd] - process_scp_out = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_scp_out.communicate() - outp = np.load("{}/output.npy".format(deployment_dir)) - execution_context[model.graph.output[0].name] = outp diff --git a/src/finn/transformation/fpgadataflow/make_deployment.py b/src/finn/transformation/fpgadataflow/make_deployment.py index d4684dc83c..aa83b600cb 100644 --- a/src/finn/transformation/fpgadataflow/make_deployment.py +++ b/src/finn/transformation/fpgadataflow/make_deployment.py @@ -96,7 +96,6 @@ def apply(self, model): pynq_driver_dir = 
model.get_metadata_prop("pynq_driver_dir") copy_tree(pynq_driver_dir, deployment_dir) model.set_metadata_prop("pynq_deploy_dir", deployment_dir) - model.set_metadata_prop("exec_mode", "remote_pynq") # create target directory on PYNQ board cmd = 'ssh {}@{} -p {} "mkdir -p {}"'.format( diff --git a/src/finn/transformation/fpgadataflow/template_driver.py b/src/finn/transformation/fpgadataflow/template_driver.py index 05ee6ad920..158825191e 100644 --- a/src/finn/transformation/fpgadataflow/template_driver.py +++ b/src/finn/transformation/fpgadataflow/template_driver.py @@ -135,5 +135,5 @@ file.close() print("Results written to nw_metrics.txt") else: - raise Exception("Exec mode has to be set to remote_pynq or throughput_test") + raise Exception("Exec mode has to be set to execute or throughput_test") """ diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index b220338e69..7e4069f5c4 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -206,7 +206,6 @@ def test_fpgadataflow_ipstitch_gen_model(mem_mode): assert sdp_node.__class__.__name__ == "StreamingDataflowPartition" assert os.path.isfile(sdp_node.get_nodeattr("model")) model = load_test_checkpoint_or_skip(sdp_node.get_nodeattr("model")) - model.set_metadata_prop("exec_mode", "remote_pynq") model = model.transform(InsertTLastMarker()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, 5)) From 63ee3261f205a72d93183356b758031a2d6e8296 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:25:58 +0100 Subject: [PATCH 503/628] remove DeployToPYNQ() class and any test affected by the removal Signed-off-by: Fionn O'Donohoe --- src/finn/core/throughput_test.py | 79 ----------- .../fpgadataflow/make_deployment.py | 115 ---------------- src/finn/util/gdrive.py | 65 --------- tests/end2end/test_end2end_bnn_pynq.py | 123 +----------------- 4 files changed, 1 insertion(+), 381 deletions(-) delete mode 100644 src/finn/transformation/fpgadataflow/make_deployment.py delete mode 100644 src/finn/util/gdrive.py diff --git a/src/finn/core/throughput_test.py b/src/finn/core/throughput_test.py index 3533fd1339..08633be33b 100644 --- a/src/finn/core/throughput_test.py +++ b/src/finn/core/throughput_test.py @@ -28,90 +28,11 @@ import numpy as np import os -import subprocess -import warnings from qonnx.util.basic import gen_finn_dt_tensor from finn.core.rtlsim_exec import rtlsim_exec -def throughput_test_remote(model, batchsize=1000, timeout=None): - """Runs the throughput test for the given model remotely on the pynq board. - The metadata properties related to the pynq board have to be set. - Additionally a timeout for the SSH communication can be set. - Returns a dictionary with results of the throughput test. 
Returns None - if the test fails.""" - - pynq_ip = model.get_metadata_prop("pynq_ip") - pynq_port = int(model.get_metadata_prop("pynq_port")) - pynq_username = model.get_metadata_prop("pynq_username") - pynq_password = model.get_metadata_prop("pynq_password") - pynq_target_dir = model.get_metadata_prop("pynq_target_dir") - deployment_dir = model.get_metadata_prop("pynq_deploy_dir") - # extracting last folder of absolute path (deployment_dir) - deployment_folder = os.path.basename(os.path.normpath(deployment_dir)) - platform = model.get_metadata_prop("platform") - assert platform in ["alveo", "zynq-iodma"] - bitfile = model.get_metadata_prop("bitfile") - bitfile = os.path.basename(bitfile) - if pynq_password == "": - if "zynq" in platform: - raise Exception("PYNQ board remote exec needs password for sudo") - else: - local_prefix = "" # assume we are using an ssh key - warnings.warn("Empty password, make sure you've set up an ssh key") - else: - local_prefix = "sshpass -p %s " % pynq_password - - if platform == "alveo": - # Alveo can run without sudo but needs correct environment - remote_prefix = "conda activate finn-pynq-alveo; " - elif "zynq" in platform: - # PYNQ Zynq boards need to execute with sudo - remote_prefix = "echo %s | sudo -S " % pynq_password - - # use platform attribute for correct remote execution - if platform == "alveo": - remote_cmd = "bash -ic 'bash alveo_run.sh throughput_test %d' \"" % batchsize - else: - remote_cmd = ( - "python3.6 driver.py --exec_mode=throughput_test --batchsize={} " - "--bitfile={} --inputfile=input.npy --outputfile=output.npy " - '--platform={} "' - ).format(batchsize, bitfile, platform) - cmd = ( - local_prefix + 'ssh {}@{} -p {} "cd {}/{}; ' + remote_prefix + remote_cmd - ).format(pynq_username, pynq_ip, pynq_port, pynq_target_dir, deployment_folder) - bash_command = ["/bin/bash", "-c", cmd] - process_throughput_test = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_throughput_test.communicate(timeout=timeout) - - # remove any pre-existing metrics file - try: - os.remove("{}/nw_metrics.txt".format(deployment_dir)) - except FileNotFoundError: - pass - - cmd = local_prefix + "scp -P{} {}@{}:{}/{}/nw_metrics.txt {}".format( - pynq_port, - pynq_username, - pynq_ip, - pynq_target_dir, - deployment_folder, - deployment_dir, - ) - bash_command = ["/bin/bash", "-c", cmd] - process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_compile.communicate(timeout=timeout) - - try: - with open("{}/nw_metrics.txt".format(deployment_dir), "r") as file: - res = eval(file.read()) - return res - except FileNotFoundError: - return None - - def throughput_test_rtlsim(model, batchsize=100): """Runs a throughput test for the given IP-stitched model. When combined with tracing, useful to determine bottlenecks and required FIFO sizes.""" diff --git a/src/finn/transformation/fpgadataflow/make_deployment.py b/src/finn/transformation/fpgadataflow/make_deployment.py deleted file mode 100644 index aa83b600cb..0000000000 --- a/src/finn/transformation/fpgadataflow/make_deployment.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2020, Xilinx -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. 
-# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os -import subprocess -from distutils.dir_util import copy_tree -from qonnx.transformation.base import Transformation -from shutil import copy - -import finn.transformation.fpgadataflow.templates as templates -from finn.util.basic import make_build_dir - - -class DeployToPYNQ(Transformation): - """Collects all necessary files for deployment and copies them to the PYNQ board. - Expects information about PYNQ board to make scp possible: - - IP address of board, username and password for board and target directory where - the files are stored on the board""" - - def __init__(self, ip, port, username, password, target_dir): - super().__init__() - self.ip = ip - self.port = port - self.username = username - self.password = password - self.target_dir = target_dir - - def apply(self, model): - # set metadata properties accordingly to user input specifications - model.set_metadata_prop("pynq_ip", self.ip) - model.set_metadata_prop("pynq_port", str(self.port)) - model.set_metadata_prop("pynq_username", self.username) - model.set_metadata_prop("pynq_password", self.password) - model.set_metadata_prop("pynq_target_dir", self.target_dir) - - # create directory for deployment files - deployment_dir = make_build_dir(prefix="pynq_deployment_") - model.set_metadata_prop("pynq_deployment_dir", deployment_dir) - - # get and copy necessary files - # .bit and .hwh file - bitfile = model.get_metadata_prop("bitfile") - hwh_file = model.get_metadata_prop("hw_handoff") - deploy_files = [bitfile, hwh_file] - - for dfile in deploy_files: - if dfile is not None: - copy(dfile, deployment_dir) - - # helper script for Alveo - platform = model.get_metadata_prop("platform") - if platform == "alveo": - alveo_run_sh = templates.alveo_run_sh_template - fill_dict = { - "$REMOTE_DEPLOY_DIR$": self.target_dir - + "/" - + os.path.basename(deployment_dir), - "$CONDA_ENV_NAME$": "finn-pynq-alveo", - "$REMOTE_XRT$": os.environ["XILINX_XRT"], - "$REMOTE_PLATFORM_REPO_PATHS$": os.environ["PLATFORM_REPO_PATHS"], - "$BITFILE$": os.path.basename(bitfile), - } - for key, value in fill_dict.items(): - alveo_run_sh = alveo_run_sh.replace(key, value) - alveo_run_sh_path = deployment_dir + "/alveo_run.sh" - with open(alveo_run_sh_path, "w") as f: - f.write(alveo_run_sh) - - # driver.py and python libraries - pynq_driver_dir = 
model.get_metadata_prop("pynq_driver_dir") - copy_tree(pynq_driver_dir, deployment_dir) - model.set_metadata_prop("pynq_deploy_dir", deployment_dir) - - # create target directory on PYNQ board - cmd = 'ssh {}@{} -p {} "mkdir -p {}"'.format( - self.username, self.ip, self.port, self.target_dir - ) - bash_command = ["/bin/bash", "-c", cmd] - process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_compile.communicate() - # copy directory to PYNQ board using scp - cmd = "scp -P{} -r {} {}@{}:{}".format( - self.port, deployment_dir, self.username, self.ip, self.target_dir - ) - bash_command = ["/bin/bash", "-c", cmd] - process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE) - process_compile.communicate() - - return (model, False) diff --git a/src/finn/util/gdrive.py b/src/finn/util/gdrive.py deleted file mode 100644 index d525437300..0000000000 --- a/src/finn/util/gdrive.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2020, Xilinx -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import gspread -import os -import warnings -from datetime import datetime - -from finn.util.basic import get_finn_root - - -def upload_to_end2end_dashboard(data_dict): - gdrive_key = get_finn_root() + "/gdrive-key/service_account.json" - if not os.path.isfile(gdrive_key): - warnings.warn("Google Drive key not found, skipping dashboard upload") - return - gc = gspread.service_account(filename=gdrive_key) - spreadsheet = gc.open("finn-end2end-dashboard") - worksheet = spreadsheet.get_worksheet(0) - keys = list(data_dict.keys()) - vals = list(data_dict.values()) - # check against existing header - existing_keys = worksheet.row_values(1) - if not set(existing_keys).issuperset(set(keys)): - # create new worksheet - dtstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - worksheet = spreadsheet.add_worksheet( - title="Dashboard " + dtstr, rows=10, cols=len(keys), index=0 - ) - # create header row with keys - worksheet.update("A1:1", [keys]) - # freeze and make header bold - worksheet.freeze(rows=1) - worksheet.format("A1:1", {"textFormat": {"bold": True}}) - # insert values into new row at appropriate positions - worksheet.insert_row([], index=2) - for i in range(len(keys)): - colind = existing_keys.index(keys[i]) - col_letter = chr(ord("A") + colind) - worksheet.update("%s2" % col_letter, vals[i]) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 62b76d2f13..89b434b577 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -59,13 +59,12 @@ from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.transformation.merge_onnx_models import MergeONNXModels from qonnx.util.cleanup import cleanup as qonnx_cleanup -from scipy.stats import linregress import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb from finn.analysis.fpgadataflow.dataflow_performance import dataflow_performance from finn.core.onnx_exec import execute_onnx -from finn.core.throughput_test import throughput_test_remote, throughput_test_rtlsim +from finn.core.throughput_test import throughput_test_rtlsim from finn.transformation.fpgadataflow.annotate_cycles import AnnotateCycles from finn.transformation.fpgadataflow.annotate_resources import AnnotateResources from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim @@ -75,7 +74,6 @@ from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP from finn.transformation.fpgadataflow.insert_dwc import InsertDWC -from finn.transformation.fpgadataflow.make_deployment import DeployToPYNQ from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver from finn.transformation.fpgadataflow.minimize_accumulator_width import ( MinimizeAccumulatorWidth, @@ -95,7 +93,6 @@ MoveScalarLinearPastInvariants, ) from finn.util.basic import get_finn_root -from finn.util.gdrive import upload_to_end2end_dashboard from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -715,121 +712,3 @@ def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind): model.save( get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind) ) - - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_deploy(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "driver_" + kind 
- ) - model = load_test_checkpoint_or_skip(prev_chkpt_name) - cfg = get_build_env(kind, target_clk_ns) - if cfg["ip"] == "": - pytest.skip("PYNQ board IP address not specified") - model = model.transform( - DeployToPYNQ( - cfg["ip"], - cfg["port"], - cfg["username"], - cfg["password"], - cfg["target_dir"], - ) - ) - # save the model to be able to link it to the parent - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "deploy_" + kind) - ) - - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_run_on_hw(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "deploy_" + kind - ) - model = load_test_checkpoint_or_skip(prev_chkpt_name) # NOQA - cfg = get_build_env(kind, target_clk_ns) - if cfg["ip"] == "": - pytest.skip("PYNQ board IP address not specified") - (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( - topology, wbits, abits, return_topk=1 - ) - parent_model = load_test_checkpoint_or_skip( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") - ) - iname = parent_model.graph.input[0].name - oname = parent_model.graph.output[0].name - sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] - sdp_node = getCustomOp(sdp_node) - sdp_node.set_nodeattr("model", prev_chkpt_name) - ret = execute_onnx(parent_model, {iname: input_tensor_npy}, True) - y = ret[oname] - assert np.isclose(y, output_tensor_npy).all() - - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_throughput_hw(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "deploy_" + kind - ) - end2end_example = "%s_w%da%d_%s" % (topology, wbits, abits, kind) - model = load_test_checkpoint_or_skip(prev_chkpt_name) # NOQA - cfg = get_build_env(kind, target_clk_ns) - if cfg["ip"] == "": - pytest.skip("PYNQ board IP address not specified") - ret = dict() - # try a range of batch sizes, some may fail due to insufficient DMA - # buffers - bsize_range_in = [8**i for i in range(5)] - bsize_range = [] - for bsize in bsize_range_in: - res = throughput_test_remote(model, bsize) - if res is not None: - ret[bsize] = res - bsize_range.append(bsize) - else: - # assume we reached largest possible N - break - y = [ret[key]["runtime[ms]"] for key in bsize_range] - lrret = linregress(bsize_range, y) - ret_str = "" - ret_str += "\n" + "%s Throughput Test Results" % end2end_example - ret_str += "\n" + "-----------------------------" - ret_str += "\n" + "From linear regression:" - ret_str += "\n" + "Invocation overhead: %f ms" % lrret.intercept - ret_str += "\n" + "Time per sample: %f ms" % lrret.slope - ret_str += "\n" + "Raw data:" - - ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} {:<16}".format( - "N", "runtime[ms]", "fclk[mhz]", "fps", "DRAM rd[MB/s]", "DRAM wr[MB/s]" - ) - for k in bsize_range: - v = ret[k] - ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} {:<16}".format( - k, - np.round(v["runtime[ms]"], 4), - v["fclk[mhz]"], - np.round(v["throughput[images/s]"], 2), - np.round(v["DRAM_in_bandwidth[MB/s]"], 2), - np.round(v["DRAM_out_bandwidth[MB/s]"], 2), - ) - ret_str += "\n" + "-----------------------------" - warnings.warn(ret_str) - largest_bsize = bsize_range[-1] - update_dashboard_data( - topology, wbits, abits, "fclk[mhz]", ret[largest_bsize]["fclk[mhz]"] - ) - update_dashboard_data( - topology, - wbits, - abits, - "throughput[images/s]", - 
ret[largest_bsize]["throughput[images/s]"], - ) - - def test_upload_results_to_dashboard(self, topology, wbits, abits, QONNX_export): - # ToDo: Extend the dashboard to also upload QONNX exported models? - if QONNX_export: - pytest.skip("Dashboard data upload is disabled for QONNX exported models.") - else: - dashboard_data = get_dashboard_data(topology, wbits, abits) - if len(dashboard_data.keys()) > 0: - upload_to_end2end_dashboard(dashboard_data) - else: - pytest.skip("No data to upload to dashboard") From fc2fa437186cf0e07c6a271b8753ec747ab300b4 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:28:20 +0100 Subject: [PATCH 504/628] remove standalone board access tests Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_access_board.py | 56 ------------------ tests/end2end/test_end2end_cybsec_mlp.py | 61 -------------------- tests/end2end/test_ext_weights.py | 66 ---------------------- 3 files changed, 183 deletions(-) delete mode 100644 tests/end2end/test_end2end_access_board.py diff --git a/tests/end2end/test_end2end_access_board.py b/tests/end2end/test_end2end_access_board.py deleted file mode 100644 index ba3c49195b..0000000000 --- a/tests/end2end/test_end2end_access_board.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2021, Xilinx -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of FINN nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import pytest - -import subprocess - -from finn.util.test import get_build_env - - -@pytest.mark.board -@pytest.mark.end2end -def test_end2end_access_board(): - build_env = get_build_env("zynq", 5) - if build_env["ip"] == "": - pytest.skip("PYNQ board IP address not specified") - remote_cmd_base = [ - "ssh", - "-o", - "PreferredAuthentications=publickey", - "-o", - "PasswordAuthentication=no", - "%s@%s" % (build_env["username"], build_env["ip"]), - ] - test_text = "BoardIsAccessible" - touch_cmd = remote_cmd_base + ["echo %s" % test_text] - verif_res = subprocess.run( - touch_cmd, stdout=subprocess.PIPE, universal_newlines=True - ) - assert verif_res.returncode == 0 - assert verif_res.stdout.split("\n")[0] == test_text diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index d2a4d0287f..5e402bdeb4 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -34,10 +34,8 @@ import numpy as np import os import shutil -import subprocess import torch import torch.nn as nn -import wget from brevitas.core.quant import QuantType from brevitas.export import export_finn_onnx, export_qonnx from brevitas.nn import QuantIdentity, QuantLinear, QuantReLU @@ -225,62 +223,3 @@ def test_end2end_cybsec_mlp_build(QONNX_export): assert est_res_dict["total"]["LUT"] == 7904.0 assert est_res_dict["total"]["BRAM_18K"] == 36.0 shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build", QONNX_export)) - - -@pytest.mark.end2end -@pytest.mark.xfail -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_end2end_cybsec_mlp_run_on_hw(QONNX_export): - build_env = get_build_env(build_kind, target_clk_ns) - assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/") - deploy_dir = get_checkpoint_name("build", QONNX_export) - if not os.path.isdir(deploy_dir): - pytest.skip(deploy_dir + " not found from previous test step, skipping") - driver_dir = deploy_dir + "/driver" - assert os.path.isdir(driver_dir) - # put all assets into driver dir - shutil.copy(assets_dir + "/validate-unsw-nb15.py", driver_dir) - # put a copy of binarized dataset into driver dir - dataset_url = ( - "https://zenodo.org/record/4519767/files/unsw_nb15_binarized.npz?download=1" - ) - dataset_local = driver_dir + "/unsw_nb15_binarized.npz" - if not os.path.isfile(dataset_local): - wget.download(dataset_url, out=dataset_local) - assert os.path.isfile(dataset_local) - # create a shell script for running validation: 10 batches x 10 imgs - with open(driver_dir + "/validate.sh", "w") as f: - f.write( - """#!/bin/bash -cd %s/driver -echo %s | sudo -S python3.6 validate-unsw-nb15.py --batchsize=10 --limit_batches=10 - """ - % ( - build_env["target_dir"] + "/end2end_cybsecmlp_build", - build_env["password"], - ) - ) - # set up rsync command - remote_target = "%s@%s:%s" % ( - build_env["username"], - build_env["ip"], - build_env["target_dir"], - ) - rsync_res = subprocess.run(["rsync", "-avz", deploy_dir, remote_target]) - assert rsync_res.returncode == 0 - remote_verif_cmd = [ - "ssh", - "%s@%s" % (build_env["username"], build_env["ip"]), - "sh", - build_env["target_dir"] + "/end2end_cybsecmlp_build/driver/validate.sh", - ] - verif_res = subprocess.run( - remote_verif_cmd, - stdout=subprocess.PIPE, - universal_newlines=True, - input=build_env["password"], - ) - assert verif_res.returncode == 0 - log_output = verif_res.stdout.split("\n") - assert log_output[-3] == "batch 10 / 10 : total OK 93 NOK 7" - assert log_output[-2] == "Final accuracy: 
93.000000" diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index 0a92c74a38..bef2e0ffa7 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -110,69 +110,3 @@ def test_end2end_ext_weights_build(): if os.path.isdir(get_checkpoint_name("build")): shutil.rmtree(get_checkpoint_name("build")) shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build")) - - -@pytest.mark.board -@pytest.mark.end2end -@pytest.mark.xfail -def test_end2end_ext_weights_dataset(): - # make sure we have local copies of mnist dataset files - subprocess.check_output(["mkdir", "-p", mnist_local]) - for f in mnist_files: - if not os.path.isfile(mnist_local + "/" + f): - wget.download(mnist_url + "/" + f, out=mnist_local + "/" + f) - assert os.path.isfile(mnist_local + "/" + f) - # rsync to board - build_env = get_build_env(build_kind, target_clk_ns) - mnist_target = "%s@%s:%s" % (build_env["username"], build_env["ip"], "/tmp/") - - rsync_dataset_cmd = ["rsync", "-rv", mnist_local + "/", mnist_target] - subprocess.check_output(rsync_dataset_cmd) - - -@pytest.mark.end2end -@pytest.mark.xfail -def test_end2end_ext_weights_run_on_hw(): - build_env = get_build_env(build_kind, target_clk_ns) - deploy_dir = get_checkpoint_name("build") - if not os.path.isdir(deploy_dir): - pytest.skip(deploy_dir + " not found from previous test step, skipping") - driver_dir = deploy_dir + "/driver" - assert os.path.isdir(driver_dir) - # create a shell script for running validation: 10 batches x 10 imgs - with open(driver_dir + "/validate.sh", "w") as f: - f.write( - """#!/bin/bash -cd %s/driver -echo %s | sudo -S python3.6 validate.py --dataset mnist --bitfile %s - """ - % ( - build_env["target_dir"] + "/end2end_ext_weights_build", - build_env["password"], - "../bitfile/finn-accel.bit", - ) - ) - # set up rsync command - remote_target = "%s@%s:%s" % ( - build_env["username"], - build_env["ip"], - build_env["target_dir"], - ) - rsync_res = subprocess.run(["rsync", "-avz", deploy_dir, remote_target]) - assert rsync_res.returncode == 0 - remote_verif_cmd = [ - "ssh", - "%s@%s" % (build_env["username"], build_env["ip"]), - "sh", - build_env["target_dir"] + "/end2end_ext_weights_build/driver/validate.sh", - ] - verif_res = subprocess.run( - remote_verif_cmd, - stdout=subprocess.PIPE, - universal_newlines=True, - input=build_env["password"], - ) - assert verif_res.returncode == 0 - log_output = verif_res.stdout.split("\n") - assert log_output[-3] == "batch 100 / 100 : total OK 9296 NOK 704" - assert log_output[-2] == "Final accuracy: 92.960000" From e40e59dd2b890cae4126d8b4457ff831e90fe6e0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:29:12 +0100 Subject: [PATCH 505/628] remove now unused template: alveo_run_sh_template Signed-off-by: Fionn O'Donohoe --- .../transformation/fpgadataflow/templates.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/src/finn/transformation/fpgadataflow/templates.py b/src/finn/transformation/fpgadataflow/templates.py index f52bad0ffb..ce1545b5be 100644 --- a/src/finn/transformation/fpgadataflow/templates.py +++ b/src/finn/transformation/fpgadataflow/templates.py @@ -242,22 +242,6 @@ close_project """ -alveo_run_sh_template = """#!/bin/bash - -if [ "$#" -ne 2 ]; then - echo "Usage: alveo_run.sh " - exit -1 -fi - -cd $REMOTE_DEPLOY_DIR$ -eval "$(conda shell.bash hook)" -conda activate $CONDA_ENV_NAME$ -source $REMOTE_XRT$/setup.sh -export PLATFORM_REPO_PATHS=$REMOTE_PLATFORM_REPO_PATHS$ -python3.6 
driver.py --exec_mode=$1 --batchsize=$2 --bitfile=$BITFILE$ \ - --inputfile=input.npy --outputfile=output.npy --platform=alveo -""" - vitis_gen_xml_report_tcl_template = """ open_project $VITIS_PROJ_PATH$/_x/link/vivado/vpl/prj/prj.xpr open_run impl_1 From bcc7ace9d2b522c214fcce619ee82a05fc881ae2 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:29:43 +0100 Subject: [PATCH 506/628] remove unused build environment parameters Signed-off-by: Fionn O'Donohoe --- src/finn/util/test.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/finn/util/test.py b/src/finn/util/test.py index bd8bde2820..4250079ef3 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -114,25 +114,14 @@ def get_build_env(kind, target_clk_ns): if kind == "zynq": ret["board"] = os.getenv("PYNQ_BOARD", default="Pynq-Z1") ret["part"] = pynq_part_map[ret["board"]] - ret["ip"] = os.getenv("PYNQ_IP", "") - ret["username"] = os.getenv("PYNQ_USERNAME", "xilinx") - ret["password"] = os.getenv("PYNQ_PASSWORD", "xilinx") - ret["port"] = os.getenv("PYNQ_PORT", 22) - ret["target_dir"] = os.getenv("PYNQ_TARGET_DIR", "/home/xilinx/finn") ret["build_fxn"] = ZynqBuild(ret["board"], target_clk_ns) elif kind == "alveo": ret["board"] = os.getenv("ALVEO_BOARD", default="U250") ret["part"] = alveo_part_map[ret["board"]] - ret["platform"] = alveo_default_platform[ret["board"]] - ret["ip"] = os.getenv("ALVEO_IP", "") - ret["username"] = os.getenv("ALVEO_USERNAME", "") - ret["password"] = os.getenv("ALVEO_PASSWORD", "") - ret["port"] = os.getenv("ALVEO_PORT", 22) - ret["target_dir"] = os.getenv("ALVEO_TARGET_DIR", "/tmp/finn_alveo_deploy") ret["build_fxn"] = VitisBuild( ret["part"], target_clk_ns, - ret["platform"], + alveo_default_platform[ret["board"]], strategy=VitisOptStrategy.BUILD_SPEED, ) else: From 9ddc555ea851549ba90ac6074e6e33741a2bf96f Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:30:58 +0100 Subject: [PATCH 507/628] update RST files based on remote_exec removal Signed-off-by: Fionn O'Donohoe --- docs/finn/getting_started.rst | 3 --- docs/finn/source_code/finn.core.rst | 8 -------- 2 files changed, 11 deletions(-) diff --git a/docs/finn/getting_started.rst b/docs/finn/getting_started.rst index 9b3111b70e..c575ca7e3b 100644 --- a/docs/finn/getting_started.rst +++ b/docs/finn/getting_started.rst @@ -107,9 +107,6 @@ These are summarized below: * (optional) ``LOCALHOST_URL`` (default localhost) sets the base URL for accessing e.g. Netron from inside the container. Useful when running FINN remotely. * (optional) ``NETRON_PORT`` (default 8081) changes the port for Netron inside Docker * (optional) ``PYNQ_BOARD`` or ``ALVEO_BOARD`` specifies the type of PYNQ/Alveo board used (see "supported hardware" below) for the test suite -* (optional) ``PYNQ_IP`` and ``PYNQ_PORT`` (or ``ALVEO_IP`` and ``ALVEO_PORT``) specify ip address and port number to access the PYNQ board / Alveo target -* (optional) ``PYNQ_USERNAME`` and ``PYNQ_PASSWORD`` (or ``ALVEO_USERNAME`` and ``ALVEO_PASSWORD``) specify the PYNQ board / Alveo host access credentials for the test suite. For PYNQ, password is always needed to run as sudo. For Alveo, you can leave the password empty and place your ssh private key in the ``finn/ssh_keys`` folder to use keypair authentication. 
-* (optional) ``PYNQ_TARGET_DIR`` (or ``ALVEO_TARGET_DIR``) specifies the target dir on the PYNQ board / Alveo host for the test suite * (optional) ``IMAGENET_VAL_PATH`` specifies the path to the ImageNet validation directory for tests. * (optional) ``FINN_DOCKER_PREBUILT`` (default 0) if set to 1 then skip Docker image building and use the image tagged with ``FINN_DOCKER_TAG``. * (optional) ``FINN_DOCKER_TAG`` (autogenerated) specifies the Docker image tag to use. diff --git a/docs/finn/source_code/finn.core.rst b/docs/finn/source_code/finn.core.rst index afa1ecffa0..28cb47eaf7 100644 --- a/docs/finn/source_code/finn.core.rst +++ b/docs/finn/source_code/finn.core.rst @@ -54,14 +54,6 @@ finn.core.onnx\_exec :undoc-members: :show-inheritance: -finn.core.remote\_exec ------------------------------ - -.. automodule:: finn.core.remote_exec - :members: - :undoc-members: - :show-inheritance: - finn.core.rtlsim\_exec ----------------------------- From f593d53762d7f2a3687dd9b525a0a5dbd8b8bf19 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 12 Jun 2023 15:36:39 +0100 Subject: [PATCH 508/628] update RST files based on gdrive removal Signed-off-by: Fionn O'Donohoe --- docs/finn/source_code/finn.util.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docs/finn/source_code/finn.util.rst b/docs/finn/source_code/finn.util.rst index 7ba3b252ab..aebd0604f4 100644 --- a/docs/finn/source_code/finn.util.rst +++ b/docs/finn/source_code/finn.util.rst @@ -99,14 +99,6 @@ finn.util.fpgadataflow :undoc-members: :show-inheritance: -finn.util.gdrive ------------------------------ - -.. automodule:: finn.util.gdrive - :members: - :undoc-members: - :show-inheritance: - finn.util.hls --------------- From 4ccff628d5cadc8441f3fce6fa018b461cb5bd2d Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 14 Jun 2023 15:15:37 +0100 Subject: [PATCH 509/628] remove update_dashboard util functions as uploading results was previously removed Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 38 -------------------------- 1 file changed, 38 deletions(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 89b434b577..27aaa1986d 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -34,13 +34,10 @@ # import pytorch before onnx, so we make sure to import onnx first import onnx # NOQA import os -import subprocess import torch import warnings from brevitas.export import export_finn_onnx, export_qonnx -from collections import OrderedDict from dataset_loading import cifar, mnist -from datetime import datetime from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -119,24 +116,6 @@ def get_checkpoint_name(topology, wbits, abits, QONNX_export, step): ) -def get_dashboard_data(topology, wbits, abits): - stats_file = build_dir + "/end2end_%s_w%da%d.txt" % (topology, wbits, abits) - stats_dict = OrderedDict() - if os.path.isfile(stats_file): - with open(stats_file, "r") as f: - stats_dict_txt = f.read() - stats_dict = eval(stats_dict_txt) - return stats_dict - - -def update_dashboard_data(topology, wbits, abits, key, val): - stats_dict = get_dashboard_data(topology, wbits, abits) - stats_dict[key] = val - stats_file = build_dir + "/end2end_%s_w%da%d.txt" % (topology, wbits, abits) - with open(stats_file, "w") as f: - f.write(str(stats_dict)) - - def fold_tfc(model): fc_layers = 
model.get_nodes_by_op_type("MatrixVectorActivation") # (PE, SIMD, ramstyle) for each layer @@ -332,15 +311,6 @@ def test_export(self, topology, wbits, abits, QONNX_export): model.save(chkpt_name) else: export_finn_onnx(model, torch.randn(ishape), chkpt_name) - nname = "%s_w%da%d" % (topology, wbits, abits) - update_dashboard_data(topology, wbits, abits, "network", nname) - dtstr = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - update_dashboard_data(topology, wbits, abits, "datetime", dtstr) - finn_commit = subprocess.check_output( - ["git", "rev-parse", "HEAD"], cwd=get_finn_root() - ) - finn_commit = finn_commit.decode("utf-8").strip() - update_dashboard_data(topology, wbits, abits, "finn-commit", finn_commit) assert os.path.isfile(chkpt_name) def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): @@ -641,10 +611,6 @@ def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): ret = throughput_test_rtlsim(model, batchsize=batchsize) res_cycles = ret["cycles"] est_cycles = latency + cycles_per_sample_est * batchsize - # warnings.warn("Estimated & rtlsim performance: " + str(perf)) - # for (k, v) in perf.items(): - # update_dashboard_data(topology, wbits, abits, k, v) - update_dashboard_data(topology, wbits, abits, "cycles_rtlsim", latency) assert (abs(res_cycles - est_cycles) / res_cycles) < 0.15 @pytest.mark.slow @@ -688,10 +654,6 @@ def test_build(self, topology, wbits, abits, QONNX_export, kind): cfg = get_build_env(kind, target_clk_ns) model = model.transform(cfg["build_fxn"]) model = model.transform(AnnotateResources("synth")) - synth_dct = eval(model.get_metadata_prop("res_total_top_synth")) - for (k, v) in synth_dct.items(): - update_dashboard_data(topology, wbits, abits, k, v) - update_dashboard_data(topology, wbits, abits, "board", cfg["board"]) model.save( get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) ) From 96efcda45cc7e58848db8cedd4af805924282294 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 21 Jun 2023 13:21:47 +0100 Subject: [PATCH 510/628] [SELU] Add selu to MT transformation + test case --- .../qonnx/qonnx_activation_handlers.py | 126 +++++++++++++----- .../brevitas/test_brevitas_selu_act_export.py | 68 ++++++++++ 2 files changed, 163 insertions(+), 31 deletions(-) create mode 100644 tests/brevitas/test_brevitas_selu_act_export.py diff --git a/src/finn/transformation/qonnx/qonnx_activation_handlers.py b/src/finn/transformation/qonnx/qonnx_activation_handlers.py index 9819086d82..5a5834a1c6 100644 --- a/src/finn/transformation/qonnx/qonnx_activation_handlers.py +++ b/src/finn/transformation/qonnx/qonnx_activation_handlers.py @@ -286,6 +286,7 @@ class QuantReluHandler(QuantActBaseHandler): def valid_predecessor_op_types(self): return [ "Relu", + "Selu", ] def _check_compatibility(self): @@ -293,16 +294,19 @@ def _check_compatibility(self): q_inst = getCustomOp(self._q_node) narrow = q_inst.get_nodeattr("narrow") signed = q_inst.get_nodeattr("signed") - if signed or narrow: - raise ValueError( - "FINN only supports unsigned and non-narrow Quant nodes " - "for Relu activations." - ) if not self._model.get_initializer(self._q_node.input[2]) == 0: raise ValueError( "Only Quant nodes with zero-point == 0 " "are currently supported for ReLu activations." ) + act_node = self._model.find_direct_predecessors(self._q_node) + act_node = act_node[0] + if act_node.op_type == "Relu": + if signed or narrow: + raise ValueError( + "FINN only supports unsigned and non-narrow Quant nodes " + "for Relu activations." 
+ ) elif self._q_node.op_type == "BipolarQuant": return else: @@ -312,7 +316,31 @@ def _calculate_act_bias(self): # No bias allowed for Relu activations, see: https://github.com/Xilinx/ # brevitas/blob/a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/ # export/onnx/finn/handler/act.py#L48 - bias = np.array([0.0], dtype=np_default_dtype) + act_node = self._model.find_direct_predecessors(self._q_node) + act_node = act_node[0] + if act_node.op_type == "Relu": + bias = np.array([0.0], dtype=np_default_dtype) + elif act_node.op_type == "Selu": + # Gather parameters + q_inst = getCustomOp(self._q_node) + if self._q_node.op_type == "Quant": + bit_width = self._model.get_initializer(self._q_node.input[3]) + narrow = q_inst.get_nodeattr("narrow") + elif self._q_node.op_type == "BipolarQuant": + bit_width = 1.0 + else: + raise RuntimeError("Got an unexpected quantizer node type") + # Calculate bias, see: https://github.com/Xilinx/brevitas/blob/ + # a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/export/ + # onnx/finn/handler/act.py#L64 + if bit_width == 1.0: + bias = np.array([-0.5], dtype=np_default_dtype) + else: + if narrow: + min_non_scaled_val = -(2 ** (bit_width - 1) - 1) + else: + min_non_scaled_val = -(2 ** (bit_width - 1)) + bias = np.array([min_non_scaled_val], dtype=np_default_dtype) return bias def _calculate_thresholds(self): @@ -326,30 +354,66 @@ def _calculate_thresholds(self): quant_scale = self._model.get_initializer(self._q_node.input[1]).astype( np.float32 ) - # q_inst = getCustomOp(self._q_node) - # narrow = q_inst.get_nodeattr("narrow") + act_node = self._model.find_direct_predecessors(self._q_node) + act_node = act_node[0] + if act_node.op_type == "Relu": - # Calculate thersholds, see: https://github.com/Xilinx/brevitas/blob/ - # a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/export/ - # onnx/finn/handler/act.py#L21 - num_distinct_values = 2**bit_width - num_thresholds = int(num_distinct_values - 1) - flat_scale = quant_scale.flatten().astype(np.float32) - num_scale_channels = flat_scale.shape[0] - step = np.abs(flat_scale).astype(np.float32) - min_threshold = step / 2 - thresholds = np.empty( - (num_scale_channels, num_thresholds), dtype=np_default_dtype - ) - for c in range(num_scale_channels): - for t in range(num_thresholds): - thresholds[c][t] = min_threshold[c] + step[c] * t + # Calculate thersholds, see: https://github.com/Xilinx/brevitas/blob/ + # a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/export/ + # onnx/finn/handler/act.py#L21 + num_distinct_values = 2**bit_width + num_thresholds = int(num_distinct_values - 1) + flat_scale = quant_scale.flatten().astype(np.float32) + num_scale_channels = flat_scale.shape[0] + step = np.abs(flat_scale).astype(np.float32) + min_threshold = step / 2 + thresholds = np.empty( + (num_scale_channels, num_thresholds), dtype=np_default_dtype + ) + for c in range(num_scale_channels): + for t in range(num_thresholds): + thresholds[c][t] = min_threshold[c] + step[c] * t + + # ToDo: The index 1 needs to be changed to -1 for the channels last format + num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[ + 1 + ] + final_shape = (num_output_channels, num_thresholds) + if thresholds.shape != final_shape: + thresholds = np.broadcast_to(thresholds, final_shape) + elif act_node.op_type == "Selu": + q_inst = getCustomOp(self._q_node) + narrow = q_inst.get_nodeattr("narrow") + if narrow: + num_distinct_values = 2**bit_width - 1 + else: + num_distinct_values = 2**bit_width - # ToDo: The index 1 needs to 
be changed to -1 for the channels last format - num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[1] - final_shape = (num_output_channels, num_thresholds) - if thresholds.shape != final_shape: - thresholds = np.broadcast_to(thresholds, final_shape) + num_thresholds = int(num_distinct_values - 1) + flat_scale = quant_scale.flatten().astype(np.float32) + num_scale_channels = flat_scale.shape[0] + scale = np.abs(flat_scale).astype(np.float32) + half_scale = scale / 2 + # alpha and lambda + # from https://pytorch.org/docs/stable/generated/torch.nn.SELU.html + alpha = 1.6732632423543772848170429916717 + selu_scale = 1.0507009873554804934193349852946 + thresholds = np.empty( + (num_scale_channels, num_thresholds), dtype=np_default_dtype + ) + for c in range(num_scale_channels): + for t in range(num_thresholds): + step = -1.0 + half_scale + scale[c] * t + if step <= 0: + thresholds[c][t] = np.log(step / (alpha * selu_scale) + 1) + else: + thresholds[c][t] = step / selu_scale + num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[ + 1 + ] + final_shape = (num_output_channels, num_thresholds) + if thresholds.shape != final_shape: + thresholds = np.broadcast_to(thresholds, final_shape) return thresholds @@ -371,10 +435,10 @@ def _remove_activation_node(self, multi_threshold_node): "the Quant node must exist." ) act_node = act_node[0] - if not act_node.op_type == "Relu": + if act_node.op_type not in self.valid_predecessor_op_types(): raise RuntimeError( - "The predecesor of the Quant node must be Relu for handling " - "of Relu activations." + "The predecesor of the Quant node must be Relu or Selu for handling " + "of activations." ) # Reroute upstream tensor diff --git a/tests/brevitas/test_brevitas_selu_act_export.py b/tests/brevitas/test_brevitas_selu_act_export.py new file mode 100644 index 0000000000..2f1422e4cd --- /dev/null +++ b/tests/brevitas/test_brevitas_selu_act_export.py @@ -0,0 +1,68 @@ +# Copyright (c) 2023, Advanced Micro Devices, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of Xilinx nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import pytest + +import numpy as np +import onnx # noqa +import os +import torch +from brevitas.export import export_qonnx +from brevitas.nn import QuantIdentity +from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.cleanup import cleanup as qonnx_cleanup + +import finn.core.onnx_exec as oxe +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN + + +@pytest.mark.brevitas_export +@pytest.mark.parametrize("abits", [2, 4, 8]) +@pytest.mark.parametrize("ishape", [(1, 15), (1, 32, 1, 1)]) +@pytest.mark.parametrize("narrow", [True, False]) +def test_brevitas_act_export_selu(abits, ishape, narrow): + export_path = "test_brevitas_selu_act_export_%s.onnx" % str(abits) + b_act = torch.nn.Sequential( + torch.nn.SELU(), QuantIdentity(bit_width=abits, narrow=narrow) + ) + + export_qonnx(b_act, torch.randn(ishape), export_path, opset_version=11) + qonnx_cleanup(export_path, out_file=export_path) + model = ModelWrapper(export_path) + model = model.transform(ConvertQONNXtoFINN()) + + inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) + idict = {model.graph.input[0].name: inp_tensor} + odict = oxe.execute_onnx(model, idict, True) + produced = odict[model.graph.output[0].name] + inp_tensor = torch.from_numpy(inp_tensor).float() + b_act.eval() + expected = b_act.forward(inp_tensor).detach().numpy() + + assert np.isclose(produced, expected, atol=1e-3).all() + os.remove(export_path) From b30a70f68687ef21107028eff799ae12c31d39c8 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 21 Jun 2023 16:12:57 +0100 Subject: [PATCH 511/628] [SELU] Cleanup qonnx handler and SELU export test --- .../qonnx/qonnx_activation_handlers.py | 19 ++++++------------- .../brevitas/test_brevitas_selu_act_export.py | 8 +++++++- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/finn/transformation/qonnx/qonnx_activation_handlers.py b/src/finn/transformation/qonnx/qonnx_activation_handlers.py index 5a5834a1c6..bbe5e1a0e3 100644 --- a/src/finn/transformation/qonnx/qonnx_activation_handlers.py +++ b/src/finn/transformation/qonnx/qonnx_activation_handlers.py @@ -374,13 +374,6 @@ def _calculate_thresholds(self): for t in range(num_thresholds): thresholds[c][t] = min_threshold[c] + step[c] * t - # ToDo: The index 1 needs to be changed to -1 for the channels last format - num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[ - 1 - ] - final_shape = (num_output_channels, num_thresholds) - if thresholds.shape != final_shape: - thresholds = np.broadcast_to(thresholds, final_shape) elif act_node.op_type == "Selu": q_inst = getCustomOp(self._q_node) narrow = q_inst.get_nodeattr("narrow") @@ -408,12 +401,12 @@ def _calculate_thresholds(self): thresholds[c][t] = np.log(step / (alpha * selu_scale) + 1) else: thresholds[c][t] = step / selu_scale - num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[ - 1 - ] - final_shape = (num_output_channels, num_thresholds) - if thresholds.shape != final_shape: - thresholds = np.broadcast_to(thresholds, final_shape) + + # ToDo: The index 1 needs to be changed to -1 for the channels last format + num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[1] + final_shape = (num_output_channels, num_thresholds) + if thresholds.shape != final_shape: + thresholds = np.broadcast_to(thresholds, final_shape) return thresholds diff --git a/tests/brevitas/test_brevitas_selu_act_export.py b/tests/brevitas/test_brevitas_selu_act_export.py index 2f1422e4cd..3f4807c5d7 100644 
--- a/tests/brevitas/test_brevitas_selu_act_export.py +++ b/tests/brevitas/test_brevitas_selu_act_export.py @@ -35,6 +35,7 @@ from brevitas.export import export_qonnx from brevitas.nn import QuantIdentity from qonnx.core.modelwrapper import ModelWrapper +from qonnx.util.basic import get_preferred_onnx_opset from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe @@ -51,7 +52,12 @@ def test_brevitas_act_export_selu(abits, ishape, narrow): torch.nn.SELU(), QuantIdentity(bit_width=abits, narrow=narrow) ) - export_qonnx(b_act, torch.randn(ishape), export_path, opset_version=11) + export_qonnx( + b_act, + torch.randn(ishape), + export_path, + opset_version=get_preferred_onnx_opset(), + ) qonnx_cleanup(export_path, out_file=export_path) model = ModelWrapper(export_path) model = model.transform(ConvertQONNXtoFINN()) From 175e7c60fcfad810162bc4d478831be55750bec2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20B=2E=20Preu=C3=9Fer?= Date: Fri, 23 Jun 2023 12:39:46 +0100 Subject: [PATCH 512/628] Trying a package for replacing the global ifdef'ed declaration. --- finn-rtllib/swg/swg_common.sv | 18 ++------ finn-rtllib/swg/swg_pkg.sv | 41 ++++++++++++++++++ finn-rtllib/swg/swg_template_axilite.v | 38 +++++++++++++--- finn-rtllib/swg/swg_template_default.sv | 2 +- .../swg/swg_template_default_dynamic.sv | 43 ++++++++++++++----- finn-rtllib/swg/swg_template_parallel.sv | 2 +- 6 files changed, 111 insertions(+), 33 deletions(-) create mode 100644 finn-rtllib/swg/swg_pkg.sv diff --git a/finn-rtllib/swg/swg_common.sv b/finn-rtllib/swg/swg_common.sv index d953078abe..f2cdc333ca 100644 --- a/finn-rtllib/swg/swg_common.sv +++ b/finn-rtllib/swg/swg_common.sv @@ -1,5 +1,5 @@ /****************************************************************************** - * Copyright (C) 2022, Advanced Micro Devices, Inc. + * Copyright (C) 2022-2023, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,20 +29,10 @@ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ -`ifndef FINN_SWG_ENUM_DEFINED -`define FINN_SWG_ENUM_DEFINED -typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H -} state_e; -`endif // loop controller used for both, "default" and "parallel", implementation styles -module swg_controller #( +module swg_controller +import swg::*; #( int unsigned LOOP_H_ITERATIONS, int unsigned LOOP_W_ITERATIONS, int unsigned LOOP_KH_ITERATIONS, @@ -62,7 +52,7 @@ module swg_controller #( int TAIL_INCR_H, int TAIL_INCR_LAST, - parameter INNERMOST_STATE + state_e INNERMOST_STATE )( input logic clk, input logic rst_n, diff --git a/finn-rtllib/swg/swg_pkg.sv b/finn-rtllib/swg/swg_pkg.sv new file mode 100644 index 0000000000..1200310aca --- /dev/null +++ b/finn-rtllib/swg/swg_pkg.sv @@ -0,0 +1,41 @@ +/****************************************************************************** + * Copyright (C) 2023, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +package swg; + typedef enum logic [2:0] { + STATE_START, + STATE_LOOP_SIMD, + STATE_LOOP_KW, + STATE_LOOP_KH, + STATE_LOOP_W, + STATE_LOOP_H + } state_e; +endpackage : swg diff --git a/finn-rtllib/swg/swg_template_axilite.v b/finn-rtllib/swg/swg_template_axilite.v index 9479c7f80d..1f39e4440e 100644 --- a/finn-rtllib/swg/swg_template_axilite.v +++ b/finn-rtllib/swg/swg_template_axilite.v @@ -1,8 +1,35 @@ +/****************************************************************************** + * Copyright (C) 2022-2023, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ -`timescale 1 ns / 1 ps - -module $TOP_MODULE_NAME$_axilite # -( +module $TOP_MODULE_NAME$_axilite #( // Users to add parameters here // User parameters ends @@ -12,8 +39,7 @@ module $TOP_MODULE_NAME$_axilite # parameter integer C_S_AXI_DATA_WIDTH = 32, // Width of S_AXI address bus parameter integer C_S_AXI_ADDR_WIDTH = 6 -) -( +)( // Users to add ports here output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg0, output wire [C_S_AXI_DATA_WIDTH-1:0] cfg_reg1, diff --git a/finn-rtllib/swg/swg_template_default.sv b/finn-rtllib/swg/swg_template_default.sv index 4970762172..78a8d0a3b9 100644 --- a/finn-rtllib/swg/swg_template_default.sv +++ b/finn-rtllib/swg/swg_template_default.sv @@ -98,7 +98,7 @@ module $TOP_MODULE_NAME$_impl #( .TAIL_INCR_LAST($TAIL_INCR_LAST$), .INCR_BITWIDTH($INCR_BITWIDTH$), .IS_DEPTHWISE($IS_DEPTHWISE$), - .INNERMOST_STATE($INNERMOST_STATE$) + .INNERMOST_STATE(swg::$INNERMOST_STATE$) ) controller_inst ( .clk(ap_clk), diff --git a/finn-rtllib/swg/swg_template_default_dynamic.sv b/finn-rtllib/swg/swg_template_default_dynamic.sv index c1647ef699..5a6fdda170 100644 --- a/finn-rtllib/swg/swg_template_default_dynamic.sv +++ b/finn-rtllib/swg/swg_template_default_dynamic.sv @@ -1,14 +1,33 @@ -`ifndef FINN_SWG_ENUM_DEFINED -`define FINN_SWG_ENUM_DEFINED -typedef enum logic [2:0] { - STATE_START, - STATE_LOOP_SIMD, - STATE_LOOP_KW, - STATE_LOOP_KH, - STATE_LOOP_W, - STATE_LOOP_H -} state_e; -`endif +/****************************************************************************** + * Copyright (C) 2022-2023, Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ module $TOP_MODULE_NAME$_controller #( int unsigned CNTR_BITWIDTH, @@ -39,6 +58,8 @@ module $TOP_MODULE_NAME$_controller #( input logic [INCR_BITWIDTH-1:0] cfg_incr_tail_last ); + import swg::*; + // (dynamic) configuration registers logic [CNTR_BITWIDTH-1:0] Cfg_cntr_simd = $LOOP_SIMD_ITERATIONS$; logic [CNTR_BITWIDTH-1:0] Cfg_cntr_kw = $LOOP_KW_ITERATIONS$; diff --git a/finn-rtllib/swg/swg_template_parallel.sv b/finn-rtllib/swg/swg_template_parallel.sv index b55a51e400..83a525ff36 100644 --- a/finn-rtllib/swg/swg_template_parallel.sv +++ b/finn-rtllib/swg/swg_template_parallel.sv @@ -123,7 +123,7 @@ module $TOP_MODULE_NAME$_impl #( .TAIL_INCR_LAST($TAIL_INCR_LAST$), .INCR_BITWIDTH($INCR_BITWIDTH$), .IS_DEPTHWISE($IS_DEPTHWISE$), - .INNERMOST_STATE($INNERMOST_STATE$) + .INNERMOST_STATE(swg::$INNERMOST_STATE$) ) controller_inst ( .clk(ap_clk), From a0b2141f762cd6929ff95dac0995bfb17e1203cd Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 23 Jun 2023 17:53:15 +0100 Subject: [PATCH 513/628] [Tranform] Enable rtlsim to utilize swg package --- .../custom_op/fpgadataflow/convolutioninputgenerator_rtl.py | 5 +++++ src/finn/util/pyverilator.py | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index a1a32ba6af..c54c4ac1c9 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -1064,6 +1064,9 @@ def generate_hdl(self): shutil.copy2( os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir ) + shutil.copy2( + os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_pkg.sv", code_gen_dir + ) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain @@ -1082,6 +1085,7 @@ def prepare_rtlsim(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") verilog_paths = [code_gen_dir] verilog_files = [ + "swg_pkg.sv", self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", "swg_common.sv", @@ -1106,6 +1110,7 @@ def code_generation_ipi(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") sourcefiles = [ + "swg_pkg.sv", self.get_nodeattr("gen_top_module") + "_wrapper.v", self.get_nodeattr("gen_top_module") + "_impl.sv", "swg_common.sv", diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 8d18858569..7452394524 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -118,6 +118,8 @@ def file_to_basename(x): if not remove_entry: filtered_verilog_files.append(vfile) remove_entry = True + elif "swg_pkg" in vfile: + continue else: filtered_verilog_files.append(vfile) @@ -315,8 +317,10 @@ def file_to_basename(x): xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" + swg_pkg = os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_pkg.sv" + sim = PyVerilator.build( - [top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], + [swg_pkg, top_module_file_name, xpm_fifo, xpm_memory, xpm_cdc], verilog_path=[vivado_stitch_proj_dir, verilog_header_dir], build_dir=build_dir, trace_depth=get_rtlsim_trace_depth(), From b238b7b989d19b44dc275aff59c9b105f2f402aa Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 26 Jun 2023 18:01:44 +0100 Subject: [PATCH 514/628] [notebooks] 
Updating first part of folding notebook --- ...Folding-Tutorial.ipynb => 3_folding.ipynb} | 357 +++++++++--------- notebooks/advanced/finn-dataflow.png | Bin 0 -> 164258 bytes notebooks/advanced/finn-folding-mvau.png | Bin 0 -> 29710 bytes notebooks/advanced/finn-folding.png | Bin 0 -> 84958 bytes notebooks/advanced/finn-hw-arch.png | Bin 110452 -> 0 bytes 5 files changed, 176 insertions(+), 181 deletions(-) rename notebooks/advanced/{Folding-Tutorial.ipynb => 3_folding.ipynb} (63%) create mode 100755 notebooks/advanced/finn-dataflow.png create mode 100755 notebooks/advanced/finn-folding-mvau.png create mode 100755 notebooks/advanced/finn-folding.png delete mode 100644 notebooks/advanced/finn-hw-arch.png diff --git a/notebooks/advanced/Folding-Tutorial.ipynb b/notebooks/advanced/3_folding.ipynb similarity index 63% rename from notebooks/advanced/Folding-Tutorial.ipynb rename to notebooks/advanced/3_folding.ipynb index 409595d0d8..b1baf69cab 100644 --- a/notebooks/advanced/Folding-Tutorial.ipynb +++ b/notebooks/advanced/3_folding.ipynb @@ -26,14 +26,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note: The build_flow in the cybsec_mlp notebook comprises a transformation step `step_target_fps_parallelization` that automatically sets custom parallelization parameters needed to achieve a given `target_fps` by invoking the `SetFolding` transformation.\n", - "\n", - "More details of the above step can be found here: https://github.com/Xilinx/finn/blob/main/src/finn/builder/build_dataflow_steps.py#L394\n", + "Note: The build_flow in the cybsec_mlp notebook comprises a transformation step `step_target_fps_parallelization` that automatically sets custom parallelization parameters needed to achieve a given `target_fps` by invoking the [`SetFolding` transformation](https://github.com/Xilinx/finn/blob/main/src/finn/transformation/fpgadataflow/set_folding.py#L46).\n", "\n", + "More details of the above step can be found [here](https://github.com/Xilinx/finn/blob/main/src/finn/builder/build_dataflow_steps.py#L394)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "This notebook shows the manual version of this step and explains how these attributes can improve performance and what are their effects on resource utilization for developers who need to maximize the performance of their network. \n", "\n", - "* input : the 'step_convert_to_hls.onnx' file (we pick has gone through a series of transformation passes) to be analyzed in terms of clock cycles and resource utilization per layer\n", - "* analyze the estimated execution clock cycles and the resource utilization of each layer in the network" + "For that we will use the `step_convert_to_hls.onnx` file as starting point. This intermediate model from the cybersecurity example is the model representation after the high-level ONNX layers are converted to HLS layers. Each node in the graph now corresponds to an HLS C++ function call and the parallelization parameters can be set using the node attributes.\n", + "\n", + "We will take this model to show how to set the folding factors manually and analyze the estimated execution clock cycles and the resource utilization of each layer in the network." ] }, { @@ -42,11 +48,15 @@ "source": [ "### FINN-style Dataflow Architectures \n", "\n", - "We start with a quick recap of FINN-style dataflow architectures. 
The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, as illustrated in the figure below taken from the [FINN-R paper](https://arxiv.org/pdf/1809.04570.pdf):\n", + "We start with a quick recap of FINN-style dataflow architectures. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, as illustrated in the figure below.\n", + "\n", + "![](finn-dataflow.png)\n", "\n", - "![](finn-hw-arch.png)\n", + "In practice, the layers are instantiated by function calls to optimized Vitis HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library.\n", "\n", - "In practice, the compute arrays are instantiated by function calls to optimized Vitis HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library. As these function calls can only handle certain patterns/cases, we need to transform the network into an appropriate form so that we can replace network layers with these function calls, which is the goal of the network preparation process." + "Since each layer will be instantiated, we can flexibly set the parallelization of each layer and thus control resources and throughput of our network, as visualized in the imaged below:\n", + "\n", + "![](finn-folding.png)" ] }, { @@ -55,15 +65,14 @@ "source": [ "# Part-1 : Loading the ONNX model.\n", "\n", - "The 'onnx' file needs to go through multiple transformations before it can be fed into our estimation functions.\n", + "As discussed above, the network needs to go through a few preparation steps before it can be fed into our estimation functions.\n", "\n", - "The 'onnx' file loaded here is taken from the cybersecurity end2end example notebook. The build_step in the notebook comprises several series of transformations that take place before the onnx file is used for bitstream generation.\n", - "We pick the onnx file `step_convert_to_hls` to which the necessary transformations have been applied for this notebook (Network layers mapped to necessary FINN-HLS blocks. In this case, the `MatrixVectorActivation` Units). \n", + "The `.onnx` file loaded here is taken from the cybersecurity end2end example notebook. \n", + "We pick the onnx file `step_convert_to_hls.onnx` to which the necessary transformations have been applied for this notebook (Network layers mapped to necessary FINN-HLS blocks. In this case, the `MatrixVectorActivation` Units). \n", "\n", - "More information on these transformations can be found in the tfc_end2end_example notebook.\n", + "To interact with the `.onnx` file we use the `ModelWrapper()`. This wrapper simplifies the access to different model attributes and allows us to apply custom transformations on the model.\n", "\n", - "To interact with the 'onnx' file we use the `ModelWrapper()` helper function. This function gives access to different model attributes and allows us to apply custom tranformations to it.\n", - "In the below cell, we load our onnx file and view the cybersecurity MLP network in netron." + "In the below cell, we load our onnx file and view the cybersecurity MLP network in Netron." 
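[Editor's note: a minimal sketch, not part of this patch, illustrating how the folding attributes discussed in the notebook text above are read and set on an HLS node. It assumes the `step_convert_to_hls.onnx` model from the cybersecurity example and uses only helpers that appear elsewhere in this notebook (`ModelWrapper`, `getCustomOp`, `get_nodeattr`/`set_nodeattr`).]

    from qonnx.core.modelwrapper import ModelWrapper
    from qonnx.custom_op.registry import getCustomOp

    model = ModelWrapper("step_convert_to_hls.onnx")
    # after conversion to HLS layers, every MVAU starts out with PE = SIMD = 1
    mvau0 = model.get_nodes_by_op_type("MatrixVectorActivation")[0]
    inst = getCustomOp(mvau0)
    print(inst.get_nodeattr("PE"), inst.get_nodeattr("SIMD"))
    # raise the parallelism; SIMD must divide MW and PE must divide MH
    inst.set_nodeattr("SIMD", 4)
    inst.set_nodeattr("PE", 2)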
] }, { @@ -75,7 +84,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Serving './step_convert_to_hls_folding.onnx' at http://0.0.0.0:5901\n" + "Serving 'step_convert_to_hls.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -85,7 +94,7 @@ " " + "" ] }, "execution_count": 2, @@ -103,152 +112,63 @@ ], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(\"./step_convert_to_hls.onnx\")\n", + "model = ModelWrapper(\"../end2end_example/cybersecurity/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")\n", + "model.save(\"step_convert_to_hls.onnx\")\n", "\n", - "showInNetron(\"./step_convert_to_hls.onnx\",localhost_url='xirxlabs53')" + "showInNetron(\"step_convert_to_hls.onnx\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Part 2 : Parallelisation Attributes : PE & SIMD" + "# Part 2 : Parallelization Parameters: PE & SIMD" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**PE & SIMD represent the amount of time-multiplexity to which we expose each of our network layers. \n", - "These parallelization attributes are subject to certain constraints and should be selected accordingly.**\n", + "The computational parallelism can be varied by setting the folding factors or also called parallelization parameters **PE** and **SIMD** of each layer. These parallelization attributes are subject to certain constraints and should be selected accordingly.\n", "\n", - "We see how they work through an example of a multiplication computation (Matrix-Vector) in the `MatrixVectorActivation` layer looks like.\n", + "To see more details about how this is implemented in the `MatrixVectorActivation` layer (MVAU), please have a look at [this documentation](https://github.com/Xilinx/finn/blob/github-pages/docs/finn-sheduling-and-folding.pptx). A schematic of the folding in an MVAU for a fully-connected layer is shown below:\n", "\n", - "From the below block diagram, we observe that `SIMD` represents the parallelism within a single dot-product computation (the number of multiplications is a single clock cycle), while `PE` refers to how many such (Matrix-Vector?) dot-products execute in parallel.\n", - "\n", - "If `PE` & `SIMD` are set to 2 & 4 for a given layer that means, that within a dot-product 4 multiplications will happen in parallel and 2 such dot-products will execute in parallel.\n", - "\n", - "The base case of `PE` & `SIMD` both set as 1 suggest that there will be no parallelization therefore the resource utilization would be low (resources can be resued for differnt multiplication operations) when compared to settings where network layers have higher `PE` & `SIMD` values." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
    \n", - "Question in the third line of the above cell.\n", - "
    " + "![](finn-folding-mvau.png)" ] }, { - "attachments": { - "MVA-1.png": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABX4AAAMbCAMAAADNe32MAAAACXBIWXMAAB7CAAAewgFu0HU+AAAAV1BMVEX////v7++lpaUgICDd3d0bGxvh4eEAAAAQEBBKSkq7u7syMjLNzc1WVlYNDQ2YmJhnZ2dCQkLx8fG1tbUrKyvU1NSrq6t2dnaIiIg5OTnDw8Po6Oj5+fnQLuJiAAAgAElEQVR4Ae2djXqiOhRFbdVGW387ttXW93/OSYJBxBw4OxFF3dz7jUDOSWAR12Qi4mDAhQRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARI4J4J/H0vl69vhzP4+xrZtb+vr6/ylPbFxot9scvopSw4W9l/zUI9Z2XcQQIkQAIkcELgbW78spv53T9mZ1+3dlfp31djxnbfsogzZrKKO3b/M7EhH0U9J21wgwRIgARIoE7gb2jMdLWaWnH+urKfRdDvTwhdl/od2mVsIxf/Qlnldb+2Yl5tjKF/K1S4SgIkQAICgZUZ+rHs384M9zYmjH4nZnPIeLG+ndj1pRn6Pfv3oVlEFLv1Ne3nZn1I5AsJkAAJkIBMYGy+i8I3Y97tWhj9rsdh9mFppofJh0K/g8HLOKLY/WH4PDLmT26OJSRAAiRAAgWBgzTtxnztphTC6He6MofZh7V5rel38O5nGPYv5fI5GHwHJX/RvuxcJEACJNBO4MOsrTuPy/bw0dv0d1HMPvyZ9b+6fgdjs618FucnJ37crv2bm8DgQgIkQAIk0Erg236Qtvo+DljL0a9VrL/3YWte30/mfl2VczO3+p2MJ+Pif2vqqXn9sh/SmXn8tojWA2EACZAACTwZgdeFdabZrNzEr12Cftd2zc8+7Mzn+eh3ZaZF+PHPnflZmN3O3iXhbhzmQgIkQAIk0EbgZesGrcbs/GA36Hc6mPk7HUZ2nBvTr7s97WSxd5zt7MD3beNnL06KuEECJEACJBAn8Pn+Y+05ccPWMPdr7x4butmHrb0z4nzyYRU+ZzvWtzELP+yd8cbfIxSukQAJkEA7gX8TN59bTj7YyQU/+7BZ7COj351ZDQavm83w8L8dC++CkTdm2d4aI0iABEjgyQm8rkpXLv39DcfJh8GX/ebFl3Pye/3OB3uPr00rv4Xs73yYOyO7ZepugeBCAiRAAiTQSKCwrQ/5NQv7GvTrvrq2MbOtsTcD/6vf+WDFax+88/n29vbn/rf/uVmKw7fdPsxrY5MsJAESIAESGAx+jfOrX368P8Pcr7uzwc4+bCb2Rt766Hc0CSPdQ6p7+Tp8281+e463nlW4cJUESIAE4gTss3a27q7fz59CxNXR75cZe8+GOx/Ge7v8zbYTMz7eKFxWuzZTq+r99PyetDKEKyRAAiRAAoHAi73lwQx37k9/m28YA7vRr5198M+BCHc+2Jhi2fh71EIVh9fR2Ix/thsz4eC3RoabJEACJBAjsLfGdMuu+N5FGP16/W79Z27lnQ8+bjGeLuPfLC6eHDylfWOYuY8ESIAEIgT+Zu+zyGxCJLJl18vv70XqaWmGxSRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiQAEnibr8AMhpMACZAACVyAgP0K3AVqYRUkQAIkQAIgAeoXBMZwEiABErgMAer3MhxZCwmQAAmABIrfHgKTGE4CJEACJJBLgKPfXILMJwESIIEkAtRvEjYmkQAJkEAuAeo3lyDzSYAESCCJgP01i/hzJpNqYxIJkAAJkICSgNVv8URgZTzDSIAESIAELkJgZsJvaV6kOlZCAiRAAiSgI2BHv/wFeR0qRpEACZDAJQl8m8nh5+AuWSvrIgESIAESaCbwPTHLpTGb8LP0zdEsJQESIAESuACB/a/7Oc75fvA7tAL+mfEOiAtAZRUkQAI9JrB/ue0yGs3eX5c/8w/3C8dj//PG+6UVsFmsV9vv39HbbQ+vx1eOh0YCJHCfBP7+bVfTzXjhpNeTZbP6F0a8+/dV8cP0PTi0yXA3/1n+ft7ndeZRkwAJ9IrA/nflh5eF2xaLyeSG/w+Hm/V8tX39fakxenF/QeyG48ktl4r+d9tR7Qi5SQIkQAIQgf1ybKWy2K2W/2ZvdedBNT1F8P5v9Pu9LeZG1vxKyFNcc54kCXREwH2wNfz5Df/M76iVx6v25XtlZ2qmf493ZjwjEiCB6xB4tfLlbV1prF+2CzPkDEQaPGaRwNMT+Ge/1MCBb3I3eNuZMce/yfiYSALPTOBzbLa0b0YP+Pww84x0ppIACTwtga35eNpzv8yJjxaG0w+XQclaSOC5COzM93Od8OXPdsVHAl0eKmskgccnsDeGd5plXuZXM82sgekkQAJPSODPjJ/wrC97yr9md9kKWRsJkMAzEHgzi2c4zU7P8ddsOq2flZMACTwkAeo3/7JSv/kMWQMJPCEB6jf/olO/+QxZAwk8IQHqN/+iU7/5DFkDCTwhAeo3/6JTv/kMWQMJPCEB6jf/olO/+QxZAwk8IQHqN/+iU7/5DFkDCTwhAeo3/6JTv/kMWQMJPCEB6jf/olO/+QxZAwk8IYFL6/fve7l8fTuA/Ptyz6L5+/r6Ksnui40X+2KXUfMXnt++7uJZbNRveXm5QgIkcCsCb/Pil9B2M38EP/7buFu7r/Svfba7+5bzMvxi2mQVXH1+zPZRYnyS7jkW7iEBEiCBMwJ/9leLpqvV1Lr11xX+LNzDEJx+f0LsutTv0C7+J+akX9rY74yR3Rwq5CsJkAAJkMBgZYbel387M3TTBmH0OykfivBiVTyxJUsz9Lz270OzKIbKZ/yctqnfMyzcQQIkQALnBMbh2cFvxrhfAA6j3/U4zD4szfQw+VDodzB4GZv1eU12z6/5MJx8iKLhThIgARKoETjMOdi987WbUgij3+kqzD6szWtNv4N3Y+zwd/9SLp++1v1wMeLotwaYmyRAAiQQJ/Bh1oU7D8Xbw0dv099F8UjGP7P+V9fvwP7YXOWzuGJyYmAnMpYDjn7jnLmXBEiABGoEvo1ZrL6PdyuUo1+rWH/vw9a8vp/M/boK5u6nKpeT8WRc/O9N/e2mJDj6rQHmJgmQAAkIBF4X9uMys1m5iV+7BP2u7Zq/92FnPs9Hv6vIb/X8TSbW4tRvwZF/kgAJkEArgZetvbPMLjs/2A36nQ5m/k6HkR3nxvR7/ls9a/8hHicfWoEzgARIgARKAp/vPxs7geu+7xbmfu08wtDNPmytVM8nH1bntz4s3Xw
[... several thousand characters of base64-encoded PNG data for the removed "MVA-1.png" notebook attachment elided ...]
AESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIIGuCPwH9j3d4Kg1fY0AAAAASUVORK5CYII=" - } - }, "cell_type": "markdown", "metadata": {}, "source": [ - "![MVA-1.png](attachment:MVA-1.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Based on the above block diagram we find that `PE` & `SIMD` parallelization attributes are subject to the following constraints. \n", - "If `W` is the width of the input and `H` is the height of the output in a Matrix-Vector Computation then:\n", + "In the case of the MVAU, `PE` & `SIMD` are subject to the following constraints: \n", + "\n", + "If `MW` is the number of input features and `MH` the number of output features:\n", "\n", - " W % SIMD == 0\n", - " H % PE == 0\n", + " MW % SIMD == 0\n", + " MH % PE == 0\n", " \n", - "For the above example, H = 12 and W = 12. The demonstrated PE & SIMD values adhere to the above constraints.\n", + "Total folding in the case of the MVAU is defined as:\n", "\n", - "We also define a term referred to as total folding which is defined as :\n", + " Total folding = (MH/PE) x (MW/SIMD)\n", "\n", - " Total folding = (H/PE) x (W/SIMD)\n", + "In a streaming dataflow architecture like it is in FINN designs the throughput is determined by the slowest layer. So, the goal of adjusting these parameters is to get an almost balanced pipeline i.e. equalizing the throughput rate of layers in the generated dataflow architecture.\n", "\n", - "The goal of adjusting these parameters is to get an almost balanced pipeline i.e. equalling the rate of producers and consumers in the generated dataflow architecture.\n", - "This can be achieved (or almost achieved) by keeping the `total folding` parameter approximately constant across all layers.\n", + "The FINN compiler provides analysis passes to facilitate the exploration of the folding factors of each layer. In this notebook we will show how to use these functions and explore how the parallelization parameters affect the clock cycles and the resource utilization of the generated dataflow architecture.\n", "\n", - "We now explore how these parameters affect the estimated clock cycles and the resource utilization of the generated dataflow architectures.\n", - "We start with a naive case where `PE` & `SIMD` values across all layers are 1 and observe the above-mentioned numbers.\n", - "We define the utility functions (`exp_cycles_per_layer()`) and (`res_estimation()`) to estimate the number of clock cycles and resource utilization of each network layer." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
    \n", - "Should this line be added (The `exp_cycles_per_layer` formula is equal to the total folding in this case as the number of input vectors is 1 and the mmv value is also 1).\n", - "
    " + "We start with a naive case where `PE` & `SIMD` values across all layers are 1, this is the starting point of our exploration and is also the state the network is in after the conversion to HLS layers. If you take a look at the model using Netron and click on one of the MVAU layers, you can see that `PE` and `SIMD` are both set to 1 by default." ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, - "outputs": [], - "source": [ - "from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer\n", - "from finn.analysis.fpgadataflow.res_estimation import res_estimation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now individually extract the `MatrixVectorActivation` blocks from the onnx file and set the config values manually (although this can be done automatically by Vivado tools also as mentioned in the introduction).\n", - "\n", - "In the first step, we set the `PE` & `SIMD` values for all the layers to be '1' to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", - "\n", - "We utilize from (`getCustomOp()`) as the helper function to set different properties of the node. The (`set_nodeattr()`) function within this function call helps us set these values." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "from qonnx.custom_op.registry import getCustomOp\n", - "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", - "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", - "config = [\n", - " (1, 1, [16], [64], \"block\"),\n", - " (1, 1, [64], [64], \"auto\"),#8,8\n", - " (1, 1, [64], [64], \"auto\"),#8,8\n", - " (1, 1, [64], [1], \"distributed\"),\n", - "]\n", - "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n", - " fcl_inst = getCustomOp(fcl)\n", - " fcl_inst.set_nodeattr(\"PE\", pe)\n", - " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", - " fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n", - " fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n", - " fcl_inst.set_nodeattr(\"ram_style\", ramstyle)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "After setting these parameters, we save the model and view it using `Netron`\n", - ". We can observe the values we set in the above step by clicking on any of the nodes and observing their properties." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Stopping http://0.0.0.0:5901\n", - "Serving './cybsec_PE_SIMD_not_modified.onnx' at http://0.0.0.0:5901\n" + "Stopping http://0.0.0.0:5920\n", + "Serving 'step_convert_to_hls.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -258,7 +178,7 @@ " " + "" ] }, - "execution_count": 10, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "model.save(\"./cybsec_PE_SIMD_not_modified.onnx\")\n", - "showInNetron(\"./cybsec_PE_SIMD_not_modified.onnx\",localhost_url='xirxlabs53')" + "showInNetron(\"step_convert_to_hls.onnx\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We pass our model to the `exp_cycles_per_layer()` and `res_estimation()` functions which iteratively go through all the layers in the graph and measure the expected execution clock cycles and resource utilization for each of them and return a dictionary with calculated values." 
+ "We import the analysis passes (`exp_cycles_per_layer()`) and (`res_estimation()`) to estimate the number of clock cycles and resource utilization of each network layer." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "cycles_dict = []\n", - "cycles_dict = exp_cycles_per_layer(model)" + "from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer\n", + "from finn.analysis.fpgadataflow.res_estimation import res_estimation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Analysis passes in FINN return information about the model in form of a dictionary, you can learn more about analysis passes in general in this Jupyter notebook: [0_custom_analysis_pass.ipynb](0_custom_analysis_pass.ipynb).\n", + "\n", + "We start by calling the analysis pass `exp_cycles_per_layer()`, which returns a dictionary with the layer names as keys and the expected cycles as values. Afterwards, we plot the result in a block diagram." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'MatrixVectorActivation_0': 38400,\n", + " 'MatrixVectorActivation_1': 4096,\n", + " 'MatrixVectorActivation_2': 4096,\n", + " 'MatrixVectorActivation_3': 64}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cycles_dict = model.analysis(exp_cycles_per_layer)\n", + "cycles_dict" ] }, { @@ -303,7 +255,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3cAAAHWCAYAAADU7HB0AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABpGklEQVR4nO3deVhV5d7/8c8GBQcmZxzIsVQUJSckcyhJHNOyQTPFsTS0lHI6ldpo2SnNnBpOUuenOaYNThEqalKZirOmpmkqOAKKCgj37w8f1nELKii6afd+Xde6nrPvda+1vmvtffv0YU02Y4wRAAAAAOBvzcXRBQAAAAAAbh3hDgAAAACcAOEOAAAAAJwA4Q4AAAAAnADhDgAAAACcAOEOAAAAAJwA4Q4AAAAAnADhDgAAAACcAOEOAAAAAJwA4Q6AU2jVqpVatWrl6DLy1cGDB2Wz2RQZGVmg1pWTyMhI2Ww2/fbbb7dl/fnpdh+La9m7d6/atGkjb29v2Ww2LV68+I5u/05o1aqV6tat6+gyCrSs39+///3vm1reZrNp3Lhx+VsUAKdBuANwW2X9R/+1pp9//jnX69q5c6fGjRungwcP3r6Cb8K0adPueFDA309YWJi2bdumt956S//973/VqFEjR5fk9I4ePapx48YpLi7O0aUAwB1RyNEFAPhneP3111W1atVs7TVq1Mj1Onbu3KnXXntNrVq1UpUqVezm/fDDD7da4k2bNm2aSpcurd69ezusBhRsFy5cUGxsrF5++WUNHjzY0eX8Yxw9elSvvfaaqlSposDAQEeXAwC3HeEOwB3Rrl2723qmws3N7batG7hVJ06ckCT5+Pjk2zpTUlJUvHjxfFsfbq/MzEylpaU5uozb6uLFi3Jzc5OLCxeGAY7C6ANQYMyZM0cNGzaUp6envLy8FBAQoA8//FDS5cs7H3/8cUnSAw88YF3WuXr1aknZ77lbvXq1bDab5s2bp9dee00VK1aUp6enHnvsMSUlJSk1NVVDhw5V2bJl5eHhoT59+ig1NdWunpkzZ+rBBx9U2bJl5e7uLn9/f02fPt2uT5UqVbRjxw7FxMRYNV1ZR2JiooYOHSo/Pz+5u7urRo0aevfdd5WZmWm3nsTERPXu3Vve3t7y8fFRWFiYEhMTc33sEhMTNWzYMFWpUkXu7u6qVKmSevXqpZMnT153uZUrV6p58+YqXry4fHx81LlzZ+3atStbvyNHjqhfv36qUKGC3N3dVbVqVQ0aNOi6/7F65swZNWnSRJUqVdKePXtuuv5z586pePHieuGFF7It99dff8nV1VXjx4+/5WOxe/duPfbYYypZsqSKFCmiRo0a6dtvv7Xrk56ertdee0133323ihQpolKlSun+++9XVFTUNdc7btw4Va5cWZI0fPhw2Ww2uzPPmzdvVrt27eTl5SUPDw+1bt062+XKWZc3x8TE6LnnnlPZsmVVqVKl6+5Pamqqxo4dqxo1asjd3V1+fn4aMWLETf3OsyxbtkwtW7a0xmjjxo01e/bsbP127typBx54QMWKFVPFihU1YcKE69aaxWazafDgwVq8eLHq1q0rd3d31alTR8uXL8/W98iRI+rbt6/KlStn9fv888+t+atXr1bjxo0lSX369LHGZ2RkpCZPnixXV1e7Mfb+++/LZrMpIiLCasvIyJCnp6dGjhxptaWkpOjFF1+0xnTNmjX173//W8aYHPdl1qxZqlOnjtzd3XPcD0kyxuiZZ56Rm5ubvv7661wdqyx//vmnnnvuOdWsWVNFixZVqVKl9Pjjj9tdvv7HH3/IZrNp4sSJ2ZZfv369bDabvvrqK6vtRsdW+t+/sXPmzNErr7yiihUrqlixYkpOTs5T/QDyF2fuANwRSUlJ2f7j2mazqVSpUpKkqKgode/eXa1bt9a7774rSdq1a5d++uknvfDCC2rRooWef/55TZ48Wf/6179Uu3ZtSbL+77
[base64 "image/png" payloads omitted: old and updated matplotlib renderings of the per-layer expected-cycles plot]
qWImdJIWFhSklJUXbtm2z6lzZRmadzDauJTU1VSkpKXYLAAAAADhLnk7u6tevr+joaC1ZskRTpkxRQkKCGjVqpNOnTysxMVEeHh4qWrSo3TalS5dWYmKiJCkxMdEusctcn7nuenVSUlJ0/vz5a8Y2ZswY+fj4WIu/v/+tHi4AAAAA3LR8zg7geh577DHr30FBQapfv74CAgL05ZdfqmDBgk6MTIqKilJkZKT1OiUlhQQPAAAAgNPk6St3VytatKjuvfde7dmzR35+fkpLS1NSUpJdnaNHj8rPz0+S5Ofnl2X0zMzXN6rj7e193QTS09NT3t7edgsAAAAAOMvfKrk7c+aM9u7dqzJlyqh27drKnz+/YmNjrfW7du3SwYMHFRISIkkKCQnRli1bdOzYMatOTEyMvL29FRgYaNW5so3MOpltAAAAAMDfQZ5O7l555RWtWrVK+/fv19q1a/Xkk0/K3d1dzzzzjHx8fNS9e3dFRkZqxYoV2rBhg7p27aqQkBA1aNBAktS8eXMFBgaqU6dO+u2337R06VINGzZMvXv3lqenpyTpxRdf1L59+zR48GDt3LlTkydP1pdffqkBAwY489ABAAAAwCF5+pm7P/74Q88884xOnDghX19fPfjgg/r555/l6+srSXr//ffl5uamtm3bKjU1VWFhYZo8ebK1vbu7uxYuXKhevXopJCREhQsXVkREhEaPHm3VqVixohYtWqQBAwZowoQJKleunD799FOFhYXd8eMFAAAAgJuVp+e5+zthnru8hU81AAAAnIV57gAAAAAANy1P35YJAHkJV8SdHQFcDX3K2REAcDVcuQMAAAAAF0ByBwAAAAAugOQOAAAAAFwAyR0AAAAAuACSOwAAAABwASR3AAAAAOACSO4AAAAAwAWQ3AEAAACACyC5AwAAAAAXQHIHAAAAAC6A5A4AAAAAXADJHQAAAAC4AJI7AAAAAHABJHcAAAAA4AJI7gAAAADABZDcAQAAAIALILkDAAAAABdAcgcAAAAALoDkDgAAAABcAMkdAAAAALgAkjsAAAAAcAEkdwAAAADgAkjuAAAAAMAFkNwBAAAAgAsguQMAAAAAF0ByBwAAAAAugOQOAAAAAFwAyR0AAAAAuACSOwAAAABwASR3AAAAAOACSO4AAAAAwAWQ3AEAAACACyC5AwAAAAAXQHIHAAAAAC6A5A4AAAAAXADJ3VU+/PBDVahQQQUKFFD9+vW1fv16Z4cEAAAAADdEcneFL774QpGRkRoxYoQ2btyomjVrKiwsTMeOHXN2aAAAAABwXSR3V3jvvffUo0cPde3aVYGBgZo6daoKFSqkadOmOTs0AAAAALiufM4OIK9IS0vThg0bFBUVZZW5ubkpNDRUcXFxWeqnpqYqNTXVep2cnCxJSklJuf3B4oZ4G4DcR78Cchd9CreDj4+zI3Cu//+T3OkycwJjzB3dL8nd//vrr7+Unp6u0qVL25WXLl1aO3fuzFJ/zJgxGjVqVJZyf3//2xYjcu6f/sUG3A70KyB30aeA3JfX+tXp06flcweDIrm7SVFRUYqMjLReZ2Rk6OTJkypRooRsNpsTI3O+lJQU+fv769ChQ/L29nZ2OIBLoF8BuYs+BeQ++tX/GGN0+vRplS1b9o7ul+Tu/5UsWVLu7u46evSoXfnRo0fl5+eXpb6np6c8PT3tyooWLXo7Q/zb8fb2/sd3bCC30a+A3EWfAnIf/eqyO3nFLhMDqvw/Dw8P1a5dW7GxsVZZRkaGYmNjFRIS4sTIAAAAAODGuHJ3hcjISEVERKhOnTqqV6+exo8fr7Nnz6pr167ODg0AAAAArovk7grt27fX8ePHNXz4cCUmJio4OFhLlizJMsgKrs/T01MjRozIctsqgJtHvwJyF30KyH30K+ezmTs9PicAAAAAINfxzB0AAAAAuACSOwAAAABwASR3AAAAAOACSO6crEKFCho/fryzw/jb2b9/v2w2m+Lj42/7vniP/l54v24OfQrXw3t2c+hXuBber5tDn8oBAxMREWEkmZ49e2ZZ99JLLxlJJiIiIkdtJSQkGElm06ZNOap/7Ngxc/bs2RzVbdmypQkLC8t23erVq40k89tvv+WorWtZsWKFkWROnTp1S+1c7dy5c6ZYsWKmRIkS5sKFCw5tGxERYVq1amVXdunSJXPkyBFz8eLFXItx+vTpxsfHJ0u5I+9Rbpk0aZIJCAgwnp6epl69embdunV3dP+3ij71P/Qpnyzld7pPrVq1yrRs2dKUKVPGSDJff/31Hdt3bqJf/Q/9yidL+Z3uV//+979NnTp1jJeXl/H19TWtWrUyO3fuvGP7zw30qf+hT/lkKb/TfWry5MmmRo0apkiRIqZIkSKmQYMG5vvvv3e4Ha7c/T9/f3/NmTNH58+ft8ouXLig2bNnq3z58rm+v7S0NEmSr6+vChUqlKNtunfvrpiYGP3xxx9Z1k2fPl116tRRUFBQrsZ5s4wxunTpkvV6/vz5qlatmqpUqaIFCxbccvvu7u7y8/NTvny3fzYPR96j3PDFF18oMjJSI0aM0MaNG1WzZk2FhYXp2LFjdyyG3ECfyl30qZt39uxZ1axZUx9++OEd2+ftQr/KXfSrm7dq1Sr17t1bP//8s2JiYnTx4kU1b95cZ8+evWMx5Ab6VO6iT928cuXK6a233tKGDRv066+/6uGHH1arVq20bds2xxrK5aTzbynzl4Hq1aub//73v1b5rFmzTFBQkGnVqpX1y83ixYtNw4YNjY+PjylevLgJDw83e/bssbaRZLc0adLEbh9vvPGGKVOmjKlQoYIxxpiAgADz/vvvG2Mu/2qSP39+s3r1aqu9sWPHGl9fX5OYmGguXrxoSpcubV5//XW7+E+fPm28vLzMlClTjDHG/Pjjj+bBBx80BQoUMOXKlTN9+/Y1Z86csepfuHDBDB482JQrV854eHiYSpUqmU8//dT61enKJfO4L1y4YPr27Wt8fX2Np6enadiwoVm/fr3VZuYvPt9//72pVauWyZ8/v1mxYoW1vmnTpmbq1KlmypQp5pFHHsnyHmzdutWEh4ebIkWKGC8vL/Pggw+aPXv2mBEjRmSJacWKFXa/kKWnp5u77rrLTJ482a7NjRs3GpvNZvbv32+MMebdd9811atXN4UKFTLlypUzvXr1MqdPn7aL/8plxIgRWd4jY4w5cOCAeeKJJ0zhwoVNkSJFzFNPPWUSExOt9SNGjDA1a9Y0M2fONAEBAcbb29u0b9/epKSkZDnu7NSrV8/07t3bep2enm7Kli1rxowZk6Pt8wL6FH0qL/WpK+lvfuWOfkW/yov9ypjLVzkkmVWrVt3U9s5An6JP5eU+ZYwxxYoVM59++qlD25Dcmf91vPfee880a9bMKm/WrJl5//337Tr3vHnzzPz5883u3bvNpk2bzOOPP25q1Khh0tPTjTHGrF+/3kgyy5YtM0eOHDEnTpyw9uHl5WU6depktm7darZu3WqMyfrBGTRokAkICDBJSUlm48aNxsPDw3zzzTd26ytVqmQyMjKssmnTppmCBQuapK
Qks2fPHlO4cGHz/vvvm99//92sWbPG3H///aZLly5W/aefftr4+/ubr776yuzdu9csW7bMzJkzx1y6dMnMnz/fSDK7du0yR44cMUlJScYYY15++WVTtmxZ8/3335tt27aZiIgIU6xYMev4MjtHUFCQ+eGHH8yePXusdXv27DGenp7m5MmT5sSJE6ZAgQJWhzPGmD/++MMUL17ctGnTxvzyyy9m165dZtq0aWbnzp3m9OnT5umnnzaPPvqoOXLkiDly5IhJTU3NcvvDK6+8Yh588EG793XgwIF2Ze+//75Zvny5SUhIMLGxsea+++4zvXr1MsYYk5qaasaPH2+8vb2t/WR2/Cvfo/T0dBMcHGwefPBB8+uvv5qff/7Z1K5d2/oSN+Zy5/by8jJt2rQxW7ZsMatXrzZ+fn7m1VdfveZnMFNqaqpxd3fP8sdn586dzRNPPHHD7fMK+hR9Kq/0qau5QnJHv6Jf5bV+ZYwxu3fvNpLMli1bbmp7Z6BP0afyap+6dOmS+fzzz42Hh4fZtm2bQ9uS3Jn/de5jx44ZT09Ps3//frN//35ToEABc/z4cbvOfbXjx4/bfZld657riIgIU7p0aZOammpXfnXnTk1NNcHBwebpp582gYGBpkePHnb1d+zYYf16kalRo0bmueeeM8YY0717d/PCCy/YbfPjjz8aNzc3c/78ebNr1y4jycTExGR7PNndc33mzBmTP39+M2vWLKssLS3NlC1b1owbN85uuwULFmRp89VXXzWtW7e2Xrdq1cr6VcQYY6KiokzFihVNWlpatjFld8/11ed506ZNxmazmQMHDhhjjPVrTuavWdmZO3euKVGihPX6WvdcX/ke/fDDD8bd3d0cPHjQWr9t2zYjyfola8SIEaZQoUJ2v9QMGjTI1K9f/5qxZPrzzz+NJLN27Vq78kGDBpl69erdcPu8gj71P/Qpnyz17mSfuporJHf0K/pVXutX6enpJjw83DRs2NDhbZ2JPvU/9CmfLPWc0ac2b95sChcubNzd3Y2Pj49ZtGhRjrfNxDN3V/D19VV4eLiio6M1ffp0hYeHq2TJknZ1du/erWeeeUZ33323vL29VaFCBUnSwYMHb9h+jRo15OHhcd06Hh4emjVrlubPn68LFy7o/ffft1tfpUoVPfDAA5o2bZokac+ePfrxxx/VvXt3SdJvv/2m6OhoeXl5WUtYWJgyMjKUkJCg+Ph4ubu7q0mTJjk9Ldq7d68uXryohg0bWmX58+dXvXr1tGPHDru6derUsXudnp6uGTNm6LnnnrPKnnvuOUVHRysjI0OSFB8fr0aNGil//vw5julqwcHBqlq1qmbPni3p8rMAx44d01NPPWXVWbZsmZo1a6a77rpLRYoUUadOnXTixAmdO3cux/vZsWOH/P395e/vb5UFBgaqaNGidueiQoUKKlKkiPW6TJkyf7tn5nIDfSp79Kn/oU85jn6VPfrV/9zpftW7d29t3bpVc+bMcXjbvIA+lT361P/cqT513333KT4+XuvWrVOvXr0UERGh7du353h7iakQsujWrZuio6M1Y8YMdevWLcv6xx9/XCdPntQnn3yidevWad26dZL+94Ds9RQuXDhHMaxdu1aSdPLkSZ08eTLL+u7du2v+/Pk6ffq0pk+frkqVKlmd9cyZM+rZs6fi4+Ot5bffftPu3btVqVIlFSxYMEcx3Kyrj3Hp0qX6888/1b59e+XLl0/58uVThw4ddODAAcXGxkpSrsXUsWNHq3PPnj1bjz76qEqUKCHp8tC5LVu2VFBQkObPn68NGzZYgyvk5L1z1NVfVDabzfoyu56SJUvK3d1dR48etSs/evSo/Pz8cjXGO4U+dWvoU5fdbJ9yVfSrW0O/uiw3+lWfPn20cOFCrVixQuXKlcvN8O4o+tStoU9ddqt9ysPDQ5UrV1bt2rU1ZswY1axZUxMmTHAoBpK7qzz66KNKS0vTxYsXFRYWZrfuxIkT2rVrl4YNG6ZmzZqpatWqOnXqlF2dzF9m0tPTb2r/e/fu1YABA/TJJ5+ofv36ioiIyPKhePrpp+Xm5qbZs2dr5syZ6tatm2w2mySpVq1a2r59uypXrpxl8fDwUI0aNZSRkaFVq1Zlu//s4q9UqZI8PDy0Zs0aq+zixYv65ZdfFBgYeN3j+eyzz9ShQwe7L5v4+Hh16NBBn332mSQpKChIP/74oy5evHjNmHJyPp999llt3bpVGzZs0Lx589SxY0dr3YYNG5SRkaF3331XDRo00L333qvDhw87vJ+qVavq0KFDOnTokFW2fft2JSUl3fBc5ISHh4dq165tffFJUkZGhmJjYxUSEnLL7TsDfYo+dT23u0+5KvoV/ep67kS/MsaoT58++vrrr7V8+XJVrFgxV9p1FvoUfep6nPX/qoyMDKWmpjq2kcM3crqgq+/pTU5ONsnJydbrzHuu09PTTYkSJcxzzz1ndu/ebWJjY03dunXtnuG4ePGiKViwoHnjjTdMYmKi9UBqdvcNG2N/P++lS5dMgwYNTNu2bY0xxhw+fNiUKFHCuq/5St27dzfFihUz7u7u5s8//7TKf/vtN1OwYEHTu3dvs2nTJvP777+bBQsW2I2+2KVLF+Pv72++/vprs2/fPrNixQrzxRdfGGMuP9xqs9lMdHS0OXbsmPVQab9+/UzZsmXN4sWL7R6oPXnypDEm+3u1jx07ZvLnz28WL16cJf7vv//eeHp6mhMnTpi//vrLlChRwnqg9vfffzczZ8605st58803Tfny5c3OnTvN8ePHTVpa2jXvbW/YsKGpWbOmKVKkiDl37pxVHh8fbySZ8ePHm71795qZM2eau+66yy7mNWvWWA9DHz9+3Jrb5Mr3KCMjwwQHB5tGjRqZDRs2mHXr1mX7QG3NmjXt4nr//fdNQEBAlvOQnTlz5hhPT08THR1ttm/fbl544QVTtGhRuxGZ8jr6FH3KmLzTp06fPm02bdpkNm3aZCSZ9957z2zatMl6RuPvgn5FvzIm7/SrXr16GR8fH7Ny5UprIIojR47YHU9eR5+iTxmTd/rU0KFDzapVq0xCQoLZvHmzGTp0qLHZbOaHH37I0faZSO7MtTtepisfqI2JiTFVq1Y1np6eJigoyKxcuTLLA/qffPKJ8ff3N25ublmGwr3alR+cUaNGmTJlypi//vrLWj9//nzj4eFh4uPj7bZbu3atkWRatGiRpc3169ebRx55xHh5eZnChQuboKAg8+abb1rrz58/bwYMGGDKlCljPDw8TOXKlc20adOs9aNHjzZ+fn7GZrNZx33+/HnTt29fU7JkyesOhXtl537nnXdM0aJFs31QNjU11RQtWtRMmDDBGHP5S6l58+amUKFCpkiRIqZRo0Zm7969xpjLXxKZx6NshsK90uTJk40k07lz5yz7fO+990yZMmVMwYIFTVhYmJk5c2aWmF988UVTokSJXBkK90qOdG5jjPnggw9M+fLljYeHh6lXr575+eefc7xtXkCfok9lygt9KruhrqWcT06cV9Cv6FeZ8kK/yq5PSTLTp0/P0fZ5AX2KP
pUpL/Spbt26mYCAAOPh4WF8fX1Ns2bNHE7sjDHGZowxjl3rAwAAAADkNTxzBwAAAAAugOQOuIMOHjxoN0zx1UtOhlQG8D/0KSD30a+A3HUn+xS3ZQJ30KVLl7R///5rrq9QoYLy5ct35wIC/uboU0Duo18BuetO9imSOwAAAABwAdyWCQAAAAAugOQOAAAAAFwAyR0AAAAAuACSOwAAAABwASR3AADkkqZNm6p///45rr9y5UrZbDYlJSXdtpgAAP8cJHcAAKfr0qWLbDab3nrrLbvyBQsWyGazOdRWhQoVNH78+FyMDgCAvweSOwBAnlCgQAGNHTtWp06dcnYoDktLS3N2CLfk4sWLzg4BAJALSO4AAHlCaGio/Pz8NGbMmOvW++mnn9SoUSMVLFhQ/v7+evnll3X27FlJl2+LPHDggAYMGCCbzSabzSZjjHx9fTVv3jyrjeDgYJUpU8auTU9PT507d06SdPDgQbVq1UpeXl7y9vbW008/raNHj1r1R44cqeDgYH366aeqWLGiChQokG2sixYtko+Pj2bNmpWjc3DixAk988wzuuuuu1SoUCHVqFFDn3/+ubV+5syZKlGihFJTU+22a926tTp16mS9/uabb1SrVi0VKFBAd999t0aNGqVLly5Z6202m6ZMmaInnnhChQsX1ptvvqlTp06pY8eO8vX1VcGCBXXPPfdo+vTpOYobAJA3kNwBAPIEd3d3/fvf/9YHH3ygP/74I9s6e/fu1aOPPqq2bdtq8+bN+uKLL/TTTz+pT58+kqSvvvpK5cqV0+jRo3XkyBEdOXJENptNjRs31sqVKyVJp06d0o4dO3T+/Hnt3LlTkrRq1SrVrVtXhQoVUkZGhlq1aqWTJ09q1apViomJ0b59+9S+fXu7WPbs2aP58+frq6++Unx8fJZYZ8+erWeeeUazZs1Sx44dc3QOLly4oNq1a2vRokXaunWrXnjhBXXq1Enr16+XJD311FNKT0/Xt99+a21z7NgxLVq0SN26dZMk/fjjj+rcubP69eun7du366OPPlJ0dLTefPNNu32NHDlSTz75pLZs2aJu3brpX//6l7Zv367Fixdrx44dmjJlikqWLJmjuAEAeUM+ZwcAAECmJ598UsHBwRoxYoQ+++yzLOvHjBmjjh07WoOW3HPPPZo4caKaNGmiKVOmqHjx4nJ3d1eRIkXk5+dnbde0aVN99NFHkqTVq1fr/vvvl5+fn1auXKkqVapo5cqVatKkiSQpNjZWW7ZsUUJCgvz9/SVdvmJWrVo1/fLLL6pbt66ky7dizpw5U76+vlni/PDDD/Xaa6/pu+++s9rNibvuukuvvPKK9bpv375aunSpvvzyS9WrV08FCxbUs88+q+nTp+upp56SJP33v/9V+fLl1bRpU0nSqFGjNHToUEVEREiS7r77br3++usaPHiwRowYYbX97LPPqmvXrtbrgwcP6v7771edOnUkXX52EQDw98KVOwBAnjJ27FjNmDFDO3bsyLLut99+U3R0tLy8vKwlLCxMGRkZSkhIuGabTZo00fbt23X8+HGtWrVKTZs2VdOmTbVy5UpdvHhRa9eutZKjHTt2yN/f30rsJCkwMFBFixa1iykgICDbxG7evHkaMGCAYmJiHErsJCk9PV2vv/66atSooeLFi8vLy0tLly7VwYMHrTo9evTQDz/8oD///FOSFB0dbQ1Ik3mORo8ebXeOevTooSNHjli3nUqykrhMvXr10pw5cxQcHKzBgwdr7dq1DsUOAHA+kjsAQJ7SuHFjhYWFKSoqKsu6M2fOqGfPnoqPj7eW3377Tbt371alSpWu2WZmsrRq1Sq75G7VqlX65ZdfdPHiRT3wwAMOxVm4cOFsy++//375+vpq2rRpMsY41Obbb7+tCRMmaMiQIVqxYoXi4+MVFhZmN2DL/fffr5o1a2rmzJnasGGDtm3bpi5duljrz5w5o1GjRtmdoy1btmj37t12zwZeHf9jjz1mPa94+PBhNWvWzO4qIgAg7+O2TABAnvPWW28pODhY9913n115rVq1tH37dlWuXPma23p4eCg9Pd2uzGazqVGjRvrmm2+0bds2PfjggypUqJBSU1P10UcfqU6dOlayU7VqVR06dEiHDh2yrt5t375dSUlJCgwMvGHslSpV0rvvvqumTZvK3d1dkyZNyvFxr1mzRq1atdJzzz0nScrIyNDvv/+eZb/PP/+8xo8frz///FOhoaF2Vxlr1aqlXbt2XfccXYuvr68iIiIUERGhRo0aadCgQXrnnXccbgcA4BxcuQMA5Dk1atRQx44dNXHiRLvyIUOGaO3aterTp4/i4+O1e/duffPNN9aAKtLlZ8VWr16tP//8U3/99ZdV3rRpU33++ecKDg6Wl5eX3Nzc1LhxY82aNcvu9snQ0FBr/xs3btT69evVuXNnNWnSJMutjNdy7733asWKFZo/f75Dk5rfc889iomJ0dq1a7Vjxw717NnTbpTOTM8++6z++OMPffLJJ9ZAKpmGDx+umTNnatSoUdq2bZt27NihOXPmaNiwYdfd9/Dhw/XNN99oz5492rZtmxYuXKiqVavmOHYAgPOR3AEA8qTRo0crIyPDriwoKEirVq3S77//rkaNGun+++/X8OHDVbZsWbvt9u/fr0qVKtk9E9ekSROlp6dbz9ZJlxO+q8tsNpu++eYbFStWTI0bN1ZoaKjuvvtuffHFFw7Ff99992n58uX6/PPPNXDgwBxtM2zYMNWqVUthYWFq2rSp/Pz81Lp16yz1fHx81LZtW3l5eWVZHxYWpoULF+qHH35Q3bp11aBBA73//vsKCAi47r49PDwUFRWloKAgNW7cWO7u7pozZ05ODxcAkAfYjKMPBAAAAKdr1qyZqlWrluXqJgDgn4vkDgCAv5FTp05p5cqVateunbZv357luUQAwD8XA6oAAPA3cv/99+vUqVMaO3YsiR0AwA5X7gAAAADABTCgCgAAAAC4AJI7AAAAAHABJHcAAAAA4AJI7gAAAADABZDcAQAAAIALILkDAAAAABdAcgcAAAAALoDkDgAAAABcwP8Binx+rd+7B7sAAAAASUVORK5CYII=", "text/plain": [ "
    " ] @@ -315,76 +267,118 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "layers = list(cycles_dict.keys())\n", - "cycles = list(cycles_dict.values())\n", "fig = plt.figure(figsize = (10, 5))\n", - "plt.bar(layers, cycles, color ='blue', width = 0.3)\n", - "plt.xlabel(\"Network Layers\")\n", - "plt.ylabel(\"Clock Cycles\")\n", - "plt.title(\"Estimated clock cycles for each network layer\")\n", + "plt.bar(cycles_dict.keys(), cycles_dict.values(), color ='blue', width = 0.3)\n", + "plt.xlabel(\"Network layers\")\n", + "plt.ylabel(\"Number of clock cycles\")\n", + "plt.title(\"Estimated no. of clock cycles for each network layer\")\n", "plt.show()" ] }, { - "cell_type": "code", - "execution_count": 8, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "res_dict = []\n", - "res_dict = res_estimation(model)" + "We observe that the bottleneck in the execution of the model on hardware would come from the execution of the first layer which takes estimated 38400 clock cycles to execute one set of its inputs.\n", + "\n", + "No matter how quickly the other layers execute, the throughput will be defined by the first layer's execution latency.\n", + "\n", + "Let's have a look now at the estimated resources per layer by calling another analysis pass.\n", + "The keys are again the layer names, but the values are now a dictionary with the resource estimates per layer." ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABeo0lEQVR4nO3deXxN1/7/8feRSEJGQRJDzFVCIkoRU7SGGKuXVqkSY1WjLb60dDC2TWe0F61W0Vuq5rYuVWOoeSw1U1NLUENiqJBk/f7wy76OBAlhG17Px+M8mrPW2nt/9jlnpd7Zw3EYY4wAAAAAALbJYXcBAAAAAPCgI5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAG4KXXq1FGdOnXsLiNb7d+/Xw6HQ+PHj7e7FFvxOmTe+PHj5XA4tH///huO/fnnnxUeHi4PDw85HA6dPn36ttd3pzkcDvXo0cPuMu5qaZ+ZdevWZXnZJUuWyOFwaMmSJdlfGADbEcyA+0za//Sv9Vi1alWm17Vt2zYNGjQoU//ovJNGjRpla2hI+8fRtGnTrjnmev9AnTZtmvWPq7R1ZeaBe9eJEyfUqlUr5cqVSyNHjtR//vMfeXp62l3WfW/FihUaNGjQfRmCAdx/XO0uAMDtMWTIEBUvXjxde6lSpTK9jm3btmnw4MGqU6eOihUr5tT3yy+/3GqJN23UqFHKly+fOnToYFsN2aVs2bL6z3/+49TWv39/eXl56Y033rCpKmS3tWvX6syZMxo6dKjq1atndzkPjBUrVmjw4MHq0KGD/Pz87C4HAK6LYAbcpxo1aqTKlSvftvW7ubndtnU/SAIDA/Xcc885tb333nvKly9funbcu44dOyZJ2RoOzp07x1G3e8iFCxfu+9+bfCaBW8OpjMADbPLkyapUqZK8vb3l4+Oj0NBQjRgxQtLlUyKffvppSdJjjz1mnU6Xdm3D1deYpZ2SN2XKFA0ePFiFChWSt7e3nnrqKSUkJCgpKUk9e/ZUQECAvLy81LFjRyUlJTnVM27cOD3++OMKCAiQu7u7QkJCNHr0aKcxxYoV09atWxUXF2fVdGUdp0+fVs+ePRUcHCx3d3eVKlVK77//vlJTU53Wc/r0aXXo0EG+vr7y8/NTdHT0PXm609GjR+Xq6qrBgwen69u5c6ccDof+/e9/S5JOnjypPn36KDQ0VF5eXvLx8VGjRo3022+/3XA717qmsEOHDumOpqampmr48OEqV66cPDw8FBgYqG7duunUqVNO49atW6eoqCjly5dPuXLlUvHixdWpU6cb1uJwODRo0KB07cWKFXM6inrp0iUNHjxYDz30kDw8PJQ3b17VrFlT8+fPd1pux44deuqpp+Tv7y8PDw9VrlxZP/74Y7r1b926VY8//rhy5cqlwoUL6+233073ucpInTp1FB0dLUl69NFH5XA4nOqcOnWqKlWqpFy5clmB/K+//nJaR4cOHeTl5aW9e/eqcePG8vb2Vtu2ba+73b/++kudOnVSYGCg3N3dVa5cOX399ddOYy5evKgBAwaoUqVK8vX1laenp2rVqqXFixenW19qaqpGjBih0NBQeXh4KH/+/GrYsGGG10rNmjVL5cuXt7b7888/3/B1uvJ3yDvvvKPChQvLw8NDdevW1Z49e9KNX716tRo2bChfX1/lzp1bkZGRWr58udU/aNAg9e3bV5JUvHhx6/fF/v371aJFCz3yyCNO62vWrJkcDofTe7969Wo5HA7NnTvXavvjjz/09NNPy9/fX7lz51a1atX03//+N8N9mTx5st58800VKlRIuXPnVmJiYob7furUKVWpUkWFCxfWzp07b/haXWnZsmV6+umnVaRIEbm7uys4OFi9evXSP//8Y40ZN26cHA6HNm7cmG75d999Vy4uLk6fuRu9ttLl19fhcGjbtm169tlnlSdPHtWsWTNLtQNwxhEz4D6V
kJCgv//+26nN4XAob968kqT58+erTZs2qlu3rt5//31J0vbt27V8+XK98sorql27tl5++WV9+umnev3111W2bFlJsv57LbGxscqVK5f69eunPXv26LPPPlPOnDmVI0cOnTp1SoMGDdKqVas0fvx4FS9eXAMGDLCWHT16tMqVK6cnnnhCrq6u+umnn/Tiiy8qNTVVMTExkqThw4frpZdecjrVLzAwUJJ0/vx5RUZG6q+//lK3bt1UpEgRrVixQv3799eRI0c0fPhwSZIxRs2bN9evv/6qF154QWXLltXMmTOtfzzfSwIDAxUZGakpU6Zo4MCBTn3ff/+9XFxcrID9xx9/aNasWXr66adVvHhxHT16VF988YUiIyO1bds2FSxYMFtq6tatm8aPH6+OHTvq5Zdf1r59+/Tvf/9bGzdu1PLly5UzZ04dO3ZMDRo0UP78+dWvXz/5+flp//79mjFjRrbUIF3+h2NsbKy6dOmiKlWqKDExUevWrdOGDRtUv359SZfDVo0aNVSoUCH169dPnp6emjJlip588klNnz5d//rXvyRJ8fHxeuyxx5ScnGyNGzNmjHLlynXDOt544w09/PDDGjNmjHWKccmSJSXJep0effRRxcbG6ujRoxoxYoSWL1+ujRs3Oh1hS05OVlRUlGrWrKmPPvpIuXPnvuY2jx49qmrVqlnXOubPn19z585V586dlZiYqJ49e0qSEhMT9dVXX6lNmzbq2rWrzpw5o7FjxyoqKkpr1qxReHi4tc7OnTtr/PjxatSokbp06aLk5GQtW7ZMq1atcjo6/+uvv2rGjBl68cUX5e3trU8//VQtW7bUwYMHrd8/1/Pee+8pR44c6tOnjxISEvTBBx+obdu2Wr16tTVm0aJFatSokSpVqqSBAwcqR44c1h92li1bpipVqqhFixbatWuXvvvuOw0bNkz58uWTJOXPn1+1atXSDz/8oMTERPn4+MgYo+XLlytHjhxatmyZnnjiCUmXQ0+OHDlUo0YN63WtXr26zp8/r5dffll58+bVhAkT9MQTT2jatGnW5yXN0KFD5ebmpj59+igpKSnDI2Z///236tevr5MnTyouLs76bGTW1KlTdf78eXXv3l158+bVmjVr9Nlnn+nPP//U1KlTJUlPPfWUYmJiNHHiRFWsWNFp+YkTJ6pOnToqVKhQpl/bKz399NN66KGH9O6778oYk6XaAVzFALivjBs3zkjK8OHu7m6Ne+WVV4yPj49JTk6+5rqmTp1qJJnFixen64uMjDSRkZHW88WLFxtJpnz58ubixYtWe5s2bYzD4TCNGjVyWj4iIsIULVrUqe38+fPpthMVFWVKlCjh1FauXDmnbacZOnSo8fT0NLt27XJq79evn3FxcTEHDx40xhgza9YsI8l88MEH1pjk5GRTq1YtI8mMGzcu3bqvlLavU6dOveYYSSYmJibDvuu9rtfbv2v54osvjCSzZcsWp/aQkBDz+OOPW88vXLhgUlJSnMbs27fPuLu7myFDhji1Xf06XP1+p4mOjnZ6H5ctW2YkmYkTJzqN+/nnn53aZ86caSSZtWvXZno/00gyAwcOTNdetGhREx0dbT2vUKGCadKkyXXXVbduXRMaGmouXLhgtaWmpprq1aubhx56yGrr2bOnkWRWr15ttR07dsz4+voaSWbfvn3X3U7avLxyfy9evGgCAgJM+fLlzT///GO1z54920gyAwYMsNqio6ONJNOvX7/rbidN586dTYECBczff//t1N66dWvj6+trzbXk5GSTlJTkNObUqVMmMDDQdOrUyWpbtGiRkWRefvnldNtKTU21fpZk3NzczJ49e6y23377zUgyn3322XVrTptXZcuWdappxIgRTp/v1NRU89BDD5moqCinbZ8/f94UL17c1K9f32r78MMPM3x/1q5daySZOXPmGGOM2bx5s5Fknn76aVO1alVr3BNPPGEqVqxoPU/7HCxbtsxqO3PmjClevLgpVqyYNb/S9qVEiRLpfq9d+Vk4cuSIKVeunClRooTZv3//dV+fK9d75e+OjH5vxsbGGofDYQ4cOGC1tWnTxhQsWNDpd8CGDRuc5npWXtuBAwcaSaZNmzY3rBtA5nAqI3CfGjlypObPn+/0uPJ0HD8/P507dy7daV23qn379sqZM6f1vGrVqjLGpDtFrWrVqjp06JCSk5OttiuPPqQd8YuMjNQff/yhhISEG2576tSpqlWrlvLkyaO///7betSrV08pKSlaunSpJGnOnDlydXVV9+7drWVdXFz00ksv3fR+26lFixZydXXV999/b7X9/vvv2rZtm5555hmrzd3dXTlyXP61n5KSohMnTsjLy0sPP/ywNmzYkC21TJ06Vb6+vqpfv77Te1CpUiV5eXlZp8ilHQmaPXu2Ll26lC3bvpqfn5+2bt2q3bt3Z9h/8uRJLVq0SK1atdKZM2esWk+cOKGoqCjt3r3bOr1rzpw5qlatmtPRgvz589/wdMLrWbdunY4dO6YXX3xRHh4eVnuTJk1UpkyZdKfHSXL6zF6LMUbTp09Xs2bNZIxxeh+ioqKUkJBgvd8uLi7WUZzU1FSdPHlSycnJqly5stNnYvr06XI4HOmOykpKd8fQevXqOR31CQsLk4+Pj/74448b1i5JHTt2dDqyVKtWLUmylt+0aZN2796tZ599VidOnLD27dy5c6pbt66WLl16w1NMK1asKC8vL+t3wrJly1S4cGG1b99eGzZs0Pnz52WM0a+//mptX7r8OahSpYrTKXteXl56/vnntX//fm3bts1pO9HR0dc8qvrnn38qMjJSly5d0tKlS1W0aNFMvT5Xu3L9586d099//63q1avLGON06mL79u11+PBhp9NUJ06cqFy5cqlly5aSbu61feGFF26qbgDpcSojcJ+qUqXKdW/+8eKLL2rKlClq1KiRChUqpAYNGqhVq1Zq2LDhLW23SJEiTs99fX0lScHBwenaU1NTlZCQYJ3etHz5cg0cOFArV67U+fPnncYnJCRY67qW3bt3a/PmzcqfP3+G/Wk3YDhw4IAKFCggLy8vp/6HH374BnuXvbLrFvj58uVT3bp1NWXKFA0dOlTS5dMYXV1d1aJFC2tc2jVCo0aN0r59+5SSkmL1ZeYUs8zYvXu3EhISFBAQkGF/2nsQGRmpli1bavDgwRo2bJjq1KmjJ598Us8++6zc3d2zpZYhQ4aoefPmKl26tMqXL6+GDRuqXbt2CgsLkyTt2bNHxhi99dZbeuutt65Zb6FChXTgwAFVrVo1Xf+tfGYOHDhwzXWUKVNGv/76q1Obq6urChcufMP1Hj9+XKdPn9aYMWM0ZsyYDMekvQ+SNGHCBH388cfasWOHU0i+8q6ue/fuVcGCBeXv73/D7V/9O0CS8uTJk+4aw8wunydPHkmylk8L2tc79TghIcFaLiMuLi6KiIjQsmXLJF0OZrVq1VLNmjWVkpKiVatWKTAwUCdPnnQKZtf6HKSd4n3gwAGVL1/eas/ozrhp2rVrJ1dXV23fvl1BQUHXHHcjBw8e1IABA/Tjjz+me42
v/INW/fr1VaBAAU2cOFF169ZVamqqvvvuOzVv3lze3t6Sbu61vd4+AsgaghnwgAoICNCmTZs0b948zZ07V3PnztW4cePUvn17TZgw4abX6+LikqV28/+vSdi7d6/q1q2rMmXK6JNPPlFwcLDc3Nw0Z84cDRs2LFM3WUhNTVX9+vX16quvZthfunTpTO7FrXN3d3e6+P5KaaHzyqMkt6p169bq2LGjNm3apPDwcE2ZMkV169a1rquRLl/k/9Zbb6lTp04aOnSo/P39lSNHDvXs2fOGr6/D4cjw+pErw510+T0ICAjQxIkTM1xPWmhO+x64VatW6aefftK8efPUqVMnffzxx1q1alW60JwZV9dSu3Zt7d27Vz/88IN++eUXffXVVxo2bJg+//xzdenSxdrnPn36KCoqKsN1ZuXrJW63K494Xk/afj333HPX/Ad2Wjj99ttv1aFDBz355JPq27evAgIC5OLiotjYWO3du/em6rzRXL/V5dP278MPP3S6Bu5Kmfn81KxZU++8844uXLigZcuW6Y033pCfn5/Kly+vZcuWWdeuXhnMsup61yC2aNFC33zzjUaMGKHY2NibWn9KSop1fdprr72mMmXKyNPTU3/99Zc6dOjgNK9dXFz07LPP6ssvv9SoUaO0fPlyHT582Onurzfz2mbmOksAmUMwAx5gbm5uatasmZo1a6bU1FS9+OKL+uKLL/TWW2+pVKlSd/RLjX/66SclJSXpxx9/dPqLeUZ3h7tWXSVLltTZs2dv+D1RRYsW1cKFC3X27Fmnf2Rk9W5oN9rGtdaX1n6zpy5l5Mknn1S3bt2s0xl37dql/v37O42ZNm2aHnvsMY0dO9ap/fTp004BLiN58uTJ8FS0tKM+aUqWLKkFCxaoRo0amfoHW7Vq1VStWjW98847mjRpktq2bavJkyerS5cu163l6jtoXrx4UUeOHEk31t/fXx07dlTHjh119uxZ1a5dW4MGDVKXLl1UokQJSVLOnDkz9ZnJ6JTIW/nMpL3/O3fu1OOPP55uvTf7+cifP7+8vb2VkpJyw/2aNm2aSpQooRkzZjjNq6tPWSxZsqTmzZunkydPZuqo2e2Udpqkj4/PDffver/DatWqpYsXL+q7777TX3/9ZQWw2rVrW8GsdOnSVkCTrj2vd+zYYfVn1ksvvaRSpUppwIAB8vX1Vb9+/TK9bJotW7Zo165dmjBhgtq3b2+1X+sU9fbt2+vjjz/WTz/9pLlz5yp//vxOf5TIymsLIPtxjRnwgDpx4oTT8xw5clh/RU+7jX3a99HcidvIp/2V/Mq/qickJGjcuHHpxnp6emZYU6tWrbRy5UrNmzcvXd/p06et69kaN26s5ORkp1vxp6Sk6LPPPrvV3bA0btxYq1at0vr169PVMXHiRIWHh9/S6UtX8/PzU1RUlKZMmaLJkyfLzc1NTz75pNMYFxeXdEctpk6dmu7W7BkpWbKkduzYoePHj1ttv/32W7pbaLdq1UopKSnWKZVXSk5Ott63U6dOpasl7S/0V3+NQka1pF0blGbMmDHpjphd/Rn38vJSqVKlrPUHBASoTp06+uKLLzIMdVfua9r7uWbNGqf+ax0ZzIzKlSsrICBAn3/+udM+z507V9u3b1eTJk1uar0uLi5q2bKlpk+frt9//z1d/5X7ldG8W716tVauXOm0TMuWLWWMyfBrGTJ7JCy7VKpUSSVLltRHH32ks2fPpuu/cv+u9zusatWqypkzp95//335+/urXLlyki4HtlWrVikuLi7d0bLGjRtrzZo1Tq/PuXPnNGbMGBUrVkwhISFZ2pe33npLffr0Uf/+/dN9NUhmZPT+GWOsrz25WlhYmMLCwvTVV19p+vTpat26tVxd//c3+qy8tgCyH0fMgPvU3Llzrb/iXql69eoqUaKEunTpopMnT+rxxx9X4cKFdeDAAX322WcKDw+3rpcIDw+Xi4uL3n//fSUkJMjd3d36nrHs1qBBA+sIXrdu3XT27Fl9+eWXCggISPeP5kqVKmn06NF6++23VapUKQUEBOjxxx9X37599eOPP6pp06bq0KGDKlWqpHPnzmnLli2aNm2a9u/fr3z58qlZs2aqUaOG+vXrp/379yskJEQzZszI1A1GrjR9+vQMX+Po6Gj169dPU6dOVe3atdWtWzeVKVNGhw8f1vjx43XkyJEMA+eteuaZZ/Tcc89p1KhRioqKSvdlxk2bNtWQIUPUsWNHVa9eXVu2bNHEiROtI0fX06lTJ33yySeKiopS586ddezYMX3++ecqV66c03czRUZGqlu3boqNjdWmTZvUoEED5cyZU7t379bUqVM1YsQIPfXUU5owYYJGjRqlf/3rXypZsqTOnDmjL7/8Uj4+PmrcuPF1a+nSpYteeOEFtWzZUvXr19dvv/2mefPmpTvqFxISojp16qhSpUry9/fXunXrNG3aNPXo0cMaM3LkSNWsWVOhoaHq2rWrSpQooaNHj2rlypX6888/re94e/XVV/Wf//xHDRs21CuvvGLdLr9o0aLavHnzDV+/jKSFgo4dOyoyMlJt2rSxbpdfrFgx9erV66bWK12+5fzixYtVtWpVde3aVSEhITp58qQ2bNigBQsW6OTJk5IufyZmzJihf/3rX2rSpIn27dunzz//XCEhIU7/MH/sscfUrl07ffrpp9q9e7caNmyo1NRULVu2TI899pjTa3q75ciRQ1999ZUaNWqkcuXKqWPHjipUqJD++usvLV68WD4+Pvrpp58kXf5dIV3+yoLWrVsrZ86catasmTw9PZU7d25VqlRJq1atsr7DTLp8xOzcuXM6d+5cumDWr18/fffdd2rUqJFefvll+fv7a8KECdq3b5+mT5+eqVNNr/bhhx8qISFBMTEx8vb2ztIXy5cpU0YlS5ZUnz599Ndff8nHx0fTp0+/7vV87du3V58+fSQp3bay8toCuA3u9G0gAdxe17tdvq64LfK0adNMgwYNTEBAgHFzczNFihQx3bp1M0eOHHFa35dffmlKlChhXFxcnG7TfK3b5V99C/mMbhNuzP9utXz8+HGr7ccffzRhYWHGw8PDFCtWzLz//vvm66+/Tne76/j4eNOkSRPj7e1tJDnVcebMGdO/f39TqlQp4+bmZvLly2eqV69uPvroI6fb+J84ccK0a9fO+Pj4GF9fX9OuXTuzcePGLN0u/1qPtFtp//nnn6ZLly6mUKFCxtXV1fj7+5umTZuaVatWXXf9Wb1dfprExESTK1cuI8l8++236fovXLhg/u///s8UKFDA5MqVy9SoUcOsXLky3XuZ0e3yjTHm22+/NSVKlDBubm4mPDzczJs3L93t8tOMGTPGVKpUyeTKlct4e3ub0NBQ8+qrr5rDhw8bYy7fprtNmzamSJEixt3d3QQEBJimTZuadevW3XA/U1JSzGuvvWby5ctncufObaKiosyePXvS3S7/7bffNlWqVDF+fn4mV65cpkyZMuadd95x+hwYY8zevXtN+/btTVBQkMmZM6cpVKiQadq0qZk2bZrTuM2bN5vIyEjj4eFhChUqZI
YOHWrGjh1707fLT/P999+bihUrGnd3d+Pv72/atm1r/vzzT6cx0dHRxtPT84avzZWOHj1qYmJiTHBwsMmZM6cJCgoydevWNWPGjLHGpKammnfffdcULVrUuLu7m4oVK5rZs2dn+L4mJyebDz/80JQpU8a4ubmZ/Pnzm0aNGpn169dbY3SNr4m4+r3JyLV+h1zr87hx40bTokULkzdvXuPu7m6KFi1qWrVqZRYuXOg0bujQoaZQoUImR44c6d6rvn37Gknm/fffd1qmVKlSRpLZu3dvujr37t1rnnrqKePn52c8PDxMlSpVzOzZszO1L8Zk/FlISUkxbdq0Ma6urmbWrFk3fI2uvF3+tm3bTL169YyXl5fJly+f6dq1q/UVBRn9Ljty5IhxcXExpUuXvuZ2MvPaZvQ7HMCtcRjDtwECAAA8CP7++28VKFBAAwYMuObdSAHYg2vMAAAAHhDjx49XSkqK2rVrZ3cpAK7CNWYAAAD3uUWLFmnbtm1655139OSTT6pYsWJ2lwTgKpzKCAAAcJ+rU6eOVqxYoRo1aujbb79VoUKF7C4JwFUIZgAAAABgM64xAwAAAACbEcwAAAAAwGbc/ENSamqqDh8+LG9vb+sLJgEAAAA8eIwxOnPmjAoWLHhTXxx/swhmkg4fPqzg4GC7ywAAAABwlzh06JAKFy58x7ZHMJPk7e0t6fKL7+PjY3M1AAAAAOySmJio4OBgKyPcKQQzyTp90cfHh2AGAAAA4I5f4sTNPwAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbOZqdwFIzzHYYXcJtjIDjd0lAAAAAHcUR8wAAAAAwGZ3TTB777335HA41LNnT6vtwoULiomJUd68eeXl5aWWLVvq6NGjTssdPHhQTZo0Ue7cuRUQEKC+ffsqOTn5DlcPAAAAADfvrghma9eu1RdffKGwsDCn9l69eumnn37S1KlTFRcXp8OHD6tFixZWf0pKipo0aaKLFy9qxYoVmjBhgsaPH68BAwbc6V0AAAAAgJtmezA7e/as2rZtqy+//FJ58uSx2hMSEjR27Fh98sknevzxx1WpUiWNGzdOK1as0KpVqyRJv/zyi7Zt26Zvv/1W4eHhatSokYYOHaqRI0fq4sWL19xmUlKSEhMTnR4AAAAAYBfbg1lMTIyaNGmievXqObWvX79ely5dcmovU6aMihQpopUrV0qSVq5cqdDQUAUGBlpjoqKilJiYqK1bt15zm7GxsfL19bUewcHB2bxXAAAAAJB5tgazyZMna8OGDYqNjU3XFx8fLzc3N/n5+Tm1BwYGKj4+3hpzZShL60/ru5b+/fsrISHBehw6dOgW9wQAAAAAbp5tt8s/dOiQXnnlFc2fP18eHh53dNvu7u5yd3e/o9sEAAAAgGux7YjZ+vXrdezYMT3yyCNydXWVq6ur4uLi9Omnn8rV1VWBgYG6ePGiTp8+7bTc0aNHFRQUJEkKCgpKd5fGtOdpYwAAAADgbmdbMKtbt662bNmiTZs2WY/KlSurbdu21s85c+bUwoULrWV27typgwcPKiIiQpIUERGhLVu26NixY9aY+fPny8fHRyEhIXd8nwAAAADgZth2KqO3t7fKly/v1Obp6am8efNa7Z07d1bv3r3l7+8vHx8fvfTSS4qIiFC1atUkSQ0aNFBISIjatWunDz74QPHx8XrzzTcVExPDqYoAAAAA7hm2BbPMGDZsmHLkyKGWLVsqKSlJUVFRGjVqlNXv4uKi2bNnq3v37oqIiJCnp6eio6M1ZMgQG6sGAAAAgKxxGGOM3UXYLTExUb6+vkpISJCPj4/d5cgx2GF3CbYyAx/4jyQAAABsYlc2sP17zAAAAADgQUcwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm93V32MGANmBr6DgKyiQ/ZhXzCsA2YsjZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANrM1mI0ePVphYWHy8fGRj4+PIiIiNHfuXKu/Tp06cjgcTo8XXnjBaR0HDx5UkyZNlDt3bgUEBKhv375KTk6+07sCAAAAADfN1c6NFy5cWO+9954eeughGWM0YcIENW/eXBs3blS5cuUkSV27dtWQIUOsZXLnzm39nJKSoiZNmigoKEgrVqzQkSNH1L59e+XMmVPvvvvuHd8fAAAAALgZtgazZs2aOT1/5513NHr0aK1atcoKZrlz51ZQUFCGy//yyy/atm2bFixYoMDAQIWHh2vo0KF67bXXNGjQILm5uWW4XFJSkpKSkqzniYmJ2bRHAAAAAJB1d801ZikpKZo8ebLOnTuniIgIq33ixInKly+fypcvr/79++v8+fNW38qVKxUaGqrAwECrLSoqSomJidq6des1txUbGytfX1/rERwcfHt2CgAAAAAywdYjZpK0ZcsWRURE6MKFC/Ly8tLMmTMVEhIiSXr22WdVtGhRFSxYUJs3b9Zrr72mnTt3asaMGZKk+Ph4p1AmyXoeHx9/zW32799fvXv3tp4nJiYSzgAAAADYxvZg9vDDD2vTpk1KSEjQtGnTFB0drbi4OIWEhOj555+3xoWGhqpAgQKqW7eu9u7dq5IlS970Nt3d3eXu7p4d5QMAAADALbP9VEY3NzeVKlVKlSpVUmxsrCpUqKARI0ZkOLZq1aqSpD179kiSgoKCdPToUacxac+vdV0aAAAAANxtbA9mV0tNTXW6MceVNm3aJEkqUKCAJCkiIkJbtmzRsWPHrDHz58+Xj4+PdTokAAAAANztbD2VsX///mrUqJGKFCmiM2fOaNKkSVqyZInmzZunvXv3atKkSWrcuLHy5s2rzZs3q1evXqpdu7bCwsIkSQ0aNFBISIjatWunDz74QPHx8XrzzTcVExPDqYoAAAAA7hm2BrNjx46pffv2OnLkiHx9fRUWFqZ58+apfv36OnTokBYsWKDhw4fr3LlzCg4OVsuWLfXmm29ay7u4uGj27Nnq3r27IiIi5OnpqejoaKfvPQMAAACAu52twWzs2LHX7AsODlZcXNwN11G0aFHNmTMnO8sCAAAA7ijHYIfdJdjKDDR2l2C7u+4aMwAAAAB40BDMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAA
ACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGa2BrPRo0crLCxMPj4+8vHxUUREhObOnWv1X7hwQTExMcqbN6+8vLzUsmVLHT161GkdBw8eVJMmTZQ7d24FBASob9++Sk5OvtO7AgAAAAA3zdZgVrhwYb333ntav3691q1bp8cff1zNmzfX1q1bJUm9evXSTz/9pKlTpyouLk6HDx9WixYtrOVTUlLUpEkTXbx4UStWrNCECRM0fvx4DRgwwK5dAgAAAIAscxhjjN1FXMnf318ffvihnnrqKeXPn1+TJk3SU089JUnasWOHypYtq5UrV6patWqaO3eumjZtqsOHDyswMFCS9Pnnn+u1117T8ePH5ebmlqltJiYmytfXVwkJCfLx8blt+5ZZjsEOu0uwlRl4V30kcR9gTjGnkP2YV8wrZC/m1N0zp+zKBnfNNWYpKSmaPHmyzp07p4iICK1fv16XLl1SvXr1rDFlypRRkSJFtHLlSknSypUrFRoaaoUySYqKilJiYqJ11C0jSUlJSkxMdHoAAAAAgF1sD2ZbtmyRl5eX3N3d9cILL2jmzJkKCQlRfHy83Nzc5Ofn5zQ+MDBQ8fHxkqT4+HinUJbWn9Z3LbGxsfL19bUewcHB2btTAAAAAJAFtgezhx9+WJs2bdLq1avVvXt3RUdHa9u2bbd1m/3791dCQoL1OHTo0G3dHgAAAABcj6vdBbi5ualUqVKSpEqVKmnt2rUaMWKEnnnmGV28eFGnT592Omp29OhRBQUFSZKCgoK0Zs0ap/Wl3bUxbUxG3N3d5e7uns17AgAAAAA3x/YjZldLTU1VUlKSKlWqpJw5c2rhwoVW386dO3Xw4EFFRERIkiIiIrRlyxYdO3bMGjN//nz5+PgoJCTkjtcOAAAAADfD1iNm/fv3V6NGjVSkSBGdOXNGkyZN0pIlSzRv3jz5+vqqc+fO6t27t/z9/eXj46OXXnpJERERqlatmiSpQYMGCgkJUbt27fTBBx8oPj5eb775pmJiYjgiBgAAAOCeYWswO3bsmNq3b68jR47I19dXYWFhmjdvnurXry9JGjZsmHLkyKGWLVsqKSlJUVFRGjVqlLW8i4uLZs+ere7duysiIkKenp6Kjo7WkCFD7NolAAAAAMgyW4PZ2LFjr9vv4eGhkSNHauTIkdccU7RoUc2ZMye7SwMAAACAO+auu8YMAAAAAB40BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm7nezEK7d+/W4sWLdezYMaWmpjr1DRgwIFsKAwAAAIAHRZaD2Zdffqnu3bsrX758CgoKksPhsPocDgfBDAAAAACyKMvB7O2339Y777yj11577XbUAwAAAAAPnCxfY3bq1Ck9/fTTt6MWAAAAAHggZTmYPf300/rll19uRy0AAAAA8EDK8qmMpUqV0ltvvaVVq1YpNDRUOXPmdOp/+eWXs604AAAAAHgQZDmYjRkzRl5eXoqLi1NcXJxTn8PhIJgBAAAAQBZlOZjt27fvdtQBAAAAAA+sW/qCaWOMjDHZVQsAAAAAPJBuKph98803Cg0NVa5cuZQrVy6FhYXpP//5T3bXBgAAAAAPhCwHs08++UTdu3dX48aNNWXKFE2ZMkUNGzbUCy+8oGHDhmVpXbGxsXr00Ufl7e2tgIAAPfnkk9q5c6fTmDp16sjhcDg9XnjhBacxBw8eVJMmTZQ7d24FBASob9++Sk5OzuquAQAAAIAtsnyN2WeffabRo0erffv2VtsTTzyhcuXKadCgQerVq1em1xUXF6eYmBg9+uijSk5O1uuvv64GDRpo27Zt8vT0tMZ17dpVQ4YMsZ7nzp3b+jklJUVNmjRRUFCQVqxYoSNHjqh9+/bKmTOn3n333azuHgAAAADccVkOZkeOHFH16tXTtVevXl1HjhzJ0rp+/vlnp+fjx49XQECA1q9fr9q1a1vtuXPnVlBQUIbr+OWXX7Rt2zYtWLBAgYGBCg8P19ChQ/Xaa69p0KBBcnNzy1JNAAAAAHCnZflUxlKlSmnKlCnp2r///ns99NBDt1RMQkKCJMnf39+pfeLEicqXL5/Kly+v/v376/z581bfypUrFRoaqsDAQKstKipKiYmJ2rp1a4bbSUpKUmJiotMDAAAAAOyS5SNmgwcP1jPPPKOlS5eqRo0akqTly5dr4cKFGQa2zEpNTVXPnj1Vo0YNlS9f3mp/9tlnVbRoURUsWFCbN2/Wa6+9pp07d2rGjBmSpPj4eKdQJsl6Hh8fn+G2YmNjNXjw4JuuFQAAAACyU5aDWcuWLbV69WoNGzZMs2bNkiSVLVtWa9asUcWKFW+6kJiYGP3+++/69ddfndqff/556+fQ0FAVKFBAdevW1d69e1WyZMmb2lb//v3Vu3dv63liYqKCg4NvrnAAAAAAuEVZDmaSVKlSJX377bfZVkSPHj00e/ZsLV26VIULF77u2KpVq0qS9uzZo5IlSyooKEhr1qxxGnP06FFJuuZ1ae7u7nJ3d8+GygEAAADg1mXqGrMrr8G6+tqsW7lWyxijHj16aObMmVq0aJGKFy9+w2U2bdokSSpQoIAkKSIiQlu2bNGxY8esMfPnz5ePj49CQkKyVA8AAAAA2CFTR8zy5MmjI0eOKCAgQH5+fnI4HOnGGGPkcDiUkpKS6Y3HxMRo0qRJ+uGHH+Tt7W1dE+br66tcuXJp7969mjRpkho3bqy8efNq8+bN6tWrl2rXrq2wsDBJUoMGDRQSEqJ27drpgw8+UHx8vN58803FxMRwVAwAAADAPSFTwWzRokXWnRIXL16cbRsfPXq0pMtfIn2lcePGqUOHDnJzc9OCBQs0fPhwnTt3TsHBwWrZsqXefPNNa6yLi4tmz56t7t27KyIiQp6enoqOjnb63jMAAAAAuJtlKphFRkZaPxcvXlzBwcHpjpoZY3To0KEsbdwYc93+4OBgxcXF3XA9RYsW1Zw5c7K0bQAAAAC4W2T5e8yKFy+u48ePp2s/efJkpq4RAwAAAAA4y3IwS7uW7Gpnz56Vh4dHthQFAAAAAA+STN8uP+17vxwOh9566y3lzp3b6ktJSdHq1asVHh6e7QUCAAAAwP0u08Fs48aNki4fMduyZYvc3NysPjc3N1WoUEF9+vTJ/goBAAAA4D6X
6WCWdjfGjh07asSIEfLx8bltRQEAAADAgyTTwSzNuHHjbkcdAAAAAPDAynIwk6R169ZpypQpOnjwoC5evOjUN2PGjGwpDAAAAAAeFFm+K+PkyZNVvXp1bd++XTNnztSlS5e0detWLVq0SL6+vrejRgAAAAC4r2U5mL377rsaNmyYfvrpJ7m5uWnEiBHasWOHWrVqpSJFityOGgEAAADgvpblYLZ37141adJE0uW7MZ47d04Oh0O9evXSmDFjsr1AAAAAALjfZTmY5cmTR2fOnJEkFSpUSL///rsk6fTp0zp//nz2VgcAAAAAD4As3/yjdu3amj9/vkJDQ/X000/rlVde0aJFizR//nzVrVv3dtQIAAAAAPe1LAezf//737pw4YIk6Y033lDOnDm1YsUKtWzZUm+++Wa2FwgAAAAA97ssBzN/f3/r5xw5cqhfv37ZWhAAAAAAPGiyfI3Zhg0btGXLFuv5Dz/8oCeffFKvv/56uu80AwAAAADcWJaDWbdu3bRr1y5J0h9//KFnnnlGuXPn1tSpU/Xqq69me4EAAAAAcL/LcjDbtWuXwsPDJUlTp05VZGSkJk2apPHjx2v69OnZXR8AAAAA3PeyHMyMMUpNTZUkLViwQI0bN5YkBQcH6++//87e6gAAAADgAZDlYFa5cmW9/fbb+s9//qO4uDjry6b37dunwMDAbC8QAAAAAO53WQ5mw4cP14YNG9SjRw+98cYbKlWqlCRp2rRpql69erYXCAAAAAD3uyzfLj8sLMzproxpPvzwQ7m4uGRLUQAAAADwIMlyMLsWDw+P7FoVAAAAADxQMhXM/P39tWvXLuXLl0958uSRw+G45tiTJ09mW3EAAAAA8CDIVDAbNmyYvL29JV2+xgwAAAAAkH0yFcyio6Mz/BkAAAAAcOsyFcwSExMzvUIfH5+bLgYAAAAAHkSZCmZ+fn7Xva5MuvzF0w6HQykpKdlSGAAAAAA8KDIVzBYvXny76wAAAACAB1amgllkZOTtrgMAAAAAHliZCmabN29W+fLllSNHDm3evPm6Y8PCwrKlMAAAAAB4UGQqmIWHhys+Pl4BAQEKDw+Xw+GQMSbdOK4xAwAAAICsy1Qw27dvn/Lnz2/9DAAAAADIPpkKZkWLFrV+PnDggKpXry5XV+dFk5OTtWLFCqexAAAAAIAby5HVBR577DGdPHkyXXtCQoIee+yxbCkKAAAAAB4kWQ5mad9XdrUTJ07I09MzS+uKjY3Vo48+Km9vbwUEBOjJJ5/Uzp07ncZcuHBBMTExyps3r7y8vNSyZUsdPXrUaczBgwfVpEkT5c6dWwEBAerbt6+Sk5OzumsAAAAAYItMncooSS1atJB0+QYfHTp0kLu7u9WXkpKizZs3q3r16lnaeFxcnGJiYvToo48qOTlZr7/+uho0aKBt27ZZIa9Xr17673//q6lTp8rX11c9evRQixYttHz5cmvbTZo0UVBQkFasWKEjR46offv2ypkzp959990s1QMAAAAAdsh0MPP19ZV0+YiZt7e3cuXKZfW5ubmpWrVq6tq1a5Y2/vPPPzs9Hz9+vAICArR+/XrVrl1bCQkJGjt2rCZNmqTHH39ckjRu3DiVLVtWq1atUrVq1fTLL79o27ZtWrBggQIDAxUeHq6hQ4fqtdde06BBg+Tm5palmgAAAADgTst0MBs3bpwkqVixYurTp0+WT1vMjISEBEmSv7+/JGn9+vW6dOmS6tWrZ40pU6aMihQpopUrV6patWpauXKlQkNDFRgYaI2JiopS9+7dtXXrVlWsWDHddpKSkpSUlGQ9T0xMzPZ9AQAAAIDMyvI1ZgMHDrwtoSw1NVU9e/ZUjRo1VL58eUlSfHy83Nzc5Ofn5zQ2MDBQ8fHx1pgrQ1laf1pfRmJjY+Xr62s9goODs3lvAAAAACDzMh3M8uTJI39//3SP4sWLKyoqSvPnz7+lQmJiYvT7779r8uTJt7SezOjfv78SEhKsx6FDh277NgEAAADgWjJ9KuPw4cMzbD99+rTWr1+vpk2batq0aWrWrFmWi+jRo4dmz56tpUuXqnDhwlZ7UFCQLl68qNOnTzsdNTt69KiCgoKsMWvWrHFaX9pdG9PGXM3d3d3p5iUAAAAAYKdMB7Po6Ojr9oeHhys2NjZLwcwYo5deekkzZ87UkiVLVLx4caf+SpUqKWfOnFq4cKFatmwpSdq5c6cOHjyoiIgISVJERITeeecdHTt2TAEBAZKk+fPny8fHRyEhIZmuBQAAAADskuVrzK6ladOm2rFjR5aWiYmJ0bfffqtJkybJ29tb8fHxio+P1z///CPp8p0gO3furN69e2vx4sVav369OnbsqIiICFWrVk2S1KBBA4WEhKhdu3b67bffNG/ePL355puKiYnhqBgAAACAe0Kmj5jdSFJSUpZvTT969GhJUp06dZzax40bpw4dOkiShg0bphw5cqhly5ZKSkpSVFSURo0aZY11cXHR7Nmz1b17d0VERMjT01PR0dEaMmTILe0PAAAAANwp2RbMxo4dq/Dw8CwtY4y54RgPDw+NHDlSI0eOvOaYokWLas6cOVnaNgAAAADcLTIdzHr37p1he0JCgjZs2KBdu3Zp6dKl2VYYAAAAADwoMh3MNm7cmGG7j4+P6tevrxkzZqS7eQcAAAAA4MYyHcwWL158O+sAAAAAgAdWtt2VEQAAAABwcwhmAAAAAGAzghkAAAAA2IxgBgAAAAA2y3Qw69Spk86cOXM7awEAAACAB1Kmg9mECRP0zz//3M5aAAAAAOCBlOlgZoy5nXUAAAAAwAMr099jJklnzpyRh4fHdcf4+PjcUkEAAAAA8KDJUjArXbr0NfuMMXI4HEpJSbnlogAAAADgQZKlYDZt2jT5+/vfrloAAAAA4IGUpWBWo0YNBQQE3K5aAAAAAOCBxPeYAQAAAIDNMh3MihYtKhcXl9tZCwAAAAA8kDJ9KuO+fftuZx0AAAAA8MDKdDDLkyePHA5HunZfX1+VLl1affr0Uf369bO1OAAAAAB4EGQ6mA0bNizDYHb69GmtX79eTZs21bRp09SsWbNsLRAAAAAA7neZDmYdOnS4bn94eLhiY2MJZgAAAACQRdl2V8amTZtqx44d2bU6AAAAAHhgZFswS0pKkpubW3atDgAAAAAeGNkWzMaOHavw8PDsWh0AAAAAPDAyfY1Z7969M2xPSEjQhg0btGvXLi1dujTbCgMAAACAB0Wmg9nGjRszbPfx8VH9+vU1Y8YMFS9ePNsKAwAAAIAHRaaD2eLFi6/b/+eff+r555/XmDFjbrkoAAAAAHiQZNs1ZidOnNDYsWOza3UAAAAA8MDItmAGAAAAALg5BDMAAAAAsBnBDAAAAABslumbf7Ro0eK6/adPn77VWgAAAADggZTpYObr63vD/vbt299yQQAAAADwoMl0MBs3btztrAMAAAAAHlhcYwYAAAAANrM1mC1dulTNmjVTwYIF5XA4NGvWLKf+Dh06yOFwOD0aNmzoNObkyZNq27atfHx85Ofnp86dO+vs2bN3cC8AAAAA4NbYGsz
OnTunChUqaOTIkdcc07BhQx05csR6fPfdd079bdu21datWzV//nzNnj1bS5cu1fPPP3+7SwcAAACAbJPpa8xuh0aNGqlRo0bXHePu7q6goKAM+7Zv366ff/5Za9euVeXKlSVJn332mRo3bqyPPvpIBQsWzPaaAQAAACC73fXXmC1ZskQBAQF6+OGH1b17d504ccLqW7lypfz8/KxQJkn16tVTjhw5tHr16muuMykpSYmJiU4PAAAAALDLXR3MGjZsqG+++UYLFy7U+++/r7i4ODVq1EgpKSmSpPj4eAUEBDgt4+rqKn9/f8XHx19zvbGxsfL19bUewcHBt3U/AAAAAOB6bD2V8UZat25t/RwaGqqwsDCVLFlSS5YsUd26dW96vf3791fv3r2t54mJiYQzAAAAALa5q4+YXa1EiRLKly+f9uzZI0kKCgrSsWPHnMYkJyfr5MmT17wuTbp83ZqPj4/TAwAAAADsck8Fsz///FMnTpxQgQIFJEkRERE6ffq01q9fb41ZtGiRUlNTVbVqVbvKBAAAAIAssfVUxrNnz1pHvyRp37592rRpk/z9/eXv76/BgwerZcuWCgoK0t69e/Xqq6+qVKlSioqKkiSVLVtWDRs2VNeuXfX555/r0qVL6tGjh1q3bs0dGQEAAADcM2w9YrZu3TpVrFhRFStWlCT17t1bFStW1IABA+Ti4qLNmzfriSeeUOnSpdW5c2dVqlRJy5Ytk7u7u7WOiRMnqkyZMqpbt64aN26smjVrasyYMXbtEgAAAABkma1HzOrUqSNjzDX7582bd8N1+Pv7a9KkSdlZFgAAAADcUffUNWYAAAAAcD8imAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDNbg9nSpUvVrFkzFSxYUA6HQ7NmzXLqN8ZowIABKlCggHLlyqV69epp9+7dTmNOnjyptm3bysfHR35+furcubPOnj17B/cCAAAAAG6NrcHs3LlzqlChgkaOHJlh/wcffKBPP/1Un3/+uVavXi1PT09FRUXpwoUL1pi2bdtq69atmj9/vmbPnq2lS5fq+eefv1O7AAAAAAC3zNXOjTdq1EiNGjXKsM8Yo+HDh+vNN99U8+bNJUnffPONAgMDNWvWLLVu3Vrbt2/Xzz//rLVr16py5cqSpM8++0yNGzfWRx99pIIFC2a47qSkJCUlJVnPExMTs3nPAAAAACDz7tprzPbt26f4+HjVq1fPavP19VXVqlW1cuVKSdLKlSvl5+dnhTJJqlevnnLkyKHVq1dfc92xsbHy9fW1HsHBwbdvRwAAAADgBu7aYBYfHy9JCgwMdGoPDAy0+uLj4xUQEODU7+rqKn9/f2tMRvr376+EhATrcejQoWyuHgAAAAAyz9ZTGe3i7u4ud3d3u8sAAAAAAEl38RGzoKAgSdLRo0ed2o8ePWr1BQUF6dixY079ycnJOnnypDUGAAAAAO52d20wK168uIKCgrRw4UKrLTExUatXr1ZERIQkKSIiQqdPn9b69eutMYsWLVJqaqqqVq16x2sGAAAAgJth66mMZ8+e1Z49e6zn+/bt06ZNm+Tv768iRYqoZ8+eevvtt/XQQw+pePHieuutt1SwYEE9+eSTkqSyZcuqYcOG6tq1qz7//HNdunRJPXr0UOvWra95R0YAAAAAuNvYGszWrVunxx57zHreu3dvSVJ0dLTGjx+vV199VefOndPzzz+v06dPq2bNmvr555/l4eFhLTNx4kT16NFDdevWVY4cOdSyZUt9+umnd3xfAAAAAOBm2RrM6tSpI2PMNfsdDoeGDBmiIUOGXHOMv7+/Jk2adDvKAwAAAIA74q69xgwAAAAAHhQEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALDZXR3MBg0aJIfD4fQoU6aM1X/hwgXFxMQob9688vLyUsuWLXX06FEbKwYAAACArLurg5kklStXTkeOHLEev/76q9XXq1cv/fTTT5o6dari4uJ0+PBhtWjRwsZqAQAAACDrXO0u4EZcXV0VFBSUrj0hIUFjx47VpEmT9Pjjj0uSxo0bp7Jly2rVqlWqVq3aNdeZlJSkpKQk63liYmL2Fw4AAAAAmXTXHzHbvXu3ChYsqBIlSqht27Y6ePCgJGn9+vW6dOmS6tWrZ40tU6aMihQpopUrV153nbGxsfL19bUewcHBt3UfAAAAAOB67upgVrVqVY0fP14///yzRo8erX379qlWrVo6c+aM4uPj5ebmJj8/P6dlAgMDFR8ff9319u/fXwkJCdbj0KFDt3EvAAAAAOD67upTGRs1amT9HBYWpqpVq6po0aKaMmWKcuXKddPrdXd3l7u7e3aUCAAAAAC37K4+YnY1Pz8/lS5dWnv27FFQUJAuXryo06dPO405evRohtekAQAAAMDd6p4KZmfPntXevXtVoEABVapUSTlz5tTChQut/p07d+rgwYOKiIiwsUoAAAAAyJq7+lTGPn36qFmzZipatKgOHz6sgQMHysXFRW3atJGvr686d+6s3r17y9/fXz4+PnrppZcUERFx3TsyAgAAAMDd5q4OZn/++afatGmjEydOKH/+/KpZs6ZWrVql/PnzS5KGDRumHDlyqGXLlkpKSlJUVJRGjRplc9UAAAAAkDV3dTCbPHnydfs9PDw0cuRIjRw58g5VBAAAAADZ7566xgwAAAAA7kcEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAA
AAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALDZfRPMRo4cqWLFisnDw0NVq1bVmjVr7C4JAAAAADLlvghm33//vXr37q2BAwdqw4YNqlChgqKionTs2DG7SwMAAACAG7ovgtknn3yirl27qmPHjgoJCdHnn3+u3Llz6+uvv7a7NAAAAAC4IVe7C7hVFy9e1Pr169W/f3+rLUeOHKpXr55WrlyZ4TJJSUlKSkqynickJEiSEhMTb2+xmXXB7gLsdde8D7h/MKfsLgH3I+aV3SXgfsOcsrsES1otxpg7ut17Ppj9/fffSklJUWBgoFN7YGCgduzYkeEysbGxGjx4cLr24ODg21Ijssb3PV+7SwDuK8wpIPsxr4DsdTfOqTNnzsjX987Vdc8Hs5vRv39/9e7d23qempqqkydPKm/evHI4HDZWZr/ExEQFBwfr0KFD8vHxsbsc4J7HnAKyH/MKyF7MKWfGGJ05c0YFCxa8o9u954NZvnz55OLioqNHjzq1Hz16VEFBQRku4+7uLnd3d6c2Pz+/21XiPcnHx4eJCWQj5hSQ/ZhXQPZiTv3PnTxSluaev/mHm5ubKlWqpIULF1ptqampWrhwoSIiImysDAAAAAAy554/YiZJvXv3VnR0tCpXrqwqVapo+PDhOnfunDp27Gh3aQAAAABwQ/dFMHvmmWd0/PhxDRgwQPHx8QoPD9fPP/+c7oYguDF3d3cNHDgw3ameAG4OcwrIfswrIHsxp+4ODnOn7wMJAAAAAHByz19jBgAAAAD3OoIZAAAAANiMYAYAAAAANiOY3aRixYpp+PDhdpdxz9m/f78cDoc2bdp027fFe3Tv4T27OcwrXAvv181hTuF6eM9uDvMqE8w9LDo62kgy3bp1S9f34osvGkkmOjo6U+vat2+fkWQ2btyYqfHHjh0z586dy9TYpk2bmqioqAz7li5daiSZ3377LVPrupbFixcbSebUqVO3tJ6rnT9/3uTJk8fkzZvXXLhwIUvLRkdHm+bNmzu1JScnmyNHjphLly5lW43jxo0zvr6+6dqz8h5ll3//+9+maNGixt3d3VSpUsWsXr36jm4/OzCv/od55Zuu/U7Pq7i4ONO0aVNToEABI8nMnDnzjm07uzCn/oc55Zuu/U7PqXfffddUrlzZeHl5mfz585vmzZubHTt23LHtZxfm1f8wr3zTtd/peTVq1CgTGhpqvL29jbe3t6lWrZqZM2dOltdzzx8xCw4O1uTJk/XPP/9YbRcuXNCkSZNUpEiRbN/exYsXJUn58+dX7ty5M7VM586dNX/+fP3555/p+saNG6fKlSsrLCwsW+u8WcYYJScnW8+nT5+ucuXKqUyZMpo1a9Ytr9/FxUVBQUFydb3939SQlfcoO3z//ffq3bu3Bg4cqA0bNqhChQqKiorSsWPH7lgN2YV5lb2YVzfv3LlzqlChgkaOHHnHtnk7MKeyF3Pq5sXFxSkmJkarVq3S/PnzdenSJTVo0EDnzp27YzVkF+ZV9mJe3bzChQvrvffe0/r167Vu3To9/vjjat68ubZu3Zq1FWVzYLyj0tJ4+fLlzbfffmu1T5w40YSFhZnmzZtbfy2ZO3euqVGjhvH19TX+/v6mSZMmZs+ePdYykpwekZGRTtt4++23TYECBUyxYsWMMcYULVrUDBs2zBhz+S8VOXPmNEuXLrXW9/7775v8+fOb+Ph4c+nSJRMYGGiGDh3qVP+ZM2eMl5eXGT16tDHGmGXLlpmaNWsaDw8PU7hwYfPSSy+Zs2fPWuMvXLhgXn31VVO4cGHj5uZmSpYsab766ivrLz1XPtL2+8KFC+all14y+fPnN+7u7qZGjRpmzZo11jrT/soyZ84c88gjj5icOXOaxYsXW/116tQxn3/+uRk9erSpX79+uvfg999/N02aNDHe3t7Gy8vL1KxZ0+zZs8cMHDgwXU2LFy92+qtUSkqKKVSokBk1apTTOjds2GAcDofZv3+/McaYjz/+2JQvX97kzp3bFC5c2HTv3t2cOXPGqf4rHwMHDkz3HhljzIEDB8wTTzxhPD09jbe3t3n66adNfHy81T9w4EBToUIF880335iiRYsaHx8f88wzz5jExMR0+52RKlWqmJiYGOt5SkqKKViwoImNjc3U8ncL5hXz6m6aV1fSPXzEjDnFnLob55Qxl48sSDJxcXE3tbxdmFfMq7t5XhljTJ48ecxXX32VpWXui2D2ySefmLp161rtdevWNcOGDXOalNOmTTPTp083u3fvNhs3bjTNmjUzoaGhJiUlxRhjzJo1a4wks2DBAnPkyBFz4sQJaxteXl6mXbt25vfffze///67MSb9G963b19TtGhRc/r0abNhwwbj5uZmfvjhB6f+kiVLmtTUVKvt66+/Nrly5TKnT582e/bsMZ6enmbYsGFm165dZvny5aZixYqmQ4cO1vhWrVqZ4OBgM2PGDLN3716zYMECM3nyZJOcnGymT59uJJmdO3eaI0eOmNOnTxtjjHn55ZdNwYIFzZw5c8zWrVtNdHS0yZMnj7V/aR/qsLAw88svv5g9e/ZYfXv27DHu7u7m5MmT5sSJE8bDw8OaKMYY8+effxp/f3/TokULs3btWrNz507z9ddfmx07dpgzZ86YVq1amYYNG5ojR46YI0eOmKSkpHSnC/Tp08fUrFnT6X39v//7P6e2YcOGmUWLFpl9+/aZhQsXmocffth0797dGGNMUlKSGT58uPHx8bG2kzZhr3yPUlJSTHh4uKlZs6ZZt26dWbVqlalUqZL1y9eYy5PSy8vLtGjRwmzZssUsXbrUBAUFmddff/2an8E0SUlJxsXFJd0/Gtu3b2+eeOKJGy5/N2FeMa/ulnl1tXs9mDGnmFN325wyxpjdu3cbSWbLli03tbxdmFfMq7t1XiUnJ5vvvvvOuLm5ma1bt2Zp2fsimB07dsy4u7ub/fv3m/379xsPDw9z/Phxp0l5tePHjzv9IrrW+cXR0dEmMDDQJCUlObVfPSmTkpJMeHi4adWqlQkJCTFdu3Z1Gr99+3brLwZpatWqZZ577jljjDGdO3c2zz//vNMyy5YtMzly5DD//POP2blzp5Fk5s+fn+H+ZHR+8dmzZ03OnDnNxIkTrbaLFy+aggULmg8++MBpuVmzZqVb5+uvv26efPJJ63nz5s2tv0QYY0z//v1N8eLFzcWLFzOsKaPzi69+nTdu3GgcDoc5cOCAMcZYf0FJ+wtSRqZOnWry5s1rPb/W+cVXvke//PKLcXFxMQcPHrT6t27daiRZfz0aOHCgyZ07t9NfR/r27WuqVq16zVrS/PXXX0aSWbFihVN73759TZUqVW64/N2EefU/zCvfdOPu5Ly62r0ezJhTzKm7bU6lpKSYJk2amBo1amR5Wbsxr/6HeeWbbpwd82rz5s3G09PTuLi4GF9fX/Pf//4308umueevMZMun0fapEkTjR8/XuPGjVOTJk2UL18+pzG7d+9WmzZtVKJECfn4+
KhYsWKSpIMHD95w/aGhoXJzc7vuGDc3N02cOFHTp0/XhQsXNGzYMKf+MmXKqHr16vr6668lSXv27NGyZcvUuXNnSdJvv/2m8ePHy8vLy3pERUUpNTVV+/bt06ZNm+Ti4qLIyMjMvizau3evLl26pBo1alhtOXPmVJUqVbR9+3ansZUrV3Z6npKSogkTJui5556z2p577jmNHz9eqampkqRNmzapVq1aypkzZ6Zrulp4eLjKli2rSZMmSbp87vuxY8f09NNPW2MWLFigunXrqlChQvL29la7du104sQJnT9/PtPb2b59u4KDgxUcHGy1hYSEyM/Pz+m1KFasmLy9va3nBQoUuCevEcsOzKuMMa/+h3mVNcypjDGn/udOz6mYmBj9/vvvmjx5cpaXvVswrzLGvPqfOzWvHn74YW3atEmrV69W9+7dFR0drW3btmV6eek+ul1+p06dNH78eE2YMEGdOnVK19+sWTOdPHlSX375pVavXq3Vq1dL+t+FnNfj6emZqRpWrFghSTp58qROnjyZrr9z586aPn26zpw5o3HjxqlkyZLWJDt79qy6deumTZs2WY/ffvtNu3fvVsmSJZUrV65M1XCzrt7HefPm6a+//tIzzzwjV1dXubq6qnXr1jpw4IAWLlwoSdlWU9u2ba1JOWnSJDVs2FB58+aVdPnWqk2bNlVYWJimT5+u9evXWzcByMx7l1VX/4JxOBzWL6HryZcvn1xcXHT06FGn9qNHjyooKChba7yTmFe3hnl12c3Oq/sRc+rWMKcuy4451aNHD82ePVuLFy9W4cKFs7O8O455dWuYV5fd6rxyc3NTqVKlVKlSJcXGxqpChQoaMWJElmq4b4JZw4YNdfHiRV26dElRUVFOfSdOnNDOnTv15ptvqm7duipbtqxOnTrlNCbtryEpKSk3tf29e/eqV69e+vLLL1W1alVFR0enezNbtWqlHDlyaNKkSfrmm2/UqVMnORwOSdIjjzyibdu2qVSpUukebm5uCg0NVWpqquLi4jLcfkb1lyxZUm5ublq+fLnVdunSJa1du1YhISHX3Z+xY8eqdevWTr8kNm3apNatW2vs2LGSpLCwMC1btkyXLl26Zk2ZeT2fffZZ/f7771q/fr2mTZumtm3bWn3r169XamqqPv74Y1WrVk2lS5fW4cOHs7ydsmXL6tChQzp06JDVtm3bNp0+ffqGr0VmuLm5qVKlStYvLElKTU3VwoULFRERccvrtwvzinl1Pbd7Xt2PmFPMqeu5E3PKGKMePXpo5syZWrRokYoXL54t67UT84p5dT12/b8qNTVVSUlJWVsoyyc/3kWuPn81ISHBJCQkWM/Tzi9OSUkxefPmNc8995zZvXu3WbhwoXn00Uedrle4dOmSyZUrl3n77bdNfHy8deFkRufIGuN87mpycrKpVq2aadmypTHGmMOHD5u8efNa5/BeqXPnziZPnjzGxcXF/PXXX1b7b7/9ZnLlymViYmLMxo0bza5du8ysWbOc7vLXoUMHExwcbGbOnGn++OMPs3jxYvP9998bYy5fhOlwOMz48ePNsWPHrIsfX3nlFVOwYEEzd+5cpws/T548aYzJ+LzkY8eOmZw5c5q5c+emq3/OnDnG3d3dnDhxwvz9998mb9681oWfu3btMt988431fSjvvPOOKVKkiNmxY4c5fvy4uXjx4jXP465Ro4apUKGC8fb2NufPn7faN23aZCSZ4cOHm71795pvvvnGFCpUyKnm5cuXWxftHj9+3Preiivfo9TUVBMeHm5q1apl1q9fb1avXp3hhZ8VKlRwqmvYsGGmaNGi6V6HjEyePNm4u7ub8ePHm23btpnnn3/e+Pn5Od31517AvGJeGXP3zKszZ86YjRs3mo0bNxpJ5pNPPjEbN260rkm4FzCnmFPG3D1zqnv37sbX19csWbLEumHCkSNHnPbnXsC8Yl4Zc/fMq379+pm4uDizb98+s3nzZtOvXz/jcDjML7/8kqnl09xXwexqV174OX/+fFO2bFnj7u5uwsLCzJIlS9JdSP7ll1+a4OBgkyNHjnS3Sr3alW/44MGDTYECBczff/9t9U+fPt24ubmZTZs2OS23YsUKI8k0btw43TrXrFlj6tevb7y8vIynp6cJCwsz77zzjtX/zz//mF69epkCBQoYNzc3U6pUKfP1119b/UOGDDFBQUHG4XBY+/3PP/+Yl156yeTLl++6t0q9clJ+9NFHxs/PL8MLOpOSkoyfn58ZMWKEMebyL5MGDRqY3LlzG29vb1OrVi2zd+9eY8zlyZ22P8rgVqlXGjVqlJFk2rdvn26bn3zyiSlQoIDJlSuXiYqKMt988026ml944QWTN2/ebLlV6pWyMimNMeazzz4zRYoUMW5ubqZKlSpm1apVmV72bsG8Yl6luRvmVUa3Q5Yy/8WxdwPmFHMqzd0wpzKaT5LMuHHjMrX83YJ5xbxKczfMq06dOpmiRYsaNzc3kz9/flO3bt0shzJjjHEYY0zWjrEBAAAAALLTfXONGQAAAADcqwhmQCYcPHjQ6Ta2Vz8yc8tdAM6YV0D2Yk4B2e9OzitOZQQyITk5Wfv3779mf7FixeTq6nrnCgLuA8wrIHsxp4DsdyfnFcEMAAAAAGzGqYwAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAIAHWp06ddSzZ0+7ywAAPOAIZgCAm9KhQwc5HA699957Tu2zZs2Sw+HI0rqKFSum4cOHZ2N1t8/+/fvlcDi0adMmu0sBANxHCGYAgJvm4eGh999/X6dOnbK7lCy7ePGi3SVkq0uXLtldAgDgFhDMAAA3rV69egoKClJsbOx1x/3666+qVauWcuXKpeDgYL388ss6d+6cpMunEh44cEC9evWSw+GQw+GQMUb58+fXtGnTrHWEh4erQIECTut0d3fX+fPnJUkHDx5U8+bN5eXlJR8fH7Vq1UpHjx61xg8aNEjh4eH66quvVLx4cXl4eGRY63//+1/5+vpq4sSJN/Wa7N27V82bN1dgYKC8vLz06KOPasGCBVb/kCFDVL58+XTLhYeH66233rKef/XVVypbtqw8PDxUpkwZjRo1yupLO2r3/fffKzIyUh4eHpo4caIOHDigZs2aKU+ePPL09FS5cuU0Z86cm9oPAMCdRTADANw0FxcXvfvuu/rss8/0559/Zjhm7969atiwoVq2bKnNmzfr+++/16+//qoePXpIkmbMmKHChQtryJAhOnLkiI4cOSKHw6HatWtryZIlkqRTp05p+/bt+ueff7Rjxw5JUlxcnB599FHlzp1bqampat68uU6ePKm4uDjNnz9ff/zxh5555hmnWvbs2aPp06drxowZGZ6KOGnSJLVp00YTJ05U27Ztb+o1OXv2rBo3bqyFCxdq48aNatiwoZo1a6aDBw9Kkjp16qTt27dr7dq11jIbN27U5s2b1bFjR0nSxIkTNWDAAL3zzjvavn273n33Xb311luaMGGC07b69eunV155Rdu3b1dUVJRiYmKUlJSkpUuXasuWLXr//ffl5eV1U/sB
ALizXO0uAABwb/vXv/6l8PBwDRw4UGPHjk3XHxsbq7Zt21o32HjooYf06aefKjIyUqNHj5a/v79cXFzk7e2toKAga7k6deroiy++kCQtXbpUFStWVFBQkJYsWaIyZcpoyZIlioyMlCQtXLhQW7Zs0b59+xQcHCxJ+uabb1SuXDmtXbtWjz76qKTLpy9+8803yp8/f7o6R44cqTfeeEM//fSTtd6bUaFCBVWoUMF6PnToUM2cOVM//vijevToocKFCysqKkrjxo2z6ho3bpwiIyNVokQJSdLAgQP18ccfq0WLFpKk4sWLa9u2bfriiy8UHR1trbtnz57WGOnyUcOWLVsqNDRUkqz1AQDufhwxAwDcsvfff18TJkzQ9u3b0/X99ttvGj9+vLy8vKxHVFSUUlNTtW/fvmuuMzIyUtu2bdPx48cVFxenOnXqqE6dOlqyZIkuXbqkFStWqE6dOpKk7du3Kzg42AplkhQSEiI/Pz+nmooWLZphKJs2bZp69eql+fPn31Ioky4fMevTp4/Kli0rPz8/eXl5afv27dYRM0nq2rWrvvvuO124cEEXL17UpEmT1KlTJ0nSuXPntHfvXnXu3NnpNXv77be1d+9ep21VrlzZ6fnLL7+st99+WzVq1NDAgQO1efPmW9oXAMCdQzADANyy2rVrKyoqSv3790/Xd/bsWXXr1k2bNm2yHr/99pt2796tkiVLXnOdoaGh8vf3V1xcnFMwi4uL09q1a3Xp0iVVr149S3V6enpm2F6xYkXlz59fX3/9tYwxWVrn1fr06aOZM2fq3Xff1bJly7Rp0yaFhoY63WykWbNmcnd318yZM/XTTz/p0qVLeuqppyRdfr0k6csvv3R6zX7//XetWrXquvvTpUsX/fHHH2rXrp22bNmiypUr67PPPrul/QEA3BmcyggAyBbvvfeewsPD9fDDDzu1P/LII9q2bZtKlSp1zWXd3NyUkpLi1OZwOFSrVi398MMP2rp1q2rWrKncuXMrKSlJX3zxhSpXrmwFk7Jly+rQoUM6dOiQddRs27ZtOn36tEJCQm5Ye8mSJfXxxx+rTp06cnFx0b///e+s7r5l+fLl6tChg/71r39Juhy09u/f7zTG1dVV0dHRGjdunNzc3NS6dWvlypVLkhQYGKiCBQvqjz/+uKnr3IKDg/XCCy/ohRdeUP/+/fXll1/qpZdeuun9AQDcGQQzAEC2CA0NVdu2bfXpp586tb/22muqVq2aevTooS5dusjT01Pbtm3T/PnzrQBUrFgxLV26VK1bt5a7u7vy5csn6fJ1Zv/3f/+nypUrWzexqF27tiZOnKi+ffta26hXr561/eHDhys5OVkvvviiIiMj053udy2lS5fW4sWLVadOHbm6ut7we9V27tyZrq1cuXJ66KGHNGPGDDVr1kwOh0NvvfWWUlNT043t0qWLypYtK+lymLvS4MGD9fLLL8vX11cNGzZUUlKS1q1bp1OnTql3797XrKlnz55q1KiRSpcurVOnTmnx4sXWNgAAdzdOZQQAZJshQ4akCyFhYWGKi4vTrl27VKtWLVWsWFEDBgxQwYIFnZbbv3+/SpYs6XQNWGRkpFJSUqxryaTLYe3qNofDoR9++EF58uRR7dq1Va9ePZUoUULff/99lup/+OGHtWjRIn333Xf6v//7v+uObd26tSpWrOj0OHr0qD755BPlyZNH1atXV7NmzRQVFaVHHnkk3fIPPfSQqlevrjJlyqhq1apOfV26dNFXX32lcePGKTQ0VJGRkRo/fryKFy9+3ZpSUlIUExOjsmXLqmHDhipdurTTbfYBAHcvh7nVk+kBAECWGWP00EMP6cUXX7zuUTAAwIOBUxkBALjDjh8/rsmTJys+Pt767jIAwIONYAYAwB0WEBCgfPnyacyYMcqTJ4/d5QAA7gIEMwAA7jCuIgAAXI2bfwAAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANvt/0FgPyPFgHAkAAAAASUVORK5CYII=", "text/plain": [ - "
    " + "{'MatrixVectorActivation_0': {'BRAM_18K': 5,\n", + " 'BRAM_efficiency': 0.8333333333333334,\n", + " 'LUT': 319,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.006944444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0}}" ] }, + "execution_count": 8, "metadata": {}, - "output_type": "display_data" + "output_type": "execute_result" } ], "source": [ - "layers = list(res_dict.keys())\n", - "utilisation = list(res_dict.values())\n", - "lut_values = [] #Initializing a list to store LUT values.\n", - "for i in range(len(layers)):\n", - " x = list(utilisation[i].values()) #Extracting the resource utilisation for each layer as a list.\n", - " lut_values.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", - " \n", - "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", - "fig = plt.figure(figsize = (10, 5))\n", - "plt.bar(layers, lut_values, color ='green', width = 0.3)\n", - "plt.xlabel(\"Network Layers\")\n", - "plt.ylabel(\"LUT Utilisation\")\n", - "plt.title(\"Estimated LUT values used for each network layer\")\n", - "plt.show()" + "res_dict = model.analysis(res_estimation)\n", + "res_dict" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note, from the above result we observe that the bottleneck in the execution of the model on hardware would come from the execution of the first layer which takes estimated 38400 clock cycles to execute one set of its inputs.\n", - "No matter how quickly the layers execute the (throughput or latency?) will be defined by the first layer's execution latency.\n", + "Next to the absolute numbers of LUTs, BRAM, URAM and DSPs, the analysis pass also provides information about the efficiency of the memory usage. If the memory type is not utilized, the efficiency is by default 1. You can see that above for the `URAM_efficiency`. In all other cases the efficiency indicates the actual parameter storage needed divided by the allocated BRAM/URAM storage. So, this means in our example MVAU_0 uses 5 block ram and they are 83% utilized. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After we extract that information from the model, we plot the number of LUTs. In this notebook we concentrate on the influence on the LUT usage, but by manipulating the code below, you can also extract information about memory and dsp usage." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABXyklEQVR4nO3deXwN9/7H8fdJSJDVHiqCaBFbipaUoI0KQrncKlViufRqaC3VVm9bSxelC6Vof621l2prbbX2vSiK1BZrKUpQJPaI5Pv7wyNzHQlyCJPK6/l4nEed73xn5jPnnG+ad2bmexzGGCMAAAAAgG3c7C4AAAAAAHI6ghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGYDbVr9+fdWvX9/uMrLUgQMH5HA4NHHiRLtLyRJ79uxRw4YN5efnJ4fDodmzZ9td0n2lY8eOKlWqVKb6fvDBBypTpozc3d0VGhp6V+uyw/Lly+VwODR9+nS7S8nWOnbsKG9v79tad+DAgXI4HFlcEYDsgmAG3IcmTpwoh8Nxw8cvv/yS6W3t2LFDAwcO1IEDB+5ewbdhzJgx9014upuio6O1detWvfvuu/rqq69Uo0aNDPulBdIPP/zwhtsqVaqUmjZtmuGyX3/91Qq0advKzCO7fa7uloULF+qVV15R7dq1NWHCBL333nt2l5QjTJ06VSNGjLC7DADIlFx2FwDg7hk8eLBKly6drr1s2bKZ3saOHTs0aNAg1a9fP92ZgYULF95pibdtzJgxKlSokDp27GhbDdndxYsXtXbtWv3nP/9Rjx497tl+CxcurK+++sqp7aOPPtLhw4c1fPjwdH1zgqVLl8rNzU3jxo2Th4eH3eXkGFOnTtW2bdvUq1cvu0sBgFsimAH3scaNG9/wDElW4BfM7O3EiROSJH9//3u6Xy8vLz333HNObdOmTdPp06fTtecUx48fV968ebNszBhjdOnSJeXNmzdLtoe77/z58/Ly8rK7jLvqwoULypcvn91lAH9bXMoI5HDTpk1T9erV5ePjI19fX1WuXFmffPKJpKuXRD799NOSpMcff9y6/Gz58uWS0t9jlnaPybfffqtBgwbpgQcekI+Pj/75z38qMTFRSUlJ6tWrl4oUKSJvb2916tRJSUlJTvVMmDBBTzzxhIoUKSJPT0+FhIRo7NixTn1KlSql7du3a8WKFVZN19aRkJCgXr16KTAwUJ6enipbtqyGDh2q1NRUp+0kJCSoY8eO8vPzk7+/v6Kjo5WQkJCp1y3tctHVq1erT58+Kly4sLy8vPSPf/zDCkTXGjNmjCpWrChPT08VL15cMTExmd5XRjZv3qzGjRvL19dX3t7eioiIcLpEdeDAgQoKCpIk9evXTw6HI9P3Qt0rN/vs3UjaZyztM5gmo3sD4+Pj1alTJ5UoUUKenp4qVqyYmjdvnu7yyXnz5ik8PFxeXl7y8fFRVFSUtm/fnm7fs2fPVqVKlZQnTx5VqlRJs2bNytRxOhwOTZgwQefPn7c+r2l1XrlyRW+//baCg4Pl6empUqVK6fXXX083LtIuI12wYIFq1KihvHnz6vPPP7/pftetW6dGjRrJz89P+fLlU7169bR69WqnPn/88YdeeOEFlStXTnnz5lXBggX19NNPZ3iJaUJCgnr37q1SpUrJ09NTJUqUUIcOHfTXX3859UtNTdW7776rEiVKKE+ePIqIiNDevXtv+Tql3T+1d+9edezYUf7+/vLz81OnTp104cKFdP3/+9//qnr16sqbN68KFCigNm3a6NChQ9by+vXr68cff9Qff/xhve6lSpWSMUaFChVSnz59nGr29/eXu7u707gcOnSocuXKpXPnzlltS5cutT4v/v7+at68ueLi4jI8lh07dujZZ59V/vz5VadOnRsee2xsrAoXLqz69es77SszMvMzMzo6WoUKFVJycnK69Rs2bKhy5co5td3qtZWuvr6VKlXSxo0bVbduXeXLl0+vv/66S7UDcMYZM+A+lpiYmO6XJofDoYIFC0qSFi1apLZt2yoiIkJDhw6VJMXFxWn16tV66aWXVLduXb344osaOXKkXn/9dVWoUEGSrP/eyJAhQ5Q3b1699tpr2rt3r0aNGqXcuXPLzc1Np0+f1sCBA/XLL79o4sSJKl26tN566y1r3bFjx6pixYp66qmnlCtXLv3www964YUXlJqaqpiYGEnSiBEj1LNnT3l7e+s///mPJKlo0aKSrv7Ftl69evrzzz/1/PPPq2TJklqzZo369++vo0ePWvebGGPUvHlz/fzzz/r3v/+tChUqaNasWYqOjnbpNe7Zs6fy58+vAQMG6MCBAxoxYoR69Oihb775xuozcOBADRo0SA0aNFD37t21a9cujR07Vhs2bNDq1auVO3dul/a5fft2hYeHy9fXV6+88opy586tzz//XPXr19eKFStUs2ZNtWzZUv7+/urdu7fatm2rJk2a3PaEA3fDrT57WaFVq1bavn27evbsqVKlSun48eNatGiRDh48aIXUr776StHR0YqMjNTQoUN14cIFjR07VnXq1NHmzZutfgsXLlSrVq0UEhKiIUOG6OTJk1bou5WvvvpK//d//6f169fryy+/lCQ99thjkqR//etfmjRpkv75z3+qb9++WrdunYYMGaK4uLh0wW/Xrl1q27atnn/+eXXt2jXdL9PXWrp0qRo3bqzq1atrwIABcnNzs36BX7VqlR599FFJ0oYNG7RmzRq1adNGJUqU0IEDBzR27FjVr19fO3bssM5+nDt3TuHh4YqLi1Pnzp1VrVo1/fXXX/r+++91+PBhFSpUyNr3+++/Lzc3N7388stKTEzUsGHD1K5dO61bty5T71vr1q1VunRpDRkyRJs2bdKXX36pIkWKWJ8TSXr33Xf15ptvqnXr1vrXv/6lEydOaNSoUapbt642b94sf39//ec//1FiYqLTJbTe3t5yOByqXbu2Vq5caW1vy5YtSkxMlJubm1avXq2oqChJ0qpVq/Twww9bY2fx4sVq3LixypQpo4EDB+rixYsaNWqUateurU2bNqX748fTTz+tBx98UO+9956MMRke74YNGxQZGakaNWpozpw5Lp8FzczPzPbt22vy5MlasGCB032i8fHxWrp0qQYMGODSa5vm5MmTaty4sdq0aaPnnnvO+jkM4DYZAPedCRMmGEkZPjw9Pa1+L730kvH19TVXrly54ba+++47I8ksW7Ys3bJ69eqZevXqWc+XLVtmJJlKlSqZy5cvW+1t27Y1DofDNG7c2Gn9sLAwExQU5NR24cKFdPuJjIw0ZcqUcWqrWLGi077TvP3228bLy8vs3r3bqf21114z7u7u5uDBg8YYY2bPnm0kmWHDhll9rly5YsLDw
[...base64 image/png data omitted: bar chart "Estimated no. of LUTs used for each network layer" (x-axis: Network layers, y-axis: Number of LUTs)...]",
+      "text/plain": [
    " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Extracting LUTs from res_dict\n", + "LUTs = [res_dict[key][\"LUT\"] for key in res_dict.keys()] \n", "\n", - "So our goal to adjust the folding parameters would be to expand the computation of the first layer to reduce its latency at the expense an of increase in resource utilization." + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilization\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(res_dict.keys(), LUTs, color ='green', width = 0.3)\n", + "plt.xlabel(\"Network layers\")\n", + "plt.ylabel(\"Number of LUTs\")\n", + "plt.title(\"Estimated no. of LUTs used for each network layer\")\n", + "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "
    \n", - "Question in the first line of the above cell.\n", - "
    " + "Since we identified above that the first layer takes the highest number of cycles to complete the execution, we will now try to adjust the folding parameters to reduce its latency at the expense of an increase in resource utilization." ] }, { @@ -1931,7 +1925,12 @@ "source": [ "## Modify Parameters\n", "\n", - "We now modify the parallelization attributes of the first network layer to reduce its overall latency." + "We now modify the parallelization attributes of the first network layer to reduce its overall latency.\n", + "We now individually extract the `MatrixVectorActivation` blocks from the onnx file and set the config values manually (although this can be done automatically by Vivado tools also as mentioned in the introduction).\n", + "\n", + "In the first step, we set the `PE` & `SIMD` values for all the layers to be '1' to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", + "\n", + "We utilize from (`getCustomOp()`) as the helper function to set different properties of the node. The (`set_nodeattr()`) function within this function call helps us set these values." ] }, { @@ -2061,8 +2060,8 @@ "metadata": {}, "outputs": [], "source": [ - "res_dict_updated = []\n", - "res_dict_updated = res_estimation(model)" + "res_dict_updated = model.analysis(res_estimation)\n", + "res_dict_updated" ] }, { @@ -2082,16 +2081,12 @@ } ], "source": [ - "layers_updated = list(res_dict_updated.keys())\n", - "utilisation_updated = list(res_dict_updated.values())\n", - "lut_values_updated = [] #Initializing a list to store LUT values.\n", - "for i in range(len(layers_updated)):\n", - " x = list(utilisation_updated[i].values()) #Extracting the resource utilisation for each layer.\n", - " lut_values_updated.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", + "# Extracting LUTs from res_dict\n", + "LUTs_updated = [res_dict[key][\"LUT\"] for key in res_dict_updated.keys()] \n", "\n", - "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilization\n", "fig = plt.figure(figsize = (10, 5))\n", - "plt.bar(layers_updated, lut_values_updated, color ='green', width = 0.3)\n", + "plt.bar(res_dict_updated.keys(), LUTs_updated, color ='green', width = 0.3)\n", "plt.xlabel(\"Network Layers\")\n", "plt.ylabel(\"LUT Utilisation\")\n", "plt.title(\"Estimated LUT values used for each network layer\")\n", diff --git a/notebooks/advanced/finn-dataflow.png b/notebooks/advanced/finn-dataflow.png new file mode 100755 index 0000000000000000000000000000000000000000..ebe98d0fbd1878fabb9ae2d87bd9b111d62dc39e GIT binary patch literal 164258 zcmbrlcTiK|*ELEn(nWzt2^|rnO7BGi(nXq7Nf4z-2Pp|vKnM_uBGRQw6-1;Xs3D*d ziWos42tf#)5K01p%kQ0g=gxiK@B99_d*)1X@|>L6`)OzGwbyxWV+CTnbnOxq6&0JA zsqtMZDtbXGDjGi~hKpYUgl~0Td{Bqp1sPH`j|t!}PUw6LEDflrTC-VBAJAW%GY6SE zhf-1Ta{cF^9+lkJqoO*mHZwN3_tbrp5}B`Z`#=?@7uV}Y`L!RL+6?#B0B54$>*|Ra}j?6&ZP-C*|9h=APEK1{cm3_4uby#e58b?5ctn?%!dCr zSK+|qYzxhQ|I0i4=e~Sv|A+sI0cmxV@UQ-N9^k;`AW6af|E-6tL{t^{zq|ea29qFE z820^tg!TWlD~^lhW&JP0{=eH9q9K!t|Eq5Kul8a6|9?SCW`xP6{~x-|4mRg-rBXr= z-TSQ%^JM7HPS8>9+2M)aDVhy?{vhXnnquGaR+ru}#^pTje7{OkD2VS^Z^vy5#;E#t zKC%y;z$+Ab8lb0pm@F#xKMWil{C6U->%c9tA9H4@y{NDow-EaAMCfO-v8E8^3!BTb?4FSP8m7hR3|@AgRJVsLX>>GG9c2MyW|H_@$%xl!f}j|}E` z(})NA)JOMP)K)w!Ii~#*YJ6lPbpD%XU`5Deu)g{c;&aB1!12GO9!A0L#5#?06nH9d 
[... remainder of GIT binary patch data for notebooks/advanced/finn-dataflow.png omitted ...]
z>8rzN*_G+mtIMOM=zk$KhSRJeewJQ70<=ujW}dh;5J#@P6QjF~KrAXHCxqDh7;9n% z1}~kw%y1i329#-@!}%CgVxv4XCGh|;m_!`X6(nZRT1*2B2#`Bm6@+}CFMtjzE2np% zEi{~m6gF&&62uFF|4>SA4=C!%jrj}Xj^Fuj=-ld!Z^dc~4$^SgRb?j)YQzfPbfxiB)%Q6g2^^d5{zs`3|`02FEBl-RimGUR@fw)nozo!hmTyMAvRUR3Y}pv z>IlTm9cPB}GIN(f=s+Bo;&uP$=p41OW~z`aZra$Iy^;S&+?3GVh1R}WCtFAyOTQ?8 zakEua_KSD?IiNfZggpzd`eG|Tcs5=Bm0nW`O`&J~Kce0|9_scBAFrPFg|v_|O;HM= zQkE&Eg=C2&#y*vhb&|20nRZ)^$dW8mC}o?hBg@QFVTzDF>%_z?3|VHz?BBcR^ZovQ z|9N>C?|I+%IoG+)b|s%8p<_1A9T0wt+r`zsEp z;DK1Ob*b53!$Prj@xIJ2_mWdhv%-&-0l?Zglciy zo45Gh#R#tW`p6-DMwh%e_Lh+mZQnzR>GsNqYc1;U?S-F1oI96J(hQN0PRz!m%~M%t zqPV#qsg;IbfR<4jCLSi@+}#fBd!L2@+1@+<-eYVdn(<^Lemuh#JN@0hHUCHt5+^9W zI(rN1NiRl?kPQET!5$_KFah~M32yn~%wzrYmS6RZDfiinscILgx~X*$Wc0slyR6i; zY&E&f<(-k(;Msez9sO;_i+#oS*?NwrsRQ{EJTI91y^RKAEf=rtKDJPlN2n-SA#%ny zTUja>PfTpL(UIPse-q+(R!Y~!Y3OJ@V7y&YI>{Q$j4FtFQ}a@Jk0iB=yN#wHY2dV( z!0JjC6Mo&c$!#P?iJm=_AOtU4k|;ypK6(xe?^`466LWDW6~|VG&p(lxtcZ?rb48z^ zYL3{n|IX0s$9^W{Wm&bsP+`o|y{uc~=TC3aj3gS8R&9bCRCjLcPTB}u8M|mah||xx zbTT<^Yr&xg7iNP6N7(4_<0NP^9cn|&e9J1m(xh_Z^NZ7^G`%B-Y1A+?SXo+3!Yt^J zu*TRCD^L7e_yex4Bas8j_}Ax7*N$a^*?#Eu`${I82VccDy`ul&1LosTsA|OimMtp% z45zSAKMAUgEWFd(WLWRPf)?5OUu! zho!s5`-8}}y%5=bEO$TZ6Y0}-{J`PYD!CMu?&Q2Ty?yt(y~ReB3wkRyJ25nF>78dk z*?Rm;%>1TvpUwjmI}>d$WM6}?5&Z2aPfaQxo?Pjl-XXuL%*Z|KD3rz2HF{eT_^$1J z{;xNO)P(NO&a53Hjmo{JxT&2Iy5){re*!>x;hf3MPa$8yL6YmAf9|7$t7P2sZLL=` zz7Usp^HoL5YZ&yK{)%sB!fZ zyiqXNcH(zSqVA=pu7nlCw8)F*2U1Q5GXr-Xh#5@yef-7pueNXvvb~3hDP5-s$I{j% z&JlvW=LN?Prw$|~Y5YT3>so}-g$YWVmE>YVf`172>_8eX#Z4rHlM8D3 zG}<#yaaV7QKRaIl6gg3ORK@0^Q5#`D#;UBKuVO;$~`@(B*&344MzR`^L? zDTB-n3?WY?9{#a5gF<)L7}bSK22*)T1fjLd!LUeyThk>=#F&9!ch@~@F^5k^reB|Z zI0B9$gx1#1JlAS75qddO-ud`t*xSrus@{gpH6O}$aTC{sD>Bl#F2N&VuEC3_GTpK$ zn)CZcU1K_=`H*F-WVxMmL{4jRy!eg#k9BjpV2^6x_XoL{Yc?1vc~ zR})hiIkdXS>!TPlr-Kd1&cI*QqCvw-NpC6JJX z#=O*aD_bou;<#Y(T&Dk-ClcxB#HkcKP8yQ(sCO>3ccEADx8RVM@i&rCaQO1qy1Gs+ ziWipsN>}Fywd>xrF~txwJvOXa=I+t@trK%H`a&7mNjM;aPqH-Lol)Ek4_Fg@W7R2e z$NF66wHOapv4Fv(cD|jDaL^!9)3Be4t)TS8H_$Gpnr)%o^V5e2E;02nHujkAn^w-b zZICM(+SKj|>Xuds<7g;fGzUuVL(v`BjlegWKP-qsi%u z<*z;+PUI?w(F-TX!iLdWrQIU{S5N>gVPwe|0bcDABH%XKd== zeN4rF|F!ndHbW(hnTKM+$))-RAhxz5;XkzLyRzKXA%%W#)K*@9_w2@wwELUzTf7;i zH$w1T`@i46b>Zo~1GoAbfROgj6J^ER{6RG4=Vd7vznN@0WMSNX$8!VajWJ_lg1R?9t#->DJ?RG?KOTLwDw{wMK;TCrpcBHCpsyTz+k#A)>%A5 zXB^n3-R&mGl`lw|TE2`YUX<}fj5iGFB%7clTdE$ffJg-ylpQ$nx1AbH`HEi*f*3tt zs8yyJB#_J{;+9=*XO{7suOUTJ*Wk7}EMg>rHQ8(nM4sUBaq5B0zgHV*cuiO|o2jWn zASO=v@Zw0_c=VBJVVf;Z2g4CUabkJ^1O-G4DnaTdt`6v#CJQ(d6YPZ6h1}v!>8&EV zc3Konx;Td`fbWre(TJ3%e|>OxR>4Xq-L8P&ANC)r~nl z_c6jt(HvAlB4v4U;$3&f(X+2P4Ho|v{xXP4Y$^H)g@mDl`=a45cm(lqHN{5qZd480 zS;rokQ(~xuxXp76_%(v}=~|#7O4Jc)DSYiizX>Y;JX9`u45FrcX021;8-Ua2O(>?aR%g_ zxN{9V=hqqUC;xh|Pq`>^V83ouEwo!9^+D-`t0uz!CFqGZsk2gtOdtP5?4FA=<7~t1 zIDXwM&+z84iLcdwfEJH^JU`1G{ne~n$@XdgXiAd49e`|Dx9Z714-E5avk%>VxUPI#K|Xplg4Fvw687?Q^TfC?As&ukYgEk z@cy~^)zu&}_=?yaT-h-4d|k(D>CS!olo~CyBG5$2?@tleSNy(LZ~%H1)|zO|wSt}d z`ngtkH(M{LH47ByH?FKpJ91l&wff;*e_;w8rT_AXc0UPB7rvfd@#`aKvU;FYN;iG2 z@&(wOU9!pQD@I$_WaRK_oKvpv?Jly=Q7T26n#W#Y*)%byVSw)U{GPM^FdOB zw}e5E26RkUfJzEZ(nmbx{DN4vK;h2eY*$a4Q9D<|y`cX2&3KAD!uszrQh4`r2UPOdr@G2F0V zU{Ac_Q8oIeFt_};k-fBRFHY7&NQArH@l6p4=b!7*o(sb~x`Ri#iHlao<}Ig5QTu*! 
z2eN&Ls~rC#5ECp=jGgV;pd92_^0zp>=L(_sd)fcekhjOPeULnXt|H}>?DGVTB*sjA zAECo#o%Cj;K{izZE99~A1l z+Y>}<1VIpER5Qg6;7f$lH0!3frx~YuyCN>I-(dZp?eUzqMM$d!gu(%QjrCEsWp2eZ6{tXJ1l@T1*m1s-GcVv4kI9$Hct1hV`Q@2jy;rHs$Z~Xi&yz z@NN@0yP7gLRcAzl09&{BR-MU4bQXFf)JElu$&=`)dz5{%aniGLi_O8?6iu~zeagQ1 zyf09DVz2lN?`Y)ReQb~g9H_L`5RCjxIezE~k3arntH-+(ywsr5@)f;LuH{nQR}?t$ zSo?Y=bpf2(@OK6S>QIW?zBTQ?9ImBoB!3omJaz4O zS4f~`5*0Pm3M9LAo)1p%@>Ds!@*L>7E;}50Q|*hz+mgCI*}}8J7!Y^ydww_67tqfm zp5g6$3e*~3$+!?|+&G`&fAJ!PE4YEF*DRnLzTN`H3ByCP52x0=vVpnSg?!<6YCRZt zPFefy_LoRTAGh^qOIUhmw<31%0L1 zuk<8@+sEVc2os{^R@G-gf%}rX$uGT>xq)h|2ufcfDYfR0JpAJg8|-i1OI=KL%t|ZJrw}f4`{*&m?%((t8rzq-p203AF9=2jO5fkhkgep!{d;25S{%=|%(3SS zi_3I$Ot?=9%m?K@wTMWgua}!vuG@)Qyq-Pux@5tXb$FQ9Vx^;{gPB(L%A9JA%t2RG zkX)pTZNa$q404L&fqr=}DIe~rV-R`V=(x|C+Z*mY8>&**iW9lE3dJX%ws|;L1y{FA zud)>i^2t?fVLJSK8r`3=z#aR2ZZc9yCG6_;=Ah8-ueDaisu}M9re;4sZAQ<{y z$xH`9==oiFa!GLkT@3+-JHQi+?xPRry@raLjebQcvXKl#8d~KLD98I#Ewl79f$;v% z;PuHoglakC>$XRSN*oQ=vMve+Q~PCP!$97{6;?`u&@{AZRntBMs?`>|c7@q&;^(jK zH@C06e!4N?>9ccVs=N(TbAp6%E>v7-G2wfXmKu_x=3{ms(pQ$ zpFGhC7YV*+js>?tgRB`nW8t96p(5*?tG1nQl+Ql-JTF{4UpJjdB;39--xE$DE_-Co zep$EoxDU4n-gZWo0T2(RTU`-w{vgChu4%LCXU$4luIP*DV~;4|@b<{1EMmL6bA)|l zJK2xtw7Bt)15|uDYD;Ky@Ni&r`(J8`eJ_)XZmElp+pEsHwT5lm35)K~eCMHz9Z>t= zJ#_n6x82LEwuYiu{drgv3f0V49KcTchs4$3(vExLtU7Cd_x23knGy?uwr2*Y1d=_n}IlLN^2c73Ni~a3-l>KvcYLV%=*P)mBqRWYEvd zI!%VPE0I&;ZmsT5O5!A6x>P{NgUon7=T1Z4hBhQl;`&~m?Yg!^6Ckc7u3d?eBCLwq z*M|%4@VVmZ?!e=Q(xkKB(+2-hjCN_fGd~Vf)m}F<<-C?tUb}&*bJ)wRm*vZ|( z10TVQYBs;*kuH=?$ErArC+=@blwD=3Ql=@nNwQ5gY`iX4^3mxMBu^ z6^8akZiQ9Y!Uy-Ke zHCg5E;p}GLw_lIC5h(F*s-l*tUO300C0)3vc}8(-u5gjb1^H3?Ajt=@25*W#@00Jg z!>mch4%hY-Zvms0OYZ->Q91s}pN!+L)`N-GBh+WRe=X?${KXE=WCUa>) zZWSmtiM1fgF%^%Fq>lMrIDYrqQijSd|KATQQqxlmI)3l(w1(N3L>16;Cts8{dbr=0 z`~ByU#~;n=_>IDQuRq+$E<@)AIlaRbW}rt-Tym^_fEK$8|6Nn@+Olfj(V&Kzg)ug0 zg>Yq_KTgmJ{e8^q_H9Wg?%4A@^Hm;EJo7|SM}u_YWKTGXbUt_#K`EaIwCEHq_Q9bV zEM{7JTW3tT>fRYvtaWNXO4Kiz%A#pVo%%((UhY@{D^uL+!@n;f74(JneJT1EO5r}4 z_;E#+9G_^rSCzFqJ)L0HDo)RO`q;3ci%4uMDw#%NzLdL!)hS$WxZm)w;!>IP57kk1FRI~pca?np zvFo^@X$oIrv>H}1*m{{S@(Wk}Wl31r9t*J>5f{SS3)m2?i{_DOlK7J9YEu<ZRD?+@eqz)0J+=$z^NdS(JyS2m%cV+Y%vS#gMp5 z=im`LDN(kN*~|-|PcFs10i<1rmd_twy}7~hl9Pn6(Ot9FP{~*0#LbPLAHGgeFFOm$ zrVj>xnb!Dje=vg7+LdEK|E=Qb`{us>%GD{0n=PL;yy6r-M~{GR$Fymz{aU0p^?{VT zC-c_9cgnsvNtlP|Uhw0)Zpr5HEOm%du5;j~q5}GOXEvD1FEm~{&)lT7%>mf&F~cG>*CCUZ8Wyibh!6)e>=vnyT_eG;SkEV`0FdLr{Fz0tFCA(ykF3= zJNQqsLhCgGJlaLo@tLNF=fk#6Lp{vCyr|_BZDu|VYRP!fmMpZmJ(QJZQ;_C>>7ygP zko%QJN&8q%9)!xy8<|AO+R|H`*Cj-XSL0vd;rjv2q~oLQ1GJ;(C)&+n9+lfz7m*&z zHF`eVxL=P+VN2* zlJOpg`dc(8UyC1BQ8b4m<&N=_D1~XgGtK60cNf_xj8Dh(vO!u!w)$GXKwkcp3zVxo z^R!nD95^XGRP-l13QP%L>p|D4iu$I9lAms@O+(KTNuBX@#X*|IuhtQ9O5IR4Fd{?Q<}x_jv(*!=5A7 zums3*+IUDT%|cyuZ(yD7FXtXBSQ+T`*!Tn&wXY_yhkx@-$ys$(^-`MB?RH~ut6Yrr zK~`4`ww3{<@f!na_n#DtLT(AgHU+rs?9rO2nnWnk^e@#xLHB4aG#7{Pk=+Z6N> z2+fJL%=yRV987r2n)S??GnrV__r>z7RtIR9Dh}z)G19Ywz5V3~&EVYdLU7M0dMttz zbpR_UZYGkKC#z5|Wjz@|GD8t8zoJ%Y|FMAXF6z++{kqGSbJssS{)!J@=qgSozl!c5 z;-0U+a^ME{%)f3+HF}8#@IZ7y{xkx&Ob_AV&xeTxMFjN)Lj?^zs%3%aU<TAD9(fBsOwPB2yK(6lA>e(kbHjQ!;m!JD^>RF-V ztqa0P3#wX@mDVYlhw~-ohtcyc8=NB@qdW%Xx`ziUqQsy5Mk+?D9uON5@pYEjU%Q6u!JYkdEsVHWF z+VR`vY2HA6;$2-^{5U4HshGNmiSNIMZZHHL04flcQ#$nZZ%=sNc~{lFMW!BLXO~lZ z5US>RMD{z1qllOlq=Suebj~tv6IUPB(W9cc6E2P&HMX?qls z27Y?`&tO<{J#F9x_0E|okvb&lbXl!Vsd~0Hu@R!1IFK^UYO5Q(@=CgYm&kMNpyMG( z@=qSrO@mV5u0ZY^cntAyS6BO81PfDf9M&InMK+rJ0aw% z>JTL9QfKM*S8$1}l7Hw|#ns7CSq@)zz&4Mea&`613ubFfRg#rg+O0f$GnWy$E!EEZ z+1a&rc6Mj&g7rH*Pxw8FSKm#)+Y*@&pXqtyhV_ly0}@`Luq4Ef|Gv?7uCYGcmO?I@ z>x)JZU8?;`qerJbC@v1%j+3-)TfEd>z(_)du8H)UCOpDFN@9=eN)FCjf7_Hwkv`XL 
z|5G`dm%kACIKVw%0Lonxc%#}6)4Yt6vEipkNaE=^yKCe;`i;vJx&|z_r|ws|0_geI zQKas+?V7KO!*B6bd=LgjblD-}nNH2B3J`MzuM__gYUii={&9@na84E0@lY>;`BN5> z?qf>rI+W+BHIia$#m)pvZ~3K@aXNBRXpcG>Wv zI0zAlT6D7z6wCa2WMbRtXcVjAN5R-$-269M*5G4~xE|ie`OH1u>pM5CJ;d$oBujll z67XX`8!wn>b=5S+;Z1U*opFj@aHlw?lP>BYtS0dNslYTcCMYN^OoWx>bG(%xvT`Fz zSmU-hS-#aDiL`#qev&(x7350S?uJM3ql;<%UyIvM+KxtU=_;>Vlewj=D(R!C`6~ME zy!=HrOzvlc&lPEfl@q!?A`MxeTStqLF`&)@@mHR-Wy7j$b6!Ic#9XWmzv52}>ar6+ zdR!N$p@IDBVWfvKFHd?!hgXau4&heEF%1n`&CxR@=%qV7pElgizaN4Jzfypx!4y?t z7tjILdX}bWx6^#e?+&(NYh}#{EYA~~%rH#ALqGnSVEZjC#p5e!9;5Ot;cq-YHb#9m zrE0df6~!CO`?VD62hWikU)IhK;64a1kyh_nm6>J|l#}}@_D?b!r5mt3zs{tA{qa`Q zeTwk=cOUV`ozYufe;Ab;D{;q>U2OA!gxavp!g1367DMusXC(eY=x{S)29k5LF5JFy zp?Du|d3Gp7ZkTW=IeNnjAbZ>VOF~rPd>+x}{Z%`~RFsz=Mg|bC^c>i5ONxTLW-aaisHV*t7;Jxzh)05PXY3m?@Py{yg} zz2i3LZ$iT?&X5rPK9V=5h!a|M!>xMmlBv(<`I~|q8=rpE(&feU?2RT#$91JQyjnY` z8wbXMh`bsWsq=#cRPuiPBR%{;**cQ`0AjE350YJ4*C;%sN9b7|AV@|uSN0mtf3rG8 zLQsVNo_CQgkIt1^_X$e&!)oSV#1h0EhrE}k2{_rhzSaetu%G0Ujc80t+6d{IFN(ug zzKWS3bXZc=)@lCxbYn**GveEJHCRVWRRUh=bUHJ_+myQ74Bg;IbuY9*H!$aI@yuOa z!BXZ7{Sb^Clg3=`dcPf#g*?zB|Jx`xk8aj?*RWIzLYePI)O3C{;@sGU=$hZzS>7Ey z&=2(p`Qo%%r}$RV8h<6o@b;S%v^m+?X`D~PZf|a(E4qr17Qh8pJ5M5Oq*d;SzSZQ> zj-A3wBAn{yjN$F3+qn*^h#MD=qfWt9j&Hq!Y^j#wgOo*x9N_8-|_JMiDY?w{W|) zVbY01a9{UH?c#NNyM?hRZ1iM*2SW)r-%|uVFLOAZ{rx)fd+FDey1(WF ztRf@V(&0EpyL56O?AV1$EdOr~~wKuou%6+@*fI4BOUV`GY*fevBZ%&1nm zwUVm|ct@CDWksVg*$k~>mTKp0J9INK4!_A%aGF$F**)J~zU!T$l{xzK{ya}51xP%e zxa+G?(xM(@&C0P_Be{ERnnRwST)^(OJVDV!Z~Jwnccz+ZW4z<=-6RTSaf;Ob;+bo^ z>8=yb8H5vk>^u@VKT2_sBAK++D*x>I05rK+&R+=1xZf`=nA(zrN1RuEPnsl3a28X5DJ3_&n70U^-^J#nqKC zuld8L}Wvw8hpPBR=}o5VaGM zhg#)GnBc=cjs5wrIk2Unf1|+3byBlvBNxCf1hjLe2jz{q4B#05~-&> z#=|e4`Ll)HNfC&ZG=pnmz{Qi4Y9VTv{BsYd%_wQ30)%lv@D_m{G0CGJ0Y|88Us`r0 z({5Q-?y3?Q64Ad0-kPCLP+_<%j*D)@2mmsC4l|<5E6pBdi8z!;w@yv^aGRH3Q}%E; z7#LRKC(ctUFi7ueCX>g0FvJ_qt+lB&HbnX}Z;lhl z_Vr#LjK|eeB)>>B`whl?%2HaN)qZdC@86UnhbRXV{Gbg(e@rI86{92vUP@;6;QECf zd&WlUWRt{_TqBFk#l-Xl=MhV4Mh!WH=IZ#C9kbppSKi$EgZ!ta!1CN<&ax->neR&b zw=Owi>&(m}TyMLp?7t@NA(8Dzk4NL`ZLxF*;`^ZDGK!a5RO?Rlqyf-rKC~9 zOk~_dT=D9>ISR);?bnF*{o2r|x2Xeychk^D*_YyO*U+IQ!DQ!fCpFN|+I_E9oBz)GvqkN82@gCi={@Rod+( zs^043qznV{c;w0tf?P9m*6^gHUbjpzaj5uWbv^k{BdM`65J$+Iuh<4fZ6F54dSng+ zMm4C>isk?@Ir3^)xZ`YgYFsJk}1Z8t$ z{F)kL3WZw<7deV{=|Q!ANVX||*Mz}5>c^zwd?;!-Oq-A<`M#%~A_7(=()}Y8dk3r|kReBgxN^%}a?SDVuLHESxnr=@$wMO-6-gqL?~(Wjwz1RwxrE z4)D%NO^T9siQqq-N1caJJGL6Uu>ETi0$3iMUTBSOII0Ciu~VEHi~Te7+!iVDXXJO3yjl#KK#KclCBYqdQ6eQjR^`VKBRW25nNizX zZoG0i653^V`Don#pHQ)1fnfNH&0}{W!|DH*Lm4OH0|P`IduR1Z9`3Kniyg0Bp9KjoE+!!*F4L~z z6l_ayQA27YhjLn}0(%NAS)|DpX{Auv*c=Hf{Q_-&SBVP}RjPL>uzy+(zCGPI*jR_D zAAW3p_dnJHsIuro9A45=-r|Zr9qDIkP8ykBH)lBcBo2S_|7Bi>^NXhyAWHa!OC;v< z{hIT$GNWSZUl@tOno`eZhnP|izBN{?9sOuB7+U|^04b{TTNq8vUGYP5!550!88k#K zQrRvN2KI83Sd?Ky+_F@@to+J);efeLV*@w6xGYd&HNdGbkxAucKZW0@IMTL6DW|a5 z%=~_$gOfV*pe{Qa*F?+eKS5iyU0tp*Q?`=HuJh zGc;LH`yzM$HR((z*?DwqP+Lv`LL8j*EsgFlA#vNvTyJA~5eE+s@b2xzjeag;n1xX_ zK!W_;=Y-IWhW})8ESBjs=pU`JsB%s*vhazlT$|oXxzx&kry>j>o`&| z^E}l3Jo@GwSLyEZ){YIF(a4T{>@4?l#)8||px#0-VoZBT#%;^&Og%!NNXPiD&|2=n zQr%ry-mLATx(R}BRFkfnw#=IEJn2kXy(?V!Z>+0K%$Y0OKa!C|YLFFhparFKNORZP zN4!0SebnNrn3403d^R_Q8-gTS*BqKo}(uSrnsI+jPjn3o)f zFAU(t{YRfY=CdCZ`M_)l%v#TrD>hGLPyWr$9@!s(nW*K_Eqhm%KY~1Y1Gv`B+-v5h zf-zUxAfPFZIderibYRr5i#!5A`WIl+9lQo{ie65F^Q1<@pkT0 zUN~;yO+QWArBvi&>f+0CoC0;r$K~I4O)F-F)QpvJzwHSMk}Pc}6y^5uNCX+ZKZz!L z-B0q6wOFbgxF7lPfv2GoW66i}4Qt$KdDle{oi=qT$DqAGY2ax^wD+hy(1`u2(}oY? 
zmdWy0wM^RDcK>$8YsrY!os2qn-aGM2pS@Tzffw!k~-SXg?_wQ*)=pWfd1Gr0NrFjow#t&}1dVYq_B(X{s-@9Y}i{5)1a zWC^d0VvM>Vszje8oW(;wohJRv6uSmpFtz>Nlr zTr{sS37+3HaPicGw8;ZfEMp758Y_OCRD%^p6G~0*bT*2|qsI_~mwZB=y7Z{5uMQzVnfZ0ax93^Nj;^lX-YRD1+r5i}9n19< z$}k=3A;i_X z14ZKOr|aDLCE_8(5VxZL4wy1+xU-{C!+pzSZn{V~z^<1hoGvNy!x)Jo3XK^ncVuM!dhA zHm8EY=ya9;%+r5HkX{YUq8bq`ks!N*mj##)qlP@^`Ul4PyjHA`Em@eJy}CF|UzqFI z4+|S;a~ZZ1hVABcrWezzrwi=mb6_t6;kxK~?>UO(vr;k1QMUX5>Wf*dCK7pqs8j7! z-G-{r7PY;jb)si#|LIJ`lG2XYA8oe!ld`ld6iTwhH?c46)q!@>4s28+3^jHh-U{;m z)H(OU9Eav~pey(2uB9e04fg^x``Bh#grPn6_Pf8P&5PW3RXBwTS~y^3>U8p z?pS!jG!*_X88f58>!feX*Sje`2vIDd2!DMU=6wjSF1w<;DvN5M3IU+E1uDBz`a+$}0{TFE`>z%r{4OAM(Ow0dGD z-wwMV$@BgF1P@#YH{}gdyi~}!T8y|**Eh7W)s6=gT9Dwv^1v)IbJB2yUhauEIXnIm zf{*b)b}QdEMg|I8kb8YwkN5i9&#fI{`1i~RcB^Z0)CS80=Ea+UX=>{HtZZIXc_oFa2B-dM03g#Zqw?Sl9Kt&8Uyj`S*pHK+W6x-H8=JFMaAOd<1h( zWg4yq|0*E!_s)%gV*YdORUZ9xc|y1LPmf>rvWVF{pBqs@EI=h${m=#uq?96>Px5J4 zqFRN{xUQxWYo>cWAy^(4RfXj}h5JhuP_nS686y$SPaR$~uFw0TnnxcwOenVri0m9U zM+~oZfCFuMHV!|G&{yaRKFKGFb@J%0HilkoQ!0rN69weQ9!*%qn%!srzQW=K9{?2p z83panujxh7e%M|R43nPZFn`wc#o-ko)9(nXu#KAw?a<=SD!L5TI{IOl^cz4F?ZR_t zajY&b^l{wOBl8}0*f!Y{`CMNYrE3@|pCT)CkucxDWW;gKGH(7oZF)pV=?;(hBBh9# zoyD-y0e^vbVNOcsjv9@~a-{GMSG z<(m#X7R3o;&L)xWM<07a@Efs9Z%kN9RM8JurDP?0Fv&tEIc;oy3y?|C8eHw50 zc1WL9{sz|HASnq zS3ULFB>nvb>W|?!&Y?y_q?=rzwLG&@;)w!|f!ZNhjj!#KqKv7nYa6|WH{Fo3vS>jP zaWoAj;TVtyg^i-IZsCx~*D+Z_5st zfupn+&~Cdx0p+yFZusg-&btf}6sS!E7o*>2vnx+;IZOxUbA8kU(p7FGITsHs@>ck# zC`XunJ{E{mF)m!y%s9ON6;t!zrUQxioq=#Em$z%)Jn)pEXIrU@?jZjzJ2U;k-b3bg zXw&(X9-HY;ducIT<5JWp1~tte#!TfER#EGCO+wo?w;PJMu0j6EQWSYAx1UhW=Z8zo z^yqhM_vg`P4il7oB78rCT8-^f=1(&XR|SaiFbI~o%OtucY&ZRH!lB}vz>E-nfr4c8 z-eJPNZ61aY%-n5h#&tP{U2ev5>y^gxRIg zR&pTDNsxNZen`RWe=AQ^gW1q5%@;R^u!~Tef2g-OpnHAZ+M#2HEr8R;OeOfgbwrz* zQjaM>P|;IZAOh?vUX}hB9STUW>0ikY9Stfv_ZN!f<&Nfgh6o=W2YmKYX~rDxTJ)XD z=z|T`b*LY+#b(qIkI|<>g#(#Ct@D}h=Do&c3%*vnVo7Sxu8$i0Ef^Y&jzM%->N^FNB2$pPhsrk0JO;YOuY-WrR;OtVuxA7Vshrup#FJf42~5 zuBAZhOl1B%)Ej6@o!^&7e-V%0?YmzK!r9(c{$K&D4D|x=y@eC6o;+v1Lv4kCVi0sU z07l?(n5t+~>YyF`uW^ShKH8%SlZ(n$G4GmxVt%@gD^yEzPx0}P%L1#^-zV1iPb6jD zy3?*6kcG~=-2v(9O1!BG>(}to%2QjZ^Z#t?rItD1aE28-y6kQP&%S<`pjFwU1)Jir zKO1-mTl+Pnk~UgS}M0uK6Y4wi~(>AOjDf zdM%B&_>KRW`lS;ou5T*4gU1g;yX-Q-v*jc41m7OEYSq8m#|XF?f(spB`ij z{`u^ls`_h^lK{5@&Pf2j>|=Vg3|oNZ9kDKCM%agTkU+NNX#h68kJI-*_QE}d*Me-Y zQh==U*$%vgt+$#5sBQZOP&0c7wb$)A003M8LIpCR*Bqvz8xR^opY`95>gogPq-6ttD)vjK#iMCS4nj2cX5 z)nw0@$ANs4mQTPk zGvpgOH}3)Uo^oX`dnn3ejME3AT3sW^T&@xJ1>IgZJ&%D zwkRxfV_ihgEaktgdVY?z4%n~hU~jC`&%~Q!*ac<#gRqy|$=oJ;ElO9a_px`wadmS}*Z&t6cZ1kr;}0q?oX#kqMWh`@g4H1<{|$$SVcu^%Wu1`? 
zX4FDZ$y)(D!{g%@5SM}uB>^uN8iBY`;}Llc$XmyP^60UJ0A>MJKk@f^JA$Nm52G_cfMh^PPj_=~(Vrqsnk40zENEAF1I z;HBUWQ&5Xs?qyGK5J3Q7F4q7VrDj+=&+|GU3jja2cg;^V<|_Y}n?OXf@JGNM0VtQ` zH_RDNH>Bm!$L%#>e)(s!7i$ynB)C5Sx`Dj_dpB_H!PbxZTI$7r-vL4STKDJz_csxO z9UUB}T9>H7Sz1`RDL15osSLw`|< zKJf!M#;#VMlh22(3IpHdU;FhB14IW`$lnSKkfV)t=aud2jQc&aki|wi~o-#nW z|FQ5^GwP@29{fo%hz{U2+42vVB({DYJ>s&dIRnxaym`0#UH>-#l^*5C0B`g-`b;M+q~D> z5*Ye7f#OXI_abCuyT$*Kb}r`k3W=>8M3aX1PydhW;7LuBVkg)>^ZyQc^uZYNO+sZ8 zgfX;z(62&;=#=`u91EnYj$`lGdH_b9cI3dWkx~(c&Vpx33*QL&=UDMBd)pAAU@7C} zA$C0tVE%W`0ENxNKQ*`xQS5HV|M-toc0`kG3E{y|AMVPxms7v^ANDYiwL-GL0|YF1 zV*Vo*&^bXQnc&u2aM6)(=No9A|6KQJJ<6_-X^W+tS+s$Fa)Juch{DTv5&@~h6r+FK~}Q}+NR%cLHLn(`>e?Nzue*w zj3~h^tW!hH!z`gsD9^zYoRcmG%XheGlgf^S?Ad`fQf_=&4q)C8w50vS^?m{gzxHA33O z0EHG>OOnE_0V=Q0l~RdHq#>m0CNE)CagC5a{PNkl;2dMGp z)e7+V3|9EPR1djtXrvx8x{qv-GICw52cp(fj3IRf?xSk20+P6(Gq%cw0LqfEj!wt9S z*q97-j#DJ!tg#L!z&QM`Pu`?=08bf+VTZh>pf3bpDG+JzrqGUnAgVs~I>ljI3A+Ya zm*;gj+PFgqVHns&gZ*>_97y#0o2mbQi&=Z|+q(Y-(f~g8v^kGy($l31HEH)>rn6o$ z=wF?X3Z>`&%YXhSDWkoKyl{aH>8vj0b0;lD3M zGZxFZ$-uwtE&Z?dDFK%XKc#K#3UQv!vj$3rOb&AC?S`;lVvudGSN$M(_j&O^f-Y`f zw1JSpc)_PW29*=K9@dtd2X;<4Yn7N_q%(S{IMbWD@acvFt1iJ9&d*m6YS;GGx6jT$ zKnwLiB1UbV0OCnV#?}5OYEvQhS_w}uANKF$vLY{gDJQcdKi~hC(4W)Yo%q6^>(T54 z3@mNEO|fVL+y5lKli4YRKdYhI2xRzQpU;UjQ2z&*CVB~GPs2J%_B+60Af8o7^FXp= zerJfPZ4>IvBe_XTDc-VM@Y4QcX?UdPscEFMTQ$p7N&+wbck1vz0UHuTDfo)UWLg4M zi?DoP7+1Bi*p4trJfW<_rxA+X9k90Z(%Z+5P%m^qoC0};8FiY$l-CTS^!{NtAs@C# zwQ<6(OYs+B{sM*=dw-o!3MFM-Mx^vs&;L!!^D!D&eq9U|fj_SqA+RAK|NUk^ zRW^U;TE8-z-~D+8Y;hbLvVOaF0gP!E(oS;ewiIw*5m0aZ3!Lm!U`LQgfxSAefpx9G zW7F9|JX8hGIrb_K%4u-vTpWQMEjkO8FC*1B`JF^BHUdTbJTjcbBm*ZIzzk0t4tBmf zo23Rw3d9hifp8iF*tC8^KJ~f%1YwpP0o@NR$)ON7<~vTl z1It3@1euluSE{KNBu;vi$b((jGi0K_5g~|nXhXt;5LAH(1qEv3KYeO`npYr(P7-p> zkMt-kRtbEU?^YO&2brv)lF8xu2adN$$Butm5GK4}mU(M1((VHngsgUeI)h47O7ZwO z%wUKaQLp_Vp=kg8%|dEj5Ym_R5Gc*$(Y4MoSwD2qXfIzHM8yxKcB)Lsq%~C7{@Lf0 zL&sIfBn`Aiz2(~ni{3$9ocvhO0qymWADIjwN(3h1u!JTfP2@n%#eI-+eL1j7MoLX70WS6KOb7W_PjRmsIl&|2G%x=%+r ziH<04XWv6#A?JUGciB?KK_T4mgE~_Qec00a@CGuuP6I7reoG!z|L0#aTFh!I4LhZU zl*(O~OIu+cpuny*Ehj^Gxc9fpyhH#%)Kx=8^aNWf+*je{uwfwI&bcM|9C5B~U?b9D){yc3JOq-aGw5?WX=hUL3 zBj!ftj^jiogi>)NceYY-S$s7mPP1(Z+P)m@mP&V&LR0a0E%u%pE${l@ zG)j~YZCtS{?w5p&m}nBGL1HKGMYo?%er@Zc?hz&8ydq%2$7>-({K#q}tCD+Y#sI0T z6y-c{L%P1Sh|v}mcd63&SinN?CLORP!?yq?pSx63#y9OXZ#^aa%^{bWYr3oeF6Ma1 zjf{7{ociO%Zu+Bg(ygzBRbSgU+E|Yl=$NHWQsY6^?)A}bPw!|Mza^aiyE4t70vsIf z3WV>tb9WlwY9EE~-^*+ygt$vYtjPFC9aZne-@01b^c~wPWod6qs-8*1D$rrqJS`~h zqy0`RoN{ZrpHQ(Pc8KrD33X%nXt2piZFshkYurw{_dX;sb$J_z7O@wvr2rxcJpQ9O z9FCqTsC_%aurpHQbY$nQl7Sb+(_G(=5tRK1uMx(tBW-1m=%;rB7HAOmcFO%~Q=V;N z1*SvO*>2@n(cKv+qOEG`_A`gBhP!lu*?Ul?$OqOheQdrdE!g33j+qwg@G>>cWAqI! 
zv1F7zFXlMR%_heNvQi%8wS-$NVy^wDvV+N-!SYATuUq{K+Dxf&ETLN=;d%m3K?E_x z7SPOGYp|qHv#Pll@My)cpAV_J9Ab7Z)LwdTGxrS1Z6vX&rIPps%cAuz3GMNOKJ=Xj zSSoWitGSQJ%^eovgw|9&`F4$fG{(x!khoOb2GKQ=?0(&pV#B-No=S2F&}-_dPgNvA zDM4LAxX8ogKTerE_C={r)lX;l^wP$rBlGgnx89DVkIP!IM^t7|B4m0OC5*hs7qy&w z+8Hzx)TO~{Y>-fy-U`BJV}c%oSNW=zNGP;=Xh zGUeeeM}X|)UyiFqjOuwk!3mpd2s1PG#TM9OF3L%x$H7q=XW6=+PD+ohO%ixpF>i;l2+Wa`9^#8X<$+;kGoCMv;S#DiIZE+k6KyRCBu3uv`BPfA2Lz{EiNj2aT4_7( zAhOiZWql4yH+1*({ZMReAFpUyW-ZIYo zgYZAGr;pMDG5s)xRY!ANC}W8e^PN9Xzrc=NX+tv&9}$-Gn-XZ6WS2!)o7wq{%LrEQ zb~E1SUD`F>YWKF*yLo>hgSaG^A^xOGl2kX1zJ@#e6}af#Oj9^vP3JqcC3kuk z*EoNR_bc_?f%z+U`(PXoq4^;zQuf#n#lw_RvE^Ja%Na#Ij;agF6Tw7GLyCx|(Ci+YsRlHLcgn??y zO1}L=!~E%xaguP8ACIV+qHd((f*$r!1pocU9Ac_hd+qLt(dRvJI)iIihq-O2>d83T zl6lQc@_nDKiCX)!%v8H&P2My{N6MojRaJ`WHWkOUdtUK4iCTA`MAxN!QL5ArR@^Qk z>^5fYK-)lD|Gcz)Ui%jV&%acI>8~PfWX2no)BClj3?##P(2TWsk}aDe*4t}^9`rvQ zBH!|v=Z9eWjmWy~=~LMm$6bcvVkOI;t$i~L{h-1H=~4zYVz9#0oqIAad?NQk+u%v| zmYjne6(Y;HOL;j?gqJM9lLQZH$ad*(}y&dG6is%`_A0j!#mVkI}W2lN@T#Zet(mQ!^R5 z&ag&hTO{Mey3z)SL-3Ye0_7@lj6RSP20x~h$fnfB=1(xtWW9Sw(9&%CoTWp2`aE;~ za-wq8pj#xz-wxK7kl!bkr-)u+ogcFJGhW_cn;X#Vh)mO%@A#R^#VMM^|3T*(tlFpL zf?GG%4eI(=26eemHz0AZ5Z=5uiD4KG!~8;YA}#ij^xypa3|hp`-6RVMCUuoQ1IKUg2mQ>S%#s{KbH;3b z#${dqAV(&DeBi;jrMv9g5I?QP`r~h=_xZ}vtmEguEC5KW+vlZ@F~zt~es z5>>rQ06Q}lh!+$7;)@xX-pPj2YKI1U8^ypz=%H_AlTwjygmCGM%aVm-4#SI!n_QcXm?UR;6a_~L5!A~6*aLrdb!cCwd2KHo2PScvh^*q(LLGvG^;j#Dm~GbT3a&`iE78kU--`^;9i!_oPcZ4Xm{ z3pB}0-fY83grC* zWc$LlI(<2w#A$itHM8?TM@*+g&COx1&|v`amcm2vm62S;4fp({oHkHeJ7eZxMSQhOg}NIDO))u?9*v`>_z|F zR%{3Gv@DfYb1A`RECg3kvN8{x^}bI=k<>JI%{KYgvVa1dNJ=;PHC^ zPABwJ1j9cgd6^R;dnxO%xI;DEF$-2u={EXx1*0M^ zE#5*2cCk1Lrl!ppaVvm>J$g^ZXQRK~l2y((vGP4{@ntMx>gFYLIIU%(e8%ms)*YFS zMiv(%jGSU+Q92Iu7tG)=$t5tnnesUNu6gv@fX}hD_l)4&0=N$p;hu{tTof|Rxu!Bf z7B8;N`LqBFNTQ^b9Ta|xA8AAi>CwZ-m_ zXZ?f3xR*aV=y6dLhy1mCw<*wR#yH2^Qe^{`TAv1H>D@a%qxc4Q=glGmK3wc#n#BF* z*8pWY>knb`h@qJz+)qld-o^W=EhPa~V{d&_8Eo~^spiW(^_UcSyCw))C4az<5n|vq zSFn`G>YIf>M0{{B1H^Jfe4qp$zU1_w?#UJG+AJrzHrme=R<%g6=lQjvWr2?MXzENg zH2VC=yGUb*rg{}S7dpM!+ABq!AOPtd#ky8? 
zMewY>h~7Qa-36rNe9R7mTaCoQWk+&KckI%Y1Yh@#!S) z7ToL(8@LAST^0Yo;Ce*?HFtX`O80%>*XF;29RTzS$)lXqM*d0|6_fwO2DfLH`99zJ zA8PHP*%f&dmo^nskhr;%F3i<8QT80R0&IGjGX-GNKrUa0(39B;asC*c{gSJBp?Oq@ zfEUzryUC_}pV?gk@51F-B{mq?SsYa2KR+Tm3Wl0JszUePy*8jkEZk}CjqnG~g2_1h zCSKAaS(kTG+gNL9v_2B^f?x+ zOx=Rf9gRy$i!!H7q5u&3z4_1w_u`0q{H4z*CR`%c0hhFxTMz`V(} z4^^5Z4+czu`aV>Hx9R=P;prrtYsNawqcd$O(uOy6FzpIfmgFeR28^` z&yqfq7PHHrz7kKD>`1Y5B4ruiflaQRt`W9Y66eyR6mu|5#jR=n>5F{+f$PW!jt>{} zPwe7NVVOTV9ipjFlZ`JA{wo-kd#>?fsoDf_ae{I--1P?$3DGO>$uWlVC|N?&Z(4C* zv3iGbD-NJZov?-9@(*d|#r${6HxhdSG0pegDX;N5|E#b&)AnCRi8i=y^ru?{q_kU6 zXU$UKQuO-V?K3%mHa$Xqv5+v>{)NaLFPMJ4-B+*Y@5>DWbgK`NT=zB6e*2u)UAQES z8(EA}YpyJ(G6B-N6488LEKoVQCQT*Zm$C3?|wr8)H6(LR53c(vSO zQ1ziPHAY_^#Ws;XE@%7&zx(SwHR2N@vhxm8U2U@x&vMz5Wz$x2?0Xf}aJhbp1a`$e-F4!J}{~xilGqU<^(e*OLi8)Pfcdf;}}68Z5g!M!SIQR1UcNxMS|O|2k9}Lmh9R&F-_70 ztTxF7Tw^u<=`N10uF}mXP=vk#-EcNf(3E>DJD(ubc*d1Ac(X!sV|0)#V8mas`{Pjy zf*GGS8FY^ku9K!0aZ1n$QJOq?^;Fv)1lY@F&!dENX4r9ssfe@DL#;}&%+=>xS<5M^PKhW;%0{VSrf~+^$Y#-&FMNPYB<6 z71w=-Y0_HqTm4P@V-_9+s-O8L&!e0TmhB9%ep5Hc?TvDPB7-d{_=E5h*7^`jrNx@N(XFH_mc@C+w7fvRSVnCZl!=@9 ztW7KKJ=avQR2`a7kAM4U$cJ+Q#AUvaw)v=nI?hgaFSNZIa;WM*!9M4$4*?p+(mu$B=X?!NW1^pK zZm`(MN-9H-N{e+ZBkbnbJZQkw!)PFJNG-3?BO1K_n-0VUyq_9}LUBH5O z645ac=Z%?}GOkKrh$BX;>&vg>ca%`~B_CjmYYaM^oyN-Q>M!=WwkcQ);vW+ozqC529&q|%8>_|HiCm6k;Z;gF)x zB9V{cQOQ}Snt{|b)7KTAg~E>v_)On!RI_6~cSsIPktyf^Xp9cR>@O5)A(5&_Pmfnx zIU@J^`ET*ZjDB8h;Ar&as(|W0s01*vP-k)};7g^>CN(?ZM7K=$+--#)l^1$db>WD& zUtQe;kl?jE3cY!I5a>_w9<&}3TP%+Gdop(DJKb_IdF@+_?$za4&DRD;Z^CE@z4c-) zM)BC}_feR;*<*>adnuJuwV*}o8@5e^uU%H)2s~yx9YH3@WsD$`v#-k7DZ8)wFh=`VAq_) z3j&&`Wq;ib6i#@KUCWe=QAPhc8%l@K57#YF&2674{WM_?RVXEj%e}$2DRv+8xH}v3 z)BT!FYG}+uUL%ictY$=ae<_6JOm* z()zk8Q+H0vT=?&+>X!R?BR1M8ecw@%4n!PD@+{)pBcxqpGkoO7Y8q1$ys02qx%-yFa;)OxACF56>rJe#QL*XVN3!al0am3$z%+;v zdDwGOue7ete4(UHHJI`JV^z=S$uY>?%1hF`#{G0Fx8fibXz=KqQ9$h_aJmeYzP`of zTYiNnSa9>;%hq>|`-0~15V{q(;KySJPX(|k>*8&|$0q<`B+K}zu5ik#V$uG%@vtdZ zLHPS?`p%)}F|+mSS}z1il2TKSWvT``h2YLpH|+a6w-?s`&^2Cfzb7H|-vzzSsLQkG znlqvu>HE;c66S_^2!4+VYVAo5J+)H$MaWAtxm84buOOWCs3j}z=)^rxH&gfHb+SHV zJZp4n<}gaf$SBqFgAPYb2Id#>0p+)RJY!425X6lD>!QUh_N$uFO6W*_25K9sY020a zQ|t=I8xAW!v+dN$i}&_JPbB9*xw8xjk7d{D{k|K(I66GYLl_^UWK%m+tOjXXlwGl< zc)iSjttTYsMu>D-gh(&%k=efJ)%36GV3j^Q|D;=;AjbmlY1(}$?+9bIR;r2-u@48qJ3g$ud-eC%w~ zUK*ZbLW1+(^AAz!U}X{@9+FO2=dvthlNf@STsZP!IsR-2dsoCkPG{(C1@tevEDf~T z(`yDS8Ehn>p#$jwfgB{X1C(@Ed2mHZcTE4A^kuhlUbEr9Q6G?~1vzJ$5+Uzbx~iZluDUMN4bhtm&h)&JFIK zFBh-XvDlvly4|M4W=T~`{DbqC)fL;pyo@y}F?jx^dx*6C^PY z%`txe7&5sv*aHJJnpL)|l`hbJ#wfbT6Ez9Y^P5y zQ}=gfJ%Jls6~O^r*SdnDyb?&%cF~s0{~c+*!l||onG)3wm;Rs(Xz62>ONzZl`~-ok z1d9*H3D6l)^QjjTALmRwvrRrbx0rEjj!|#-wsW9%nD|j3@7&s$93*+Q`SB_By9+=u z&+&K{`&Bk3%94L1->;%?vM;A**((GGci5fWW!NP$Va;o&lb^yhcLV91yx?MNXcFl_ zm@mKt8ZkYOh1pcs&q&(RHHyWF+6dmKh=Rg{nwVFXch*wK7@4w6?7mKKrcI7Sx>2E& zaZ2quG>DMfs#gTXoaY{XWy#hMD~}7js@`c9i8nTKJg*R+=`r`nyDC&sQ#!Q6FCo~F z^tpC;cZeCWpfC>v2)*kY9Hu4Pj9{|FunmUOE+Lbo9y8`o&Sq zOO}tO#02f4b?cpmMvmPBgHARzFMpC>Y*D%edm*jmDMx_wn@j?E1-kWjW2qHe@TVDJN8@0o3 zW^ty3zxsU7V(sxeb&(nAR1uYL7G}$zIu<*s9SlM5%^sJ<<1t0kFL)O{b9NQ0eV_d~ z>J|*}SF8JMhUN}r%#GTc&n_K=_~@lC&I5q*SwXl(dCjiSbfhv(1-dBAl;>!Te9!#! 
zaWpRT!hH+IbImx+$^EDCWMk>|xMMfZD)N-zyMCm7o-NON^Ww$_`e?IVsMmgukhl0= zMm)?K!k&N27;2iGQX>0^_{Qc{dgVniJtqrU_2fpjlob!p&Ypr^yN0{mI{6zYz$P4m z721EJk3Nq0`hc#m#X>dV>mL#CC5AH+m$Xk=z1fk}{hF0jHjFrs%DPPZ>r*&i<`H9i zlyZdzXlzs))HmIwa!GU!bED4L>98Ufhi&9fQtV&eHnMmlF$>xP ztPT{4&o@@EsHZqjym;;U?91h$-3|)NfW1)7ZiRY};NtFG_SM!AKM63vfeO7D9T?-Z zd1o2G4dc%=q!|@qgM2HKo!3nsz(Z!%=Y$AzrET(R+3*| zH1JG;)a|0>@$)w?Y9>*~L{dIwq)FcejWG_ceCqvDj;zwEPt;Xi5|>7thZeB+W-IRiP!ha<-BWI7M$l z&D76>eROXqGBYK+UwqrWJNG!Xj+Fef#H-^!*sm<=y*nfRJim{V^Hj?AK-g6uPnl9N zXqqQg6+5F2gb{%(FpvV0c1)Ln^b{+8hF$Mx=~&B&N6xW?VMBTs5wFa->6cs3UfZ)2 zd_IK?`dKdL?FO{&$2xojP$d|1<{n*w^8V;2!9HQ!dG|+56^)fM!(}I9_&_YD=5hMD zfRHtcKO40LOxmQ(Dpnltfqo;?l5}sgIg<2C!VANTUNYITRoRKn~rHZB^GSA4Zlf{_d zXGbO!5{QjF=PNPzaWa5M4hI5C2nIGxvFYwe?pbMhcQJ9hNxGM1*6FHWR*HB zI!5J)daE`I>M3mOn7loL-a*o4xk^QBi}=ixeQCHIcZ!~Jh&KD1 zZVeT$-j5H3A5B@8@uFvC>W`ZvFI zFWK5rQmaSPgs-n|1v?{LRSLm}Zap8C3C3VD>`PC}R1`O+E1x`G)oP7d^F8&Q^H!N! zDe39YUn9a282N?xrT(G8UhK!O1Y@Kvcsx+9(ZVM`*KQF`!!noi?28te^(zN(4?4qo zFlh7I&`18v!{K}9mfQLE$d4O^y<>K3txl?5s@mLZcusu-1H7W}E-hUHbUa^#jG z^!&T7ju#!(oNrH;L~b6|)fi6L&MWwn1!-nzGlQZX`uXKi`==OwF@HSKndHLE%+C|J zSz9h96WtKcs?b-eHF*4Li6;yrX=5ugR%VPc`Xoy2o$xg}*!VgJiu`AMzbC8XhV6)T&itjk&jR-MXFW`P&2}#ThyafjwnsHS$Q6QZ$C=Zro|Pxcqr#G|PGOAZDz5;Gz4s@US|w z6pM=Pv30?yuir1t^9X;3M)(xs9fl_mYr?KHtj4k2z<+J4IS^9#w*I}lJx0c*H^+~iMEM2ZXcirwmlm8ZmIK`_AOW)Or2+fz zu6vgfI}jygXQfbN9GWPFShP?BzUOx{Iu3Awz3gz{aa-eT(z#Wn1+}W~<%AvbC$BXi z`H^ZnQgx|_R=<{q{62t$!-ZWRZ8`|@R@mWoWN7BNjI({evCRu5ak{?SXy5R!Dgr&M z(W+RobJkNM)1fPlRrhdoYl)8_Ltcgj-TyZzasA%>Yp4L)ZULY> zyH{vi%QYUqy-IUHX1!_8Qzp@5T+hQa_edU<2X|&Gvo&8vgj__QgnNx-9JZ))kk1O_ zr7cnq8vGY99}PVmm61G%Pm@cwtE*lRHCy^tnqGRz`|Mq?T>mSv3%KXdlMz2hrf58V zWUG8f#67#`4y~_*-Y-hxr)zi&!ppQ?Snx+G?uR>X5|G%RE9x2!<~TdVT%rqOM?GZn z#ZopR>@x8#kIV~ItT@Af!6^>03o%e19;-VzF(IofAGH}V)RVvUYePRUp)gvV4#68w zXb^xYtFpoU#eH1`Zy42d3Wg)=84kiz;bE^>{9KHR3T{C;i5mU{rq!|9L2leiF7H1Q zvgjT%ucfoyHZF0O5?Gar(g@u_?dRw3-O8D^R&n`rx5)G%#7XE0xMs=`0KK;Ws*5LZA@T-%ZvEdA8i8F*ogTtR2yXQSUSovd$PoUR3 zg9C}N51r)cc72+P)0jDgGOA$6y-c6W@?~xA_6vt@ zFA=;09%P%ck}vg;`THA*Ve5%qUj~gEOX&%!Oqe2Ca4G3Jgnf@KVE7BPB(^Uzj!`ht zgC2jb#Aa1hToKU7i0hxlDA>k;s`;ap^n!oB-5}o%I#6DTDQ?|n$f)PgMBln17;ic2 zOn*j>-~7%zXBpOT7BXKiH3wS=_78nUt7k)SJxpN!%>OgpwoU*@#( z4EXvxc_TeIuvpsz1An0vL1%u^c{b!aO#}ywc!wtCygb2X*I*iP;o@yUjd*QlP%f5& z%H$Lx!9|DWarGPSWC44;`O3oU;^+{48xUu?KV=joRZhh}5Mre=*3+X(=PQPvZQETI(;hFO-k$l{cCL~j4oVbt zP)T6uA~|Ch239>xJ2PtkOabL-^g#hN|D9uk69Wz_Geg@Ovf`JxsrC2!&2 zGrrnNy~DTy`z@`#_1HkKl>-)sXwIW0awSmE+q=J*Q_EpMZ+@{N5EH?PwAL29{Ij>y zT<;^^TrfhgiG4c|b1459d^2Nm6h`jNr`yOg|K;6ai650pz?CGyeCPydLM^V+H~RqE zw(8m5K&uvx*=0QwpJl^3ko}7$@gO+W(0=915;!cJpb2O+FQfQ}fCmcXWiY$xpCUhbbfRyouF&(Sv3m!V^3B_j#e$DhTuL z)n^++8$}m0>K7rLuoTJor&&PGh%hw_tA;U zWyO_BGpn=q;QDPmr?949c4ivom7C{G+Ta zKY&j;$r}gV)5CFYJ?UcABFvk4L{=o-cjeEptvXB>3Z)6ZQ-12<@n~p;n%A=P`19W& z#tp%f%q!dzWc_6*-h`VAqj1OIAE0Yr%^rPn6vkBFSj^`IbxL@Lq0p#b8A#le+RqN} zdTIa$LB_O`J~UsLU;}bD~r}NQu)V*|3OSVgr3#!=}iAPHZy9&1{}>@>{?>m zu4kV*1;7MNVkc8Tss{3EOYf9^=Lp7ELL;$%S%hC@kl__h%Cw}{6)M>Qppd=+7DsOT zr;=wQ{J*#^PyY{vnT6`7h7zH1r6Z1hm z@pIywyS;dhW8RjgxNw8s{W{1EvNTUSj#eZ;@8fd1qmpX+^PvcJnwp*3m2xZMfFEEk z4Gu%4fAZhtod|!2-#(igQ%0Ja0D*iqZz0ZV=@pB7mF1tg3QA0?6kIB(jkrK*u*JL`dd6qdrkW^53E4gGek1Z zQD=|{<1BrO@-|@p4DxlvsfuT0Z}Rt@@|-3)()S20VP4QFzGrEl`a$uH%lA%npsJJf z%qkq~lHjphm!PS27!J*przUG!{+2I%4M>9dRv^Y4XjZ6BnG|9Uza~K|4{&O^x7>16k|X7(Mpr zRh*lM;;Up30)>#aE%%iO(FLksz>`S5XMQ>FH^{>g(e`1MXD>vA*}HEb7&FVW{b>JNI!>ZVW9mplZTNTSLCW3Qz4L1=OMX-Ag}JHZ%KOZyN# z&_QY4r)KNiV_>iKehUY!dMD+{?t^d=&{N31)Bq0HDH^k=wKNNppB!p9feAmmm2R3> zvHmKKBC%$F4p90h&=t$cyquR*@PtLS#`CkN_qGwsb=kV*bT~+bu}H!fn;8Zl>O9C! 
zyW3JYy|$ifAi;GUp9IZ+8A(qDWI$GPra%oi+}#2otAV5s?p^V_Pfb)YO+Y4YigPu< z=kh@WlA&T!iB-PR8H_S*_v8T>kw*oF{~2cSjo~Z-PEabWB)K@c_6TXl-j`S_It6C$&}&`taNH8=!Gn7=&Xaa&!3w zz3d-c>i=sv;uv91*RSih+qjIldQ=vO;p#py<^TIC4w0vObK3FuBHbndf)L5EG_|#5 zE0C_66(iPe{G`hbvtSI39z0>Cue|uX1Azp62!V|#Kqnet*RXIZXm@=@5bhdZWPeuc zr)cewc{xI}rj!Zj6!8pxk?xnIt1m|c7o~bb2eK5FYD;vsjJG_0mc;X23KtIoW((v` z8c6VF2Tbeky}EiPEo7{f1A3SHtNPYDAJgg%Q9YzelcX4Pln$ddDHvvQgO+H2p@<6y zbaGX1T-P%x44p*6+`bWd&e+e&(NsQ-R=agQUa;3X!?b1t@-^{A_4ebxdvsdbto)$) z9PBht^adEz7TUY5T#S`O{>Zr3=y%^ViBN3(^ycd?w=t++VgWVs+n_mBJs59a>6WFR zr-{+3gbdWB3Hrh)&s7bM$&V?W*Ur$-**@vqkE zs}Yrd1(Ee41I89yp2!p|mlw03D{URw{{~~Wz6A4kiDzn}H%Z=Fh4PQeRSi}{4L~oO zAnI0kksVnM&2DD|o}7(|`Sf({fs2rfRS{2e^+xWv=MdqB0Iu%ov$_}k{AdxA8#DvP zI*Su^)9GMGO-t~@-k~j;aHg51J!K+!dy(U@=2}cg@YTWGyU} zriC4D+u9@qX|{XjM$ap1O6A+rHA^ejQGXP|y_LSiKhzf*>@WLhh_?GO)$nY}?%f$< zLqnOz2-OSIu5&MP#{)@wp-1d3Z}!=WXSzpjzp5!-)cqMH)^fVLP(E{*DD4Bev-Ku% z<9iGJ^@{=0E_K%ANuc}g@3+0TJhFUOP1T ziNT)+5-rY}_jsu;RR-AhF^LOjuPB>P_fw+&UL;7Mt#?UZs|WFhyW#z#3I$eokMmaT z^4k^Ps<;$m&La%s1(%C&bnH=Im84xRB9<&$ykk27Ynp=X)h@S0B1&LOGzccyL(jZ}&S!0(;MePrbME*m(?yHXr z*tTt3wPE=#cVlgh-~NOZNR*aow4Rx_hq>T}s@1%;_4`uw)x5o$E~#lWMQ407k@5HQ z?P^v<`2$Bv;suI=6q$G~a1igmX}`3&{|9gNCdG()zYDIwnyB8odT@7W`j>+T4a&{6 z8=A!jlZ6z}w*uLf*(5i z`r0oE%e;AQ@g@p$&fhAW_tT~P?aPL)W+8k2G3q&mnQnbG2g&wf1b`96VMhC_ zr<=2n?BTM<1(67%$za+HipsRi0T@w8l`d*uamH<AEFH%hsLXOVk4( zU7mySt%cLJJwqEXT=p`vzH7YMNN`YT#TE?Fpc5YYA!Kq29YSe=UnFz4%x(Cdx^drw z`ay;N8Zjxkc)ic5hmo%{ysjOd(fe8wzxshwD&yJdgpV)p+B6M5)d9= zX4mBEJ$7A!CY^>+Wbj9shE3DCU=^Xk>FZ4#B+6~%O)nl#D)V?`j{jE9o^a!zfvn7rS`M}IZw9i2>0B5kpj z^1@~ec5v`hTIH51`d+M^@7l_TcQiLGM%hhzrcD&bd93pluc)cd8efyYoY65G&OCgw7bb{3(Y? zHXw69-YkBRr7JyWiX$!)Ns_lP(2pKfcr58RC^R^Fz5fZZF^9^Q@Pg2w+*6gDv=z4m zv+5-lv}JCdVpa}FhgInv?>dth?*~egobO?<9%IUk*$wDbv0+0~Cy=oE_425g-j{Eu zl!gEQU;i3zqq>wxQV_sS3eJR;ZM(&gz9Jg*2`vZNau2_0`F1o%y>WH#kWFgM zkC4^Bx)rOy3mJk58rO15rH*qf9;d0>yMqq!pvHy*r(&gj{ps3+Zm}*t<2-^nUUo;vQNu9rPSTRJ)d+t9u_7P->Gcn}NndsVG5oSs^}^oLGi_+f=G{e)nF*NQ=r%=` z+t9Dg89qyWJhQmO({Knw3L=;kK=cFvz}6K z4PD#0Ey|#|j;a7EgjY)sMW$(`1%F%WI$p3XNTEw`fi}~EYtrfc-LqlghUn4Ur*Bnu zb)N|?*r#|oFR9s8-xU@-4TG}m3r#w13(r7fg%mqx;J8&S^60S5B2aqd{oaLx59R5k zI7KU6+x>R_YS%&G=~aoXF3mOeaqi_G0li$o7wc96O`oMy?!L=g*Mos57^*W*x^a8s zQlFf<(<-cz2yTZkZs+DxTP5cG5KP`<@4vP4|E37%y-`c}|EPQKuco4|k5j539i&JH zr6^sH-oZu{m7?^Z6iI*xgdU_Q9aO5)yGm~XDG4AD0qHFyLg>BMPy%y(o>?>RJ2U^l z{32_)Zf@>9_vG$<_SxUhmt)kB-UfN;Y59+TGTy;!qt;o7l=Af~p3O@eaE4KTj$riu~|41I(Ryp$bmgvXYO$g%a7~vc-^xJjSIcwy!z)JYeKJVPxkgP{VE_7gYW-JE@Sgy?29$@|!G? 
zf^S2+e4oxm0Uaha-o4Xj&(*`M8EG)#7h9y^_{ng>=M`rmITSy_PJfAC3VvGilHKjN z;7!1Wec%v`+zCCA%AnW!*?D6@gjq@7Ip9fj&yl?rC?z+E;CZ1Pl+masRVvpMvRjc>hjP^FB?sedGzlG78?is(t4Mf zWEDtF-!2+32hy|AV|z$Jb0t{;&ZgVSmGVQpq}q9jVowp&j8^aOipnjrcO^16SCMF0 z|LmMz(ArtHocT6oC`my|83Hf~+;!2^omY(B!8!*ov0vOB8Ngv`J3PYINFRm*UAWy{ zx~-7f@LL&`LyjeYoGLWtIXIk-LkdGd(x%vN&X|UYhb{ok@ISgz{?CWO`gu^)Ms(Ay zFoc>v+rHGJs-1j@pdDB#W%5)xkqp-P0P``K9$au{VXtaOhE01?5FEp9)qGv&)X;LD zWn=aqcW@&ND2#*jC7)K3U2+*?B8e70qd&mr1=``)LW77t8Bj0{%SMOpc zEtEmO7jv;51g`Hr^il!%3Cf?V@a2^CX?3Q&h9)8hoFeIB1%`)(V3$wYaxR+p^tkhqtp<9{twQWAjg=O`0EXE?+U=B|3Uks*w?2 z<*FH;k_TlVyF;l%5DKj|S1akX0FBu;fQ$4?YBjpzbnk0&AGkfoCabXB){@x622LE; zeP;owbnj7!)z}D?&dDs(Ro=2se?G^QKFb#ksU(>Xhh_{^R+J2s8?oE~`>4b|ei(@M zb@=-Zc8i~pf%N1IY!$L2!xT->2@N zl}~DOkJu5DWPG3veo02&I<MoK}T$@>)Z8Qe}`SVd{l-C%O$ zzyP8N%MJ7H){?;Fh-B^v^zwCYJq@%CLf@CZ`FYv z9O^MM4y;X%?wfi%`Z#()8B7NM$vm0lo+eI3;ypAZ4`E=hvPzoKG2oA#FW2VMC(t@P zqULPcr<;$E7h!(Uw}4O(W2;pP*3`8VbITOGTSEioWtt<1OGb#dW z6MU>v!1@f(6o6RFkd0URZFi(3jXH$8*r7L722oR{4)EDe;!E6JF(8&D&c*h6Vy#k7 zr{O*mydAk1dY{9%R;+0dwVs!ck5*$8iW-pwRmBM^v)qZh0b+y}pF}G`j%6tC1M^w? zoy+GroaP+v^V&M@!LM&$kEn_FonR_t%YYat6L;$ql@%8Ks;!5jtw7Gu@BMC=!*Y(2 zYkluEN-M3qIV|iHk&7>^Gkzvlkl!XPCE+X&D@g=dWTFNa=8~ad47=hA?Q)TfT`9oe##wxJUT_C7Sypo1zmzH&2ijg^ z+=f0$M5#p5Zs^*;*VP{Y5tRnZgiYg<7H&vyR(VNRrrZ(aw$5C=`&Z&TRe0{xzoEsZ=>Q2~*G zu6cegj0G^j@zk*&=4TbHHh2ES^C6E8OR|my86jAy19l!$m48CXoL`N~)>-K})};ml zx?er8<4NWm8qxdm0&*_b7l?!`h38tW07=&0oAvSF!;hopWsyR6{hA8IVwt`QGcfGJ zNjJDd8k}kMm7k(QcK{LvdjQi)9)ZDDk!l^0g^k&pmCb@ZbF5RO2_O zptW~afh_)N_h>zyvJC_zH@-Q>o!1^rSIl^E^Cg4NX1&FmdI#*&o>x8tt0PNgBQx zclL%d5Aoe1;*{TFeUSq#upQz+8aAg%bt7xr!H%i4q0HRwD|PoUAXXDgRSe|GCR4zy zH!IhT6=Nd@XL_`OS45yaStt;Th(mYCOSp-~uZGzvLN_|@!Voh)=B4R-H1jklaBF?f z3zdMBk}DOeo)Hq2WG+-A<#H^%QF?|a@4G`i?3n!)8o5dV$@B*WH@@ccw&XGnw3f4U z7tT?;E#+^y@!*of+ub*TLt9-R-jYP429Y3&`ltJJyaO|!WC%7>WA`mp;$zZ+s-p8H zrEE}qVTO44g&?5iWUob&ZT4Peq!kzBzf*Z*wIdbo`6=|u>*`05N3jYpP2W3+g(I+o z(WF8wJGi~<@Emm####z`2oK5LX+lV_BPRhM;--%yPPjDMkp|p;0nVes7@}$OW8_C* z*zxxd*^6q+Xc$FH<_kS%Lk&4Zdf;Z=0Yg;j0P>?<~#^{>I2@$T6w?lXDZMNQS&JbX0WSzK}D(bv2geuywHDGu~sj$6^b&W`Rn# z`s%^VMSSq8rzQl=6@>3F2?$NtDT|w8Z8Pd%*(d-jAUTXT+`U87+T8O#Q zjb35G*S{eO7tR4bZIL&6LfNVuTDIA3?MzkE83P(yO}pq2+D$9{0Uv?IxU9i*iu5cY zr##Eh;YzQS4RVUmr_jI3wRQvguS-F9SV(bA#|-lcFjjJAvRWU%6ephmnj?&!aAYEb zz$arwUfF8M1=f}m@i~TRBuwUIC0c`kS8RKr9&)5;Gb3`|Z$y;@?miN>yfz;(gM4%R zGTn7vkki>=SLehq!vvpgm=sjq4!t|Lgq@5Qf`_B@NV@ zHicu^u{M{IRfVwZKH-aC`CQf+(XPqo8U|ER2Twq9k-OLp3fkDC=5)|tB0_*>>re-N_e9M%R* z;U1-xfNOw$2i_;|r%q>-R2F+al~AWBHL-S?QsU&u?@c1=)X;HNKYLtGN-U+VSKai? 
zKPueAc`7e?*5Tl>_fOPRN2>!*#5+Iy1M12jjr?Pulc9zVldKEfS-g$wxoQty;xs}8nEEJBDLHX|Ue z`M0+xSMFIWbn-4)BBWUkCxT26U>2RRk!#IOB8;9U!dF{rcLVo71ono5a@mV?+)Mk6 z)(+ipftv*?36me)s$j$GRqa@lgemXUV8l>uO90O5j|eevUlQ~1MS2gH|F0?WrIo{n zJQm(7H)6~x*6@hbpUpTBWWB#Q{vbyp$+9GUICnuJGufk%)>TLO#6Kef-Ey7I0eK-L z_lvi-{!{|Dq9-Sm@X*6NRz63{#Yu{BIQ~$z@l)0Z-HF_rLm$T-_4mXRyj@OPsL}RU z)I8kV2E*|Z=G zZiWLq{I@i(q#lV?{KUTC>S3 zRmM`A)_ZQJ5cwXNzk+0bJA6~C+S|^Sn@>((w8^XgR5v0t;e!9XKl+TAxB?M4$|1(6 ziCS*h&!$QH!)<=rmBJ0LfcM2qhqu5Fa%_2rZ3gnGAt3FUxJB%@+lD*?%XgJlJGZYB zNTFHvoWEpWL0t8?*yz(4aRI_9xM`rSHSd?6Rac0@b=jsBRIxzpEuUpkK$|V+3>4#t zHHA`s`e-A}40cZvGf*(H|6VNyqg#zTe*eBLcpZNzqtm3&$u)eWESYp~rLVx3 z{*k`t_g0V6L1XYr4$_$!>?g%pR%lF_p<5XB!O|RGu zWt0-PPM4ZJYpETKDO!W^XK&3XIXp&7`KE{({_6d~-AiCsu}}p)9#|<7NLEbX=%eh1 zkB%oFi&_1EatV$ykO#z&sOwrfQO$<3qm>f6tbs}z+V&@m(}R&vmr95k09tc8+b ze6HLwkDrMAL#~?|rPLhEW!{ad>~p_3wzD~OvQ!W<4qlJsZm@gGQ?h;vZDO+1wp=ZJ z9dInO_DC$4A!rOqZa?7l0#~IiyEa-MQR_-GP&*h8o|2chXU>7weG)SSYwv7bZXMk| z(Nd=jeJfOHAlb9+y-=7yy=8rC#mIaB6_3Ycw05@6$9bK?5^bsWb<$TX@9RFnBX$=_ zLjnG}?>&;5zWT>JwkD)IZhFWIL_%u_MDfGrI=Z}gY?&f`+TbrlM`&A^Abye>5w_2I z{uhkC=K7Gx#S(`WN@Ks?c&3Tpr{!KH(5Sa7K4a%#L3iWmJ09 zl4a)*J2$bv<<&dnrTk|3YEca%w+sWFZ0EFn&K?)X8SX@9I{*4e90L$yUwnTq^^-_Y4OOWjg`=4=h$cUq4Bu7ncVRN!R*FH;l z=P9&SzN4wVcP-od7J=&a0T+#H2-DZak=KVz(UF=QCSKjN& z*q9FmnSBymMO2rl_hs0nWzYnNud^Tv?a8Lv<440^cUrSvU+&j zDOZ*Mj^;*DONTAClHa9GxM?p}kG-dpqf;6Q5d__#h$Q6{7-FclMjr*r69s{uC= zl1`GD?^8}mit|#AZz`CN9ebQP9Ca?ikomG@!o6(!fx2*9hjr$VY58Y{^QjiY&+o(_L#=s;wkUfPfD-lPZ%y>ILtfAipVVJH~Wc2hK_1=0qGvrk|sIoT6-*fy1 zRh{X+fE%l8OVt>s7s~YCc9TjvlJ?y%CZ)Onm0LP)I8`o*Mbez>scnMMb0u*k=cx)D z%-=8+xV-=(N3Kt7ec-#QXXz!u_|t3|Q)%aPXd+{`5&8Di<}=X~y?Ih}6&JX9t?cvR z{J05a-to{n#DXV3YXE#SqX6qzGI(piVF55~%~HMr7Tyv!a!5uk^E6doajaQMy&h0bUmA@ zjR{O_@rar2$uPtn=+`4g^EC13KFB<-?q^j<=A()}J={sQM3o8*q~5yh+$3mqFIkJW z?LlYrhVBL8@c^%bOnl(5iflkjQ>epM6Y_ql<=Svc& zHL8nbFjH_rJYw#QT5qFIrju7vks}7f6RZ^>ik6A*P881F*ocX{!)__8EYBnMhtKF?Z2$?otEwVw%jdn#T%AZYB%O`8YMxFAtsX+xJ$a5;Z6lFs$k9&2YaAvzaaWbtf z`}YXPjDk#PRUYQPR%uCW7xhJiw9Pq%%AH2N<0?mhv!g0wXZ4q!(m-2|e;GVRtt0qy zr8c}+@U6-BGRIHIRzGK7N;Gr?PI6VDbxH1dJI32f=U)l~}K#P5P_zhM?qP8!-Sxy@v#PC*#TSzlY^+ zCRF7Y)IZ}+S{!$vp#2$MQnAVA zT^(M_H{9%u*r~w29CYL67v}s^=Q*-lxa>fHBcpgGyBuk^E-G+2s7Z-{j}`oygWX34 zFXJeV;vnL*^Pvc~=c_)5lk99ecI;sM*D<3wXET*3dh&h9F^n9Fj*FDQ1a41mVo(RN9IxCx5D4u?W0nYAVNQXLx8fEuqoOjNHI0mfFNmPa$3pWGP8FnNMUItFaHQWP8H%6bsNj#T*P{1dkkb^JrX|<92gJPD zZtU!{nXeYpS{ip=?!G@erwn!6-3(N-{A6akgs&@>U(j)6;ej}i4X1@E7hTw5M|+g! 
z&4d2DwL&lZblh5c40??~Z{Mi@I+r#W+sKx>YP@DWh3YOB)t zOOq?hnF~8|O4S6{xKcm_3QCt;?vY>PCFDH}h{yxTNtMV+s)JTjZz4pQcS_sJMcwtU zOLe_;FedEi8EVba3Q)@Ynxn1?@e|D6Y)`H#Y$~%095R|y+s!)~@0T>YH)VPi`JKRq z9!Gdl!=tCt=Dqmz|F*L~a*zL0A*zAD@_I$R-wNn}6XE=v&;QipI_H;~8BHoCQnk## zfGMpEM~UdQj0UmcqvhuYs`v6pe&e|x)USN|B`eueDhY zX4)p^^&)db`7}@d+W98l3kC!&0k=va`Sx=x6PE%}9OmgG6$7fu$w5OC#^tQIi<&V9@FL48}TA`Taqj7V(4RE*n zq6Ly7X3yO0h~NF}P8c2o-mu@Tq$5$x7R_NE{2om54*ml<*1~V4u_SU53Sv1tn(M7g zMaYEAs9-4hMz5EuAZkO&5nWEKwpM25)k7fmMT=0f#aa!n8L`M&xQO25bWrIa*IFW< zM^j^~Z2)KbBnirhV_Y(@4l#U5u^n1J{D4Bf!@W1b_SlQB-E2J(*E!2ydya`gz6ju6&en+3YNBrT!v4QfjPj`!0%wp_&c?0&QQ;;o?;G z1~0Okwm#gU6c5b}!sypqE6QUy)8!si-8@OAvDoM!Jm=HrP93n%-iQv0G>p4xHxVVV zbk!E@=5q0UGnTj{03wBJWya7DXkfXh=(El&ctq6)yFc0Af}L;5WS`itq7Ua!ZBZhW zs}tuRPmnR+L1d`jgqCg;cZ`&Yjo;VwNFGc$N3uuRIT&%~Yk%?0!MlR*%`xaE_P+Ic zec%TUP1INxwz$^RMQXK{C0NkRo(21o7FQrp^_Yl#^Ky5?dQ+a&J^EpX{P4bvcatdk z$N3W+?1b*D($Z7pgd=##;on*}s%No*?pj7wzcuAsy7pPmYr2wg&QCQtMptQ4O71eA zQD3Ju;!Gl$%pRgLT0<{-EVy(xU(r$*zUzh)%#pSsDHF3lbf8vNV(IFTPL7y=YD52C zYqB0*3cb&>dIj6z_l8gFOXDrkRngbl?(I4K6z{QG{I=4oBa?M}dL9pEv-Q13VZt$# z46C(G^x``o6W&#yDRRf1I+;@jE{+;uO`5O@@o@WZ3R<e~1WDN16IwNhz3HT_YbrDa+~}nQv&J z+>k#a-h4nejml-)vZ&L~z6nC{h9R1U=3&5EaF_3{aCUh=M7D?#3@@I#=z=(qW>38m zxmY%@Z9ybfKmhyfi{fume6^zpv5RNA(s`I;J~u4!#D0zmn=Vznk2({n0z&!sX4Td0gWmJ zOBsoJ9`AkQgE*n)+<_=X>_b`f7r%-8R- zB?G&}kJtYSS0GkvL(SsMB8H$BoXHlQ-+fzaYiU2oB+o+$p(LRx4sY-5F=7;vZM{(P zhAANP-GzGTN}u>vZZg>HH?DCjYSTMD&4}7(v{JI&Y@2nJDv75>hKkeUlF5z6#<(Gl zf-gi%SoK><4ZV^c*krt;@6+m?|H#VleNSGCMC7Rky?tJ!J(f>68~m`znXhf7ptet% zQJ3k{XbY0RavZ@ejh2*lA_G6zDKuv0QQR9g!nRm_O5r09cV*$>Nq80!NEIQ;ys`~< z%9oT*ayW66>M04_h^MQ5k;n?OEYCs&U}uX!qK!($ zWujh5ZG_DHGA7u@qTMiBiJ1gE1sy6?{$=`#=!5I2Noj4quiLqY)R8&~f z`@z}tfqf1p*qt~UyD0~=G`>^opG|ryFIu`#XNH&T<bOEuYuH#mbJvky9KqJ{PTU z?BBF}!@5}j`mS=f_?BjGn4*@)9kDTP;|&u&eL{+0N)5$oQub!G%i^L&V*jvZC8wfGYNJ#H(A3?0px!l^+P^U=O-R!OiY z8u4T?pU}bMprq|5mw|pQWqJXt z>w#)*7&x}~Z>!Z%ptj{WE{48cvZLxKl_9`$s7K~Rz+Y#iiW{vUkWQzZqIz;wy;~`H zz7!Ax(!2|yI+qi_IujfxHkiY?o+BK=*$xZyRfz58Z`blTMN-65^3q3>B+Yk6jLbB1 zlmsodwbIg4vUT0GK8EtE^g_w2Uu!wVhzq`5 z-c$l1?e;0VUeT0r!#|9;9)q>ssR*jgUB0y@i*1!By~QQd>N$g$z;z?;5_+GV{Sj8y z(;1xmXh>eG6kz9+(XPI`Fq37#pB_JC!v6A6D;7K?QA>w<&(hoA$WOY33U0o#o?7!cGf<(C3;1*GlPzZ2;yvzaX`Z=zWe8lDuX9?JAWJHMXO;_M%GF)gxsevKeV zz=l+?zkCh8K}k$vyERI5cHhCOsI<;oBOX<#5rNV?Dp|FzDd-^hBpH8g>EjYdmn1}# z;e<=}Przi>2@*1eCY=fi8l=SB;L}mq@c~K@?K6C_Bf8Di&LNk}!Eracz%HW-U$H>n z%vH$E7I*bbBY_Vy3_ZKGnM#s4yWm6BKdzWO89RhSw@$`mS5QFh9fW;88&TnL7YX%gnrw10#`$P;MTxF4P*k0}wF02ssz9Sz!pP$R7z_Y<#Wy49pO&xv~LkXLA7 znb7+ecJes7eVva>ppaH91qUowje^vScBM&}vSqv;F@H0b4qHi!`FJ(h^=|vX{$ng2 zhUQnrR#1_9<3ulA1K{b5AX;W=LUx7hi%4tnc;=4X&Oi#wne0O?c9uhH?`c;xS6PR+ z93z!6+;#9fDv2~PG1J1T%ml>}U`ZNM8elux>lpf*vPaR~`xUT5K~>%P*z5-v3Nk|3 z_4%Lg&Y^V|?O|z!&&ZUcE|p)(`8BMG_^J`LdN*8%!7sfaT4RkJ32=HC=HsFL4G*{% z*^?4^CKA|B!y4HuV(^BeQLKWOaW=fxe4l=bhjvy1UsY}st7i+Q)T3OlW+<* z3{JhAWCW^N!xG9pXe%wtXzPLqxpO@iJhSKMzSOF?fy^=^4HGj#02=Mr7YNE3)M`mF zjU#`x>CS~4ysS<3XOorR#K#4725)?55YHZtNA~GN#dTdnJjM39W2dSJ$v?4AU&_C+ z`IY@s(~%y8d=}5jmavPlkeIF**wsG=urM!<;Zq#@hnDh=f3KE3`eWfPg}shyQ@|8w zML6t{rHAGr> z40s)zNZqvWrfWL$p_9v3k4bM{f20!M-EAef*f7UEmPp^`@}s^wlVc|3p3#e@WTVxl zbpI?$n$QY*$d!sq$4Ujo_3(^GH>h$T-cRS=Uoj~z#E{HM>}h?P*cf%Yc_HcWqUwKu`a)Z+jiv%P;sYYQrYC z+~?SATW!|PMnA<@K&yV;H-Nqmilj4K{dF!g-qd2MS^GFc*^g~2=n||MJk$1^k zSa#pP*B)~I2g;p)UZiIIM?ADfu>xgWE&UNfizp308{cS?@QZE{2dwb`wqM2+uIfyOSo-20Z z@^r@vu~=W%DS_o4?lDGIg|+2dH$pn`HIoC_v_2!dfQ?tRCaGP%(AKkomYg5UPnQE* zpjio9@eGdnwXAKEX?6y;Hy16rIJ0|d)_UC+eMmzTL-xWLb5C1eIAYRZRV*C&l42&zEe+D}~pd z65)$oFs|{n+Ui80_L1%ncfIH`yA$vtpSW}i5zf5pvk_vgIDOuGW2tB)KACP5VeVbQ 
zV7B?(7*8e!DXS{O;(i>t6Uu7q<&tf`B*SpZAzVf9lY#dSaW8QQHwH0uJVmnXAKeyM+Kbf8)2)MTzj3GoJZ;nB5bv~md+PZ~ zWjaVjZG(laO&ns5FvYE}2ISmECCl-;vhFJ`^#p=M07Luo8#=Y?O~lL6-L{u29rTCi zZ%}36-g~&JSZh#59rv>@I2Us?8;zLLw~^<;G2cgi9$8x=TX@YIwSlgT-Gdrc%ZN>*D0<~yP?|>E zX4P997@{a*cvAMNA7Oi9X^gf?h805O3aM#82>F)=);>oN0tA2|oR2%2zTkwf7bR8b zY3RcNjZw>Yi&;B2hBY{{66mBVBOb9g_d~qCnc?|u(v`fHIRZh6%&$j9(3yH@6fBJ{^ zxMsV8==eWx^B~U9*aq(Lzy2%67I&V+su1x%1zRXWU7wLP)cSuP7#aURM+M0|BojWU zAg>XWSyHd)nJM;bWyGz-^($7IHmlFrav3Yp|8hCH2u7WM|Jm95|DN6dCZP1nm15*p zPy%o2X?yh zHO^Pt%GVil(D63TlLaHD-Fhzr_hrX_{`d|^(Mo0~4w9wk76kQxuMq&8c)|zSdEr~i zsby!i_|v^adR&qBZ=nG<9=}dc%ZFBQI}U;PpQj(kw*b}2;Y{(xI0}4_!MY8Naerp{ zq0Rm+a<;8^E2u+O{tb`U5OP)I=7%H2gJGhj<@V*yxDL$XsDf!p$AYG~v;m%Y)BJDU zuD#e6{mni#F21_+#q`YPxer}1T41axfqs5QZCie2b>~z(VT3qwGpxLO z{~^$SDR|#2aKn>#vhkuUwKzMLzaF`KI^K^~18ftiTyvRujIE&iI2avx>Jy{Rbsc@XI2!WH4az2h2ZxK=TTWCeSu>3iLDr=XnTF zu>}qu08YxPiwYO3dFI)`YPWr7mm{G#vsq_dtLYs7pDCY`@$U~3@M{M=(G>&#dl5P- zKs90>aOwfh{Oi910-QAn@Qwn`nh#tdBQV+Leg91+#&+p{0>~Ab|9<5!5y12ra8VDR z1}=qH*9&;k(guRc9~e*Da{$ZQpp{NmLlmpt*Pp{J8)2>kPV(a^$5@Mv3nHK}+_{mo zf{BSsmTM_;J%vcJHQHu&Zs8e#PY-edQUt)P7??I6{(GYw;VZuAt8PD#%gnO=tjZ6x z{;aUR=IOZB065DyG3MpNpKgmyq1cy3jP(Ejd(P>*@!W|(EHpwqTVeKZ7Ms=By6*H!DU(e*UMo29Zl9y z<3<*Zzht`C1oEVMoGFzLqda?h^T+<${f66*0!CCS%6o(oY;=WD_`R+5AN$UyK{EfI zPZ%T5;Lys zIa~=+aJAmUF8BF?mmfL{{mt!bRP4jG%x5b5w11QL;61X!3w(9D9<}{pTy_B4 zQrFhM{(LSk*+Pgv6*a#Bn>#8y^m#dibp|@Y%bI zZ*S`VGVuy0lhhVH%{2{QY(Aw1e4Xv?;IL*ddeR5iv_om=_SMF*|8CHoR@-YdTgtcd zOIeuIbVcK%-}PR_vwT)|Ua7xAqo& z-8@$ra8`ShZCGL&8IaZZtWIzU00R@GUr752ST(jZURVYdhHn>{vc`K36x$9po;~v^ zhZ@04wzn@o7rO(XvV2Q({g-&%R{tzvN;5OJlTyPwBwf&D=G)7F|NE>g)zGKDm-k(T zDv(duiGeEgOmWf`!K!;-xazasb|7SRy@Ov+mS- zo69fero@~J%j1jF$xObjgG|9vz)`fuP2;;BeyUy%ejzNax;t2A`7id2xu4!1eCS1+ zs{r^l@3BjMR$b{vO=5S`L~6?edTGp0@zh|XZ zWIU;l7%x@K>*Tzs@ORv18qBwk)Ey?q_if<*LJb>U8!RP{qvv%hjX&OM$nYh88oMY2 zhjZo?QODTf@94}$;1{>k?dD(j7Pk-@Oa%?UFj{nbj3!?-4gD5a`l?p@58lk4Q5$f| z;}#HQhH=Lb=2Ld>GTr?SEpN)&Nzx;Hnl3=AFZ#xG>fXtl_W<78zvD6StSwU|*NN_D zW&_iC_c;b1mN|+K7}Z{uZMPs_4!MT9c|#}ZiQOHR8h-uz*)OO`!}~sW$wZ!XXcODY z#$PwNzm_Gb_pd*YO}H~?Y}9npaL3TvtM=}T$x_hU+*UoIghwCa71vJ67rCj*dR_^= zRl;}v3!Kv&Nh{8O7itLIVp}a9$r|4;x8^dMNL7d1I9x5&GIvvFmXGr*lpBi2K2ThY zoa|^xvi18E=3_buTcIvdQA>aqhkt2LQ9!Br3A*ue>*{~x`?QJwiHg!xLngc(RHX05 zUXUxcHE+gOO}MC-4;0_@dbAxpV05eck%)(g4cKQeUyNx$kF>2NC`#1K>q`UUy`c0N zQH|Xv@V$?z_%hm?NtmT0)B4ZYCD_>Kh^PdI7gKhm_U}`IsB+Ehx1c@oW6?DAmCX7E zKd4f4c@jD2qP^S_CE9Pcjl3$H=pF`IGk}o6%o8;p-g39iaT$^BT;omD)%e_BFy3Q} zFdb%E?#-vA+Bl_G@d?N$>hbMvrZ|?wF8p%qy=daTTO(m_Rptpal-gqlE2^1wzqxG^cm1c0oGn# zZBI`_@$0X>&BJ0u=1AGaqr!n+m@FphjI%q*mWIi~N8)!sDeJ@V=;N~R@!I{=nj^-w7uGj-do z1vF%GCY8#c8l1JC3d`)tSM6pf4dnfZi6^HOYcP1L=P&quUl6W*6;~+Y%p7j;ykON_ z?LRqR!HTFxA{1oxWsrHkl=(xHjEd|Rp@#I2smPvw!;e$(zl|c_3IE>s`|9Tb%K+q6 zv91Ov?;c1j|nvBzkyHBYZPt$M;6A#*ZZCk7ZuXwJYNvQ%Ul0si&Hc~bCx028$>SFaH4*; z_o>U#hlwNLL>fNW$>gA}@f<`u$A9TWp(Im7;9<9>3h!~etRBW*E(%5xqvGMquE`;U z)ig;x|Ld_d$>(40LV_3Of5b?i7v9~6EQT6ABmPPKU*n#%BDx}+LDe3`B+MXu#aJUK z^^T%c318t&L1vdDVLEsJH8|7j35GW-d`mLfdda21B3)%yUf~ zw>P@TGj^QP(N`FoC)0r8!hBx_e1bxw-2K$=gnZ(uwwx<$Zaj-ZQEx4NaXrmaw`J!s z?5Dms4GmgLbRtgtxP=~3(8CKT>$xwQR1u%r2)+zw-@Eyn zG0-)wtp0$u)%abohwHuIo4lf8d{M!Y=Z}9C|0F#6EjAD^R&mm@M|Bk*8|>v2?$GdU zRy%5bFFBAq+adU5_wg|EZR>_w&LKU0AJC4@ziiJ;{aGo%1{1tF`}Ew-R_4P&U@?d8 zpZ4M?3POAJ-QoM0g0--TKJEXN$Y}xQQUb~i_mc*fj|{|kRGWt0kdYv}H6J_W**~@0 z)6|r4;csBP9r?~}U%Z6cfT~fT!vb=@Sq2K&_m-*eXfP#JzUa7~NS1iDp@@EdHY{(mwT)I}v7lX+58!LkxN1F6jF9PgZnhH5qR5Mw4U2X;N}4 z7`u>pC9(*fXZwY)o*0jpP*z63o!TK3Z>_!LLKMD_PXJ6a;%CMu+va;%VnOi(FIxK! 
zU>K>?Td!%WcTlLqwQZNpn@t-dLl6OvlcAZ}V(L19nvn|$v1Nhu_{Da?U{f=jgT1$oYWn~GhHXGZN(7}tLSlkSGgM02q#F_GZWxLn%|KEq0Rg3Z z^dv_J2uKbX-6KYAY-9HuKHvLyeLwg0*Y)Rp&c#2Rvvc;^E1vOqw*U3OmuzdTnMssV zoqo-|r}JrvxYsU`d(W@q_1F3W@0Ox_9^1bHpA7Bd*TzoGwI@uxrxH5A{8_doMdRgz z_8WyPJqnAxi&+}Vvl13pb_WWs(4Q}?-NKr!wCq6mzUxuhJj_Pfe?(?#K8R^JEK{Qq zYBf7OlIWMm#6vy=ZTjS`;k4)Lp^?JZn zDal5!GPS-FeK$DhZB=yCC$VeUsR79=vQ6kuaWGmb#4;|32R?VK6FDsymP46~&Y3R5%vbovYpy7|nKM}Nd^E++u%&-QaOJI!Yfj{!I$&3Yc;l12)V_#5* zi|k4576|_@%NSs&;B!;!d@y;6G>Z2+@+q>|rm*Wlf!XPamlZEMb$Z|{44*DN{_CdB z@cc7}1>MVx=VSvORXr`XqVYHBKL8?Y{#>A69#FvGV!vYNx`2D>h$=vt06j+|W5s<&H$K z!Raq;5Zf>51d81P?hxqaSGE56oY_I#>ty>n7DN3hS9D4w1M5f*Nu727)UMv|2OF{4NCHtU^zh(oVyzDMSvP9&k^9 zg@1gV5UdVUpnd$kVJ`=!QwY-hoUqKiYFh69@;CDD)q(H9z4*m0X-f*bQ~OXDdM;r z@aL}ip?zRLtD45Zqv28(H_q%F6_!=`q0s@Sa=<7A(7u55`U>Hb=t!igY=5wG*LLl! zt~MfY=ckCIfyX;`&{u&%P8{oig=bb<&g~*!o-3oG8Er06i9pT(!Gczn73Jg>9uw0c zpR^CO_58>icWqBya=;6xCeUc}(+wt#+EEdzqm0Blgpy z7r}CZe7Se-89aVWpx^IBMkv#trx%E)c#`)zmKVe;rbyvK;~kdl*vDH!<3tWL6XkLJ z%HN%%>du3e%-D;Sm_xL8T#C@+p)#*ZTd?>K@BtbrG8FoaH^2VmnDzFfCW3R)5LY=L z4u4LOa1dVZG;iipSYUW{BB$BH!tu#K9z_&ym*S&%1H|6=c|<&W%aGCqxc>C2)z!`P zRqa1TrS1teMOy1OS-s@u1YV==229jc%zu%5^kY+3{?uLM1kmy&`A0le^9J&;uW_Yf zR~St%FZ`tZM2LtKAUwN;$h}{4oEhlCIoRXyB#rV@^EPf@ut0X&#(6=eq@=T_ewCjZ zoHupn`OGKd7zg_m`cy>(9epAODAXH_D~&oc;D=7d|0*Rg#m;=_Q5k)T+`f!aa8Z_Sp=EKEsHA9h-78dh2 zh1SR=E?t8KeT_=!R_sPra#BdgtVjsTXn+^ZTP6zR8{<$Ztw2p7YT^F1f@T z)Qmm&kQl?HD)~T^x7q86kBcqUYK51`OE?a1oTy7|!h8U4-_aUMJXxr9tmNhW8e3`Y zqgTG^eSB*71dZ2VTti-HU9eoUQ*z(eY=6auU+t|K>4iW02bJGs#qn6)B$;2U?upCf zJl?%{hc)myn`SF4f;J3Fh;Ya3cKQpJ7FR6e?FGA_rgxnZwwjwJMZa@;W4?4c;=?6a zkmW|_^ov$ky$J>W8!Irqf64tn**S!w2UP`sYy~Y$;#0WNQ5vsgol2UyvYzpBV(9n% zmRT<0gDKAkc1M!GtFU;Y5Vg}VV$7?@%Lu9L8EkKV(;=MAd;QW29F2zTdz1QO#h;64 ztWu+ZmEe+uX!-5arsicgfb=aZw+x>it<+;xRqNMT3{j2(_ID!#CS>sv6l2=s6^YTC zKfiN}R9HUwhO!@qPgxYh^_na#N=*6Vk=zvLbf^+7^*$_mK^1-=8&KcJwhuxojB$xd z^1H?)2f)#G%B#ZC7*#?TWsstDVMVmL|C4Ez8-X>`kD(=7t{mquAFK{4Q53Nz;1#bz zz78_~B+A$&3(iad_U2*{>7LNc`Aj2%Gwsj`A=O<(!+=&R!6AU?)t!azxw>i03zZ z;TZC{y9&vqZt5a*;k&8?Z6yoRYa+0waRa%&g!%7d2g#Rx#=&Zc)oM)cf|bpv^cZT6 zD<{jAfyF?>!>2#<_h6K_bxaK7RvpO&O9Pk(aNk|cV@r484Dp>Bj1M9*U6U`991^RE z6xkURYd$rY*I<0ZxtDC3b5Aq`B2luCkoW6c1$*&Fr@q01T82WYVy2@XiLSrTtn&YH z3#)TVUuTZL5>E}&N@en%nZ|i#JlAwAax80_dh5Wj@MGjdw-(^Esko87z;9+1+iac- z1)D47CjQDedK&ehTS6ZMvyfO46*?n&V6pmHv!%?)03E9_)A=D_s+aA6{$bda!qSFe z+FdnBJHk4hShaquT1S6Rp@Wfu_RnQ{u51S#H9BFO1L8Sb5ZBxvtWzR)`EZXTqO4D} zsCMzCn*JjC`W3vorQe6leHL4*c~HYYO!mJ(&}~H(Nf13X?2&%rxdE$vPfhPz(JOKX zb|FsyX(s8Pr{sRgWD?A`9Jh z_BL34$XD{!8vATs)biF9L0D68r^BdYN6pX>dN#%ZKGE}HprP4#@)jo68Q*z56YTIXN5afcD45i7e#_n2>th4Ey_12tl>NVi0I*WZtAy7S z7mh2k%@YP>03ZDq-a*>q@Yiw~4SL5EGAv7bF{k4Ew9P%Z7igF;>KXb`V%zUiB?6f1 z9UOVrvfqOt$qmk5EkZJa;<}PXoShpS~pfcQ? 
zbJ{2JfA_ru(}8idK8VCUho-aey9G?abd|QGfYUwr_vnE9y z?hjU@zgVM9!hIJ0XrHl8yofYLcN$!1gZyzCp82190`}uuGISb>ICF`%JP^$_MF`Pf zou5~3ko&I0f%)al6+zaO?4WoPxsHz#`_GXJXS`iD2Jw!gzKi!IN)-1XY{RylD{xHF z=uPcFJ-bS|F(EYR)fGIq<>c_`gJ}6wA zq3-h-UDRk5>c=UW{Ll7)XVeBl21mr{SGu)|WG|rX!`|Q(xf0H$KwajUR&4 zw$poSy07S6gPI%=MgdPZwzQ}9o9wd$pwG{^SM=Io97%d%l)ixSy>9bdcC)m zE|J>pypo37gdAg)3ATQvbh2hZ%I_60afluG?fdEh+d7I&#?c;Qr z0#rEHG@fRrzL%GEw~Rcjc?mXLu5B;36y{L)I^mKsEN;vDH+_La|KM(qsDCn{P^>CW z`>31%imu|fAsTB-ke?kdGKTHeGkZsI<~z5Ki)|iim%HF52VmjYHcy{t8xejW`=-Nq zP-zYON$bihBnvVa#-iYh8wD(Sb(1B@6On+5nNou3(6xv&4!j3MNq$KZ{;(!hp9WHz z!GcNbzJth;A6pi)@EV!b!4b<>ETQjK`mZ8usvFJ)aD^3%8=`nNoBtbCu3 zXowT>LGKmFejm{$3rGcqSQhgrj{NvClV4`J-JLud2Xd~V zryJu#XYrr`NjI2UJ5OGU5Upmy7jIm<0<5G*y`3jX?WJP!V<6g#z4p@mfz8Z8aQ-V# zefz(aY~dliL&p~e(`Re@<4>G6#_g^fk7zIEfb?H$Xf$pKgNoV}i2b$;Wb#X}g2wSm zg7GzeWY}kr_tHe7-c#qmA#>y_WbQ}Tl;4>ic-6w=_w{j|Zk(CUT{nY*F!^?_5r*c3ff>Z#mVAT7bNCcJ`6@EL!-+EPJ{mI7t8}v32G+ItgwU@ zrzI{eR=ny8?6b+_W9a#Rd3DSqV%n)-Pvp0k@^vjjC@y5ReP0aTfc^;be-dPvN6x}H zx_qXC(yP*L1VF=D>1A`fKx4a3C;~f_DPk1eEwZcZ zQOpzJ8^>B}e2_iw^MZ^L)0RyI@_OcrUu<>G^|lqXb156J+9T^ga@Eq>+46I7tJF4$ zy(cqZ?RH&d=F42sULodk}GEy)B*~wus zJ&rFd=PiOBH$Jf0B`CvQ^bUvz%4DcUChE06MU>xt@_swyc8__G#hn~76jIB*YCh_l zO*VIQ1;5IX)ZrN>0dA~;@50wKzb!65ORt_^p2hQZn_H9xhG~AlRAcjbhYE*4;!{+4 zU+|=OcD4f`jy_11l6*wtPreB?ADF`RcLBk#`_`xS+%He{FAW3bs55l`>5lXkWay<} z;G9MH?s&+xg{JaRB{29SXaN)!ErPj4ibg7tCqNOhMc(sHuNErHO`AKd8O3mHjO51S zMNV!;jJI(VC+J^{vV{gxBI#oMk(9ApwzEepP?D>4@CfqM-JQfRNBA*t4~@pkSUtdQ9U8BEinc8fX3jPlI+P-oB05V^T`2*ExT~cRx_LlKR5kK4h_eKCv z1C*1J&7G-q$$weDL06EhILI?@K-^LSjtHD^{C)6)o{>Hh6bIdn<**49gc&O?MjCx9 zzdF^SzI!21r$*n{h7g((lu3UyE;3s7d1{)5P)ua)9dAJG9Tp7TjkKOKU=?8dL7DZD z>%XDhCr;@km~N^Djbe{z0KHPtE1uZe#Nl8I5nu?~-#rG_@GxF#9krBNJ*vl&KP^+c zBMgf*Wi`ZP^ZOTbA%$Zv&cu>BFjAe1tw9*E&-@{p%CKXh?t2aO6Za~vuHvB$@WF>@ z;`e=qC3jhRmq0^W9(e91-x-E850AO@02=9>`jB%_9NQ%8HLABlG66cLR-ilGp*C|+ zBbxGPGZl`izRPq-BWL%)_yG(*#jcWmHClv>QBIEUtE7R*Gc`V)S>{|X|7I5fe?#CF z|0n(B8c=-b8u4D&^xe(zlAw2nyUVFEXS(&+A(@$vra^Mo!OBVFel&9Z3Pyx7GGN5 z;@ceJF){U!#Q`y8^Qv2iYAVfb<0gcUD6DXE=VdxZHKRA1fRfq9aunW)Mq`rt)t+Ws zPP`R*_z9Uz;#O08nco-{6Q=rmYP+$iU>~zWzp?V*-M{ci3QU)tl0x!M>=ASMN3-*w z-@q70YjEA=p6`zhP`u}P@9>HN@6>BcMt^%ns5Y6e$Uin_=f|}gU>=4ez-x3P6p+m_ z^6(gk7h8vlf*&;+fuY&18c}cV-$7G3!3%sLKbyr&2M6m6tTsJu5I>IS>0Ot2`Zppm zr|#L+`@b2xneaHulkbGiF>i2@=$EeL!E1mo>{3q%FlgtZV0+Ua<&)_yFC0CjsmHMB zk$ctjeHQLhK$QZFe>+Vgx~HTvpjF}9ICgM1{g}4p)^h<>@{b6uI{o*D5s(;P z$rAwAz&T*POa*7zk*rEQ2mO_`$XN5wyFda_Aumnvb-`PjJd2N`>Tt$mIzFlV-fdM3 z43|2&6oGiTcZ8%x_I%ck&Iek}q6_!U4NY6Z5rLs^C!%AjVaIjS> zW7pq-xNkNgG;t=)Jz1!A>tj1w%u?y^U_YF&)qh->FWvqB#0OU1uI$CyH$?h%x;BT* z^2zN&;d296{1EnH&Y zKNMo{Pd*)Bn0@7_MNeE67A(@>kS!eixTHxFb`z+%qE*$13A+pMs*vOMK8JVw2d+zM zD--H$7felFC`375L#{u0`lU!u$-LDmaXu%1jCBu>L8j)lDgr2i0Y5h24Ag(e8X7rDNx z6pr`j@5-Ebnz!e?uGf{Oqn8`55s@AZ>H1u{AV3z_l7Ac7!O^b4$dMQh7ZhZbyUu&2 zS8V=CsTHaQ#;EcOQW6|mIci}JH4Dk%N^-^%I*g|4$cT4CRmEBY(8j@p5|icQ!`w$- z0P`pru>S00`?AKJ%LnT=|2TDIL<%Q60Bx*246_=pX|55RWY&?%KnqhK@=>O!a^ZH% z;57Nxy`6C~i=5ZP!H6F~7 z=qj2u4RG~gtGfc2mu%VDPA5bFHy4XwZc{QdgJw=cDMz_Ml!Q9*ALf2A;!J>QbinCz0l?(dp5<&?h+qIrc8pDL}~6g z!PNXF(i&)sDjX+uFn8GK7>Z32ppA*SvbNa?Z7_yu9knEelsnMe)1V7y@PC{y|KA)_ zz`&)|x$djQMN-A?!IEL)COVRy-07mvq)|hO0UMBURLzJkx445~7vgI7i%%0ni4WR# zaR2R!`ri^q0Y!XvtpD+f0@Zv#LhHf~;3)%gQvsJOu!(mG`#|LC zdxq`l`67>!cecIvB*o<|(+kC=-{6?V0UKge3jr=KPj7XeSisP&Z?fMJZ7(bjQ)|v-lQM{xGzES~lDm#i8$b|;qlkphb1ycNeuocrO zQ$N`$;j>5ic9onFjgZ{wKjK^h!x%jqJ7nggNcFnQ)C9qYy52k9G6_|FQql`)JTd7d zz1wrsF%lD4sp*i~XuN09vvY4Q^@ic(X39lgca09=o~3KDCHtfbk|#a0Ztp zoP`UnoO!>1-%oKYy6R{kEL$TR*l~FSUA+k%oer3w7MJzK^%lII(p>AjHsGSgLJ#1E 
literal 0
HcmV?d00001

diff --git a/notebooks/advanced/finn-folding-mvau.png b/notebooks/advanced/finn-folding-mvau.png
new file mode 100755
index 0000000000000000000000000000000000000000..bbba00182c888b072432116a3a9eafbb1d8cec0e
GIT binary patch
literal 29710
z5vCA>{!v zkFCcb61Mznpho$+O|n(vWea|((P(@*k33OLfeT2)HlAVMZ+ul~-6!bXzo*Cdfdu5k^EN>V01JTahW7+FitwGb7MR^% z`u0TK(w{2VK_E9Lw=deEM;cYoEO`2SXMiRM46Th_X^9_PlE0+{=s5QlYjyvX#(QvL zt7y+Xc1IPQBMnIC4o3Para!^(!QcbgC=;&8i8c@Q47wF}84a~OVjCs!x&f?lw zoH&@?su_-!hfK>~KrmJQ2D*ylhNRQ?c@bW?FSm5{{=BfR8fBc#v;LE;#)V*+pDX0w zojYCo*#1x=ZBPSu$6s;BwyDWE>^%Olq_-Sg&Zj+9PA{i;a^*+%MJh|xxUXlN=P@Tn zP*9K`Jt91F0Gd+wh%J+CMcjWCDAzM~3=6FzphVcE=6?C|l=`QYMY3-^Fvx*3TaHnbZd z-1{-6&j)5bNU1m2A*1$!8F_#zzmT%fp7KW3_yeTEC#rwRaLOx;f@c1uaC24QZPL3e z;)4!efXD}l<5Q~h(t!0yiQPo~FqzS#ma_^K1O#pQV+AjuOIWu(1B-zySH)D-)VlP` zbh&;#JLVK3)y5;HpcnaGf0qQPNPflPLk5cm*#2B?$$vXP`&Q2k8};~6)<;Cv^j#jE7oy8r z!_FY2E1<8rmrw3GY9BQ4+U?itpiOP|=oveh{+26=3J+-;1)x>3Q^Vg5tAlz{1+}pZ2O>qtG)*fJg>sp4NUT z*8r%G!@7m2?)nVW(+#uE4+q%&x~g9OPw7iW&{v?e#bk_!7OoZ9VwsD#s4%#+m{$Rp265*(xB9HO(|h zQx!a~^0>bLa%5oJ>7uZ&Sq-thKYSPURz!OujI^tw$$r=SL6Hw^p{Mpzm+<;VL5Ush z%pQZD)TJdU4l~~qTX@{JF1%-Dt)bSZ;M^5Zp}W-3zfp|n6@`US@(rjaN#iJGfE&Qr zNVrE3ig&jZqaa0qkBLwTZZN?%a(i5Vy9Bt>?3g-^I^Mwiimk+pHTUr1DStK;NC@o+h->^1@+tT+01=+I z0w~Mq-2+qfmpp>V)?cF^&Ox(kf94J`aOLkbrah&kM;0SmdwhK(!`JBF`6_C3I;|lm z*|Xkh=EDeOkI4&^3<96~~kGe`mT9th2u0pfwI*;gZRotmo z=$HvZPlV5|miWq<>F1Z^Y^=aXlCxCPB^3Ehr%DFf$jF?P2CnvX59LV)5GTJ-0@-a0 zcx%f1b+T+xkoschZEJc3IEv_V4ogp&gXg%FpFG)^09(J&C_*%e{*zvqnTMS_r35$<+c0Bai^;l z9Gy`?5>%ng~8t7nu2WmnUk3%S=U9vDbMo#8* z26@l&qu<407kVk5TILl|fFR@i{GrVLV6v>WXnpsoi`t9EUi0Ku>u<2)uuj>K>qaol^TKjzd5q$ZKw9y)ZxqZt}7!I-8{PitxjK67D z?@%2t|CmTNq!kF62-K$!EfC}ftTTzcm?h4)maH&n7+9ZRB@BW&m`*x`z_>ZzV)LK1 zkOKirzkuf+>x8u?1!BfG$a&;(rg{xwh~B;Jyct6rs+~U!e0B_U;-c7Y+T)}ar;M;5 zWoN4Ec&^O|7^*VaCPU$tOPXH=TpgILm3cZ=eTwe$h?3cpk^)i#&=6e19BYq&#yY zyDge3HxVV&3zWd%MVr&Yr4IO)p%n!+`l%K0A!1vl;plrmmiC|BovyS6tH5MqcPXmI zxiDarC4o}tM8^w`CnI{2YtAD7Mz#KO?W(3Cyf>Le5VRB&)BEjkF3@I97%X}uqDMjCwK%)yeHYbI3UVn?_ifRD@ zJugj1fYQw=B5N(!d($Q^Elmj)VVez(r9XZ7-oK>|IwP>K2v@rrsm+q1)|L6HB7igl z=xZlW(SPJ+=Gkd5gv5g%gvIsv6dW|_c!RlexM0GGkuhh~0^0H^A$QB@UurEK%*>ZN zJ{pwTwf|!u_V43K?Tk7Ocg%mHwUbv=3+{ORJ=&Xe;kPYCWR=$FdyYbh9y`y1?Rf+8!b%BJmZ*461AVXQea0ATO z$)q)NVK$_Jcw=bzJxD%N;HMWqa$-!}=g3!D9Koy~%kA5`d=LFZfNT_uqySaFQLgr< zyY=b37;rdfz}wG7vkDwnG+Q6hQ-Ap0B~iD38+js0uc35RK&ii7!uSmjjPVEvz$t~c z;<{GXvkANXC6G3Hl7>hA3fg#vW}YR_^{Vz#t`GxT2H2VK_R$sCUJKT1UDb9eo?CA* zi}DO=INj44O3r<$;uLB*1Kf89-aU*5T^Jzv)z&}a;(nyot~Oq=Ndro3VRNAmzoVlA z9GvAdu)Jiqcq*>u6B}lQ!_4yu2ag!}Q(s8OZPkU1IK`&850lZZ7c39)m)?S@m?r|- zZ@(xGcW(XV@x9zAVV$$P1w5VL)Ul)H%zyMv1T^y|cpu;7S<+)Z3bBkX-FSlF$ylDS zL0aedMmc!L9DkQWr??U4-ca$p(X0=^tdn5J3pg<mh{G(fnvwg#T> z%6~uxw*ky@wu__H{WH3A5tyOrZ?kN5#-cV9nC(X&>z}A7Zd104O{wkC4$QZ)@^>x? 
zI%v^^L;;ny%nV)Mbr$)Py*a_cL?Or`d59`!t-%tg&inc}W#%exPj0xM0=3syR&mm-RO&?uOWWmgZNFU=p0^_*raw?OCEsRmP zqU}TILx2U^*2{Z3dZyVwYCLXLMOw2Sr#W;RW-`Z`96*VJYLB|0*#L5AeLF$O?ju=G zo15C@`GpMQ&o&Xvwbx>eu{ha4+8T{Msc$^(T_UyGEpmSV?J&^caB@fHf)zKL)0(iM zWiZo-HVJ_N3eJQ`$tjfiIMQm!*Bz#urPCK7bBal;v#^3sM2Z8{Puan=K0aXpn^6BC zid&JXBDR^RH$I)FJKfz(rvXjD&u$I<21iHGfP-fR^^%OX1{f>%q=`VES|gj6(DJc>Ow5WRrUY(0vXxhgTYUz^S#C(E_t1;7Di(HmcrXb4Ts#O&4Pe+Y7fZkAlf{C@kPj#zCRtjPINt{8DNJuPRJJUS@1=>7eu>$S zAn)FkK;}{wB^C?BR-}<`u(3MIlGly0VhY1vJ}6#&>!g=?#F?;!2nv^Z*zb(zZ~CH9 zz~L&weR^TAI9*7Hj_mMnhylOo?zO!PO_j+;FvASuZf(05rp9F@Z$%D>I=-rIfCltk z(PI-hZg+D#Wer%Fb)M~J2LQDAc;Of}wrbXuU~-L-RphF{XWe^haYwe#1?TADdY`o> z)En5^N2`gfpD!*xqy&2rI0OATd|kg+6(^Z{Op=5H8F1BnTAzlb@F@C1Z(rY*HoHEU z)iD8=k4|rTHkza*8~D+kut+MYn2Nyt&*n@&XDLcT2lDm!Y-*I0l$Nsg{%7!71Ir6` zM10Q!&_C{%Q^YgTfmA(>zJ(V~SV4K>%43%NbN_k@MTG`7a@!U<8UgflcA&+6~-lG|+Bh zDfS?VUNALV$Grzbt$+KZNw$)IO;VmNw4=4dycHcb1oUbeGL^bKaw_$X7(hHlhTTzf z6*T9d)8L>R33BVC30M47dbM)=f+c3A=Xv|+?k(-?(a_!MhwpldNihhpFUGU_Rfo=l zPl%T3db!8REQ_M&{jS|U(<^G}gBu3~i^IxR?a|EUj4&}$vbeKvd~;0%IDUst4~-!N zVz0rRFa9mWE=GW)C3&9x!%m)V{$_6&!Pz-nJ^|Gpi|PfxNv1)F{(JR=#?57y#7y} zkiY*%nTUrw#C)t-M%-S)+^`(|tyaW5`Buwb4(tb(bP}n>`j7CtKLi`%s#puB7Az<0 z{hTEDK0RqNL`vDTYy=4V>srGO?%@ULNjFXAB9kP?n#-+`Ndlx?taeJ;A3g7H_??66dIOS1UU4_q=IHf6KzV`I2x-n6(XxqarGA$9K0qrXtxCTptU*5+to72 z95B)3j{nCWN>=_NwpK6^iksf2l*BfI7`UzOW8ot%pSS7IAWVQt?*41aCu;Qa0-@Hu$^(m@&ebP(vW zJEC3a|L-_V&JFt)ON3L2RYQNdanuH6=1mp78}Iq}i0t76@8Ivw&f8IN2P%PS`T3I` z%Pj#gYPv=VbbzZ=)VCkUxtLd{h$&K?;FKU*RydNB`o$#{Ye2Vt7!91YcV)5pA~|$p z+Q?K2zbob5k9iwW;(-VJH-0nqw(ZD!Js{O{|51&k+qhO>xdOy4vY*t+#W#MzhTNS9 zwJ$BEXYrGC$bDko3aCTtw+aaQ%6hoArnazyuGsG)aNBKYCvYx%^rg3=(5rXJA^nd7 zP8)RE9mF(?TrB@zWvQEVu68FFAGvz1>#_^ftLVqgOz(6anr90olKriMg-GtqWlhpK z>k^x^$jUs0cVAp9o~ptXPT1dqg1EK@#qbhXErS7laQLY6hmr!XQfH*5@H#IeJ~d4L z!w7o;GoC!g0r2EJcA%4buk4?pfiSo8VyL*vk;3H}GyK_@18;5Jjjx+N;m20(qURT3 zr+lG-+L=^nFyUZVUAOYubV{6OeJ|2&Q>oMlpDy_l*IS@%Ndr?WAYkC8vNhmrX!N7; zTK8d>cuhCzwIU#g#+9h9DWP;imfDmk{b_6p5CZ}eZWpL+ zU3|}Veekt*HTswYJ)nJouGf!rr;SqBU$ri3^AhZ7cA2zz@Y{zV$+JE{;@fr61s8;* zVR+@c-|qheWAFI5FUMpk{ytUxgTU7H;L)Q;bqXGUU;tGwutD4AJJiHeRbiT1W1UX4 zdw#T#=3&4O7vHPlRF(O;8;WwS;~XQ-HT=M0vB$L1ntA^RfQVlmyeJuwPnFx5Gj4+b zPND?3HqFMfsU}#BIL)@l43nW0f%jLH+TU->zX+ZcSME{by1&y(_B9@J)hY7*?;BG< zQ*LN*qzqg|m`8rx|E8E3zTjV$@uo0E>K!>Op+M&6>j^VWWKiFu0Kwnzrw1-f&I7j* zdZRTyRzSB5MvB(;7r}RBmynI{(Y?a^K>dQjWy&Tg@EbQb7n#xd=tu6d6ulez znEYPMJJq)#tY4;(G#@+P)ry=v20A0kt}#c-=}}jbgLAm6SUEzBrB#`n46TU*xY&oV z8FY8|$E>r6|1oD{CjfV>iBwFd31kiMx-#Fs%`j>38Fuvo-Z3CpfBjcsmgMO3pTspj zRAjmvd6Ha7ubj2{F8C61+(8Ycca5}f`q+P~E&^#Heo@(YX}VTr5{1O0wDXH|HE$8P zQF6oXg=cjLxTnv6H%&l9?JM@E?9*SW1Lt^R({=vBB`ZxQYT#?}zt2@BMDlKJB|Bz@ zg%#xbTowgROvr*iF!938y{?YeyJ)0TEkH} zAJ^Xj@X%_sr$)?b#Dl^2J7xH>mOToKiITxlI%^qt{sPes9&8*xGeX`3Mx6JC?rrPv2_pfA zP!Eh^UI0^_H$clU+yQucV{pg8us5PAT?_93--jFXvnRS&+N9z@V{$cJS~i~|_zAOhWaT4u5<|f&^R=rq+x$4mDu5>T%K;3Q zG5vO{cwu(c7dqqu-4Z{1*>YJi8DM#sqqi4564PHF=ussD8j`h(ag9I7$jF`x*$guY z2xxoWiiU>cPU`V>Ob_wMZx84w{tM$~F&*mX;l49dh!TiBY1L8%88jkIDkPG9XOo}j`jtf*r8Lnyk78MF;%qiy!z`O_eLzgKK=z~v(_IU|#zuXU_> zEjDeC?ayZb+KE`rv0+%j_n)HyO1yNWbLwwyBfIru2LOz5y~XFxW?@0!4T0eT*eFi| zu89R2H-MhRKtdr2BJS((`{h=coE%OsoPLyRrUw8FbYSgEd5+6dktJ@Hm#nS87h&m| zb!c227g`h+EmD-1U!ZT7NniHSdXFM^C3G%Li4_(yGBa6z6jDC!LC*)Jqm3!^nL71R zz?VG?=!%Ge?RJZ;_WPPs1)A~3F-Uk=wQ0n2!as&-&94#wb6lsxJ9yaq0RIt7)|EZN z_R(5W>KzEiGX zep3)$zgxG&JGsgrYXsdXX9=!zm7-f`vsDsit;**OD&q_rvj0}{FmyqQLCMR$4lKMT zNw=>87MoOoTq%`6>#F`)12|rMQwEcryB|*Of)!wc+F;D33%AnmGOUaHt!gJ~0Z5^` z@|T=EKZEh)N_^HHSa0kc*nqZ+2gJY| 
zK--#O)*P5~|NV6$2Jsx6=n;hd48Swbh!?CjV8Vr+Tz)J5OtUl7ED7BX+nq46V*KgC|8pw=S!X49@vpfxY!~qt(8x91)H=A#&0U;=& zoL>yXjk#!_rHX$p26?|wb&~ovh)-XqQBe@y>_lF!zRS7ks?bA%UeEZugA0pnm@c>Y ziBbSskZ%*R#*k!Tzb^)qN6l7x&qs^AtIoE429k)^c{cPh=)gI_h^nn^tEGRtN zlDS@EZF!3e16`vAiNZ9{EU^hF#jGq`I5FyN>2NeP{PPK&hNWo1PDKZ*;pk>2Z(`)E zMqDGnn2+_Gt~?A%iB3V9F`d@*aLjegLn21W@cZu{zXrZt!wu9Y1mILKvz3Tf0KWxR zzC|rCM=}jWIiGJ{y^8>Ddp5XFJ;|bA1gq4^J@={>3n_XH2Dg4zLynKl;0d~F*f`r? zF^v+Pc}q^-V0xDz=FV``k zj}rnRP=YJIAZ~==#1E{4!rkppbD+aDqk4-5T+DD{Dv+A^RmQSic;o@;e8;(_ z%;D+Zc(#$P6r-=mZ>(L-)mvYVH^3M(o)uoLLCFU->h!$t4i9&`lpqB&n%V6@*r1QS z&m|EPDayL9R%1e>fLn2^tDDO58@QXh_8)&~P+4{WrFrD-Cj*dMfFV`X(TQV!&Ik;E zAJS_haISkJJvbgPI1CKyW6IgH`kmLocgY}`u(+v>Et37oA@wna7dNAS)JjY4e1X6> zx^X6qQjhlqh{hd2V}^5`gwXzMI6iJAY*Xqf`jJ;; zKeuY$QhzFuYA9K^AZ?5h3fRi*k%EP1fGTAho691Zus)~FPAp^2R$LwH0w&DJyTmI* zJ>AclgXlrO?l+y#2;4p-Ku~2Acrs96)V3jwTh#Uc_hsT5r>0(Bj^w__bB;42bN(3Y zPRZ2;IYOI4nJ@iWpLM{|cS8~r@%hb8>mTtCKCjV4u3vz~n!FYqN=!rRE z1UzpO|L)X3)cBJqzX8jmJM8XO`bcuk+CWN;7fzwJ!d`FUZUW_jB8c4-g7$ zmQB7T-tQQ!FBEU)>Cq_jrIJjHTp2kktnEBv1YRG@R20|##ethu*Ji7d!6_m8e@;nI zKoz`%juJ7H`<+Hv_^RxB$!lbN6(4LVPH#{!pck;0R|YOd08SUc>>#Lo1u^k||7DY2 zu`YO?!jgVsLB>J6H)vixis#hH(Z6_`wp=><$t8=3L^68`N3j7(_7OBj8WbYqh@rito4 zaTc~Zc5NJ@L*T0Z#PNjK!}{5$m}luid@a!j<79>tEH3Y)4^kqm}kUAM7UBSh1vZU`5u1 zNe%h!u)u|B-m6n=`5D&OC+Lk=VmD{E4ec!^G^~uW;7!e=4t#VEN5&$e%#ZFug8U#Z zE#Bd5bx$(%^{H5beoMbZp{VRr45Kh}n@1c4oEa}SKRx^S+`h|s(Dc)iWpqN1Xv5(| zGd^sHG$;zYsrbd*>~a`gD1AWHy9*#Yf+z;8iFt@nF9FGafh-o7%lyf@8F?C#?|{_C z3u=E{*c*inUG|f~8#(bff5h5+8$(H&!CF#5i2y6wis*P8pJgJVr*q&X75 zZ|uIo@7BZ{@%1IpNmv3`fv}?wZC~H#9MTh*;92^(aL~wC)~hazYfkEg zNy=#YEFG@<5{voxE!5JoxQSlp)tLv;l}v@G-p^l%XR|L0ob z%mRVmk&)1P4Wu6Imv75jxuY{aMSBktVd)jV%=+u6#`^KW*Vma#MLi|4b!+|UF5aOh za49Gq&l(3bWo=tu=iBP0P2H?vsh+Xd;lfV4U|eHh!2c8t+7R%9&TfzTO{A$i18coP zS`8P#?Wg!+CfNS4q3H%j^_!e6?P4D3@9IVxBH}~Zw$0*=q9%gk1AYi3BV%(8$J-)$cAw2<2ymiILNUQ&yqIa$Z!=H?hFAVN(k*SY|j`E zg(s$AraNK&a+8{DwfiTQ^}T!>o6O5zbILp>PLyvo zk}x7WlA+jf@nB!R>ACx0D;=W5lQ7Mwg2YulIXr|c+w@b4g4?gEFT1-)qqOwobV$Wf zIYk*qcypyaS*g>Ylt(Ui1tY3Cb*8l(x-+>oVXW%f!q^(L#LzY^(s$FnljpO^K0HI} zh;WVaq3BAm)V@@KuQ~K*ccP!Q>)3OAlc4~+o(GTb+DH{P)9o35>S^JgKz**JoS5Gf z)0ecT`!qgIt-Hlc6?(h&lEtTv32Ru|(2JbG&kRb3cDqkU9*dV-@jg;Z3I*=IGvBZ- zQ;i{Us&TtqyN4ah!$ZRs`^I2?*6kZGh!Jb^83nz#E=y>cxy^8Ax%ydSDEnSw>s;t+J(F(xUbi-*%2Dc2H_mWUv%T zSK4)GemO@z!5swYf`ad)S-Ro!jIJ6Y;|gphSp zVJ?~yrav+Ql)#4}&)7;B4uOLqD;>VtzxoNE?hKzEPbJh7GYH57V_I0d!@?J$fdNrV zX*G;tS?l2(iydnH9LI*7n2OZbRjJOHnzfvQ5gynXX$<>8qQQxe1mkr;jLkxR557MR zdAfV>FTxiJK5Rk5oqA@BHqfOc`?pnzZp1s;seG(cq@tQMU2wi*>^f3Akv>Kk;#kz;jXE*(11nOiyg#A|GYr6!+(FMd|6^hC&ZWVH{a+>sg#!CO(gglxf6&V= zn*PdZEB~=PLPONVn?-}h>JZkx?ywxqv)|fH+f1pg{B%VM{YoWHYNOdYQyk3MsMhuS z0pxAXndYOMArtUA+7dh~=TBIlTM&6KrGc)lG!=q*%5_dcE+?~yZOyeG{logO(rFgD zmCN_uk&ba7otVpSgftVf-&m{1RI@Xa(vaEJV6rQI%gGgQ8!ubjB&L;L&_uP$x0g#ZyAtf6P-ksOLvu0Vr+y_ ziKWYe3~&6{{X*g?e)m$VU57o);4%1zp*_b_p>FmlGOoJ!t- zc1mLd3vazAvUFf)%2mjCZ1_IZptm_)7A}HZdMx5eSB_00-2IjfIXsq?W znY@3Ds*4i58PXT*Rwq`(jFpd4T3t7By0n>8XNza}dMBOQyx^qFg>LH9@{@HFPZO8t zk)=zo!*lh_Cc1}@Em{KSc6ev#rmseaEwN_%w%{>4;>Ms`rG8c2_Cku0me=FVeO zqLwvhxIRQM=|<%n*aKRYPPu&-Ann>!indP8dsMF|wAJp7mXo}{O&`8qF}vk(W#vI? zQRS>)XTTgpUESkiD|mb@^!X|9Ht|||G@JaFJ|^<<82rGLYid_iOx=}`YL9a1%NyWR zms5oY`umq^Sd<6#(PFjiS+C=^fS0Mjez-A*G^3ZKP69fx!I2=Tioq9?p4+PQGy0nW z2P2X|TAHipDoZBj7uo@hK$Ixt{Z=Cmf8+JTG!~lM6hwFcU=H`GwtKsG1 zZK{T73^dsgwK68!jt7?3j0;Euh`6k8;&##ZT2<@9<^#OpPp3bTj~w6b2LSGPEAtFb z-gF%Zg3eV?nN%V)w}j05L<5cQ&|A~1GWzK-&_474m#0mT*mUq%9?%C?mD)nj#&=!G z-6+5GP^wQXk($siKbv?}xTO5LgLIA27yTZ{wsb$6FE?Gr&|cC^w4-U+ipjWqAVq0! 
zE`SfSTrZ<0dY&fNY4xeG>n@QOn9h@rbj(2t(ItIT1L}dscPAZihM`!`{RKs zJ{g1q855Yr<~-jvYg1l()}fWz81YA$)F&9lnsw+~GIK8-o^gO!R$P?x>S|kEd2BW4 zW8aD)<06x)_`&C=0|K)^c}$a=#YY^D6Ly3ym#+J( z?SSmo0V=oTL8aagitpD0xhY$b`uWzDo=hafJ zH?z#OdB(NkRAP&(+9(Me)&DA3N@@?<$yKGY`Td0L#>bk?(gQ+H4IMwV-T0YoLNj>@ zIsOT>;KX45P($@_w(3LV!)VOK!v4PUsOEAlR~)P8njt*%ck8o9Q926r1dN|bz z?vA|@+wFHo?QrPQ@lFnVt5TyY{=cpbIxHlfbv&{{o|Q_E%ZIzSw9KxHo|c60k5ioU zT6JFDc?JS@FF~g&RkQQ zEb@Ka+~5w!9~9VTV=Lh5x96o>G7FzjA&;&`?#%ON8N&Fa`V;CM%E~bFZMZp!!BYRu zGU(ImLxs;SzAH3;Cmw|UGx2~m*au|My7y__Hco2lXW5{B1L<#9*G*3j>_%<3+*j65 z2y@~xHVM*cZ$5t8h05LNSl>d^8udNeYhBk1irO(u94Eezhk(|f-^cgrq@2p$k_1i7 zm-ZuzUMb(pZWtp&)0FzUP?|NI*z?~pjS1}BAJn)m(z7dFkZsh%nNCm>7%w$1?K9Ne zGKF&Z?YyRdoN`yQ9>k?Y|nN=;^*b`># zDrfs4sMWbklFqc}gE#3O)u_|Bmix=@5L^Hp>l7{u`tGx*&ds8qmu#~7+d z*{FR%8=ivJ>%G7PgR)TR4Z&G-#J}1K>*{?FouFyg?lm$ICJUKbayI$lXbtD;yV98sT6HjP=|z1;zUfR;qN;#y?(xFnuNGCwM`$k zl;>N#{roye`G>0Z62}W{hsHsE2v+(pA6Q*2rUM$!8XSCl+8?jH;xH}wJNKDByvRUt zHeA_dzhyZiuh;i>1IlrU9sZNp{3mFrZWvmVmb^EHl^Jp1H0m7vYVPc}r%$EE4#!AH z$X?*h!D`tJBKVk2E07r|mvw@~JC3y<7p8lnS#}^{}6&~1{Q^N>pBX@$}@g`!W`iwb~ z)AtYq+R?t|aMIx+rw3smP;!K;mZ|t)`%Ap#3I$H@Z92abWBLnFr{b4zRIl0H)@b7S z!q59gn|_56omy0-Qg?#&RAvy@q)!qX(|o&ev~Yx*kvHz`SUT8>YtQue z_7MbfCeV>c29aJHuwgnKXw~83@Z9`by5@sP$5z~y8s^*KLqL-5jdZ&zj51yDsV|}h z-}qoC5zPfOKJaH}8`vNU-k=MS!1ZM`1>U8ps#dpmJF zj>SE?b=DCdhX_Bh8hT$Dj-+#-aqY2oZ6AqBoz7xLQ=fGN;oXk+l>Vxza=f>;6$Nod z6Fe_Ef}Vj22^~Fgi05-1ryatGW38`9`PPq}eO%X*7(e!6!-xvbpIdAoz15OQTx#|i zdD{1fdi+-oYunfeP*v!E6Hpeg9e=eGZxBZFTW?RpXv7->1v6cxL%O8NPS=71JV_zr zrWY8#M+n`7T4JZ9r$|-<(>rX$F`rr_gPd*y)lo%lI60}=kC&|TTQ{;ORbN*T?>x5Y z)n_G=MP)ba2vx^ujHroMLQ$G#W)%IEhftw87W{to_B)}U=8sp`T@Tu01dV2z@e>K1 zh8b8Yh|{c47xD0595=~kJSa3K4xOL#Pm-M#msJxc@x#^K}ykKHr9EBXx=tnjl2V_Z%matnKRSZeVEjc)B zti2t@)REw+uz8`v(={2~_H@#O1YX8=y7OfMBcrJzca0rZl(f03U>Oxs80+i-(Uuve`R5RYJ%mzp3_!`ri@zO_3v`EC%7F;lZ#2Wnf*vH z-M=+exUoxehdE;<3Bbn-`+SD9AAna9RgR-&)^WkoLGVdayh8?O7@(+Kz5XbA z_6l;NtEs8!Np)@)Fkh3k`Dg~mYI_HV_Rc0|OfEWskNW^qxfP!F_3QWraR1ayK9QY+ zAOk@Jk8UGZ?wRvpoJ~cX$)nkVInd-WBh>A<_1io2ez2!Bdx4#krAB*(g$zr?<30Fg zAw5+aimsn=ZpJ1-IUmB^WkAHG8WUzt_G0cF(aFyq>QBH;>!IdgeqaX^qs49Q*ETrW z`C(F0kEGV4J|bfzg;;8Fl6M=G@a8K1(9z!&_nsTuXM`%XXpf&Ti>+ zsQ7&KST_FrDoCWyOb>fF&PNYCebyw`c1`FmkZ`%7dp8&8e@KGD`UbcS&rA-)AC4vd^?JIFH26(zatL@v*>{bl-n1+(y6Ylsyo>3VdJ3I*1$q`M;HGAA%Zd>K? 
z=^-~**Lz;vLq#JzrRxGlLN456e=GmqTJBepeQ(Q@NWjS@vrWNByUvAlw*q)?6!DF2)|WJ<$&VBP;bnWd!)@ zUr`39ida)$SZ3ySr-T>$yUe{5*Vh#vUIM#Svh+Cu9>Uo*iwhGf@X&qlwSQv59NF7< zhkPrWC^jMbj3o@74%Uzk*POB`TQb{9r!$og9YEx0m%>*Yq$jI!kTD6+qK}?}ucpdL z+0?h*FlfZlG^#f&stIC%HWECPl`zI@DGDAqzmv^SLzfgA*-ddCkJ4niHCB!MZ~(N) zl+7TzmrIns|JgL^qlE@Ux#?Mhduxe1HUc8iLF+2+M7t&=PP%L%);_Iw)YYT;Sn$-&aL#A@nzKxZM3;|eo^RH!HU{|h zrI^IJ^`3T3y~^XdK{@A(UF+p1$v+Uz)DBeorB}Kn1C}ZKm;QcxnvIgzs0-fHb9B~_ z^DsP>ddpAY>w^Ug(hKJe&!hSl-Iy>XtMh5ASyXWO6;jeOWc051Y4GZ9RLXuvSkF2G~4i)xc6IZj`<8S!`n`?G^eQG@_kg{JT5pu}uRgJj2UMrr?-9&1D7b5e*!Fxe=tW z<7s*idCZqfayjl8wE}yk4^`O!yE$$YTEl*vinJf$GWxml-6h^*4l9XpdcW=^R++2Y zeQrRiIl%y(9H406PGCkyWP=~eljha zRG-wk*-XDyL^ZfYBqwQi9I-nlCPa6D5FG%8^UQ|6dyVFA&M)UX-(HR5SsHz~5^+W8 zd|cONRVDPaLa+`At4ndPa(O)DW@d}DToK{NIC&Rtqhvku=jgUX(oAZ1^3;%?eylB} zp7#q;OSja<`F`~l|0<@kv*1|&qghJ@S)@>Py8!Od3&{3q)y)2^yVKDDaz;f46rTPV z3!q3=e8kJ2KF9Sfw~U6_diaV0qDUy{(u5c0NZ;Dn4;vCkJa{xO7bde{eo>L$zS`5+ zEbuk*M>RpqJKD*Xga;lz7%JH4$UH5_mY*D|om;4)cgg$fb z`>{&EKl7lh&+BB3W;`IC%$~F}kq=2ey}#6zm_*KD>$y|bUJd}Q2f+J0)MmArIN=hqx%bideQuE0*tDn2*UE1@Qd zinn+?r{~BFfr4Z~CN~LOAj>*l28|_69xOaUw5MW)fQOlq7=B(x{UUlRAg0Jo#LH|_ zY>u+3x-q9_DiQ~eu2}Ysnep4askB+&*pTO~O9`?W#?x-q%GvC8g-LCt=8CVi zKlYYza`0ObS{MBe3B!4=oJYPU(&4viR4=7s#PJ;GiL~TU zo7&HK)9o^YdAViXDxbNxOM-kT!ZF_=IWHy`QUam>{_^-oodusDvggk#=J~1DwLl;> z>sAV@WMmrSp9$950rK!|Z&yI2L@yhmvNBN{O-9B8m(u%vB4STZM1-J6q9Y^d`E_mh zl9R7ZA?fy*UA(0dQYv~h!mD2+KbcF4&?Z=&^63ms37GD#;LCD2qeVE`!lm1Ki|$d;W~M?^{~qQ@ zH^ju0+?<_uE!&!n0(!^#NYb<>Q<+k`J!7{V2JYjZLXZY6v9t&)m52S^)+#(-sc-P0 z_de6Ekh2#OX{6D^F5W;4Ii9Gzb__S&*Vi{#sn#1hvgYa9BuT-YjAz(D zm;_)~EdP36cGIH#Ab7}}8;Pkc=0*V>8}{jn7|$Ar`J;h!&3+0$P4o%LK-AK|pQk5g zc-%G(!Try2r$N7@rJ}_4$d&FO^9@~vv`V(DkwQ`pE|V5>XKoh@X8J7Vr+n$I)8&ZA z5aCyvViJ!5YfdVqG@qo0+1op(b&;n=SG3)z13z7N?<=)bD^3rFoeuSknff(Ll?<;u zK&3jLdt(j3yHH~dLpDI^7p;i&Dnh1&alXeF;#_%)Wa}~mY;j`=uW;Tiz#WPG3%z;) zS@XA}KnBlKk|V$98+1vc?R%HtdPiXw3NF}it{janRVp3{ zN3>gEnD7<*23dKqXm^L_ME?}>VoCpWt~PQeglVb&$7h?G0xH?a>ym?$3IuzfZ-uBB zKeM-KJ>j&1@PiveoI{RPurP8{D(}}t=;x4Q2(qDQpP7m=oZ&qoOb2!v8yqk7E`ar^ zn%sHdMyg~fFJ^kaw3=}y7el-xK9QN}n~KmRkHoLU5te1dJCV?9_Lk6QFij+^gn2HU z6lp_gM5_zZF@6N=aw&z^%&Glg5rszqY8scR$OwZs+DNli$r4^*TiYd}r@A0FT)lQu zHjuVIg@o_zB}tJHASs( ztyrm{yi$X>z7*8Jf*-mE3VvrUG2ot;&8N~1-FyWl5{o% zFP9pY@A2iX=mp*#769-Q=``>Gm3+zNK24bXyV))xxxTR%P4l4L9>n!xmX|wJo0M>Y zvz5!t;UJX8uRCF`;}sG%lQql2wZUX1=0XG&o=D7WGIBLLfIC)U87bZBwChrb>4D%87L?hNfmw$iVibPgwSatji3dCA*U+X57*q%%VySMZnt+ z^@HUO;=f$Rr%`sNMUGBi{X(zB%A%FyXBzLb97}Zgf3CznC--13)G1mS4rAUWxS0JA z-*~JjI|UVDfL$ljR-y{gNCMWWtOpER8y^UroIdE!Xh^uSke)dRLn^nednM>mI{Ti_ zU<{8J!wN^2vAmxgp1U2Ls&27_v#UigXI~?Ake*62JzJmigo$A2{0lwr%;r@io;MT& zk~JiFd}bj#0%~}YZf@jjyr`1JcLJ=mKZR-Kj@4EKeNjan48rrc=(b`FPbSYtw~#n6 z@`S_8dO3!N2IZliukZY~3JG4S(Ia(6yC3zq&|^4AgX$|ub4XaN=h$gzfSKS*$}2B$ zXWSV|tLtzmh`aBY$tndV4McmlJWRFYrL7_~Pee+hin24%C4qM|IkrTgHQ4G4RJmx9+&IOnMJ09vYO3dEzom_tR?w92ukfJ;o#mXI>F$1KUnt-<(N3L@dD9yUFD+@f>xX774@ zX?wcuno&>@rgs0@S(co2Kqse?qn8uL3~Mr(PrCht%(bK9gJ4p|`RDDexuX5~Y=p4A zp?9wlAJxkOLC#Vh6w5VW%<+vcgInyL2j2@NJGbX`3n@ggv^wkjL zu0bC^z9ZhX=QWIP;F4zfd~1tk|HTvBNHCt&;*LIfRIq7PraXK?1oewXC725ld%mpV zDNm+v*}Rdg50ObcAt>ro!61junj4@H6B#75cXoApvg9LCz}@>}a4EfpE%(@ee}n9OJ+y`5dAs zovwUXXLEw7N2E(2`}-qc0;wLSh?-Nj?V8-7?&8QLSOC)1Fx$e_`Gw1-9W2 z%?IW_t=Eei7rDDQi>z^=p2aei7}R%b(pf&1r*RURoh)%+fJ>BuAFLdekx0GKFAotJ z!e6X_KMGcLk4pTdDY`erOs#+B9_pYt+tVFUJTUqH4b%U2aj^h)S;_3m%O1=;#8Q1e zL6aPN2^nHApwcE~iXATB>QhjsUs8?sVh37syB3K<8y`91@%Zz(iw(gaOmTN0qY`!n zOqEuCW-$P+p7`tH2R8zt;5g~;DdwF8PD&VdG9TkgW>F1-K6PKDNmrF>WFqyXGQb&z z@0wy^2zb?LPs;}uFcH&>r;(DwI7kJLWJ`Y?3|{zUiS&e8LL5$>^X-bAX8;J7Kyat_ 
zI-Ce~S8i^mGWTzupIq~8#9?o*bOUgigLOxnjrVhRpe>3-y6|Bpk@aH6t7Nv8iE7BQ zeJ|YP_mg&y;@T~?Mh{D}WtiG-7(yNdTw{jrqJJP74~|XBQYg+tK^tme56>zGig8>#9FsP}&b} z)PLt36sA*A!SEb(?A5s@YYWwczP!lJd_etx=7XK@2WX%;LI%S)P&IaBq_9aa?G(-p z(b#hNiTyedt6re~^&;Kgq?= zzBO*2(aMJ+kU_k5YKpcY@*^o3jPz>|Alss#zl0pWq@w3bx0m4e_czB1@z7sRvR;dX zf0-(Kw&U;}aLH@(y0|O@vMLov!RqzIrLd=go7>#wW5LSqn9F^}qYq+Nx_96{M>_I5 z!zmxj-n&KuQ#*Z;B7>w@{$Q=8X>U&$?66zqQSgLbS?auWCK-z_w$Wi#JPai%JZbPj zo$mpHD`~I1Q^OcFdCe0-DE4~ZA1E#dB-C+jfS`bA|HwljfEB{OA$?N501*sHNaq}e zU;GEzZE&_Yh7Lj~Z-{^}ah{}h;^v9_mIktbHY)&MYFHVT%gz-(Y9%oGE92$d5DLr( zH?kV*kjx1d6k@gxle+HU1^IsXg_+$mZ*u@U`DO{NsV@92`&AVWhW{yf0uS!8pdeho znb+z}F2t_30jk`Ghfk?2P~JEkuh+tRqNn%fEi09cU+?8YP40IL)I4nj zQBc95#2j?eS#iNgZkKMIe2Lg|u{q^F;f8r$aSwU@1U&gPs=k2tk)8iUb|^jMrV}^D z!4ka|ORCzlv$|S$o%cAm{@_MJvb2W{RzXlt`>N6kFstYHrxorVNUasLdfh$~qAqx; zS^ZJAN_(JmYxcp1gRN*!Pt#xQ*M6*-%3S4^Q?nSM*I@N9qyz=*+SpKyT>J3cLY+J6 zQF2K&kopXxfm%XFM+e6nltyf+XZPJ*y$}O7k@tItiQxBt5*YKLomnO{8%67yg1fBC zGXxklVo=XN`G`o_@#59-cD%t>kqbLA)@+zjSUT5k8jm;vlo<2`&|acX0o`o|R_8=B zzJ1s>*jM?WGpe_pj$=1ZbL_fx3~=lX{apyi@1Dp-`8d6!b!?9+yuK!R6_GHer>$jM z)2!Zv?lxbC&m?5Cp<_ZdG8g2<@?U9xLKqwk7e0vV<+14!E80q{xJs3i#T5}JY7fF~ zKVl>!Rbq6CYD3&%Gnl8NwO!A z8F}j?M2@t+y7}aCScs!Z!^E6}t4J(?Rm;NGoqi3s*Dxul!Anv}B4cX5=K6~tCFAZH z+P?mWk@X!FOjw_IYr86fb9c$fmU$M;YXSo`v`Q+r@{ftxrZ2iW@#GB^tZ)gSc8q5f z;_hC|nGAOrNP$^oTOBNH_jBC%Pem!D(iR>P9^svi`OcaL?k6(Q8qXVQk3FrZZ8Sd7 zfZJ8SBjoleu2h{FeT6_uvI;E$FAvU`(1`6AVnAil{3VGaN@=uY@id9V)>bkr{UME? z>+gW@t-h@bKaasuQA0G&1S0Ty1W^e(M#DGBByGnr;aqsl^;+O33v{q6Uq>He5Io81 zlZwgA+l6Vw1(^{mg+j^~1FIFRr?L0Yn#=i{jI`vy+v=9^@!t{s!w$4a!)ateo!kPP z(DDkPBdR-!PeLXW@0HqUANw%we5?9GHPL_<8kdcxyXj27fZF`;Mbr~bQsf|_u?81%n@<8n71B;yxkIU)ReS7B#SIs?7Y-CR ztOz68{$!8n8=dWG+8rBeUg~tBUOQ5LwA8Iv&ONg8mb>We9o(j@BT{5eW86e?TidXZ z2jJ1EiQ0hjz1iBW^-2;>jYl|}Z}tNTiX>GgtGcB+muT(x zQhhw@o0W@C<1usvgr3+(6s&NG(!(D!%+{(kw3{r6rCf?a*=vp%3OA|rt6#=@t)LY( zV}T#7Qfda!hgNpL%nnCeAKIUmbHr|bj%cY`g552TFFA?5S*YKFEa(d)BT-t>I^hBRvsr0UuAc4LaxI=4Tb=KFjS^lPa*y^wkoZ9fFJcn)NJLeHD8>*FAt z?HuhOTpAU9n{u?E1JDpN4jtrjo@6iBKPWCnAYWw6xjl^4UIvuhkqw*pnRN9e)g&2; z!{sGk(F^V;!U@-!yEM#ayM}NYRg#YJ>3^LC*$LPn*&gDhORV}$xToy&1P=rk z{vNQ@$l({`a_`|vL*%rsp>q43Y`N&b*e+V}Ua8r0c%oAYeTMs-nG2y{5V zxCJ@GwJi9&lKK2b1`#7;WRPs_jc~zh2+5?%U6WE0_Ez)W$FnwBROls=qyt;zbt>E~ z6u?#PAD#pjg)M{_y~|SR=>rLDkD2#+eKAZWqMRPN!M@7mxMee#!n9=rRV}h9Fx{y$ z@4)7!XXM>KqxG_+Aj+RF63P{$BPPC#M*if{%U+*3%hD$nT+y3*<8$E;7fCl1AwbiH zAITNF9? 
z5?IjUf-$%sY-?h!Tr*2KvgRuATev?B`TzJBaP}y%*WIvD;%Kl1al}Xwp(O@VBADsg z{F+Wo=p4D92>*=?WrwnQ+PuR#+{ff>9}eK`dsDv7!K|()X&U@m`>;Jt$V3r7j|^at z(?tSTOsQ*$oc#-}Lqix6K-T{sJ84Y-RFYo&2AmtnX*@~)gu-|tmqy`&1kQr0eiYZ; z^`C|+bPqH9akENjnP7{)amTeW23{lp+yBqPR@X@@PYoyj&72r>Ka$SV}tUs<8UOcv7T_lCXQk}r@xf*hgDPSc^udQy_>CL07rZ_wm)M@G3``!Q}atJv!^MFUTm@!sJmYJ7r*(SBU3*VqD| zz;$xnlsBWi92R^g$*s}9)v9g+wxfUks73mphk1w@xhy?zI+O|R$Ex`+_r}nSngUiV zAa{I+Bxd-w!Wl_{Auno5#~A4eTf$y^kCM-O4<`C&jw7d3D=SXbyT~afG7O;2NN409 z^IG1J-hSSc-+ZYQ^_U1;nfmY zzosg07VClPm&BzlvM=rubMrj8FxqI6-a`w3^YPiHuP;vmLwXRi-J3}0u7&o zEUSylvllxW)UXOfG;FAJDPp&o6%>6HYa-1x2MrRrogLx4#bR@mIMr zwztuB9sspV`%$ z`|<5WU_6ytsK;8k{)Z)qAvqZk!l=nHh#OPF5b%2WAq3XR6sUAh!C6lMGC?Q`FFWKT zmsx`ukCYCA?j&fnkqO)BW#{`4gxcTP%xekdg9H4wSRu5pgjcP9x*f3ycsF7Z0<#ng zs-6^bh;vcsqwr&WRqmq_0Oxl(((D}u9Cs~!>xl?NU)818XDu|uyL!cq$;ipmqs!xK zdelxxt$_!Gapj1NDEBHZ@>J-G0MnBaY$3vkvDaO)n5%?+Ug`zOuA3?Dwi4r4~greto*Xy z9+Z-bSGu6qIGwjIaYz_zEDsLHsT1$a z{kaM$oP#i1XMNGe^nO>kVqlp0%nuEo7JVzT-XK!PGT0^co1H!lxjdElA0LG&rcV&m z8K-)92ypzDPGnUwGuKJHf5crSTc1X~2sbJ_OBa*`uWP@go8-R6OdKW>5gnd8-BAGX z#E?JL#+V}anI;9>Us!F z6wkV5M_FE}E#$fMYIPi8hWj=SR?Vws1Z4K(fZgSDyqbOvk{Q2S;Xc(-vFNut(9<&& zf{W@~CuzwFsJI~8*><@_m+CqXE7Y#1|7tyYbEbw$VRt9=`)xYMhzo`oWb5OU+XQ8r zFTGqq;zzKp;rZY!S_|rd>+$-+LK&Rb&br!*exy*VKMGZ=sbm~W^7L`4)rQ(Jwe6kI z%Vh6HcdJ`W-`#CG(x3sVIg`f6yz_1(2v1;has&-KXk88B_5~;s0^zAng3&r4^b&tZ z-74lD$NB@0C=$?$uofs}a&3MYF4o@#s1)8~IaC^Ph?{m1mLE8#No(N5%e5&p#%x6$ zD3uL9MkCk~YZZ@r_M~o(Ki4Sdk&SHO^Fs;BG3~3Shc0S8v>&lgWa&Fx!7A3?>TlzJ zSRFLFCPNcpRTo%q172nvO&d4dwxOnC! z>Yg~CE^E9~Xl1~0CG#w(lK`+ekw2H14P`*Ym6i#66z&PRPmw(KoJkw$#w16)Q-$61 zp6Vz0pWQX?4*Vt1I0JZk3-s8!FC1@7b%({aQk%Lu9&oiu$70VW0NO7iP&+RQmhD*M z@T8|SU{xC5N;%Dw9E3_s%5;8M=(1s=!1?z~PO9;hG zttd8g92c_P%KXJQB*yh{qoU9W_9S4Ve=HGJ9i%Se(s4ApS>h+}$+MM9v#kB{9mU<1 z&gkFMc#b<#h<$5fUmS{HvV^StRqJpoGD8jj&A99Db zf>g*jSU2It4PpPf$s(rUP7Z#7;`hKm&1tmMjGvx*IN3R=tzSRjPAEN|BLGX32G+m0 z>l_El7N#1aQ}CSoc$Oc!pfn4~L((&{xs)Hv zSNqiwl{xbYDLw**=-=hfsM z%ROEfCX7rptp6J2(%6*G^R|j6BTkxC=NkbJynYKCj^;gS>izXPGK1Fn`(= zS;hSkLl1mPJ6AjQ*6qG=wK{uNaC%bWb&2Bsx+4#9C{na!%|_tiy;P#62fvLMv1!v{ zVyFlZRZ=qZG@(ueU%wW0CK4#*=M?`Or>wpmyyRS-?47VJZk)1rvc|}bwWqkP1&^a7 z73Rhq3gmsaxwRf!G@99Sf)^PhOEK0=-3U?D7Co_<5Dad4f%@`#zeOwGgUi#1|8bgx zlfvC=PIXwxA7L95?5Y?(wgmvr;pdOix@PZf$s#WC-1mL3n zxAtwuh$sQgXZq5(T+%}s%IFbCa|cIB{?w!LwgyfOGmI(`nLR5=8d;Rbr?6pRf=jjk z`|3xloUj@O8S#$*##s-KbavCv9Nitra?Hq!%Uprd4#0+1L z4+HHLmnw^pqu5OD@r~8!R~GazGU7vf$?+jrc%V&exrygQXna=`p6 z5RepnzW)>*ma!-jH24hL+XH*>r13zS|7kx89c-7zxA9UPbUy2_7qzl_E0bXlze-km zqDBz8>^kscyuDB0-gL@f;8>R^-q$RBhokF8GV^L|K7+!So1+EE@U&Hk?%y>Pc134S zI9e%FwvEd$xx5&FP|*^}PR78q2a?p(c1z{YNY=kC+G?K_g~-R1a;6+*FHVdaU#Gv{;Qq_lk+7rPzJ z+55lHoC(Y|U@tcZX9q9JFxmjlYwAWN-yB2%4R|-YQTy!y%l^Iy`97KuM*&pTR13!BnmvpKJ&;r=%p$Du|At3laOFY>>^ z_-LrN2g+Xbb3~acY5ybmtFW~Hl}5qF zPaReQPjlkpBhTt)fX#9wh{~`*syeDm1>pGqS~JB*`O>zjLP`xir-U#zwJcR@)&xqw z2vdm-bg!lqqMO4N+qh@NH?&j9j5HTTimW*|+RUT-e6R;_zj$OG7x)(6mPZPQL(C=L zq)6}(t@$4~Wu0q}s2R66chg|U#OxAQF+*kQyQ*Tpn%KSwB zmRZvcqNfP$j{8oaJJ|0&z1r8>ARIq_Q5Z7LMvJXKKz(xzikrPH{8UW*8RYP5A$&wW z)s607bmQM-q8CYgBQ6lY?gaJT|KL&y!@kplwuwvyB(D)aO%xtt%lA94nrL7MT*?xY zurg12{CQKA+ZSfmPsM&$UuAa5{aBY3<=u47s|TPGImh?qr|D6KXhW2}1I)*D%J;T3 zGPUF|Ex|5*pHk8s%$%-=t*K_Nd-c8u`#^Wbl9X6q=GOz|Z;$M`)MZoAP&4u`nh#q) zJ(7sLk^E6Yn_^;%Em%gH?0haVwyuWS3(0nw@EyV{5Ki0O&o$?N-Caj5PDe6!(uNMe+KDO040`&qe|dO^xp3$n-WI~f7<#@^!@Ikxs5?mPZOQ)bpukC{ zIlXQ-q>WH!*WFF^e>l{iKH7T4J0yeVT8Tc&ouX!1@wcthP2pF%1THD{O2v$Wg@K)d zz?N@sY%ILPlpb!yqSyR6JPzfKo6fa8q|^4zP$pVRYR{sj%yL>+Y_iBLF?eM z8J*qtkTnbHCDS*Tz;#L22Xy=EC5Rpv8w;SDNHWq9M1qqCgua5g#{3Wj=NKb9P 
zJo%C20fxdJ+3S^%q?#AX@@0OO$>8107Ozjx%hLO9WV^E44dRcCx4uEL(Hj~RJEf;= zSeOtPdX~6}lsD4Gwtz}KB0GM7inpj>hP;_NapTq1G(uI8H}?1@KM+n2>W~<;(d&|3 zyC!6Zt1kCBYo~6n>hNLYd43vD0MrfR6wK=P)sJ-)=+BBUHdcB(%qoVs%z{b9zd}FA z;FwgE{H=XLS53dLa4i5{rCgKCg-8&2*p0leS`(a2ZYvDc8qXouOFs;3tpr?9!F`H$ z!xzD<7!EEXhoN|B3`u&O!_kmF>3(^BJh`HLY0bG1*Xe4paC3R6&k8RsXW;jz7bSG?vW)?-K=$&HhuoS zx>ob=uUmx}{&D}ib!cF72bJP2G})j|)=yMD%|M%R|BFgNLnN;&?)FmyyhY({Ey7$m zlV%0_%P!MYkNj6SlnA^$7fA@@bd^v?VB&(IR!_5b(U{kTzQP&wa?z?|IfLt z-F6@D!+8s7RkOyJV~kJw*ZVh$Oy0JSkF{sDo2+z-1iYopa(UtA^arTG1IiV($1prDQE=Pk)&xALu+8?TFkLkU>zh&y(pxS}$3@i$04Z4B5KX)iI&u$pxyd$&1axkWE z=UanF2MP&$34e7HCw4;lg#^^Y?q!f+``W6{QoDEr2vHIOWp0O7oZHd68*sh4Qd^-V zeyxcp=j9z62A1;E3k-SjAa+DjOSZRNJ7nc(Q@PDAW$$-MsJV9h&P6wMUNyFV{8LPQ zS0X4G#{K&$E4aZ0*F#mdGaYvw!_w3wo~aHG(2WEw_7Il2t0hkpjcr&Fhc5Og@MPG* zA1A3m$1Nsz%h2(rl-;YSIAFW5iY@m_JXIRn@y++biwCP51PtdEvp8K&gCCZ{4rW=K zwrRWr?7?}hX0mtxtbYIix;?r7;f0P|OyHJBW#Q;wubDktO!)UFH87VKPNzKZs?aY`#0Yf9 zHtXHsvgBOhqUmsvypPxB85G^dkuGIfVV%(#0}Rw-u{Z1js%Z600tpjnUwKbuS z^Jw)*dDz)p&+SOIxY{}H&2EnJZSeGu>CfIBW&i|BlT!+Miu#1YQ8LkfMFld{T%sNL$V~r^WWoO2Zx#pq_M%?#%NRTUpyuSn-Z)Hx2!)(x%`2Eg7iAKUJ(T z{#2}M<*7oN{Ul>Vzir8*qya&ZnLg*G*4A}acxeGM6?0*BC8~j%^?)D|UF7kTNKz%>1i)3^+3zWu{J{~UP z`duVhs`5HBh8~;QqGCK;3*j@jms@q+?rk{cLa-N zd1DlMS>3y(558e=vwCgE;<+K&2Nh?apQ3{IF!ZA~-E8YX+J5zFa5`q^_vO)tJ}MC1}4HYz%I>o6D$ z6C?m1F-^FE=mX!Y$OZC9gIaaHixoVwH&C?7W0$0?e1xG=K@HVBLJgKI7Sba6Q{q58 z?fhD!r0Nx=I7w@Idw|=wA0Qa%sbL=<>esoB5~Td_R3&_Lu6KOgn@V27wU+zklHTsA z(wiTNc0t=1lTM)Gw2MZyDUD>^?InHx;LQ#YlgDBT)_=y`KzV6Fc+PN@xO%o*(eluI zi|iW-o5|9vGCF`kO$c#$HzeiHb93|#djg#_`o+YJn(c^`#p(=ZSP^}|rrDjG$Dndm zZycaN01z9zQdVx(?QfgDhvRe?nBB}7A)ILU%yURS3z)NB6yvf>&W>!n+p@Y?RW=DW zpRYl6Mx}vpapUIA%ysW}tzfs(mK(_&PHprO?LKiU1+0RHy9#3)B3d?3W-dnR|MZ`=&ns-?7Wr#f$ zQ$zJxZ`krfGPq$vo%da8%N9n*np{dvX)nI43?0N&xGy`lpZ}@xOFF1opVHuyp%9!M zKL8DRnY=>d`*{fBt>e@kvsbq1T&?UA)jMZ2smR6m|^R8p}KQ5pd_Z#z6C^2IjE1=aUsd1f6c@1@8zU zokOBorF!5gas$xJT43wRIr~1t$f??@?bj1Q)On5!NHK814w9Zns~_mGok&C4de0Jv-tcvBh|(- z{Nv3th1AFWZ?4L1O7how4jh5p6;6x+ zBTmZ9@a6X{BkuIH_*zR#50!h>G>VTE8kYxv)UeQOs_V3I>d0cP+Pqdq=?mjGI_LN> z&VSZN6)^{4R!Pih1qeOQ|6sxmWbX`y^zD_IWebRq{F_6cvrOuBx4hsix1pM3y22)% z_=T?PcEcbe<{B(7W}U#JS2bav8@TJR6j0mpDJQ>7K%IA-9V*}bt(eOP5WU(fuU449 z5BRWBV%G@U)j&sJnkl@A`Kxmk$d(`LH-Q8i($h z$1O!zLOp(8++h2(wGBmC{Pz#3Xz9!1ny(yb%Sm9QMN&JW2@UkQuN`=O?+HXyKdkWWOwOjhvpM+fn8!2y4QZ*E*{-F_0CacwYY`p1Ok>WjoB zBC>ety1=q&!G=PEudx)DMnev>e8lBA7Y{i|s$z@Vr*s*h^Fs((Ryi0Wr<(2pvUrvi zTQ`MJDBlbD(LI)*qM-DtI(4;511{W8#6E*;C-_qX!x9kn0UD~DSf95Ib49}<^zW(4 zj(_WBW5R#b_=3}ir?=C?n4dC)esD5N`nE5KWEa5BrA1N_0yF|nb0q^?6=a5OR*2{| z>tT>7QH4(5t9SGW6{IMY^VFypyS)njp=kbEZmH}0<04kux~9SK=4rX*gcN-uJB+q@ zKtid>-+m;Fc7k-_Gx2m{QZ!MtPZp!KfcA{~CfK3K+k52~0l1kHvnx^)~eXga{mY5f8e z!52eiu*(M-TNT}vdBMjVxlD%X8%f4{J#s=}S;GsIWhaAbtpz>BK ziycl-Rn+)aUhRWO*5CauK5t0)BNRJFs24A+`6V@9CGTzD!~BNs>6^aws1Akn;}#05 zQK&H~08!TV*Bo6JL49FP0$I1?wUc}H)rQ{fDgbBYL z0NiV=o4qA&;yTN&I*D%hnN4oL_|-|@W3*-L+ z2xmV5!a$rMq!;-e-^@^bcFWgSUe2$Ti+}d5j`F)Q>X`d8+4B&E@lT%A8~hG^21T{O zK7oK>L+Kfxl;KZ1@&$m1N>S@1Sl-fp7?HeinIAX_?|Av{(aNavvnE-&_^&8-mGkhf zwl+9X#N-#G`wHkjY_qXyq!1ooxQeXb&9<$L`imU2ykn6B=DaiSqCw$et0?sXmIfoi-Sglf>YLH-Z}S*xloyO> z4A=N2=Gfw@ZKqO|X& z`;rF6HOB=be!#_Hw5ZRX#^*Cz-=sz=&&oA18t5gn%u3PV*y;3%5h<&85vRS;dE7-5 z$%CMq+)oAJv-qDR-3J;83WH%Lo3(oOWt919uMuZaNP5yy#air9M?7XfX+;&)$;R=} z!%kw^5%A<*3WwH)kgr?EmAmvsp@Pv_-!&@nH#04<5k_DW4n0DR%!dcf7FU%M|t z=&~{SHLr4V_^q%W`JY7NiT$wV^q)vskfMJpP_F17GV<7geKAyCSb?vy z{*?DG2=ogt*IEX)8h0CE!jxGt>bQujrGbO}T4gITeVXv4dIMXn+qBCEj1LGy=@bhI zJz%Duq?L?dsw#{th9t5h?w`Y#uKI|{Dt}{_|ILpB^3M^7z62R_cbO_5eG3zSO#YEI 
zY+zgz`*YQzRv5#ou&D^mJoPi!VcR7;f_6G}k|V~F*`Af%cNo|ZUa@If8Sygx$Zg;Qd&%28kQ*7)I=j(A4qM~YKf=jc z_w7|v7qZXs79Hzxt&KU7TWMW@!CW+&&-x|*yuM)z6Bj~k=YTreQ>c7nhsnuW?58#{LrkV!)Aln$5Zf7yZLo(rXPy%Cj_c$ZJdTPx2^FG` z(`9*OunKG$nQ7Gn(2k*sne*0P>E#HiIG`~bb^A5zh-0n3-F5p}R(1`KFTUBlzg4*k zi@181!Vf460UH!;$K>rR1b7_WwmNr2PcsE0fah&GLi?}s7Qg}!LUhyq7s1sM@heR@ zldztFhzoL+h5OD6fp8P2kGDKt4e^O&f~1ON{Ix0`sk zl?2!B5A@di_+M-V-mb;STw|zt2_QPP@Fu?Q2jZg+Ude4uwamt5B$*C!2OHDuL*@S(eT?pU0ntZl)k|T$EfPKP$gc*2 zdfQb^P`RRA(|)x|n7&PKAHe}=lrmayj^W46Qlc`=$xkbha6nDx3G-eWb|sS?D8=V< zc&S34;w$ShDf(RCi8|$)`C)3H4kVB=vCvgWnb2ud;*;c!$x8U{C35y@gWisWV?heK z*XX6~DP8dU>ll(zU;x{-^V9GN^U)3~8@h;2vreM5I*R4u^?E%P&;EC(iqf&Jwv?@M zH}B;;njzmt*`<1R08S+!J~$u_L8X zP@HMGI=E9{m0*J8z5>XXDbUIqxbQK)4%HuwEQP2^VT|OgPD6{LcNwiCfY+3CBpG>i zJ>b5ZSY(I(!8NkF?(-t^Y}hnpD$7Eilx|b`#pOq%k;}$Ywb|Fr4{X%{hGIjkVd7GI zrwMTT zj3LbI&OYaK<~SZcp1Ed-pD%hzi;MV`-524#sc%%7mhf3>4VLN}+dwK~EiuflTS!$B zzwng%Z^XV8)JR8?qhHCO->B{eH<26|#OEwGel!{RO&?O=(9nI>mtGU1N0xO^l@tPg zmUOgl^R0)zk_{IuvUferJ`HB7n8+ZKD%X%9kYS}M3ct0&!5 z1d~|gtZ3B25SH~#>MsI0)&JufcKC>RMShz3C|I^yArM5>LGr&l5@f*u0TvueWjNR? zbG6-p%8SQTlyU0;6r%77##-}^rH%_J0M9`5H<9eW+Tb^sjlLlX$~uvlZ}av6tM+dX_uhW1@5>;s?ONTT%OOr}AWvYep8XkPb~;rY_;VgjeAY zlW~Ug9Y|3K*in2voP?lA8Ar~FFnZx0?Bgj8?8l-r-vKh+Uymzr@NccO&ljZ0VYO3_ zboz^YE(1vKzje6TJEF~Ep9Ut)Q>fjz{E$PPXNeJv%gM7E1l`#KJm(0Yf|V1^EsLmC zZLXXczyRnech1Fu2f5|4(%`~)xVh!Tn`e;bY)ne0>736>GOT;QK=JuyA$Vp*q|F28 z%grV>05sN7{|S(6RK@mlwclBGQ6{^8d!_gx9xHSg;^n*E7pp%L!oYbxJ$u*THw}91+@ z0F%Z2gRI&8YX)W9u(2_Z#Bw*4z^!07;G44yRv9|oP4lV8zIu z1j6-83lPksOaac*v(JoT4s%ZUV!r+SWDc~w$r)Gv7%ujP?EPQAkt=pR00^1@anjh& zOiw0Yep-Je@tlfTn36$%!V#zB0(mb$%lRahV=bXxk_JqW>K~dA_u#mx$SZQ_pBbB& zM=$;Dd}Qc)F9F_U2hN`;FM@@QFv_pzU!&symhXwPITmu#47_|hxWC@W|Epm^0Gcq_ zUZYq+$PIl|@&~r;;5an|nBtR!C{ZQ)5Fn8T)BQ;*iA~H| z$zMkO2?&94_Syhb1e6%@xj5?{K;W=~^v|fDHrGGnJW2Yij*x36daVPToVvfDP`g|) zitiW%e{B!NtUF_1ihxY+|BGQAPO-zrLR*TW3#oNUpB^dq|ErmLmEZ*^B0r;9Z@2w_ z*dS#_i1e2*vA+CLCF($CFZ1I6v{5brlGD@C{%6ZMApq9kzb-5Aksq{w@5=xCk^i~f z|L=SG@Au0X_DJ^>DL-u)PZj^{2UvLc)+Nn*uqf%(&B+b>)SSRGpM)0~=knfZl)8T( z?_!?1uL{@Ohgl~LEW0nP(v>(6j{<9VZiPs7uV#yRhTrV&M}BGTM;vdlo(gmL%M$@~ ze6-5DoU#N)Ed+^hz+{vw?)Vy5Q5lSUf2;Og1aD|BBBUi33^%zIc}+Vhpzi!3%G&f#+)Oi!(viKTkKi9I*x25=#!+Cy+>|$feG=bvb9OM^5I@lGV$Z>p# zyzSAsjd6cSoSZO*?}Iz{R`?KOoVe!0;{vYCj^C&;B&FGX)7mczdF`|c14{$XS??+p zZsNATeZ?PZxboZi5~Ibve;rX4bQKJho74q^5_$i&m*od~f&87r&MUxun^ure<|+@- zDllB8*yEo`hINMG8a&La$FrO923+p@iN&;I=y}mQ*g`uuJqfA@ zg?GSQ-$@v`?zW>@iWn6{@K@m){`QOAIV{>H4pTU9)a&tMV)j_n>Q82c2Wu#c#;X`< z`l|05{$9u5A~}SXS3c~_$rb%&`{NLO)E^RmolMoxs6H4MEFx6lO7Gvt`YSD|j`7rC z@3;_reW^Rx)l|>IZ1$1#XCWbcNpH=LJ4Kxm*pcAemWfW-T%{@)DT-iA_x?EX`HI2uRlf#G zK@^4Adc%cP|DOpQ2pu*>G<$i5oI?)+xeK13P~6}JPX(RQ&`f|Hs8VKJo35~a6y0c8 zC!sVWqxvhb1$7F9?owq`7!M=lh>^ya)4pQHV{Q(}B9u!wp2VX{S3=D#Y)TtWrv)FK z7l;Q*+H!ULhU|JAiH^aj>?kZE?p6|9I@w)awjxHa3U9ng_!DYsk;58aTYO))A)*NUVFb1rOnwYP^m*#ZiI-xA6OmzeKh* z!<(wu49%tfCV{{{`TV=}pYSO~p$dcZQ{W@XLf8J>h1>3(5UV_@&+5_2Hh2P3?{Mku zdc}%VvGK^cN^Ylb$Ff#~J8dq#?5m7-hWZ?CJN9f13C-pf=`{Ca>-31>JN;fu`Z>>M z-h7_=4c8rf(Czy0l0%Lb^+bc~61D9^gX0x{PpnYk@hExf{qh0?0K%SAxkugTOw(N{ zxoS=a*wj02^26Jy%X^d8SC;+MVJVz1n=OoM=GnQp%@Fq#%lTAvF(H~j@1AU%`U=h) z-DD-T!_M?qXnS^REB|=*g*LPS!VJ5H$0LaxUe!z7K_*Ls8m20-{P8+ZcinxPgXM!q zk`*(h;)1)_pwOl-odfooR`1NEJ-;a9t%-%aW`vh5CF-gv+tBaztH9?ohRN;M2^c_l zXouqE@^$m^g#R-s#y|x&S06n4K5|ARaj_WZc?tYOn5`ORuexg(Z?9{WuR3ZE6M5}Q zY@DGbrvlJ;c|&G91)K4H%X%w&$3d}tbfg;HaJm(jDk@mC{HvE2_p*6f=%e+XS5ugG zbrwKPzcG;d__SN}-alvHH2?9UW&Pd7miOrS1HK1BjP0^S-cOr9!K7U^M*KQcpYB}l zhwEKMjD1yIpOdN?EEt6J6{_1Kfs_-T?q=5@NW@)`Ho$9`Z>F{j?;zVL%P~#u?u9IT 
z3Ay^b^q9RQICIenh1jljYfn2I5gjBT+1^$Ea?3u#xu8Uv8{Ry55v|bfsYI+2&Y&_Ds56 zQbbV6aLzB5n)vg&3p#z6SXS-wsCU5qV%H6tzB3N{%$!Xt_*#A`)Zxl~U4Na$kWp_| z6V1IawwWMy>6_I11+e8ovHgqQ8?u`1v|K5U-WO20j*-(vZhgdJhDl7ip(JJUJw%`8 zrpnd(7>=A3g+LwF7TZc!Mu-B8L#E&4;-UF;;t{#h5oM=$`VOVzL;AAN-nU z2*$kY4fi@RJ-}Rv@Kp1YJmM7n0ygGaB3g?9FBzmMdZgSi)ao$BZ0cbZC-nHZ{bn3c zKa1a@D!h=tNy6J~>FR~{nsjLa;&h^M#Wx;R>vDzgNh9hxKGEk;E8ySqobTu#5HsVZ zt_d?Vuy^ZTZOX_#V4?fm7wNUXEI6jL0^xyQ?s_PHP+7h8p&qd&`*c@ywjR=44R7U8 z@OUw^q#K-cVq<4Q!L4CYJPP}_3qS^3K;>O^fp4F#6|wbXZV)8vJDrto84K^n(M;wPJZZ$;m zdkZzkL43>W;CfChHf0#KL*DyxRczSWI1WiM!EZsIaA0n;^r|GMT{O?0vFoWK?!he~ zDfpa{quDV%bi_^kcqfZdUHEq@IZ3>Cq|7RW3O~`-Uo|2%gK^1J;}W@KL5(jHch5jr z$jy5XCfrdk7}nfUe3ruAgO0I&-_lyoR79l_;I_H#vo9On&{|?7gUNrFe)5I$f-;t4 zAoGSpamBrxlDv|pAMmAqOCgl1V#$R9a#P{!f&>qw69wg13w3@PL4~VM8p;H zG|8AwD-KG^>P)?Pdgzuc)+{9Jt1`%K+#gP4{t5$cXaTIUosJV95@g-d;8AqcaxLI_ zz2T|V|Gd~EXyHC&rdqan5pLL`o8owUiyL=BwVoznThaC1#WtzOwyn1e-REHt^bn@Gd^M`Y__Z=BZbplgAv7Bu34Jl|ENEn* zJ5fdbx8j=N-jsI@&|SgNZM%|c>prGeZfa{4AFq_XIf|nl{*In7gQMc7zJ`?_eEP5l zwESj`!lN22-cZ&k2kZ>yMY^fSXA04O26B% zk9@OmFSB)4=_G*1{5RO}DTLnJ>eq=Xz8vBqs%H3)$Y>hDwQY<~me-h4#}+)W7lRYT zb7?z|NU|;$Lh&(6BASNW(JQJmX*2>yVxksLIaYo-C&X%$=2Y(Ss5~Q+3CKApVbD-OpCt7v&y6Ya4&O@ z>{pw^v~aW>Ukv^O7x^zU1TfOw3A#7nwO&Cjpf{#Qw8INX@xSkjS;Uy$M=|O1aF&Gp z6g~_~Pgy`Zjp`JO2gs{p-oy$EnpWHqF_O%FeT&7NDTG^X<;VXU-{a#pW=>-*e2H)N zR1uu zXEq#Kxi4<_JI&*2X#rH{YRQ9(?oIlZ<+jCR2w ziM2mlW}w_znXufj`MY#cXig^8Z0HhmY1oa;W(q{WOMw>e%E(kcE&=bz=pWGP2YH`v zQRN+uJH=r-SZbI7Jcd_$Y3Lj{iikvtkaP9tvDIQ{rQuy=!blpVB_&otIlUNG1y{s@ z@4HS@2udb{blOLNMwWuRk2Te(aUt@HOh>nM?{hEJ5L{{gfJuANEh`wD4xnNzS-90y za9~82wQ!)n@;1gus0Ndo~Ouw@EJ%+G{2m-VN%!$*tUUW*?IK>cKBJ46>-(N`BGul!gm zfWrq{E&li=srQcP;nXmSriBGDdGHT*KF zA|WCaN;hG5z7rSn0gN$1(S~O^_q0+K<`|v&Y~{{!&?MsH(3w1jUC_jU+-$_2;6`DxL1AcI@LvO zbT|*r*ds@i<#2i5GRWQ92jIE@a+-HG5zwl>s2OIlVzwHJh4s;)=_Z$YB z<#CRmu(ZkrUkeXqaGNcGGHwBSp>&yY6umCB1Zxmd#%TbkYq7l|^08yG-f%e20m1Rg z_8BGpLTJ&A9m~nwke#_0A7&+zRX}SC^@Mlr39Bsm!8OjFI;#rg5ZyhpJ$WDRsO(d! 
z@1?(8_3)YuLKMNi8Qc=clSfA4H!RHpXVJk@=KxafyTN_WyXuB~S;7%8r=^m?Uq3-vzm7|)fIbxv$ zWF?blVrpp%y{1|5eetKxclJYg8EXEplDfD3nb;gqxOe|}SYJ(VYC zVp*rZRe~Mb?QPvT&kP=}7e?^p)PspVXg1ecdwXzA)1OZzq>?cpmTyCIwp4O98r3(u zsd}L|qUN?lntqwjHSP6VcWY(kNT?7*t?c_5O?*HLM!7doV-mEF;`3th5$W;_;R*&p zpn}hwE`3I79%qgOq$uSCx}H}}ILmuT?iMBOpdvDh&i@;{wpI9iUBuX@>%bD1|Mesz zb}9f&X8UaMaVOQ@lPSA$)Ocv`8LOt7s-*{tAqqMM%wo5y)N{uD6|A|?6$DnORL-+F zXXdUe?%iihf8p~f3a1!XG|rL5;lMrZR0eF(y)oslCCw4{e&oH}ND0Q^7&I+NFVMS5 z`VxD!;d6q{cQAwNsaGSFZ8$)?5N#d$b%UrS4DW-nS&bG-@W}6jS@s_5i_~Ugv)bp8 z>t*CAOAIN#%C%AFHC$BXN_tK+^w#!vSBH1@Kk2?7iKXR?KC^`2WC`TiDYVrb@m1w% zu?y)P@@iLzzG-bhfaqB|xk;gn-P*fqwmdp-YXyC$pCKzx{DtDv3^Q^9V)k3RMmy`t zs6nk({TDJfo#94twrw3&T%v*(WKylr6O%3V)+~= zjtk@@XuC(taTWQD=)c%nm=fxFZ*1ZQ{VU7?L8h7vkMQ#H0Z&*_t<7VP)DTIAcldT) zKBU<2XsSQ4aIOblYAWG`Jw!aYvMgVc>JGsP(h5+oEwyhRnbpaYia~}T6 zgo&%HuCq|7MeI{N<4Nppf_Y+|Xc!{-Ie|*B%=J{On1p6){I1edw0 zk0_;FQa4kw+tvW4x0C6^i^VqHig%Lpkj7yi8-+3sZ>2O!%J;I_&n{|!lp(rnE30>2 z!fu3WLUn0;H=O9Otkns;jUVXka>gn%YRv|FA^4jNvPC5rw+WdMDE7NiWIg1)()-~W zLIutO`taPQPxnB3mpT_)S8mt?GKNKfveut6&QlV}CbUoKj(+n-O$1tuN$7xMal9M5u!o52WnLGr=`$ZbG;VLHp(=_3|$bko%;vQoutRF&FAIA;KwDYD=nJVbFP4sr-N?+ zAWJj=LjfU;*-$Le@?OqD#a?Jx&b4z+RnA2iF&TU1Xk`N(UxgvGv!yAF18zHQsoE`n z)#h4#<7Vpd)vfmLduw|Ci8<-i)o_U{Mo}jn$-z${Z zUD+HyTRW}1(?kZNjJ=W2d~760`P#9o12-VFEShh^o5wW*4&>~5%$5?d-hIq{&!!RJ zeU=kX1OtH+h_U)!${`iW)^7rY9bU9^`AG!>?nc(s+lhA-HjEQi`QU$ZCimw0D)xjK z8OK8w7!4r83`s%ryM;S_{6Zt`#g)EYx`IRYw=td&EWFQ6Ojm{;l z=_I1I(vagP2j^}yyRgUoZQps7UE+jc(;0zLUFn|i;o3bWsMU@(p<#?D;u%@_4*Mt< z^*cw1=`;SW2KEG{8s=@RhXT{-u9c0-fWo=0fpPfVm|nO{@4Zm1njI~m*ec2Pr$QcY z{P~X|iGwWWbQ4ZJ`z{Qu&U<&9cN<7{ky9BmlrdIeH?Qv$M}E@*IX9cqV2A>_&rZWRh5vH>)lMQG3L&|x9cM$bR!=@hhP?#>RVjd7@3ljl`FOA2%xzfGUOo+i`h^=Q$!(!iX_MX_NWk>j?!-_%LS&KP0^BwxtbEyNHan&37)W z=j1oL(R7*;z^0r*nRPu(>i2VA;-82+oOJV9HI$|@^>fFIB7ECBjL&;pkroqJs^Dup zZ&#rfIxdl@!Y8?&;SwyL85dl6Rdjx+4`eEl@X{!B+DS3zljDg4d6%1?mCogLY9|rT zi@30WX!!Uru4mY=?Mm!zB+x0iygbQ?-5A`!R8uesNqYhN}m(1E&p2{Fe49jAN8P9&;?{LG}9DkcWHDCf5gv zoKLSk@6zlTTc*8Zvm}jA@26D`aI%$94UTKaF_@VS8LB4`2mH{I0QSoM78u`hU9d(T zVt#d!e3G=2=l9C#nMJrCK83VPfV{8-p`GG!_i4RQ8wC8$c@RWbUc zC$j&61KMM#e19|<28p&5GT)UgVvJysR<*=QdB}8jWV!gJGI7YzFNXvaCdR_bs?d(? z-@`hqE0V*i;09DJXtH58+h!v~(=z*Y;A9cjWQ&a+@Z z$_pWj-EMrtBZ;WxMgA$|t5)iqxE07e4e;+pt$=^e1u``OCNJ`4qgNwzE=+f?aZ5%P zwEZuC*l8v3Pm>d((1yzu^ryP>Z?nDFA~A5-^c(xEK2~<)NUi*0{kgF{D1#WHm}~zX zl;L|qIf=*yuAPQ#uBM29GAw}FkIwQiRShWP0X2>$D@I2UvDm5-rnb|LghYMb*|5IT zykd9|nTSY5%?Z@Zzs@XQpryt-UGO*6hm)rAPWi{W&B`nujK#?3PAlp_H++iFxf_=P z(a?!rg9BGP3@B2lRkcjH5KU{2c9@7=67h(JUbVs(=|+5G&!^+{12|qg*<`k2HrON$ zx(OEI4a|#LBPi-hwN)7<7@UfH^mm$%oLCLw5dB}%^7w4M0fup_Jy`2qSeKrp@OYI~ zFe{4Bv1EgjdIpS4Kd(|6IB*Avt-v#-%(0}Yesbgvx`ezcifQ5W55{?wgx3D^nP9)a zNUkrUWu(pd4{DxYju;bp!C!gdZNj*6Y=9PF8M$oNJTl`>Pv04i@W6kz|dV_AqCD*s~6yQsMA2nomt zMh$I0Cim?k@65U|I-yt8#Dw3Jj_YyI6Ja};;hMiSB7BGUOHKk5~i=bCTf<+ZYHq#^R@(w0Gkpwz4&nKTP~`G$sMXEx@?){(6h-RhI$0_Zu~ zDki8tIo^A29tN9OU`d}k?@N8t;5WE8>_W?y3@x~_;f{5U4LQWm9WXS&6dN<)9zH*? 
z=%z34n@$op`rH~`62mChV9p}>#0UvJFxmx8FLZZd5J|cX5;4R1Ya>J5{4={hRlL($ z8^YIHHOrNq_+?<=0o)8$9UcctdmtvzR&e^aoz87Rp;XwhxnKWe_R$^>Kq0T3+3!#^ zZ2XLcL(4?VaDl^TZ6^MLBOj*)^YpH>nbXXDD=|pNP3Cl&Z(;no5A7T^utM$`=}oUg zT8<)G5BS|InLa9BBQwKJw)mSh33=${sl`*CY(DR?s9yZ-TlnJ<+sJh%ur)VT#4f0Y zE0G2(v7{mE5xlB%7x3F08I53^y~|$sFqbasUE}ITV?qY}CNZ^qqCV*(MuxY{>;hkU;h{_yRpdBeZS(X$I z*ui*amc>*zoqp78=;{kYIfj=G51;!NiFtsqw=DT;xlvq75X%FrFA0|NP7{ZPO^tp- z=(}~W>c($kP-s)axD*Ewl-w$ixxEXuTLm$BB#*x9@jf{0jiQmQWLWwnX6QYjAAOCN`e8^PlK8>W{1 zZ+`WOW-w1oHy-a+{LdS;%%@EXBtn1khY;aqQ+u^j@=dO<>e*B-J{HJxZV|M%t-fx|e{) zEo&^dTMBwGqk}@Nx4z`82c~bxy;ER%xPpnXVmSL=h36#)pp=($Xp#*rTJk`HsA@eR z&<8w2?+J(eJ}U48m-gK5n^C->3$zc<>GYlS?nUK7bh#)hiAIR#&9I`id7+EX;fnpX zeexk&i6@$UQqXWzy;C?PTO5(`S&PTKXk-6`|{xurd#h1)2 zTnq~sXWrDrQR;|$F=b2_)J4T)7dJxVRdoF^XJtMK2154NH0z0zZ98-7rWe_AsdIG+ zDn=ktfF$!X4W`*yh=z3a=1d@!FONAm8LHW%A*?oj?qs@4d&-je?iw;kx@P1w171)>}CQ3*n`p%Z3lty+u zr?AcZCla&vqY5Zv)MV4LO6;R(4iwUzy8?{)VZn7>@W-_`HQkTCrtcVP<{~vZ6z+(f z%O{nTJqgQ1S6Th#y3rX>yC_cH-X;VMp?mXzSHgC>%a^iHVuduBK7i_204Kbh-E*R< z9196HcR@^M#xI!)TC8S{t0koK{?4F&szBQ=&C=n;ba}A+69vaD;f<%QZ0#QE2k$0Ne!9=rq^SI`i!x zDSTEaLjcQOv#1}AX+feZrovqZ5c7QJe>-f_tzAKAPNXx>_0@)-^h?A5C`+=a^vZPu zHyKP+l$=**s--hSsK3!|0^A5QB>nISbSxWYzp_GZqqmfac~x$N#cH)coE5j=<62Y;YsPr zQ<2OLed@fpefiy@x?R1K`ncUD&6u>?DWoav4=-}eU#F~sZp%$O0!?SKX*66&!;Nlj zLaITqWz?{--o7J&ol^S^2%NlCs8aZ15!dTt18qsRqGvn7*B)GnvnWVS-&~gY*FD~{ z50Om(`~Dw8?F1O=+UHSz%)bw9xpbw7z&HzNYJFt_y@#&86Gk>C>&<*n+PY<6Bp{{f zqF_XtV)qVDJL@y&7DOZ($6VU!j@+{Kn7Ab>z5f2Ml|BPjnsm*A^Epo`tuW<21zJO1 zT)Bfi9X7}9C;oM+mF%qo3WNxDJPDa}4k;9YHzbVRWIIO^(Usgy22v#?F#IBxXoaPLd+~nX zXMYsl$o!wDc`6a8TqZ>W8X;=AhRe%38ZTik6z`KEuH=W+_Xm$ATh}GfF|09pj>edV zT5q$nhlkuPQ>w$2mk(Sngx*>$k&hqYI6r+X3*A$(#|D^1KtLCF4%}{m!;)p1MHUZP zWj(;Rp*;@zf%N4T?`Su1Xt>_o^sC`f8M817D9RcwPe5E$^Rz90JBIY$3tKjE4Yc}m z2e8!nqAB&NXPau5t}OhB@^f8-7;0s>aEtqq1iM~c+q;*?Z%*JcQ|H^Ns{+g22IYQI zCuxI&gfq#?4K^}SnN1(qiJD!pBFPPpR<8N9_$vumJe0*v^{|SEBWqpH$!ztiVtPNH zNybklXaGEUJDjHi^i(D$ZNC-neYd4{)4_;UDXMRzA6=oN_dXtlF^K>_f&q-_^o{^c)P0t`s6ft~pxgBb4 zNRwT?xu**9#Pp|fDgC{dF%Zx46Ry`-75&pIE5DtX9bHre{fC;YI@eQ7azmZv>*hYr zN;gV(Wpq)!A2_{o{i^sdea5sixGWco64QDSp>0_LldG#58NsX%Z(bzn=P1;-4y?;q z(h~x$CjVuqrwW0kP5g2a`NOIPqReMl_T6Waz)Jy_oDC{Gi`lK|62A{LxZ0|~KR2$-XDvvsbAEvB$KBp)vi zOLd$F-hQ;gA0+`D^to(pPIF`j4g6Xt0RMgF$JNhzr;r;{B}#??1hy!S&w*exnhkcJ zV}43N&#T1-F=U##IR0rJ5QO!1sMQ}v{Tg(LpEL|KmU<~%S9FwUcJkft*m>as*6P9* zsacOq?qN?Vrs6W}*sHBf6`phv078_1iXX)7@E{b%8#7T+fhfC!tvsILjz<`j2B2)Q zY=gNCs4~=lOGqMqtTR=QX6rbhi0w_ysD2=sTeK;2k z@21n5zdVUzpVF=mDUxNLq`y7fl<^8iDt^l2PFR_m=Zg*y;2jC+)IW4T?7=0&{w-s1 z%BJ@&ken)h0f-b>an5Miupzu7WCt^4_!`ME@!7lno_x;BQH*Fqkzu0#Z%STETLBC6 z6bs31jw0j)Jba~6_#f?kESF-7TqvbPG#Jh=d4IOLvD#BPj?-EGeB! 
z2ugReq;z*YvtIXg|K7jg{qj6tIN9BqIcMhRZaCka%4HoqJWAS1s=#oU{J3p96mRZ; zhQZ~u$?y-}nr|B#-D@ivl>OaC?^J%qqHVd7oAk$Its?gzC{r+)(&A?=IZR3->PSzZ z*jfJ5e;?&#TNU~FfYp|zvxVv0tIQ1E36NYq^ArKm1+H?hfg%gwp%wb~S?Ymj!I?q= zYAn)?y~&hzuyCMui<;x0m9q@u>WQuR86b05yLa?Z#|y~I{>PUSD4xF`g*NH*y-QUh@qYz9ckT($O5}`ySp7lByr@CEVD)PH2DG6@9hw`AZZ$pq z)3EtU)M;)ua9`RlUHXyl%{j?=Z{LJ1A>(3Q`1HV%rH_Bt(nWVbLmNEOQ-hwejfX1A zuV+Z}2e|E?oL5ua>Ce1++676gj2oJUWjFVKpoajV3vi>6zOcovdkPvYK3yC-y6g~p zV7kwk8g;!h2ex0(5}AL5irB5*c_Kw9Rb1B~Rh`w$(>9)-@TB+cG961dPh3~R$bX!K zc|)ln6gqE>X`B3uH4aT`JY}>j-`U7n+x@lwWOQPWYx<@N*DB@vUauQzxOTFZmB#g_fDYp@NL z${{}j3&hRu6K{Ci78IARDLzqdlC`|{P5ibn>$y@?^E)!KDBWmAS@vg~q5DR$r!oVa z$44RuJLoKXCH(e_2>r8g_!d#Bp-}bXWFYyb*V@*6Rvz{K+Wk=pDhE;}_=Hi=`i3^s({!C1@5m@Gjcw&~FYB=j%W_}z^?=DC+_bP<$5I_9n;}j;qYlSN}`L4=k-9V5%5T7 zz52w^Jv7s}0~j!)JFwzl(UhM74dq=boRY<-UyXY3|G%nqUBwNm>#u z#CjERI5p0g_y>o>&q84Eiee!}=jBqnhS~AMkp*z%#+pD()(P&UBr2X$xY*b>L-p;_xf&LsFDhf# z`W<@Cb-l)jLMFa)X!|I}@-S4}GRumqL@kEQ9PvzVz8K8ou$>?4j@OVMSv}a%OYzzA z=2l%*V&vR+O2E~h!IWyq&T^Ulki}^Ke!7g|F2n$ouxe($9KC zz`P;anJ-5L`k3HmU3pXeVpWRVT^FD_c<{v>_l;FH{5ml-hP3ZhuE2f9e>2eO zz+;=YmZYD(5z8FZ#nI8=?7abs&#hc$$a8M;PY=6Ddr#cdA!~SeO7F_`4cU@X`F{9y z%p?6i(l5U4`yxPgOv)ASF^ktTzhF?n2<@nDJM6k$*%~bp?F(}5C~Oge@m zft}h9E@p-z2;}-W+OfRpnE~f#U4X-{;-ezFqYI$@$o))$YFSF+;(Q-P=8NK?)U<|O z+1vJVC4gB{hklB=?hZb)Hu(@Bp$o_>39$cH(b zCa#wepMX*s(FpQPasxBeGz_w}fnBt~zMUVnLF%a5?jkTr9 zu;fLxTC9iXq4-zwW$J`OqA8!q`r|&cvziW6YbLmyvlW5n_>tZ{)GhQy_jJT3nKcG} zuz?@v>%{@%CNC=M9AEOCG81hNKJ+rM`d4lHTCX$(pr^J#&?(n{jH>pFU|Kho^`IpY z5#LRTTtZ{s>{|%kxlX|P=yXXZ7=cIB#@*ZseNBse^QN|ci@J#{A-3bd3iRPgFfh~e z0Um78lui)+Z7SY;6*kG5Gy&D%QUNUtDX{?TIeeKo-><;C{VwUZA|_Qx;lRG(n|P&_ zA1l^CL0eoRX_p`P873#8uN}Ec7A9rVV)y&R(zPOXq zDqeV}W^xP?cA5>TRc(VMkgoJ`Q!cey>{Z@pC4J05$aTS&qc}eiobVfx{WglV#J7+L zv9J;A^<$&xLu_jW#w%-ia}@D;<0YhjIN=UDH;pVNJ&aJay9g}VyD^yOgXc-+T8e*@ z)|aMC%?c){EWyP&{3?3}SGdC~W4&*kvxI`3QNDlPsH zMNXRzO9Ar+I69mN_P5prLjT<;}0hltOcX=exP?K4J}P-fUy`Z(mPPh44J=H!&+!N-;_u$ zgiTBM=5>ceHMdFfr=IzC{D#i=9Am1ae)z$gzR@ih_{-m7UHPDX^g0r4y8gRW)PvEk zWA=<0_r5YA`u5=~W68@Iu^G@ed)hu?U(q@hp?j-TVdpysMimbPmcXF22k}m<6r!{u768ii}+|8vt`~zfjTSTJ1}Z0E^lO2&^Ww|&w9>C()=j{phQ%V zuubE<=;TMeCmnL)l=X#>Cd5id#Gkdep=jA|SWEIJ&j4D{hl{B^$Co3+1G;1PXncwDPj%E^ ztV1R=TwXDwdo}xuvPTW|jlLt)o%+qpL<`t|{9a*WI41cKyvr=#us{S z8}mAn6z$(JKSs_bx81^eMUHaLMt`_>^#Onbnxn(PBfh6S6cA)GTBt@ddoCsNK==>T z*y^8iPaW12clU#Xj&%t-enUIob=8Rgk|TU_tgZKaZ!UBWYQ2iBwr@(NSRM5(rQv&S zZzp5&$wi4dZeZ;???!#u={ehEEnzmDCLIn4gA77jz4*Ia@&k#zE3tAcsT804KKkwT z6ytX!K|qqiiNC()VT!z@e;2ZkPTHDsiCm?16P^=tn}-9_*;7BXn@BS2$6@V58o!nDd96?5o#TwjA9m!w zucxq~zAU-N^mTpGszW#U7CR87#Ju@zrFjed%5ANGiAX}d%WH&9;p!&AeJ4je%Ru~r zD!fkNW*>#mebMaJ>N~bV1mL85Azt|d`u4SY2qh^IPI(BDiIX7(%Ox@R2d^ZJLs-&w zH6i92eNsUThhLm%UQJw<-$JM8({}=M60)bx*%2h-cZAaL2v5CE6-?X(8cxSN&UBlZ zK1iIY)XC1bpuR;o#~3^OJgi-qGz@ayYgxnn+Vg!XC`o4Vi+R#}*BDTLKx3-^fc_W< zb`ylHcQ?N*D$+k4JAc~0&cgExuNC9y=%Y3E8`?&7!`S=_!}kbzF05To_E8}S2h-qvLJ#?{@gzqxE>4>tivc9DZzfbmUi3zWf3W!T0N4e`w_KmJaW;uDl;1yCyr{<&Qgz(Ul?QVhPMyHJ*$m)eCjsH z_lEi<}UOvZz`EdC`n8j@`3GsIH#Q6}Rt zX|E2IJ{_30awZ5Nz_k-UAA~Y};zdn%_kBx9IcqE{{GJY-I1xf&tr#5ou(cxx0RP2= z%&9u=?MrJIkXlI{z`p9QA-WKp^=*2tt={hShO&md0pqzKU?+VH7^ghrIIxu7jvD$o zYGcYke~1+$aEi(;Dcj6yMT%_89*}nhqAB-T&S+xRPoJXF<;})p0Pp;{Tog_Bkf?K< zi6Hp|$#)tl&HM6&YQmcnmIwhRwDpdMCNCywM^&s$z+}?N~NgCAN8@{ z_qJz+k+juena+dwP@~U}DmPhH)P1|uvfwMW9I;ds)rxCAu5gqtyw{~+?W5oY(>R-J zrL^M23kMSs-^1OEe6GoK9=-h>APxohaB?>dA$Qs~Vz)Y_@45|3CI=cP*<=!wku>VTO zrDx3f#Kd8^LZ#?;7Q(vv^L*zA<6?8-A7EJ$aKO{w7Jovq#&5ZPrE}k;>!yW_>(U zPkm~N#E&#|%&Bki>p&I3yjn7m&a&lIo)vSbrPAI*Hp*ghzksemyA0>qTw6N&s4+l3 
zMCtwJwDV4(ZCLSB=STJ$ZI*ZFB$YG^oiLw<+QPJr=IHrITCb!j6nl+c62P=Qcd~$E znR2NKWD5={0dIu38h&Sf2e~f@SB-79ot{+R*V>65#-loV?oilkK@U~qke9g{$Q=z-cd7NE1yKciJzu)WVXTvj`OS9j-JbpPefI z%)ROM%UTVO=V0`A=klo*hFN|mx>B@oevq@2UWp?2kK&R>s9F)@`=Ly^KuhX^oi7D; zd36E1C#L;1TGBHka1Nb=*8oIzaJriRD|J(?GXNK@sO0~${(Ab$cD zwhClyinDY-F#IbT>Ln}e$9aH8+@l#P5~f-3j&p9@=)Nn4O^l50@BABM&vZMVS`yax ztrBSpRUiA_9(%yh2YQ+ao8$p&#^d9#~G!KSLTz7y0& zr#CnXL~~tUgkBf;w>9pJ(7S9SL&S|h`9=mlXqwZ9Q0^PCVZ!RJyVCrE0 zBD}&cwas1`l%GHPS=fm`;KDt~?cSs)3vz(FMYG5l&U>l&k?mA=fP!L5BD ztet=8-Rn-1yxnAJ#@?n>`f`ba$g>cB@w2%&T$DA(wXwURvBY^bZ&rx&uFf)cND0JRH~Uw89U+ zbdmDM5ejZ_g78}|Wik_jP%uF7;#NOtMghspaD5~t8IfX29@!fATMA?pOX{rN`egA5 zOdJpr>Xt7x6!ct9jz>u4@;iulE#xfy!qzdkvdz0Q(_$#EvjnxRSZY|p92@O?EJ=MY zo@~@{aXSs(vyQqdcV@i)MWI$sALe=)mbU>_ueX~;!W(pqhQmegqEBIQmlDsJP{a16 zTkX?Ky}%Vb$L@2&h>YyhOcku-x+#Vdnzy-2P71J#o|^uhngb+VPD`0FH(5&e*o8BPC=4;=V@ z*{k3D7&s@AdqtzG`T&_l|07g2x(*6{=61Jl7IgO%&|f0N!vB209g}-*eVRe!!r*t- z#VK+KdJ|>6-Q|@FfuhkxX^G!6hg-Hy51r6SE3MzoP z>%`cAHt~{+V|2`*%^ndE<8poyzaI1gXFS2(F`9LS)GS#?>G6z1f&JplR@-kIxr+4i z!w!l|yG7Fwl=3P^Wgv=}c&2**I27gxC=&xsl`5bjw#T{yVws)Zf zh722>)Z+qpV!uPM^YNI;v6y`G-dNNp2t!-k+8>QPx=_CC%O{O}^;e}bfQc#H!ACr{ zIQwlM9=AYs3U1@YMNkfG4C=5-`Ir~wHK>5xB|&_TQ#q}>&Z5zK9FN|Mqx0-FV4LlW z$RZZn((6mq+`*daw~b@bx~&|7gjV7$X-_cop)>}n`XTwsuDhqsXmXGC_KqNczb z4grji%Uq2Y*6AEe^IM(&a4e#oEhqE+rbyKAl9JGNe!}|0y{AQBR@ct9qoKfK8e6z5 z1E@6uX9@o4Iy6D-eo0kimwH{oas!FuWP_O!S#=&9JDFFxZU7Z=|L*unjvnfRvUHqV z)gE(y@_U1mKeRn}>)IEUHrpuLg=R^qSh52X7x!7Kw%$n5l&*u=&GWk^95?)z7I&L1 zBh5CV*lWMA%f`*m$86;H^OBCtHnj||cR=Tl0G0ORwdgY*CZHF4bHJ`j7jo4KsTZXhnMaHsIxle5e)t}9_~FZGi>qL}+`vk+4UihfQw4g2Xr@pG_{qWAtVV`=&X>dD(D4x z?N1|f$8CKaD3CeNuNP(?Mms8vSt0;RR>epSZ3iTYI-iV%0po3LBT7`nK}y*mE_pJZ z`QKNkm-%+Q;?+x@H)%`c-q&{3`LZ)ntY8Ka{^*>sUJ*>au!oqCCV)>S1EH+oI*a}q z9O+s3R`r0Ox2PA;aC%j03p~oJJww)DQqC_GAqtD4S-Q|jG$+gn`$qHqi7syN8OV$3 z6IUbUKSGP?rRX*pAsIjiXIiYT&21^CLt3;nwzQf?Ps~`32n^Z(xHuXKtUX>&t9$Lg zOPUn;8W?aQ59gxG%kldf^A+spHvgpG52y^Q*|~`D1X_{xet^vt96u4|C7{1j$+gcn zOKn`|q@5Ax0Oc6G=MuNCba7q&Il zl1ctDL({Ppn8NMaiRLmr2847Xx;+1AwHeg~1Qf?6n5C{U0wm5KAU>qfYCx0&n#IF!pA>V?5G%#QuBVxl4f@9-<4|W@JdB~=!cUh&I z-*{&R2j+ewY-mjSwOXJB?%zvP;v-t#B2H^g-a5R!mjMn&zi+rp=u-ciZW5vQ?^XX^ zTJAX|(1i7$&Hp*=tHuA?0}dbNDp4pOJ$e-K>`v!xJiaCH@1sWqEP+UfwUdb#vB;*P yq9az!Pwj~1k;P+j#PUQ983nP1yf8;B0i}Cv(5~61D_#1Ck1rHoDwNBa2mKETJWQ+r literal 0 HcmV?d00001 diff --git a/notebooks/advanced/finn-hw-arch.png b/notebooks/advanced/finn-hw-arch.png deleted file mode 100644 index e5631ab97d0d6bdce91aea4b916a7c7a2780560d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 110452 zcmdqJ^;?_G^9EYn-5rV*Z*jNa?(W6iDeh3*-HW?xAXq65#ogWA?WCXgd;W)WogZ># zpXA9ryEFIf&d$!A2qgtc6huPAPoF-aNK1*SeEI}s@aYreCwNHkCnVDJso*yV7Zpj- zPc;+7N8k%+3lVvdPoL`JkY0^pz}E7!U$OhoO6!D$YR z7PdTDkI-Jwd1AZ@1~it)sr^6?t_ZanvKllLI*yo{a1js#LZk=?jI)o&MM~NK(Y*wu z6gRws5l`Q^@%J%x@NjqcaDQlRWNZ9*frAVbCP#;dhNSp3hyH(UQOF7k=>OUw&}sni zm;bv5p)Bm*jsM!1N+=Zn*QUxR6#buoKw&9ij*$O^1;z(h!~eg6A^!g(cxW={LyPob z^|9lcWFzxFS8!Tv2)gvePPyph`>qP^6(;8osJz{2a zUN7nK-&}^m`{b%Q=dY#S`x8b+shtE%f0yMm;R9d!z#tKV0^Q62OIE|P@P~%~j-Kx~ zy<2dcmeH5n_5*r}HNE(BHaNs8qJj&<)~t2d)~y#8o?6JCixtJ}chX2-+o~&SZ*E>v zA$Qi;%BBtnxG?>tQTQrkD?zvS?_#9AEjWM3SLQD>+U!DniQE8*8IYlC*G<=e7c!N8 zP|YNn4r1>4R?$E{o_?Io`>QVpm96LGr0r93L178kN|ysoBE_s%sakW z^ni==)6;j9?I}`6cT@q~8yoAN{wK9w!=TgegOfPhwXGgfH!^iy_Qyx()tbbi8 z={F2avu2#FgR*FfafmV=J`h?GwuuaIXjvJR34NEhegqyO1KHyOiC<)`7ZWT#Q@5RA zoUi%gQu#Cn-y#!^G(bYXN6LI2_HlI&x-Pr0hnZW{u+SS7wk8E$fjf)Qd|HBvHzvR>4Tyu2eRf9 z2gmPjMc+=v_yn)Jx7BY-{kVg{SO65tnz0Wqj^F-LY{|GLzqE_9)*$B`@r@SnNwNp! 
zfX4>Gn7b+hZ^hB<>A)cHQ;&Vl^rm5DJvdQ95%i62De)y5L0GLc7CA$qwn5-nval}nKloS;Im7B@K9*x_SD;3 zyCta=Gl6h&p#L`5TJ-xnIYZncMQKfghL=4ovr4 z8fikE4z;)6sONxjcQr?#GAqg@K9E@%;r51UCsi?0UA+^0PZ#aE=xdgokjXJ;3y5HR zQFBZ;ET~ORsZ+nC z+t86Lpjy7OUJ3`7Q4CsI|8!uAbDjoP{LssEG)3W8JqgD}^YQl8h|f6}*LG`7OpU}R zk{_UiGgY%odSP3ii@#qze9qW;c?jxqx|_O*4yz4=wygZ=-s%4|i_v%f^@p*aHS?Rt zEV`{{&s)DqHoIlr-irfK24-vL2YK+R1fP%+wYjG9+NP)Vnr5*l`{l(AWc9gTGGC|1 z-VQen&sLcXiI9m_JDHL@=IxfP+RkUQ{DpnB0eA2Y+mnAP7eTf|{pYkY*d zR-l@np${!L&hqxXOCDN|Ai4Pc*qb$58B1l!VOnBAR`1<=_}RylwOBDZ1cKe$OgUmI zF5Hy)G4Q46ac9Wjn%vG|NB01BriMg{XhS0p$F|m6`Mkq>3fOYrLDdRJ?;MVrtMm~;tKd;#q^7-4XRZ5288T$#{93ca(#_|{8VJL{x#2}v}fUNO(mx>kJ%p2 z);t~;yz`ALF_*M8#p->;l2@9gtC&yC@2hZ7xp=Ym`!gKg#P0r97G;a^h2Rk-S0_C$ zyvHxDtGK=UK)v6T?T&2?R;WV*yy_bxExGISwt2LKtiNrmy2!|^9Zk+uBFs!&mX0hf zGafzoWyqYRp^xyRR+NqbQJJVK ztEUd5m2$pCt!GPg>nzx`bLF6tN3hQQrFT?9QM`4uvpeW|$K~nCgl}@q?{;>zFY)K_ zoxtbSQ$|J~-E-Pb3VgJ8`Jk=xw^FyY?@rX7Q?V7g`_7IN1|9>zWXBbe!)i2OtD(u# z`bkSchPbDbeQWW+lh&7^5|p#BX+T)h=x_{&(7%YqCTc)b;YsMG{(&4I?L1xL0BZUrv}YFPt%^O z$n5N{j4t>1CG!W}ZSA@$a(X^fjD6C7T8|9mb`H*SGqo&&8N5>i`Xu^&SD)u7Ghz}G zM{&nZ3X2O$zV9dSpJYCeVsD<`$Hgq6MkYdLSFMKIR<2V@mbLDD*H67^$7$r&<|$%g z=xMs2{^DT+^|Z&RW_f$s)k;bftiO{cuLSUsomL$HT=Oz3FAaEpn>gkZYil>JRlfQ{ zg&l>lse5Cfy_%rQmzER}=AMx_e^h;<@zar2Nwu+RD%S25nz)I2SnQ~Bme~_KpffSi zQsHaDM#EiEtc{wzwv|Fr@bkHNqj|%(uZ#wYBLUSHc}t9a1?gv{QfnPnK?ZKBwGCPR}E?Qmmqo*{`o_I{Pe%IrA4U2=yMfLY0 z^3pty`BG3}W#gHuP14E~LGqWeYj1)(CNdq}mH12%pADFG@4z=4VqVJt>;F76am?*sVaZO zPBYUD%=|mr(q3QeW2W@RZ*l)c%|}?BxGeiZDR& zUs34%66lIewGik^2s=Hs))Q1zxT_zvihFfY*vyGIrdLX!jF1YkboBg6b8|#Ws>2(9 zaDel>1JB1IVhh*2&cj3J&c=ugRQbn*i-DRDIDUF(5#-k#TFJbxSDB--vAnhreqsNj zw9UxSuxEVEHG7!{%55nb%3?^T8~XcMvv8dps{VE+!%<)E@~8TGR*Uigde!kV=7ihw zaEL+NP=$>Cu2}2wK_?vR-K=3=|HJ=H5&FVZ(bU7lJLA<#8Zj zL>6p}i9sca#vKL^`=d450M3az|IKlrHrBoR@|(QvZ1QbiWbRzZ`1w#PZ%2UlmeoPk zG8&11QEE%;Q!3n()wh}OJEe2`>H%sA+I@{~qBWb|iVoo;8$X&+K-7TLx*fNFRUtEcqoJ=MSvPC!J9l?LoEYVHP9w7jE+Ox9ED{R?|`wK zYL_1i>xguA63>tUxcsjgj2#7#yHL*8oW9@dheyDM>iBt;(c*-wGJ5AMNh z#=bvrN$&Zqf7(8kn#U!*-tI1%Wg?`+Ttw)uhku}Zd#vbznz@2yGhrKS+>JP~piUrR zTDQ?Q2-z_WPbwSCW7`sK6qtT|fetO;khY;F;^4U>hM8O-OYjLwzOaL%+kmIHe-6ra z($CXv7(>NWN%dfYypO$jegzee?SEwvz#V8mexx=+<1tg`P;bx0ORZNjE(>iX;qkjq zun-mTbdwhks=u}MLabPlVX+o+cW?If(Z%zThfKE7DQK3gT@>37f?=3mRo8G*G5*T# zVoHp-L9s`+p>5ufm4qwXKHl_7as?mQ32I5$FX=boB1(Zw`Fx&vP-^~7#faK|AeD<8 zSc0|0!C`Q)VBfKuE516usbtE(+AFUp&f>aU?V22pw%gW%opF9pGN@3xy-~iGW1we{ z9Yef8<0$wLG&^qx+D+9KU~N_{nq78gGW#vgsqY^FrMrk!^L
  • aPJ;Fl>y0GbIRO zh}zg&u2^`Tu2ql;W<*lY!K3<2!~oX$lm<#EsE57i!sq*I&K)uP;k(t3B2E8<40bdq zo}m>~`P!z>@vYD%>t5EK%vEsOqn|8L>1^WMuf8e;SG8kzzrnXutlr}<3VfNskz`qQ zL!M-=hK3u~HG|Q^&fufg`3wlyzxmskIRt5W_m77L4tfOZ<AaM|pv|?5pUcS~%5*7i19p!qwLI;QH`6 z?cf}8hh&xAm56jNc!k+%%K!by6VJ)*8woCe){hM~y-(=FRa`%7GGG9{Tnu#PFLB{3 zf7-!Nfh(IHnvSGUKe{DvC&h{2O0}jVGc@qc!$f@$uNEtN^GN-1b@Z3SWn0YHf~p-X zrl_jDr;b90C#_8_^ic>0sw^CVC~VP=#=`uX`lT!F-yRgSo3K?AuXV`^<`Oth124*s z(9QHmSOzhu@G^?c;2RN6Bd<5>pRS2!gk}P|=0p2cmEznG=NP zQMH>8^@u2~pFoSTW9qtjX+N9QS4yuhlxEw8lH*qXbdzwMMxp0My8Sh~D~YyE#NovB z5lio-4C*!nY}D@ePew`;2#o=Djcby&8jt!Mr_Agt3_U${3M@#E9LA)=lP_P~5aLO5 z(!IFT2^+TPvji_CI}DEb~7qSUoR=pO4TlU|XE81OD~du$2gNdtl3i^WMMji~}(b zi^4Qi;?4{6PfA|%D0rXANz_+jgt%Z7)N-i!67bKlM?`vb?8LETG5|`IFde;A*!kx)%G<#@r=cWsN-zS~1 zt?PcaIC^SChqEg` zRaxgLwaz0awy6oh7|p#Slw8{5&!OS~zZ5O&1>TSfkJSH#xZ4rXh)|>?xfPGD2ReK;hEoh`V zI}JPHleM93tOU6~0yI<{;6Y75 zrzOsLzRJ`b9x}U6?Nc=-7k!#Vx&>bg->m_tTXd!wNr;NLs>i_fPLERZm--hO@r}ZS^E%$t!wy()h5-VnMWh+prF-*>LyI?XHS-JBQNrt6h<{E zUSL>BzoML^(96z&>)MwF}cA~6JN{r+CvO^N4(MmaIP3mPsnzlJSH8i#E^tIN4t z1nG;6b2_~J^=%f9&dY&0t&H&^xDdk2kRC58-sC$|<(gID6=gvT2EI&s00`t!UU=72 z&CGVO=m9|8>)>kkSVm(?$$}^BH>o(`fQN~29`GQ5jbyf)MKlMhm(@h{i@tf^XMcmE zyCHGraCQvSMm8x=W=bCOa&tsw*PpHyYlh(E^EBqH|32)Kv4d?}-hE4T<(LTzivY|9 z-NiyRK;tilyEODr?_`CsN6Wi}J)yJU6J9ew6>3Qx4^B~j_eL#<2$tF{K6Wt+)0UkL zl~=m!SeJUK>~jZtN4c#P0e$zF?Nf0KLey zXxjZm=)~nQhZ1*|@+05#8GRpQ_Dady;3H&p?N1?=A%v{aO8s$@AeDaqbzq=X|6CoL zn3*6&eOp1TUT?xOe-ebL-N|K@Qypt?)tdOa>BkI?zsnLbq4RFNk8rYxLI6lv$=;LI zv^pnY;rC+sFrHpi)eC&AxyM956F>PGNB=Y~ZAsU;y(m;-+(bUoFI%<6Vy>NI*oHY$ zS1ZAE8b1b*|MCJj9irQm2@OYb7CkV{MY7WipduxH53_cBBXpJzeP#cbav%R||K&|* z7NhKU9RA80@$Rba9XDriqKXP-OIc^fK1W36@yJMIWaLQ4Lk_g;qYjF09@$PlWYhq* zpZB*qXH)Ufb8|E=x;EjtrE}v#;et^0@8MjZj=gujj7vo1Dm3^1f*nF@ORzOB7FMmE z8$@AzpWj30!wK_@ljoU=)yX3}$S+Cm#Cgln`^1ra0KrId`~4h9Yu3rhszYxz8YGfg&Ei9YZ=KU%?n;H zom`TG;(jAs16se3qCCU%0{%=w$Y(EazARS$a|W3UA-A3Kv@TY33&y-YbdJ|e=Uu}& zbi)frM;azrrXoxQ_8z}yo$NTr8RHUFdk#9vMKy~{Pm27oZJlx^!0ctxg0!HQb;OBr zJ2l`zLj7a?@XYtbNi={oO1q4dZ%p2=DExZC`lo4HIBFDb2lg`erWw316SJh}VLdre zQFa||NtWuene(OHiq1Uume4%;<95qiS`pz9R&rP1o7ee3yvPpV(8fss)-PS5eW2}; zM8#qj$6r4>1(Dn35uL+l=Lh^%`&kK2u;2oYuN{J8@RKHG1lxTw`fU7No$6dZA*3NW zJ{5s`3TnafW!l{34ARq*T+{YKP(x$|h!?wgv-6ClxBm#ai=C`&Yg!kfA3UbGK4SBD z9i#ojayKsE=!D5H({%?ns;q#Cy3y{kutK>LWNIqdCbu#|ziVY|Xl+%LF-Bqz8z~MS z%|0eElmmUlnzz5=I;b1lKG@3r)qkjiZO$UwnZE^R>fJhPG5rc7KWk@n-L8NjC@hw@ zl!0o%aZSh%Bt!^aIz|R|q~{O<#ROVnZ{2@jbDY{R+^KAQ5mpoz9~@z7Fi=>b-n%Px zdKW#w{`7B|BG5;fB>zL>2ltKBr*Yh##`)T_qW+@$hHjx2d`@S}Fd!zPWt+Z^614q(F3fDmx`PPdP`#I5gp=s<+gCXA+IQsJ zQon3JNQbR6*AhUE=bfIQhi)k1A3Qa8N&G8JHo_WydTCTt`rC!|?TYa>m#{;@yABN^ z)ns75E8H&6lj3yuub(=_F7uVi9K8$^xYWyo9II&WZn75XoJpqWJt{!of3g2|7 zFxE_1i+~Z{B1kRoruX02I3r#O1>q|K4rP)wLd8(QMv-5ftynXtpB}h9MX#{XQ?qNZvMI z20nyw=kAe~rK`f^yd?F8?h=R1O3=*wrCFHy4kQdZwKp$?SW>^NCfol(4#eC4rW|m3 ze1HLM&)qc;rfc!n=+Hw~19_eh6>8T73GUloM?rI#)Cit z-#Q4lPnbMdRr5<`PU}8%x)bNK-FA{Q!49~uHbz_m3=du~CmUtWBNz=jyaqvCD+@JR zU8+;$bw+iQ;wO!TY-RC!rZK5eG)ACJ|Bsq5@|^F)YFfwY4{YIG&l2QCGpaVtS)rK2 zjWG3j1D|q?@%E>Um2C(43F8+!gv!DKK)LsE4#$X;b~W|@XVHNUIi{wJzjatu&aND< za2tf)-w7zGVIDBT8y<=|;c!pchP{RA)fB zVRlLOYV!U&?4PSiS@qbpj#GG#qzqGyH+2cKc!5YLoDdvT-+6Xb_-+%0N~|C0o(g99 z6#}~?r01mig*Ne)Ix79eQLgSdU$`y5&6KxhU^2kL9U^xlucC~*|32d|UhpZLziekj zBjLG=^rkbk!+B>~BzinIDXi`h7}i4NRmbA>j(;4`>4#l;%XTFP8mgnZi|c9k$RRYG z@Y-@vUEa;V#9v%rF+3uBSI7LFTKqe`wlZTx8{<0oi4G5~I-9zvGajbMXEF4V)oW}H z{lSrvn=0Dd4u16AtGZx;4njiacyAe@pO_eeIC4MM!N<(Wl>@pwCwBl^1t{Rd3Q5si zW3rR8>Z%pV;kDHNXZ4+=Ic)v9F{4~~{#o-I{JR~N6Tn{?C86mcpozM98@@I+oHhR5 
zUl|~5s3>V8>#H2S6C?0>ELn>_wU{215?oC~A%vm^7*f2suwKmZ)D1$FmEgHp9bFJ( zqX_gT`&GWRv>3Q*`M+9y|x-O(0jB-R_#2uF@K^qv-D7 zIcVEkGv3(Sh!#^g(?x+AC6Av+v$wAs5`QH|j#Okmrz*a=*}$)9FOT96i7-z*tN9aT zAjwa{7O{=Rl~G7ZCe{n(6nuuBZjULt;nCMk420a(o&Y+fPx@V@W=R7kI#(J(Eax3a zikHg1V)HQ`!*awIh3U#0V#(JsaZz-Iato4!UPfO!)NY_4+3^vLQ@*MgraF{^mvs_x z8en_bFoDK~3lM1h?&Mwc!FbfFOweAzeiLj2qzhpV1wDzjmBp2WJw9ALv|OrdzOqat zWbVwp+?cym?SWCol^KbNi+V0aF54Q-Q|-gOrwfQ* z<2aoq2eO{`<6K77_SwS9?B|qt_7B90Nv_@%Z*IZ%`~IczUz`zhC_d3|+-=PR@-@27 z*U#i9Bv(olRdI(WBzlG;(CvLxgpI+ShL;;#6dixYrC(O>u=*X#aiJFniBhy^t#=<= z6bNPAHe9#MXNTf)UJDnVsv@=CWw}VREhKihepm6tz3c~3Nv4kibh+jHlmT1bj)T#m z@DTigUhX|v`3wk7>(g^2tvjzDaUe*%*0hFiIp8LPf*DAqr8X?5_|oG5x@CFk2g9$j zH34aO*^0}g_S{c+{`};JdY420M+2ezf#Bkhcz8j!_j2>*+0UlC_@;!@_Xfh2y;4(- zRt#qCAxDHe0#4c*#x45bDzmWf93PZ)$(W;^I2je z#eEly^$^^wlAd=P;e9i>;AH~BUNt*B=LN1p%o1nn5@t5#jaF3_Rh_)Nrh| z*V@CI-t0ZwTTk=IYhr#}oLk9t=nRfR9Bx#7Lf<%=0JUZ(nBNZ{q)^FPKO`MMA+StI z9o4GZu=p|y<)N~X0N3lHjl3Ss@yx_&`(7+Q3tV(6ApBLD&_JaD6%KUQZ-kd^rGjIV zv6bDzFUaS;*`2FFJsj_Q^2iPJ)TPtCYa2Hcx_+EYd#5geXmmN{De}nidPmg?A@@65uf(t}Y71_27I7geLFGk81t1CY8AjC=^1theq@fq^-678zYI-0-& z?BNTxhbpGBk##Rrn3G!(TXCh}{#E^3YfwVwZ&0jgqwlM$7IJu#d@uUWr-nMzBE}sZ zIhl3_g*V9+Ld;nu*ET|vvKD^2&z%`G(>c|xxk<6#OPy%S!+&`9@KBf|k`si~!(i}^ zbWJkBl+wCa`>pFH%DBPO2e*F(>>t}q&aOZwBgpf4x3>6y%@_KBHG4@JZpjnf57WyS3U=L~1_tnXz|%!&j}|UpC5j)xY27l+OzpArvT13A?h*Rs0Q<^dh=ax%{fbWNXk)!XO(; zxzlDm07)1Td7XhaJH( zX|)C_i``ziL#R225{U$^)MZTh1MyQb2EMqs+clMDS#Rx?UH5Z)#bdt*#O6uVh#Rz^ zt|o4lUZ81l2nSrA_b!)3$iGCyxnC-o9N}eOF;aTsgXk_3@k@VJ=`&xUYzaL!(CTV^ zsbfEV4eGmr#lX<-w7!68T5drQZ&f-R5m1%o0pd8 z1HXY?)R3&IHqZIO^lfr;Ug_DDfrvWHIW;*|RZ4G+;UudRn*&>O%0&5aNye{McfHyD zj&u2RNB6T$!u&=Xy5aY(v?K+k^%oB1Wbe^v&Ojmxa20@$N$8x?WnN;l2CJ&cSC`37 z!g;74?3eN_FHdPhnWgvsb1)yv=45zpt1|{1zs0ZAOICU(daF9)UWCh@53BDr4E~6O zmgqYV1AiD*ej?ekFDcWnf;es>u!T^< zkO4wAUE7oobs-_>2i}N82dqXzLs~_Qh?$vzsSqm;!}s+*w|{48Z*<836w@iLs?iaU z;tWp1JYR?O6<0V5-{)YIFZD#fx$LNC1(X@#t+7cyRPPoG7nY?~wgat@RFgggmC+D9@7vyl z@buOt*#~tc%0>>*2D%UmtgOzkHJ(NedZPD#bf%tGEqbD z3mL9+$qs~R21 z1AEdBH90d8E!eR&*;I`GHY#|ha?6~yU5u$^QS_-SJ%`7P$&D78-9FknEZyADj$(7EGf_;N12}6+5_PQXePe;cfDNm zevwS~z5F(1Wz%PnlK*0w8NxoFAOTYW?fZ zMNzyw)E}`#obOXpwYJUJla^nq%zyp)1*71Fy+FAFHNoc7ya&qIT#{Ddi{Xw&-i+EH zpm2O`H(MPEufU?Pu?aUj`?cNchJN=TCil2lwkX5a&4$ z&$zg6jOS#byRRmhud;WmMB8g(zxbmu+~Z6oi{3UaLS_ViAihTMW7CEt4yget$B~&> z=L+#j8eFsP7q&iP`7$_C$vr$=B43KV>asS(({gO*R~9v#60?e3W;HWxanWwIo%Ga# zw6U_Dc;@bw^a;h9mmo7cGAt}r+Nd9Ap{jR9&d{c6A5p^@J2lp~WGFUEkh;1j=5olS zA`8ZH3V;hdO&oC+(z-S9IwRC7E2 z7~sN?#3$q3Ds9*jQ47n3F|9{ZOp2E_`Dux)*A+W8(`;RO!8u53i%+LvO`U6!kO}YO zR%4)DpsUUrmS@pGrnieLx9yL57X!QtM^{5cI3|ZLZ+nkIGc73{if}gkk=_a`KMBr3 zbl5vyMF{^1nf_Ss2!qpQ1&AQ=z?Xb--bsBZCg>|#ekp~LF9#fTYGQ6maFoXs>G(kg z%>IiO-8aj97;m-PykgFl1?>yt;^OZa&DWP^B;Q?BNs6mw)5xYh)g!59lyUc7rR)*n zXRT(e@QAzYwo~T0JCVrJ2dqEj{uD2K4oB{LLADAQWxvVWa)m*9ybjnLRZVV+jq(L*L^oH@&cI?5~_I zX^|D0ieJ@0wn3vbbUZf7ndtVHXg1JHPI)bdf;Us!PSQ++)8tCYdB#UgA;-$;rXF}N zCOg{K+h}yaQTpgQ3h-&tVnuX!;z>$hi19R@h*OAy@)vJ8i{Dr6UVYFv9QmbB`rvT+ z+MoGnQk5vacJwarG>ktLH#GQa~h);cd`P5aYcQG^6rL5qGBy{hxfzOgP)r_FVw`*qijV$kTWV4 zQl8DG@YU`_N-0$m$Z_u**$vFif(0yJA9y zY&BJTWM?DO++CN7$IXW*D=iNIOm^Ka_HU|C$!`;r?_DcFX;bYLPt!lhhZVi1{soX2 z<{jIWx5x+J^7#^`dW=&0p8QRmUpT5gCCg4u7m3K_%<-H07DGF!dc1)r-XYl4;f9O8 zZIzxu#KP|=?Xxy-AoZg_PQZKV*ljw^HCIQMz$#m-RZHc$#lyav{_`z+n5FG*adCc; zMFzzE+Fb^>){iW=j9y0!Tw)cz*JdoZlD`j(bX|VZT+qkh}Mpl6sqOecx++ zm3j<1_v#sKVELv+nj>V(6ZEU_^Plm@X$e6cWM@6`dp*D1oF!395x}RSuonYs-yDa= zY?hj>0&3b)bEBMIYI48L7RjJ~D`WE_Oy@cZBgOA!`{shYGBUi|_p6T56V2RXqr!pa zm`mkVJBeB=*?3##`EA)rgr}l}2~b?oJ`Kl+pY>wYIj<>!guC)Gdj*<*h(w_Xphf5H 
z&vFOFVhP7G8{|dr2JL+ud^6+b?nqn?dpV#-8=r!5)upDw7mgrepG&EG2}=}94S(#G zY&GsKdx$HamsG4Ad<`J@wn~(Im%U>H`Xmoy<-L}eIVdMbgpEz}&M`8NpgtsZ)Wsp4 zi>t|bJ#Ad7Seo5+6A1#SX{-9}y2#$04I!2$-2|#^B`VzzjJ?oZy=y>tF>;IQ_35gu zHDn4d@R9CWn3?Vy()b#z^vsGqtWNAzqFQ=xgSdtucMbn-?g*YcK%EX&Wy}mrbaZsp zKh|#V`_pRHCnm|(IW^MINKBZLB$Du*-7-8AoYIqN&gG4X)lD<~7@ zYik{0)P%eq!N*J7B{*`j{s$3x9%CJz^!tQ#GZo(Q!eT&B`oSU|s>Z%lZZM_`gLU`; zLBPci&7}L0`ORJ^tk%oyV=hClaOq3qc(&(f{=Jm**LeXg4FaqqXQEotJCXi zwETrt?@&%3buS^2zx-zK9Q#kNVVzw zrhstL&WSkry`#X`KWMIZ@_PLZm6<}ga60Mjp=Q{ca~$2YJ`07_tkb>12lsaX*&ps1 z=~yo8VVRW{$7=Xm=}q1r`nFW`lDc+JfL|33F6lh)R%4xxQC`!1=zdF z{5|6LiBT7?8y*iz;!vu5l4M@sY{?S!ZA9Inu$)H>(99pQZtr|Nb#?I{`j(XxH8*CN z7%P84JVnACX+nl1BY`rUJ-knVe=-qGFv_iLJwMv9HrC#c-m*FA6+l3WCiEDB96_#b0tsxQ5^1DPH| zGpaq*+^smcL(#v9xsQi8Kr6CA)!AUm62iKpY$+AN@MH;!p$^=Xtn!cY5LeO|5G;CU)l7_Ms|F#( zDYGCOq89!5JlF~fQ@x`TaZtp=)%TOM*eSu;xK&6UA&NY{&+_C~EK$CAJ|2U3b&OIi zzLzt@VjYNQ3RV1%+|5pxvQPnnJ`ReSgK2*(Xww+FKf5oJUmH4qOQrxHI>8`s@p+!Z zQ9+tpy{#&AWP>ZxL>uOWTnsfOB^4b!Ecg;9B)_h-P7~nx?6C3FztOaj9cI4XRV@2i ztA9W?)C&4r7$)3PHw11Tfb-pol$Y5UQI{Fd3cJ&k<`}##!VHdA)rF+psLy&)u zO!3(ee;V*)$KYg$fw*hQK%PosLeqQkTh(&cPuzXH)VLHkArNr&q)U4>c?r&T0(&3` zxmUW+!$dd3TMkMIR!~SL(u+`-Qvq@4ZNzS$(+Xo*iV8OnsvVKR#JvpGO&k5a5N+=J z#pIajsb7^NH_@|uns0VTrMO+ixZY_a+^@O|yWKFaz>t?|pgpu&%sm3yP|-F~@PZsO zIB$tQ)CNA9U^P3W`_Yse3JDnP)=(IorsZucD!Gy$%?cZN-4zC|Gyu_TUU1RuHo1n6 z?8kiej$iqys@te0#LV|%`4Zd!ndWcRn+cjd53~0C_X@QnKgi%SI&ZNB*ZZV{%>*~U zX3TxvO@wElc2Cf8g(??fg=PhTnGAvjhqLHAwD+nCoh4QFRLntsH-{$?>_z>=MOda1-;Gwp^$JYof@2Hr zE&8}$yxR^GmVo>J4wH6>Z0GIefkC4xUC94^rPV@|M8O7*yv(mL2E@1W&*$R{Ez%QLn3hL|Y7j67|{J#S> zFr)bW(+_?Fad8;@t&jHyaE0pq(M%~4JUkT*&He3d{C>c{u0Z_P6{t-|Zc@@*0ygtc zblAAK8cIs}B_*6(To;Fj--SpJdutgWq&Pfh|c++AIJu58HBhsgdjpPX0#H~0ztGGg%1 z*ID3L2>g`p;PZ2P3yXs|GNE^=Ir4N#RaI3vxzqQz7kPR4I!+9*ayb7}PMfNM0W%BB zj~LE;ZgzN^c@#DLLP-Bx=PL_}*_+taf%>bH5ccyT|!cS_0Frk0jO8eeg+ z7ETswMXV1da=_RLZ3eT%P4)h79HK5RHz41as;Vk_dU}xh93BzTXityhW>4Tn83hLU zPx4+2=c9uIkGtbJJQjma{|{1nN@z#|tp8Mp|95^~!_ZJjZ7ra3ZhqeH{q>1&8W$T| zOi_`AmiFOjhIYCrZ)>((m64HAPfrgCmyz51zN)sC#r2=1{!f?PsHAa|6BF}OQ`+?= z17O8{G+%==O$G+8uC5~^BR%BONq%9|(<(?vQC;2-tY5$Wou0-dxw=1F5t>FI1cOro z=6_E7irC!Ttf8R+bU(`!@aDwA!h(Sr0FP}Q=iLNrK@fg!&(ltyb68@R)RdPfWxPn23vu*BW+{l91@DtKU&V z%^(kqjYVf>K2PNE+1c3@Nq7Gapfoo#!@n#TWJLiAhq`AAh6MnBT=+aSA`l?n0c9Q@MPB5Lv z)%7(^hI=zvcxt`Vlg)2K26hQHN+%wV$syWL@U%rpNCLif1fvzsm;yN4}>CngsBzRpfEd2Eg zvgB}koAp=6?cK=&pppnYYAUFIz{$|@dAg=%VjBPbd-rgv5Dd~bFa`GKdN0_R>gwt} zgr&2xbKpeZ#KgqI#ReEVX(=gtyStq}PnhZ3KP6Hx*1L4;O+rILLZYL`z%w{_4vUJ4 z>J2K&13zK|?Vpuc)xUCb3jVm<{3RY`KAt)7=Z}Q6G|%@x(O}Pg>xKBWv%L)o1tsMB zd~5jJ5Df=a!u>0#uCvqUY`M9qsYy*uZES38b#*l;CS4>FH^9c6KmaPUfqDUU&3$8Q|a4TD;_(uC&JG{XfLLby$|&vo@@BmvkxJ z%}s|$cS=b}H_{!_Eh!z+ARS66jUe6K-QD>up69o}ckg4r$M^5&pXz;G*P1nR&N;JY zW@WW}|85CRuj^hsYA7qmW@G>-;`#LGO+b*6FPp^9#^&zniC$(5`=p_w0>wWwKQF+^ ziH$(DKLwDI>7T8P_gh?CtgdE%{rWY~`Q;DKlCoQA53Lvy7ZZCS-?m~09LcA}UpLWf zgOP}>B|SYo02?21A(T)K$d`+)b6dZD{Q`K5wg9v2&tW#0S6FBUuxVo>gWt70IT@da zxkB%^|JMkN9+-b{{bp|jV8RbeoMUubTU%;sY6l00kCdRKj$#~Rx?gNhxXSo1*0BYR9UdN@o#hh{Ain0?Ux4zH zgZW2#EzsXE0cym?#@gH4QxLxs5%J5+^jT>SVrOSZe=UT9hW2O0?&D^d@O(emy3d4M zTwHOFnPTVX=Pw|iEG*m~Zk;VGENpF=k6ThxQ!z8NffktdC$|ABc%)!v_E~OyxvBxb zf^3ekuK159mD352i17CEA{B76J6-?#l|f@LjT@b(bMd$5hYug59Wh>zC#R%nsi>%E zXk4vygtQc$TD1AYjuk3?va}?B5G;P#ZPJPVR=?S+s;*8$Rdue#SD4jv`6?kH0R;sm zI5?Q1G0M}^vjuEF4Glc;Qp?^3Kg{XWzwbr}9hj1szrex9zP`E9($p*}D*DWcfQ)=J zQzNOb@7syD=;7h8z^v!#+JdRZ&v4Z*O3)M=j+_>BKN_a6+D!*5*SfJyXjkZc13YySu<8 zA|W9Gv-%80NJJzlE*@0`nXfc}Mn@mopQ!;#pU9%`bA4<8U`JnHzdnR4=3hcWfal@q zSyEEc(cUg6FE1!4C^_E_9D*Nacvu*TpvO6IUBJDABn{wns$~W6w6dO_cX_poC(!%l 
z0A+xQe3^D$egYZ~R`ubhbqc>LGXw&u+E{IH+-mL5OF?)+0WZ$X#f6JK(o$qX4|D4I z&p4Mqall8LTUtV`#3v+BUJ@FZnJs}|gnpr*qM~YOXvojc2g?3wsoBQLD((7r7}={V zpyU8E)YR2+ad5yV!otIU(9!}}7KtyXtgW4zl%(sc* z^o+wigXfI}SSs)*B(Gk@r=-k`jftkpypSSeU|;~RL`5CAzd36Yyxts01Ei9|5;Ys) zoey+pV#IQrk=Sp>%D2n@K+}=&m?_cfr6eU?w}-OsXZ%vlR9EG!HP5(g#^6w%(EjkO23){3J6 zKoax7qKk`*8yg$*@+fI&Xuz)0e#^>Q3Z)JMU;;47%gd|1y}gqSZ(?F1BO^l+r4-sv zj{P4cG61Ij`Gcvh?m}p|AlNYgL_qPtwrFc>D=RCrc1=&abuOHrZ4E6q&rM8R)G#A# zezLZP*~?Hd27gRVq5P;H1R*c9xw5}UiKM_Q@_fb&nw*-#tN&J8dk!!K?vKtqSjscs zcf_EjrG0^}uv|)@9DwF`pZ|=y1)R%z2(}ggp??1S$qihx*EMUEgp#bRl~@x4&|j;m z@|5Ic7+BbsX>|1TP{?0ac-Vmi3=Iv*RU>&}qB^Wd}Lk6UUv-KL$wLs9g${QmvBww9B}%G|;NNVd%#X0TwQL<72vazNz8{mk(2 z3oBf*SFb^^(31x{ANc+IcXA2}8*A%wpl;j4WQd4}K<6-jLx2&rMIG!;lmL3s*4EZZ z*tYc4dvv_g@fOt?^$6xq3ySn<2%FYg^NB8@b}BIV=q4CG!lOoJ}%uvhi)jg%@WYtq@^gqU^uD)g-61RWzf<v4K?uRa%WF1}3Y$6knIsNGRn%~RHO91j1zQi)6d1Pv z;1GaL11=@nJ@q2E&S68wj$yXRwE{?|p@FwbqVllhk?IvpQLri{bseEk2+$a892`H# z;iaV~5VIvQ>&^m1I9zD@psfu+5CI7ZYUtB)VAPi{!Or0^!cf4@)>)wOvlA010d-Df z)FP*(?Cj_ONcK)%9`*i{nDa+zX=#Ag&d!jksAZVf$)E`fJB+R*7~09%+57&w3Rqfr zYv^RivRI}Fz>beVOVYSMHQTND2L*M7R)~wfS$hszjs2kxU$-9IKRn3F%7#o5pI=^v zhlT?4t*owQ)^FBPP&fp!8i)b_3-fh`g;wjJ14FG^6ajSsY%Vo*7Wf=MrNph_$*HN~ z)Z#edUw#Y+#_Rk22Hx1=InYX>1=O@KKfj^A9%$H0#WlaYi{QQkjZBY=4pb~NQ z^z{L~57?6+B7RTG?(%G{k0=1`bvr@?q4BN&s{%BcgN;oY5OY@6(0{Riqc4eK5jKFA zW(fmd3AGT6I${|GVWhb6%Zf_gaOb|MC7Fu<*>{sF-A|tbKrL) z{{pkWukY;qe0*di_Ul(&z&%4kx|Oz|lOBPY1LP$FRUdG0z#6cGIj=5wEaS1B)!`fA z7ewHwc6N3EsR!{ipw1BRu}n+{KW*kDoGks``hea=5M4rNUJs@7)_C1G7#J8BR$u}z zcwGMk>?0{EMZ^LlVF3h&4Erf>^8?8pr4=V03mrN8R+#7O87$Q zg!&->FE8YCuc@x?GE;5x?%g}UGzgSQ@CgVIbt>!Y@%vE@Lo2OTrrtu*$zM(!yFz8E zk&uuKW$?p~#{g{1$RN1~aWa3FkT*HVR(Qca8^qMJ&AQeHA>$z-AXHXW0pK4Q8v__I z|L2csC(+8G@YUDY7TABK{HPH6BMii3H#awcE^iHGWdU~U=l7DB3iFiY8BT>^NTXTk z>4C)(-Xh`O`-0D0Rsrg#$Hc_M!cqYo4xmhRpO)TX-fMj$REYg-d0n`@y=`u8{^H9t z=RNWc(DFLl1*oJ7y3sY%(~4)kYnB97^zkFev}`6aeGn59YlX$u(UWe$d@-f&k zixOYw7#)mu3J2;PCZVQ=Ei5byXw2*ntJi`aYG6HcbF#On>{QQbfygfb0nhoEBt|VL z=2Vb$T3=rW4h}>giRxUSMO@m0EmO=)sM-t0O6m!B#^L&$L-D8R!MO&h)usHBz%@!r@J<241@a@VxA;8ArN{tQ;Ca@WXfl0n)6Pm}7v}*@}%IoOb8>P(;9Kcpw+j95mbWAU7G>1^ zIV_+x&ZJV5g%RfH)um8-yu&@9OX&CvRCHb;}gbD>*#?HY( zP{;5yqk6V_;2V~K)!df>>YFa$uA-{?%BRNI;RRS@r+!&52yPX#geFEup)^Pl0z>`$ zL;%k+-c0}kKGgv5{KKRVmyj?Ocz|Etceu^`nJ~|fJ<%vJkdg}h_U#P^3_A?SYiejr zU0%AV(B0etqq@0syE)AVLGCaD;5s~gPlI z!;SG-v6<6zati!`CEv3FrrO!rsYE*tcn<*Y70>m*|2Vk#x2TF4@dKq&1Xa4M1*SU9 z_OZcPcPTum6Vk+ac~wuLaDr**#c|539w;|kd6o41NQ8HkP^X&9Uyj3^2*v;2S_J_ zU<&T$W$O>O*;)$#VtFlOPjIx_AdLWGMlziGmjBp9d$Jr*3U<@v)KvebZ+W2O?m6_o`_^`{f1 zD43O66|6auKnc|#QvZ4Nv4Cv;D{~__@V+%5L<6~X5a<18iO&zd28+di1(N*#wSwm# z`aJ)O7~228kGv+z`Nr7UcTv6zu}Nwlzu$~)X~*vXUr)dRz81gbc^%~NY8@w0n|Ytj z_`X+{h{5t|O>>!3b#KYJK^XfzZH;oB6->{9BsTbJsS>9zn#Z)RA~L* zPqP2A-+w>+MluDKF~K<5Wa`x#bu%Gui-KG~Vr>KEAB7OA-1Of^*-?rP=3W*a2Cr;8 z(_gXvvxh)Ze^{Pn*aHXwx0v$a!GKO}a*G%$V(f;G2^lhkz$H7~MtfEN9E%wEqyFb_ zzX$3b8Ahyx>MM$kvwOH+(Df*8Ilb)bNfe`1`z^;^O-ubGRG-G~f9>hYr_h|7yQcb` zM{)>b@8p7D1ZhO7`>+se*(Hlxv;YRMvH!W>*F=U+5NkP<%gH8p`NR6sbCCuknL67K z-JR=PZ%Ea3Zb$#l4EOA2lVL?i?pp`Qup`I7+FKyHY+gpUIZ+VFw)mg>Q@>NWsCpc7 z8m~y_q4reTybt0pUqpn=bbfDMovIS?37*}hc~1fz$B{*_NB=id$bP1mqRKaT1S-jx zMKCRlxa-H6fr=NiJ&v#R|7}6aSYm-J$k1 z4?q86!U82DtLvlbL}B*E4uSN~XlG-t2t>PTRz_auo%P2H23!ZWk+A_y-@k&w`5&dw zLc|?&e&n#eq6#8x1RmU^Ui&{ zXj{X)fkxergPFU6vFCmtG=E@Q4r2E8=aG?!uxIIiU^iKT~jW?e$LdHy>-+9UFiL?1JaZm*u3#g zubRoq*_D13b$ZSPOOlGt-IkB7soDR4(El*u7P4mbj{9XtLk{!gItw`5S0x+lf)z}L z*?*2RIiskZ?^-t~&26&8Wfd4aCi%&0np@iR%dRe%kgo{bsV3fxCgOP;o#~8-PWsUO zpbTLscj2o;p`}V)`=+O9A#sxC6cG;;QGsP6e-Of_6NaDRQ;zG`N2NqCfdK! 
zQ3W_HSXdboJf~BLr2iMqT(yEQmZP{I8th__q^M?nP{Q3d&2@2tg{Ok+avx{?&KSRL zgF2!fPTsGk2E`uzPKG0t#l_K|xm?Gmhn0*pg!5%T;<#>~{1A1u^VZ`UJX*$^z<#c< z8Ag;QFGwpk#m$|$%NEc>`AyA?+G{AGXg zR6F+1Iq^6;WP2r;wR}6SFY=0_&?1rhY$+kdDV+ZWiY;Uk7FGze_Y`(R?|s|p`L#4 z$nxU8;=g@6SmgDK1636(m-Hsv{W}3lnm=p7JVdC8YsX5Eeui>X4?C&PnQ^v{;t|{7 zr8(prT7Tbf2?UJoAsi*8t&z$&jw&OjA0zbPyR8YZenKF~^aXNlanaPsI~e zpV&hGjEa%c@2xbGu(J-082KZR1WA62a6EcaV9N#egt=e5-P=4Fh~V7@6RQlj9(uzR zJK{mk^^5L$USX5AK!ev}58cDXlHK=78j0?PraNB2Xhivvv~LqD&v@xo7CfUo3$Jl} z_DR49mmqWEMX$uA?10JsbL-RD-%J|38Wok310RgnM7B)7jqj-H(v92Sa=nR*ias4y zRbu(QlB&eJDlj0=5ojs!Y7-7kftrCTpF>@Fp3b6S3w`Dib~kPbc< z$@C{Xu_QR=9_6%{D>=nRSgM(N=Z==i*1igXDM$m&8Z^fx23Fy+Boi$2PTf?B>qna+&lDkE5%m@7 zP?w2 zFf;nlNIiQhYdOTYbS{zl9ycRegaLo|6^&Hu*xZJ{*J zzGii(Cxm*6sGjFeiuDiv#C5dKbd_W10OyIi#tuEmwD#?t5rH_`_t`?~jaLViT=RNH zN7;AfCtsIu(Z`iyhEX@*$18gkVp**R2z8J0FgqqW|1>TvHf~hAclPYdm;Qs#)LDeo zL}j@&Pif;ATl53cK@y^nbXK_@yxs=3TdNoYUg~;6uIK7peS6KD=B9X!ygL2Bi2OxA z@%?pLc85!6`a|)ZAd1bW+<#qo@E-{ULKKgce7jXU)?ybK#}wMXv}QI+Tt9p^txCN& zrZeBqtKqZLBf3*~>Qb~+ra!iszs1I#%hDQ@2@-ca8bhQdcYaSTVK2Y@Dk#W0ftC|> zE`1{nElb!{TK;ibPr7QLFtf93gUf#_hBG4m9tE3rP&B$dw#phV>4DxMUMHIAWBwb8 zHNKCp+1KyfcvDi!CAeK!Qhpo<$_~SP`i$^QH|0z*wA5$#-a&^b|b=nn&T-?(<~Z!rGtvh;T+wkR}&T0Z~5j6X-FRny0e=8phQrr!f#C~9$Trhp)B;G z;5Fiu5i;t=twej}+(O0N3|~3%?kEkiqFT@@-!EbHnrRXc^z@e zj?%;$KNRG8L`%zsV>i`&f6EK>`-fORj0MwDkU4O0kR0~Po{r}JRnoMClb|+DOj%W# zn)iygaG}0Osl5yR`T3JP#TLeg zmb;?!>{syUfA?eKv?=JQdph1plX;B`FtL4tYLSQg$G6J8g!G49)K`i+Jn~U7MI
    |E!^TU!U)bRxihCMkPT+ca%)?hBaRDtPiP)3JflSNf=9HvQ1#5|Kn-lN^j5P-HkQH!YH{8zhHCKA#p3swXV@`!KfJ2w-cSpAqs)a22llCr%gNj`n z!W@l`m-h#rlPhF96#PsZ)N_Y;dQ6|<6M3}xTT0KZ_0jP%_ACn(Gx)uCnwxFR-#Vb4 za_Xs(yavr{7rl!+L1nqo@vb@_y|gyX6?fjm$Bq;W={*MW>QD3icJd)zJ;b@~XXqU0 zSZYgQ9pPn6W+fa9HK9@ehZV-_qwZxDm#^eeT9q>Jd zAlz~-zy=*@rEEcrk1B54?B^2_&}61Z-d8z0g3NZ=l9a~;M#TU zvoS+XP0L9tJf>IQu~x_Q8@`88L>D>L)ai}!{oAfn4!a%b^%_jUJvR)8b5FC{6 zgJx>E^);I0tDxI{FP;8ht@VMF zI;vo2lbOxjzv0i|*?oy2Bw`nl;oY9X>13&A{}q%jS!-estP&j(I78!*rZm?jB9YX; z4(2o)WGKE+`!GaDX@;e`@U%ajzU!qPL&!nGp*kC>RF-VS(j^f_zBXr+jFB}J$Y9yi zI`Po-0c11G@0lz$ldZfjK40eybk`Ce%zMip(wAYU3rgxJaDRiMle&Rr=`a7J#gh~r z60M=5cjWE?MH?TOSIy3D!vYMbaidnmT#V~995o3yJC~Bd@uIl6rsY6&A-6>AdNcHs zvO8|NZW8f+nX9Trhd(cIznrY`55lE1sYRq?B7l? zPWO&jVQ9x7sz(`mbw*`;V80`#+F!jipb&rhEtSGBBo}JL6X~dsHiu6)%1-! zJO)H3=RQu!MpW)b9~yw%8jErVV>ClfM!(5f{q*BcRMuvYR{_z3no@_qPWK!Vk@}Hf{=xco8gVSPov9EL zp`c?jVIrqlpui$e0F*pS_P$=+`*Cb5Y6iyQ-2@ohz#f!(%G?W-b^JEuLH&z3A{2kZ zoNPigkYuZhsF=V=O4&5tG2RI2qqXi;khSOh8yRjNLgehtvaRC+JT7mSF@CbqTuOpXzDrd`Td#%e zBcgjHnv>ZLN%3<*8Vdv>Hxre+HwVyXW~UX+>nH40a2C{>81}YpQhm$vN68n92JeDIA^CF@5IKuVRrgJZ zgLhKy?liVTX3I83Z;3`MK-`hoj#RB9uAp;k7ppX3eSZ#eFB4nm(6#aN6iZgIF~}H8 zzN+{7#cS03Z3Ac)$vh)!I-Pla-o{(rTVqN*&J`I;rY{3jLoWiiVl>8uSRDYxt9NuV{hBdYIztEtES|hj-h|jj(#{&P}81qvRw57SA z4ytTw{O?cU9ENO+L=|aEx*V=sWiJeWAd#K=<7|$6>@gR|$&|!^Qzg;qy%@xa@b8Bj zMK&o44ge_(lKk9n;q8Xep`I=vYg_v7m*$b&ax zTb%3EO~hLD5`XDvzD_fg)JvHW@#6%=l&bnzzO;y~fGS!^2RDCAJxcV><*8l#Zfm5T zE-^NIP6IsWR6*$CWtl9d)^Q;Q>FgpZHEE5@#)HwEc<9y{EsMI6K$UIBfw zV>5%IUkX-mGCS%zI4hrEiLtsi(2$TVhK)6G*>>h0p#ygB zipfr_Xg|^cI12!9rt1Cr2q&$pX)`rU-`3KAqdc!o)?{YNGFf93!zh>XoubHJS2^KQ z>c%Z9degPQNdg8|eA_gRgecT!_vURP5`EVt0l>jm2Lc&2!jc5ml8<~cQ(5y$Iw zJglsBbGF;KNg6Busp-z=7CUWJjt{W7`npKm`elcS;qQ#!g0;#=Ip~zfieX*M@#}+m zQiWoKsedOxi*jX1+~=gtP%sqw-;&im{n7NKBD3T~>R>TR|0+?cPA2ZCT{+X-a9*@y zPKXw3!mr5ieV)&Eb9~YS%W8-R-UQQw&1~+-wD4{032`O;8FG^v@un&X-2Ku=UeBbqIAA?_ajGg~Y*j<)O!N(F{bSpWwgTfFuT4~n4Q#d@L*}aE z`thqR#_;i8PfdV0V2Gi%rS?lkI*NGLAVa;9*Bz|9>`AsxCK zOdNDdV+95?T_!Ah?ssMopPCLNHixl{fl}M7xqi_vsX|c_O1r_E6s|*Jt9bX+;%_0u zP1{d|z1nevxLXSg?FvC~jkxbdf3&}=StEDSL)T#O|}=iYZk->p{_>#a-rLN2WWv6{tYEvZ*ei6=b33nk+3W+cE5^2|D-fR7K7o3c?G4w_G-33!rlUu`)$YXq zBlWW2gNDTJF4NJRLNb~Ose){a1}bbFS$>~p51(-MVvs0>@G`VcEe*Ey&9=?`rJ=*W zVOLT0co(_hje4VMhNG61>{igZF)8kztfSQEwbAByq}l$BL-Z9L{qS5{-F3Qtzj+7M z@~N!oy`E4*VX>RcPN4rSD)i7+PmIGGSmO!53jckQw(q~Kbws^8&f=b&bspQlPbWvd z5Uxz%?x_sW(0MCn3uT)Fj_-m1V7tT;71xEQAO-0atm>zJnf{Q^s}e6MK~;I@pB-IN z68}dlXra$EF1M$svqFfC+jBiJO=ulCs^RY<4IX=JF5A2c?n}`>&hUPH(mr->FDVjH z#5?F{8?KI1_L7*3i5IF&|29TQv~?P+gau5+*pu3!yr#>b$l)4Su$nT|Go92T$FkBW zU9615FPRLDU`mIWUCFm#z1yUb_y+(DWlC1IDoem2=9nL>f~aaue>#rHZvc)hvejxH|tPQ=W7Kj3ErF`m8?86b~DZd-b91Xa*;u?qTcbY(yx*drm;q#rWje*tbH(j!Ib>eXeR0 z`nx~hf(OD5XXo(j0*niyVBGBr^>gyE)`l|L9ulUY8q8F}!&1gjC%LX+_RGw^ z=K+!LidtHn93tfY#2jU0j-vWvr7!Xp(Fyv_^B)w7R!0>_XEXrcp$Z&g3 zj>eo)uXl(3)xr7su1F_Dy$v(sD=Tc}%q-S1y>2aLFNk(@|UjH0f zJXu@bqDRIz&_3PXks$I93`?gy)0NG$y~WK(e_d%I1L~phmm)VQ@ygDE=}u;Zd9=r1 zLW>tr^O=_RCfnXKTy+JdT0AKY67Z723rP!=x3z1bXtb(=;LPh< zeLWw9Q@;+i-PEYnI)r<{EojYy8|UPDaE!drSLyCK6e%>VgRQ+Za<|9^nagsxBMc*1 zAlOC&L~e7x)W0&cHfFj#0g<2UNFOBPpNz-v<`eZeHIzOcw!_M*mkH*XV5y-9^K&si zpZh_=8N4nOSu#ah)(xd7!Tb86pZ(d0hvoSB(*sNqFa1asppc+?_@4vMCu2^kYi(Tt zd*iy=do3Hu!f*Grh!e2Jyt5>L37LJb3KJ*I4Py)jK+VR2H)pCYj2b^dU*er zzxSqDt8^qvsNJmLC3-0fF^eU3H+~TrbI7@A>{yOl1Ny#?BPFALN3jJVP5MHVKR)P5 zE^X!qltrcXC&K1n?QLY%Q(-i_W+koMuV=58Huoq#*g)>EK zrs}TswttDn7s6HX3hzTFcR)&Pl!nukcT(r>e!BpAL}3z|Z8A-3SEr-Y+~ivY%RYyb zF0XyLk0OgV{0aS7uk54h=SM?_Tlv}dwgys|Xl)4N)S(lY8KAFv*NXd7#T~D=WEK>o 
zhAaj{y)L$Y-nw8?J79-@W$k+D}TRa#86Pq++fk<8Gm+?mTPf z(~~guL3Ho_d_Fth-kaP*iTA^X!@SaXzXUzAA=_ZtbPIRc+}*d170hDPGoIR^aNy>Q z6xd&Y-j6IVT`eFh#Eh-#QD~p&?9THLOnhaUC#F2~!Ew2NqTANx@Z}}7Yo3_a-w|xN zvyIn53FKL1Or@vFOFa?+WcvAgSvvAX2UdfBrJEbVz*JwY0@BxM3khkeSR`2mO&Xsw ziJ81cFWl`+#dMTqL|&EC4xQ$V-{zt->ia{uj5n}hr*#uQ=H?oF@M!0R(0zhZ`>S~H zGkAc^mIaAu>4$&R(@P<6(*()p>IW20T3_h@ehzL|O3rSG&Yo#MCI&75UUW$T@qaSF3;-yr`)&5Q-Ff1PVPD|Sgdu`p>s3BdF9%?kT6Zs(8uNnt9 zreMqBNM$8E>eB{g9iQSS)u0}b)UvqYhJiDI%_lUonmi_l9rZkbh+LLU3q|?Mi)+#zW z^49+8!BeX2sE;B!e@tI$I}GYZk73ybcxT&L44H^&(JHC6Bk9GaTzr&CoD;`1rAK+P zI7>cdRAC4$K*(4K9V)B8W4CDCvb0eMXID1d?7b5+<@`)HL)KX0M>zUv3W-_f=#!9N zAA(RF%JiHQm}BS{`Wqs?cW@(v+UNTgmFT|qr^A?IsI?HI_xX6NQ6P`U+jpNc{1OxG zXHcgCdxd?=lC>>EpOo+qS9WnUPEv|INgzA11_s$nZ-%?|ba#JA?!{nMkS#rznVf(c zELLgG&%$d0hCI|9YScQ{Xb$P1EyRY`YRhoH@B1 z4;vP$mW`aris&%)>55|)tuTuBO`n!NIvHME*?&22;DhfH?@ySXamZXD2vv<6kFmd6 zI9>%wc!u&y9E9zUULj&vzO9x4ok# zgtoOYEo>XMb;D-WwBliOc!&8(H2!3t`>#wt5ud+GR8S-IzsMv zcX}i>L}#{^R6F9}K21zmkDHGh{xFmN%G**J+|}iw^6NaoF?!AzRdo&#M-|+7>OXX> z93y|X^i-f6YEkIDvvcQ1+B>c5a?}JOT9A_SWoKVwCwEvm4iU!vPZ?P^=qUy)G*1R4$Iypx3{x6XEevyzAi6(6hGUkwR^=J&|pM|lLpU=}!p;ZudgS)j+ zy~n3s+T?3pE#s&#-1Pkj#(^7!I>)Y+t$>j*Av>xF5mQx>kv1KHsi%{pg6z|A&UL}g z+Yxq^#*uoxO00b8!)0xl_1y7A@huHH6@w0}P zWTC^_byS&DqZ7WDrhlt3v{s{KAAI;pwj-*~Ht=bap*l{HC2tl_*^M%@ zbb2Mi+QDTnIPv0Y9oonQWUNUs{-0p7qmk`;jjya$_c_fOOSYRmxmGsvvVWdASWkR; z{R;;p1u}eaJJdD48+@v$f0c9v?sB@qcxrD!biQJ;eLQZkQ4^};GHDW0+}{rB!2d_T zcUKy+WT(`rPRtH+bRHIGjuKov$l}wn z(JqQ!zC58Sz8R=r<-^;^FI6*d%*P|0y-VHOCHhS-Js{=bCKve?DyU#3waIHKwuW1+ zT<7adjk4G8Q_+_8^LE7JV~(zIJ09|!biIajvEn}y}P*Vc=;t>(WL(9xq4L+CCoKty@z1G!zvI&%wzhPer z;F1zJvm|~E($QZ0;QE!9=cJ=|kmxSQxKh&Hup>Q*Ui(#nE|bSoatWFHI(ypxbQlke znfjhN5iQ41gyw0Q@1u89zaMm_{UtN3lR;dyHo99?)w83}C0H>-huq4swR4qS*ldfY z2b*71_35LIk*&0&WkPgtKcAjQzf;1pEox239~VU(RrqFs#0+esa@Vc&U#nHGwg!uY zf1oiB-1NCEjL)ZcHfPhMexZU9iz4?1AXt*j{I2?u#f$e1}Et4^~? 
zXlh?;wm3Od{%+Jr_Z=LK+R}k?3{!M|^BuZ>!u6Aon|9yi>_z^~+8n$F6E@bId+!@Q z#2Z6Xo0y0$dGTWL8n@(>#7yUCPmi0P#;+x~NDdssHT}syR*%>6@(_j??zoxaHL2UV z1N`jahE_XV_56?r($ecOC^GguHA0X3KW_4x@8r3f_I;l)JTt2rbJGKJNWK5)7I{C4ff@TGxZ$>R%S{||LR z+A8C|lQ}dlW$9fVpxx_3e_1wl5hD;`)?3JW1t}lY1<$w?`;MMLR%W2Pj$|&0CRU zguslX$ENQZd~#nK?Rr&h22v=3_EBg<_>Z+(ngj&c);HH}?|<;`&i?UGOlQ>X(4+cn zg}tJpbNYX`xC>Q8LkUU7HnQ%;iRz%Zg5an zJ)gVwtU!$&!ZFeypxZszArPQGFKbKPWnvq;SYWs=vUIhAS>H*etckgavP0#7o0)>W|N4%C|w7 z;ibZmvKl=8#ztD3k-#HBLdRL%-pG=jnY`Z|nS&SUw`Lm^8!aC{c>_|iiEVn?eIshf zVe`3cQ9pgDqPz?zZ$luQ>;SXR41KH;Jg-v-2Kk(%=Y*Vg=`le`5Bo7?Y9^MWH#9$Q zqI;D3J0#Yh{--^aZu}cK%Og&N8OMnniv8(cjG3v69OtKK3g`B}8nM?c2_MpJl8&_R zre~)2cD3dL6AIjy^twbtaMY_Q^9hhg^WK>0<)*|$dbJg^QBXau7+?~fJB>3^&E=(% z{W+V}GI%+~$CQ-ZTXpn-}2ny6`U9eP4w z(#A?cJ6l5pUM-2hs+9zG-u%9`t7C2n9&=Amrl9vqX1T64Bkgej$M~NR7=eiqH6`cs zq3?Y%vX!G_Dgwn$r~ZraNjHHsi3NEvcpltpZ4J3ariWOn(3!6hkhv%&B{c<`eGmQr z)|^ijv$esc?$T@|DCmxJc=%B?v-inIR{yl)Bv%tAYAVd%;P$WKp}Wp6CaBt7Xk#lS zOrNdamkaTfu`=Z_`m1!`bU5#6ZDf8rDqNCTD~ufOWi3C!tQW(ZP3x*~mucfg>_Clm zc1&bMUXl&ER6%1_a`JR6g(MiS09uDUJVO>HsM86_{TRG5($n!U(}?2X!rPR{h(b4s zb=-BxnaP=1xXO0*7t8NK>9!JDU$iV!Xyibc9HTVJO>j8=fRiSi_o7trQ#srl8F=t)^&7MKts6LJ}P4~9KxHvB7)`qrKBYjK$eT!WPHlb zjoMHLF)QfYksitylb5VH7(uL%@G3$mtO_QTv8{eOcf%xsZ#ggv(=)C#id0^B`US z87p_Qn_Y*14>R>*Zli+Yx{Hk-b8~WZ6e43!os!H9AF8|4UnVr<%NB3zvg34KM|3R- z<4OOJlG1Zx;VYVMb>?J>H?>CwE)IH*5`jz5s1s%HmSfQ%j$<1o+!wYB_NOwE4xzFU zqpFEX@4XGJkBltbu6n-T)C8*U*WU_IHa!ijeToPx4F>J1-!S}DWE2$8h%t5kg09M~ zogFYrM6UE8Gsf~7e(iZYT7Gh`<@D4#S%+*8iqo~tBOwIz^o>kS`Lbqyw(705=(xni zjo4UQ`_SO90eak8gEm1U@LGQGkMdsG?aKbNi#z^OKU(?=SC?UKkIjrwFJcaww`RNy zys2x`W9zeISIY%%$OL-Jwra8of($YIhKmtK_iD7Y-_k$ad>&i>vt9OFm1sR4PDCt2 z-JzYBh82aY?42oXJwCr&*oaa@*5Pti)VBFXKJ0MIFG4;`$SB&v0FHH>JbiPvVw`os z_!Tto?wzG3Mj`#*cY{Bj?p9PKP1d6^SBu7xNI|)f<>toJ8p1(E7y}84jz?TsSX2r+ zTBwnWc6e^=paGkU9sCC0DWgylr=faqJ_^k=hVsN#-sS9rpX1lfoOii+85aceIBz5h zK0CZ?pSy>5sL8&uPU|JEuXoG4nrUeYc2=D?Uilk*_p9mn<;V$)67R}epQFuN* zAASbcy1AyfpRZ&0BV!BXAfjW{_gAKQc3vl?k9uY%ezxivsE9;!5N?W#|KdJ*%3<3R zAYx$R0M`bT88aCnX^PPz5Z;d?W%c-pIiC?Ju3$=%}c$05J9@)b>k97WcJI>?bTg zpmCq=JSZ-3cYcn>zE1AcInS&i(ij?B`}5 zgECDDt=p708vZ=OM!0eNO$kxYo{U0LS)^yN23FS&lZN4q+}8p966NTe%8`v@OvQ%r zKQMVI)Nd|CJPh*9UygLy`B-f4Sg)US@HegD8#}XaZ%ms~_9iXXyC2MOZ|YD&l^s5p za--Fw6=lZJ<0Mvp9;P$)12+(^AVBLxZ#S7C1krhWpOe1`jELC$fNGA%u6UawlEtSQ zU;q3h%%BRZKeVXmwm(|sUY`g}E`VozsI02OK;D??=<9o*=5XVKuu_=O79; zFgTkk=p&H0eO+v5lQ`%8(p`vq{RBz*B^@INzd!_0h}p6+VU$_`oyB{1N72YAo7Gtj zrP)W@FCmtla+_}~`unw}3;5_r!(T)jIwYru6Li-rbW%`~B3m5$(^1!OHz%eMh>>|O zooLw;WRemmq?A?!{Xmza$s|gN4B_iBdG|Hw^=>wHf(J13>Cc|ku^D%RH-I!26_aUh z)qtNKcmc5q9-nO&4$H$;75R|s&dXiE0x;!e3*Fo!6A%EwIo6rK%;!^&mk-D}0NjLaaF51}-kc_?@>*7&iNbZjS|R_aHRJjZ#g+T0)QPEI%Ok$Wyc z#{Ca^E$au3dqjxpr=rY#mnr75PY)&3Aa@@3{@)wVS@8ecBNsAa(fRhsAWL9;@w3hS z-@~(OP^z$jI1X33j&O*O1B!-7&Yc4$#PJyp{wpCMGQRL~+>tz|u*nY_d7PjK{`oCB zq}ER53oeeqSwt^n3uvsu)+^kI=F}%+`Y`nYv%};1C`x)`1_ltYECoja)&dPRwPb#~ zUE8IqxOmUhl+$25!%EK0AvAH<$qNw~Yl5BW>uFX#%G{9I$`X=EzFNM(`6Z34y*|>jOGx}mXp_Nn8`O(8Nc>FCi z5U&(&ist-S4CZKR`fZ2+>U}uuMI)q|swC$1R`iXCO9N8+^;_qY%e*$sN}zU(Gk{eg zl?#=v_qyjOoO$E}U<*W-cyNGQ^>}v)faHK-d|<9w5hS!>RzE+pSYJZ^+iSXK_?@XL z@&3H1*!I)a6`~JcAhgco+6F)$D1Ur#=t1sDFFcqNZ5R zgRWzz*PTEVMVUD{CIHo>P&Aw{EG%qg;Z_S^UEt#4N^ZY?sjmL_^*dBeO-)g8@%Xqn zCuirUq#8I-dTN|4EI+tE$by5?=VO0rNLam(kf~m17yBVTEMR63sm;v%dYp7^#LQXx zg1yKoB1k)F)7_hq|E<}TR%4D3By?)NTiE|bcPGRB@9fWHiFau>@~xr$;sc0Mprot4 zZgrFbpaIu^H;TJ@X1A}+ez$GOTMJYiigq@k9856jQtPx|X6mzbymHae(?0)~4H9)B zu3PPMXP3vR7VpXyf|6~;0iL2J=@NQfLE-I3;=N|%FB=RMi2x12tyqa zBTV(~o}71Ys9KPuFJ6C)CcxB5)#rNIqI;bJOezCNE`XYpLwd9Adbx+?I)eb6nVb}5 
zO*2^20qQV>hgPN*h5~&tywWMyfu(|l7GK4aS>LFTS=WQi`30V^m}0+!M-o|#GfwYa z%b`=W8E{Q{X8`915Do!&2Ed^bc~^-i2n-Kaa?!qcK;@M=k{a57(g#a!ba53d0jbij zvJNWZ4bhROgf%ooSL59ChMc_I!On-5pM*K1o=e~;yuaXuhD*qipOS?Sn$Sqv*RmS% z@ybm$ERO(9p8eKqVQwZ#{X_&g@z#}4^NkfdzE8<9i%z9lSZ1g>9Jp#VNXU2bpCde@@j!YfF3(x?Jt86!mzmaaWn-hTlSzi7*cdVf7Gk z9-IP_C-LTM`GgXB?c?~zv1ctvs>8sw2lRb#%mAx65+<}??ietZ-C{iR=87us`+36p zo{58l7C@$>Y5bxPU4XU+0OJ4*O;u&(Kd2+~8u|hJ70@G7U_%YykO&9}*7M;%KSI$R zZrt49tTAz5|2(r*(ns#tBwZ`^$;QK{@EUll5U3p@CL@D<RRQ_9!k#;bi zPMCq|$GNu6+t4g{&>ILjAep%xt|~I%gz&Nc2ZUwzWwH^FWaL&ovbv;h=;-3r3S5&TNW|qeqR8r} z2{t1>CTYUvRTYj(+K-~Jv@3B+6FnSKso`Hp$Z`=wxx(2qXk2Jt2?~F$K3Jw@7nAgL zet^)AG*$)S<{MasgC31iO5D8Ww^w@>P^lO0@~4dMMO{` z9H6b$1N_jkGF#xT08p-0qQw)mDKOm976BTFtynILJ4;8+mdjj`LBqQ>#)C~e$v*en zWg_MrF5on&Jvfrv9X)0XL8M@x#&>{tW;XZ&Iz3h2HGwJ}RP#wOI0P(1m5DvJ4Djl!PJrP7EGsH1YJYz}aPbUkHv|6D{X_#faCHjCxPK(-C%d(Dr+x;C z7EDa<-fiiK+`sB^I05Ur-&`_0#F|Wup;X0N@#ngV$!tDo75+vyQf?^imqgoaTAL=> z!ig_8Vkx1EwW^X5X~x9iP1Wh?X?zUU(MPDjmk6gDzxdGYGg)EP7GR%sss1g6#O!c5 zv7KmOSDlKB<`1>Lo2Bx)STr~qCcXPcEz*px4-%)>)kW(z$NKZ+2rFq=LM>8fdrWckQWqC(EobjHrzqa> zEwTj=-7o2w$-Vv|t}ZtVvG*QjPg4qq9EUUC&94A@P-2 zv2Qrzt#~dg_s#ysBaEK0BBxBqqE@M7^w%jQQlJ*w4G%g;zH*oD@n|dhQvUsu3Q`;RoK~sPCLXh#(X;D`tv77#W^P>TQ%~2^g|H}R zSm2~`QaCyvDLjK1*><)VY6}j=ie-A~C@aAL6%~N<`8Iae3WNaIlb*mtd#Gf1W=5X* zmxq@-7VUBg1VyV6Y-vD;wl49uZCR5w5t-SYBMRs>GA}Y+#7UlvJChyx_Ul@|0|Kg_ zY;?D?T%s3pKxbnQ$e#R|+pm*%S^}MCYiLJB)j73yMB;B&75ME5i zeQi8&N*MR7`JOHICP!dw^#0o`9M$K%WS z_%Yb6TwRth+w5BuT+AsH5~U#i{L4-wD|+A^PYEZ@H#RXfH^k9w(wHap zJ2~Z1z7xr&0aV=VQKebgK`Ctyw}aO|C+*o=eO2$G#)h=r4qDVcm&EL4u| z4u)Io^HAo0GT0~vojZ=#*Ae3VH3vJxH+GBf1YT-|`SFNF=7A+KAdz{L(UdYi$?=Ap zovbRlt8UhWY`_1Igd9{W>AOYf0i~{ok57N<;B_X~WWpDktU$l03Xgz4dCDl0tFr+t zAzDS!jIgw>MfbLUuxL1rBW2an**l6ooysq4WSkf2~74|FZZhAEtS#n3` z5jrCZl#m8}N=Nd&Ia`zSgjHK~f!jn43k(v6@ z-_fi`GGt|BOLU_NefL{U?H@rzSNf;lD{}GPVvR*frP-umf64ltUu9sgxwpSyr2LEI z(OpgjL*}PP1iQ_?8aiIQl3+=Mq@hJdz~V6S0IPih<9Q<1EX``;kX--A7PLvmpIlytTNr|^Cgaz0Ft=_Yfu#uBWiAFfpM%&vh?fgz`OS`g z;2TEH>51_b-=;O#Eq*fPlXuWm670!^DNWF8w(Ua9j1_e@a=SEbtM4y=)oI{W1l^#p zI|+LprCHP2DcJ!Mr$fN!r@Tk$fHH(Q}!IlDJ?V>hgff*FOh zx&#u3Pi^)F>$Xc;*9ha~l#3lxVmuu;3X<&25aV7$aZAH46ni9>K)5A)9@cK2sO7au z5(`;Xd+nJ2th99l4weG=TQkY{-Gh^|$Y@#F${+eY2yw63f&X0CjQosr_wen~#M#QW=k( zuGad1+7yNdM{t>-?T7bGS1IPhb^k@;qz#S<}f^X?L5%EbOHr41Xg zkf4F04)Qh@vbxTiR^A=8(6_<(6BEc}##No`$mo*G@9|=A4>Ac-w6N17F|cEPcoJCL z1Vl(G=_aJc;sawy113spR=PstbM1Dm{Y`rOT*B}x7qt%_J!@aJ% zNspGt=zfDc)nBtWA15PI&D;0*CCb2j%r(trFj(V8#?JWbogoX^sjdLq(LVcOw?O9;60AR>4(D`hC*h^R^Nx8jM07Fs5&k57v6R z6w!YFh)$g>*ri4SQ*x-Qqo#GgQ>9;9gL~~C*Z%LPdYYJDd~|p{cH!i5{QxrJa+*9r z>&XD4?q~A-CZig#+V1$t`3U!9v`qyzOF_cHT{*h=cj27drZKBDQW0E1H@lN9z^An} zgS11iP1fMLvZ$t^rXnpWyIzL+?4?vzE!H&9^ypgek)+d@r0vGjEQ=ytFf_st6ewxu za>ZP0&-VKYb}q3uS;cxTB0~D}AU#Q|K54f33}qyA)Ov`t29mSvx>^{P))KX4{irjC za4qMVCJ~SNykKpt>>4?CxSzifyHSSA2U7^`|K&KL#t%?Gf<(TVE2_U*eM!T~wm7y@ zx>o35Zlg8(b|atckn}U^{u&vnRXSf&f#X)ekQT*dKIJI7@jF5F3;h4$mQ-Xw61nGc zc&HkurIwnn^*mMFU8TKx4U`NeUlgs@N&Gm*Ld^IVedO1Hw}7^4od!+55ackV=@`A% zvmE1371!5w0N%|2Nq2QWhg{cHR^Czjl9M(%#le2iXx*X17{+3u)|q=K*!=};@twMb#L&5q6AtIJ>yg?I zy5)U7h4jz=fYLoC!9Wje^QDrwH1D(|qk+R`&Pr3sIHTnR(qmo?M}4*GXA0-{Q+7?+ z87I4$TCoMY8!|S03KWo-igVyEEesyl5mH{Fill>gU>MKCHE zg2D=6RTOBkoya@He;!@Z`lfc+{BkCv5%jqm6$Pcd@x$xJ*J2fK53o^$l^db}6!Os# zW+bChK|l;m(x3jzqar5HIM|LSNvc{86T#-Bs{acbHbs$W??W zlFb{J*c1iNFMLly(amNzWZx(4V#VCbuih?HVbN&lRuB{|qjlm+f|MTQPJv{qL}|yC zmX2);q}=%wRf^X7Ngt&#d0oHMJo5{$kv=~-P5;N_hwf!U;P)z#_onRc$pjfJG^>0a ztRXW$qG^Vppc36Fb&3F0zyN;M^M#>{^rd<){f+U$bIvkB0m|Eg%J)2n|qa71hSfdSh?P 
z^M?lP6maAij7=dTx&`>nY$5Lzx|i*L#_dCGa!YM;O3ZT31h`Bz5yT$7A10uh1x;SV z&EG1w^gH8u@f{w2ch`=Z;0lIA^VP-Xsky8J|93SxIByU{8{PdrDpY%j*1a?BHv2%F zdh3Fq+_@Syq?%2&%~68McdDq<*(beQKZCIJ7Wr>*3v2`)j3c14L*GD(;&{ySIQ!o zJ}xMlAb|U(r>mRl{T8770U1}~X0?Nan}B1;L+z`=)pGHys-bZ119k_oSQr`yZA2K7 zH+^5{=gyj=@3x9YZbrJ5jVX`6oiHr=ds&8-oQ{$*^QsJ3GE+~RzWdQ#LGmAYK;LR)TX12QKi29f2BQ`oJqe7j{57z|oMZ9g6_ z;4NSz+Sw|1HVe7YKc4XlU?u8&kf8i`oEKS`A%2f@01p+wRN_1!F+Z>OMgZjCz}yD} zNzilDXDC-m)E6(HNR058p;BI{`=(bSHd$}oJU=(pxnqbk{FAlVarmfF9_2b_EGLLw za_BBGQI{Y+oHvFr+sOt2*|Xv}NKM$$-PGP)$_nZxjUDS|JNrhQdV-}gP;<-arEro! z#SJSP@{!lmsGgXgYR`P%7))TDp4YbA$FE*`=*jsxU8+ zt?x36GvE5c>+w?Vuxz7mtp7fn8;UFlU;+Z%`!60#^~WbmWcha(bn3sT$jC4-F#*^i zdHfmvC&+`uf4-o!RnGV1v}8F?M{O}>tCFOKha`xK1x+6nG3!Yi&=?HZ&N( z|}puy35(;`(pI5Y@X6g!;0s8x6BJ4Dx+lAr$d+ay1G(h3Pwk0WA{M z><*Rp$M=jiO|{nDN>2BZzB%C|H?>nAzS`0UUr+jY3rT*tB1b?{?5bGr+oc-Ts=q$N zpZHcCJa(ojk^`rHI>QB~(&OVhSZoKX@#51A88iE@o+>y5lsE*U1cNxCaQ5nIAeHfb z5deh`PL~-3nS$Quz@H_7liS$P0HlY3PwKLV^D-U3gz(A0uFTNcRCzAorhr*5NrKe8 z%)q0--dvCg1Ja%IdU;au{@@ihIQZ6RKfQ*|=$X}(4y~uLCc#Yi zJe-ORp=RNQH$760)iE{wnqrvk9BJsso~FZhD$vzTQ5Axpr=!Ir;o%B#w=`oU-az8> zs^&_*G|iiYrOMJHAGBMbOTzk0G>O64;5d^hh{nL{G1a*Lt zFeNp$D5;F<%+rGf&MPBQpIv^|SZ6iA&mpLJ~nLB3V6gdk>nR?W?hZC;hi zqCFUte%^fZ;`6gE-f?apgF-!Du27NCs#|*v_8->%nW{AxXKA# zBHlDIH8T-BYjI}h>p$^`1ZUkeu3t}`93gYlLX#KYMHHuS?PxY!QaK;-%E}96{`$2z zHz!ShiX#NQiRiK|BI)hw_da!ly}hJ`{~aITIbqwPf+_4V5b~4Z;NTqC1G#hn(+;(? z^fCYZSfJ5Kx{hy2IWb0Gzjc)G+11l50=$Q&3JM9eE#1t?*R!7*W=cD;>L7wl-phlk z=*SMzer=oOq?K>h1Yd;bdK_y~O5{eInKzL^9>jky5h0wxjHzArr>aRH1TX#>sd`*` zNE6I}mbR8BvySFd#Rt?L!H9_j8=x7Xe{c}^MH;HAZC97Y@4ozJzk|Wa^#}~w1A?Y= zC`aiCRM*u2>R6<8ZFMyX5VL4fbs+c?f|#F z2~f)bipsv9o}vj!0kdSJIH{;K>iazkZ|zUvdA?}bHI>;&lAt%=EKmXR-u@u4H&y<5 zAB73|(zQYB-ji2pT46Tg2VEeRrru%Ei z3Xy)NlkWEm1R?5kdL}vu-Xi@fzBK9#E5y!JJpT%Xz3@G2Lq+V5a9^W=a1EjMeyqg_ z32VSo0`L0rn?jOcL7X4O@Ln-S6-7ngE4dUF%e#{L#~@%^V`5^c0EHmur;cBJ|K1B+ zOo(T(^*~!+wcED7N2_MpPRWyB;>~NY_CJjwz)?$lWa&B>6DJZ!vcE%Q!)DNLejXAb zRFe{tM;JK0U{fcKQ)Vq!kMbeq3P`el&!=m#KbI1LWovFYQ`_Cxftj3{tid&3~MoCPQ3oXt5XrvVF&r9k=TeH2J3sq~Z zK^PK%1^WM2<$;z?N|pZs!>7P<)8^~;V!CFXQ0IHs6}|P;)pICpVzL2Tr3<_trSNfS9=2`9#4c`AbgF9w z->4wdgE|<{nF(kKspQK8&xVbpWM7#;sKZ4uiKw);kfdamf}J?Iq>j28R9#C|w9?1K zTX?HY!tbGA=tkc(PgLipPV+t-CM-HfP5ez5c!rk0qN+8LIPb2-xn%CFLSDc6&k+K= zT}sxo1=-8Ag?fk8j!m{Kbk6jzU{}*?g7+%Qta|ivIme4`1?eA%*79jDch8+xsun)h6s{*d_oz1A1b6F!g3nG&R#8Zs}Z6b*W|b_GPvOCcG0&?4>q>bl`3Q*8Owd&@=}4@~vaDs!E!X=?02pljnd0(?G;; z!XKy6kAfr1neZgyn6SeOb~#nZa`w%<@VqQPEgL!V^|jk`B``2lW5;^x&8`o4dh={=T zo|~No%x*9>+NJ%}-`^~-uuJ<%jK!MuYGuVf1Lhb0A0$DDj!vG|k7xC{n;3{;Q}vAF zNczKv*{hgqz&1E9#fKy?rb1jOgIk%*MpK;5pMJ+ia}7x!n*$f`JZk5{eGmn^s2*ps ziFKWAz?kgyk?m$quG#VlIW>JQDGNvcRJXVl1s!CTtEb*$M38fGoHNQN_e6ZUw|&ZB zdymKL#@C|VS&7?}zt|;sV04q7kW6Nsz`*ur5Z(Dt4Ksq$p|*GPbq#|LiOEU?2Sr|f z3I6tcvlkQb-iYLANb;w3)h0deFy4rn%8e)=$}P0QRg75|CGM%j(wppWf;m}P;!rO3 zC?ZNP$Mr0?rY4!`5?w0SmH0F{%~J#Wfe&lM8l9y*vMyof)1ySZbw0;w{2tCey`_Zs zI10R0s+_cGK94R1nZaqVC@9`?bJzd*ZhViLE_^!uhWL5!biKdkXf9O~H%x2y5qKng z1kmXzT(!Px)zCEbek6WtPFj-~6zscQE%$|VRmo0_HXwgoB@%{2qYcNVGWC*`Weis6 z{fQW^B-k&fAl38x*NJ>cX)G#w^IqdwIT8$Yx zNSrK+d0nmCv_E(mf-2yGN3qTy1%9%ON;18?dV;OK_6T39JO4XJxl?NcC4ikO6dP{8 ztL^oOKP{h72`A>wv!%YqoeT?Rq45a-(|#RLR#di{t)E^J{7XTO*KL4dJBb1mh&hfI zxagshqkWDi8@IU+mOU|1B#;MKPM6;zD4;kpXVP`si&-ol{*YOGK041CI3E&HqFbi_~ zZw06azAId1V%CJZiXW)U>NA*?1sJIY6v}*HAYA-YYzXx51Fe%l-B&_HMnXskF@;^Z zGhOVT-&|L~{F5_6bY!vaNpf?frQR`iBL=PYSV`npOx0(QTbVoj%cSCQhofSrSS>?~ zMyJ4F&U5Kd(d5v0||3$iKg5vvBKD#mLg!qFV-0w4BtZjpRv_QOP zSe)ZgF>IrYo+~#=RokM>sk~mQb}ZN4B921!t^YiUY+Odjo3QiCO9&??CmS2D*B$$X zLJnLo|FRt#*~i0g 
zQK}U1{iw`Mwl_A6`uayC?bOB*aS0VFZ*e9+gs*8F^YJ_Gb+pH9-xHTNvO8adFhSd5 z;YQTPb(0@<_V(_AC8VSfIbuv-S#Gc|otMorr-h;vu3mp!&vN!0=1cp{iFJTZIw6>6 zk}w}!_MpEGbVL~IPKB3sCs^Pw>uu4h|JU(t;{6TW5bquse?+2dcQ)PMkj6LU=u0vz zV52tZ`^BJ`7>A)mmUGtx+07(wZ$3OAD(obBzG#N)fr1*gIgUK=6wh+EVUXo9r419} zC%i&K!P^Pw4t6|GP)tg2@>YT`9DX&Fn=-b;zER%8Tx5s_D24z6eX**O6XwIFUrfNf z&h*Ni$60+lw1V$IkY%~$qlC6607H(ahmrVMq|>F7Dv;|LZIZc>b>VKc2(elf!-emU zYqNFKAHDfd&1jscunXLBaEAg84NW0GAh-oQh5gYK0JD9%X*YGp@KGzy5N~tfPTfBF*oUax<1J)Er($oI_q7`Xh92E@qNn4X z9j+010SUDpaE4Z$KXUm2|A`Dj9#-OciBYvf(>pQB@2NA~JwBkUy>|C~N~N&BcLH%i zWnxUFLOw`_NtK^3EX_A`zwiDyO(;{FD z{kaB=O>LV;h95v2{P6nE`%Qx{z}hZ*ekpOQWq6&t;wYp;uD4l&90mXma@J9h#nq+zovv?Y-gq_iOh@m4XPZt+nwf;i-ySb3e?9g_Gtz1}0B#uCN6H zV_gmUFH=#fFNsfV7cftZVAJpEJWF*FWTvW663DJd;yXC$!s#4Krm&0#K7>oEwn4$A?CFo6(bu{RrvD=n>@ zmD{j@6?NV_ve;7g#T-ZZ1OCRvLr+}Qv}r7Twr_Uy;$qW9by(aKO5D^i=uo&RTLyMa z(@*{W0`!}iRYi$YRh?~|`4ooo$?cFewvfaH%V4Wn-_*z;JT-}=)#r#+N#^+GSY9gJ z7l>y3I?G#ITglP@v0Xr$f&{RM^rsQUnR`juWwM023wK>p{WobVZ!sLp41NBsyBP*B zkuSEq@-yB{mrp)*&B@KL{wXamXVHb??owBI%$If|DXrcL3R=%%;^7t0KRwKnch=!I_;?fyS_$_uHWJbbmT2x(%GsJt3lsKMaL6k5*!nz z5+&Vzq%zksjarrR{@8k%7x0vjgl_7V>~jyQiru*wV*?yE;M3*sdEEh-l%t?fsENFq zFigc~tuHA=Bm>T~wTB*IvsCAT)m;!Wuh7Nck$PXnbk`e@NgmLnR&&YEgSP`^Km-lm zonDg#`KW->o2LR93S1&2eF{s?g@w+$=hM4`)%JVChx}ryQ_6Z1shAJQTz%}W$8nRDYh|zMc35U`rMpk*5Lq! z0$_|Tg~M7}MutqDG_$B^FF_vfRkj=`03vWlo32~49%m{F8!zyv?E=v2^-Uq1Hp|Uo zcD6pgmPlv^YflAWVNPt#!O@=fo{k=0qsh1TZ=CykIoFY&p3%PfXoH^j@8pqUPor!O9M{#mf@nPM@CxJJN|@|QTJSxW&)9g|2B|ET4+j$6Lw4osD*UJ8 zx0`-zc{h^X5ZOsVrA|Nc{6^Lm!|aXJHA$6D;;6{Tw!XePK(7x_e_hL2268daL1&5z zIM#YPDz&b*63xgdwLjGn@lF5P%pE8&2+C_dLICJ0{XKpefX|8Q;_~_-fGg+?5fRZ& zXR9MG{U0BQES*d$vqcBcX~!rd&~Pt|+D(llv`RKP$=*9c6x5H{S@L)3)VA1=lW;gU z$h)RTyU5nmbOUqhf`LC98SKp%MivIR7t^*_euSIqB&#Y}(+UPnW9i>o{hL$FRS_g{ z^Wvk>X|#1T3zB?t;N4j2;~g^cI&U^ET)rKi^z0~$2nh)RnsykwhD@ks2JAj#Sy=dg ztrnb4;#>4YcXxPA|E)w*Pw(*f_;?A~w+EP}SX@|01y&&_*b0d3d<@y)Y%PMz4wcBa z2LX1TT3o3~^{=6k8MpHlvvQ~RpG%r)6}2_Ll{-^pvs2mCRCWv2=}Elv_>v`WRpBYN zMhjJ4*&e_RS)CiMOL@PHH|s8L>Mmw}NtHL8$nSihc^!E1oDcs^f;g+Ml%!Z`ZY8B^ zX#I7xw?Otew83v}YbNy7@ech%FTzdWeer{`h6|e0YfGobugeELw%hNK1?uYS@!rOD zV|f*`b8z=hj?0TJ%TGffTaKK(izZY^{qzCSFxd?IYDA2e4Tfln%E&n}#(oOftUM3;IN@ z*+}qJKscJda)7$5aTYG8^U!rDg?Va&>P?cw=}z6$hr(4?nTYZy!8o(k=*{(>u={7I zrtXnZrX%z^doENA{wOI))H9UHCOj16;kDjZDnkp5nw+q3 z^xStFC*^~&kp{{lj^|o#t5cm#oOEH$W%-Zi_`TC7Ov?-@GyEvIOVje37h@qBOSpYB zgK2Y(9=%E|ANv!~ndnQ{_SX@i)HzT3I(JbL@-{yH9YLWhh>(?^Zfy*T zi*`O`AReu5@QdE6D~YA@_)Dk^x*94_=#z=Z;P=w+pP=L$WuhEAnbl`hPurY6@Uq|Q zSc{P(vN6uf(e>nH#bmNR4U)&<97_7F(ay|_%z1w9rIMIf?=o00-+Q+Sp` z%Om95z>aYYoPPmqH$u=iQaN=oDEf-!}N4DUNCa~!^r5&3Y`tG`hWiy}&G`CU7`FWBAu%HH)2tdjJBELCYLy){G1A1tewplt-(KC? 
z*4G4_hbkA{ac4Hi6OZE^+X6RJB8In{)gi~xMeEYZ*|LoG6Ux^GhOwKMco*!*wgDId zJz}l%ZQ^pR>VHY9uwAt9%HSxL#QSft#c?1MCK9@v<~hmkt9q=Z1vjE$a`fy3@0RY( z*N-7X6-%T1u%Lo^5vZ3ZOvSOFoiMI0@cBG2l_YEoXx#>Z0BNXlDyXe3mHZulB=`xa z=|+CY+%?rzaq9uHcGf*5Ne)}rREb)ehPF-6$N09KOhTNS2;g{m7a2;l1h2%u6TmYC z{c46gIQ_2Wn2yhz4MZg1c$k@EIV+<9Qq{)LnbgQ54?`M^PEx7GjpUo?Jm&fN2i+1( zD_)`-^)774-PfQ)Z%o|4svd-VoD{9R_j=J2-nq~du&~t8xIbMlQz1BiswaV6LdPIQ zUgBI1#K5SKots9pg(b_?oM7Ezjt+WsciS!@g5(&YkDrsFh|n;`qnl(J@!s%c($>_1 zvB~)WVUIY6)1n{I*N(6FCXkwp1j7rq=R&a|1djiA(ck;U4w!+AesBZ!9Y@+@(6%iE z7dI)M-T*IBGmr+<{RznOJ{1|3cGRfVJ`U>mIC8E|vj!lCpzUbXkU$P_M6%%+R1V+G zSC(vq#Rb@*iJA{%4@cT&;!81S&8KU#L?;yJ44no}Rw;i(CYaVkdV z?r7;_?P`wanK3)wZ&qqFY858>=dKqUg?JPi!hWSEOf8-ZBj?$}MzAzp4rX=I(b-W< z$-hmW5C@T{bl%;$1H)E8?1T1~AFVB;c=aO5;bHAucwAt?o6_NQi+>E?#RpwtL^mfd z^CxrAdF+nUjqCY$%ma@5RyK*TQNWBSKn|de80$Zq&7R-(=i4q7LBH)`ASk;MgqJWNEqa8%^Z4wUq8eT`rgAYr+1!eLLN)N?VwGo^PYOo++ZXR36D7tCH5Qkm40L_o*UG)57(POH*>JJj2DDK^51U_>`^E=5mK&Hy(Si8u zvL&ba(8O;VB3F++UAE`O7l)NbNMMYRqK6-zX+)_qgH5|Nw%mo8sJ8#RL96L2XoZ=v zS=e8fm7gHd3i^Vcxs&OdRWSUwZ*y<56GYg^DaO?{Ptv>sOnGh4_7qW$uXP8kw)>7K>Wby^`1#stW@eC$t|w`q#* z$_n;jx{cG(3SFn>f?{G(OWgp&X0FEGk0u zAQcPqv7(by(Onc+oC?Pui#;@T5RwCI<1+NU{fu@T(N_|W$F^j*nBD$fUy^V_8F5a- zzXQz)1}QiBYTkGZqfJjZlacK2I~BK>^xc8Xqts2v;6Gf!x{=(~P-=x72hSTHrZ2+! z4?nC`w$gr+d?kj}WGU+mH>Q|XjWff}PZ$lV9fup%nJ^z^jDo?wKOUF#^}9uddRlIS ze2t-2hQ`9ahe^3HxSHHl4howaY48x5iTq&$vYVMsHa^wIDJ3GX{Q!4~{v$Ad= zCoj}qYcv(bhO@eU=`s2`I{22OYSa-V%v2iv)RnO2Qmmf z<5xZ#_ul9c(~^1z9vy8zvU56Sc9YG-^&mPI4P+ch_5%NQu>WqzKxsb zj9}FBlAxnq9P88*n-^Vbo1o>QBESD(>zdP&nbZ{jJ53|l$4o>RmfQHNov^=ABX=1e z?in~7q8vZvNs>?U%ji?F>7DxW6l*2Hb8xI}5XctTR#|y@GZT}A3gdU~r6j=yNI7u7 zn<#LwpgZD(#qN&9*IjRPOw8C5Ug9STPm}e;F9$m~fO=JNvj|opLSue;<&z>Ph17HM zy0noTL!BUK(oPYN5^|7PoP9W$P@{C}x3B+T3Eja+$u6*DEViM~wqbW^H#%t&EQM#d z;GH;}%s-rJnP$4xW%lx`1S;tgM@1iF#Na31t`lc>=jJiVBS~>Jm|mpLTLzDoA%(H> zl3w(2HE%r|L{k@`$_*h8m2@}%EUV+;tD>Z>>Jns}=WXHQi`ek@AR~(e1C`@O{OhwBj0D+nL#i53eooT-A7HfRY%T)PVNx|P16p}LD*ER^YXDj*i4ab(Y| zs-~>UrYr~F%KhvK-a#Fv`om3&^(@s*TpPH#-&8p1-VK&LAK&Oi}~nfw_8hZ>%V<#@{UNvr}#bzgh0!LB6)D2 z6blr~|4Cm1q4aJo9xKH2R}pd4B}-{Wi(rM!4K)RY8WY3{3GLe5`|u`@f!B|GNvBH{ zE+$bVqc_`P-P=H>urx#A|E+j)YxXNYmVo{KcQ;{4eGx0&HUOkOIw`wMJD7@%J?DP+ zTbmizq&YoeaB$w7vlRP-J05nVn5&Cwb9zktPSRk4!CsS6Jg>{w=Tc*%ZE^!?$s{du zjqbd)DZ6_e>&xkFc>-;^a?e%i)e!g7x_1+1`FnJNivkHr!7xAoi}iZbszjM~;TL8% z`KCcZYeJllleBAZn)#DA-ZJ;mhj+?=htb*jq1aI213a@tyH#tatpe!mxTeBQ()#D% zra}{mueD9iQI52b)ejfkcNSJ1j3g9(VhJ$7RF(j^(ergrf)b7-2q*+CvY0?M)zlzB zASxDut){My4g{jWW>)U?jMYOD1#guOxE+0?i_jpU!+jOXACm@!d!0V_GG@|JHgNb~xv&bi(3L4)gp zmI!vwEx_}dikz-hQg5PZ>mR7s8=+q{1`whe8PC6drCt!smx4V~ZgUV!{P-6dOQ@f#%b}}tznI6BV^hIKU-;8k_r}&aHY`KT`S?+&M98e8C-Jbe@b4o+BeM?or z81*NVwwFJEL~wU?q%d^BgUB6I9Gim*8^=;#&|``ZLSDu)C*r&wt+%aqa7pm6%=;GC zVp)4YUV{I!MAy~A5#~_cJ?v%`irhHGJa@E|g(!<$>gJZeF3{8c^*FU>sBW*`!c&Z3 zGDJfpFD{z)Nb9427$W!BS_&6l-p_bsruHyi8d`6!FJO%AcAR#xIuDQof&9?6mb3aY zX(>->9SzUWMy9r^;+EYKd(gj2bHc0(-9eAeDY4BhuzZrqG(Y^P!8PQp`XVMZEgq|5-E3zbHW0#@>8F)VU=FzFUXi!uC*Vlin*oT(MWtYM}1r*)&xj zQ-+c?U=)n6w{)m8|01Pgq`H zvpM+bMXa@hD>nWFm?p#zX<>MI`3O?d_49#_(apgQJmJ@@Abi3YsMP73lOSYLWC*MT?dglt+fZ;AGLJng3HoJD!ssInO|bB-PvUWQxTAJ0l~ z!Sfp>93@Ny3~wYuAN77g}I#?BiNESP| z8GgcCfy_@@_emTSvj5;^MI4C8l*SV0!T3B!7Gbh*MknqUSa9;QmKknqI?uJOFCkW#6WxB~LMw(eHp72fce@oIFrzbs1dX1K_VOo!v*AJA zaYyP*wPkZH$K#zk5xHHAdOU6lqts3Jzm=OO6`$Utk){0U%xIbXprNfNsajo8u@#rS zpJ#73yhM_VAHu69)n~Z#KRaJO8JS>P4=>Cv{mmQPN@>IIiRf_~RR;fos@&;A4wnUG zJ^=dK?~#!^ZE=BQsTnZcPj9zoQVMj_*(v-S>0vPhdg**5K|=Tchqbqgs-s)CMgKqu z1cC+k0Kr`*F2Nmw2M_M<9tcd_-CZZ{6Ad0DxVyW%b1G}Cz0Td|`L@ObJWy4mYK%{N 
z@1OPx@s564{r+&tv4IK7fGR2DsJgww@La2FH;de2kMz>AA4P=EL6O_HITK1<~mlrro&j#P0 z_sMI-K#@blRNr*^FhyQgSJ^h5Bon9wGg)u}@D*S$`DKG0cSf;&?KW>P&ff%#`E;He zZPHNF>@sa^>ePGy+TeESKYsR$~kJsZ=V;$?al2}aFahz%uW8DIsdbn)=#{^Q>41NYRLoz&Qhl; zYFAU--QH~w&ZwrtnO}bVAO@bQmsu;v2dB~E2qqhO$%#c!V4!hY4aT}|cP#6CH(z)e z?S!+m^?nRV^wkDfyMat9*f?_TLHB81rph|0{#%_Y_b$+uE3o;~p;V%Dp zTboQtiGhZf6S~8I;wNyx=l(EDld2j=`Hj>*Za5t6rlh2b75mk38fsaslOsNC{8Ekn z6N~#M%6zRwJVLEOaXEgOv%o+NCsixO^6M}imP_Sa~d1s&(9qLkhr48hM zWo-w&vt0c{d2I8jHj2c*kFnR?nUJ!wvaYT!ir*dVmsvz)WNV-#<DbfmUC6S>dCUhEf?#gp&*yIYkQ4-lR2&(n_AVSOZj}Eirh!-#qvU;-*xdESp zYAZ2ln`1*4TRK@u1us<*WA%~dd+&8j$-Mvz_nUo?pcmB3J|*y|_xYomOUU%{?u{K2 z6LMG$kM~-L%~6m~_$Tvld8R8F+WxjO)Z2@RgW?ZQLo%D8T@D286x_0ZJvJ$fA1(qO z9-gf1UlgX5nc1iR`hqG~Q4{r)=6CxVEr*&=Tl6IvAt~-X3$A2fM0kuZ?dVKDB4dhv z6X&N*%D#NceOf-~=fmv;mUJdoym$b{F@@&$8Ts-}Jn zM-2pAa~WA61RYF6)NL0V$^m@z-!K?R2xvSaQo$xbOLa)S9OGMUwV;rk2(QU7E`#Il zT#jOQY;*J(n)R`3qm4E%P|qGmYGf4n>Q%C9L-fq)Mhf(K|8sLw)1UYiT8%blQ_R%k zuDhAXc4gWj2AF*S6KF}8kd4mH1-L)pt`J%tG-^$rcl2Z9NxzDyk}rY?8W}YEI?VP% zU;3LFcy&aSH^PGRWWk?4B0~H(*_MT4(?up*KMn=CuKiwn_doXokP#mW@Kbs4#@WWA z@YqZ`V~xX-Dn(gB{nOK7T;+#U7|QA2sBHrkE@fqmqXW~CbnfOUX7;w>VbI*~&e`9c zMX&n%c-V1KE8!rn|84;&6q*eN8g)eYpsCua{JbJ9$||*?rQW%5)L!aP;7R?6+S$HJ zKQre8uM!{^jRO5QCji8#kD3pbL`FyG`BGe5jE05feZJ8<#@7nY1_LNBl+m(ZNBxT7 zXIvgPA1Sb|#~|xP3R!E++};mP0HoiC=5MYh`Uwpdz*{rW(Ln$SuiDx;uKim8E8N&; zS3>$^=GXlh6?OD))ly@Uf@Ia>;-OPvBj-xTlMLU1LU4#+c5Yz`a|j!CL2?}- zrY|F~vR5=YIXNH$RDI5FZRG=^J@~&@h8Khlrn`7m1StRCZl|9pC6VD#{^u=irFH=w z&VAf?v}6^d9sGRj{Zixgi?}gn$J{*A@A}4*jpub29~y6?A{cN}i{8GroG1N#AcMbd z5BQvD9jJt}Bm!Q=`T6YM(k6aPM~9}qYaQdIG(BWo--gUE>D8Z5RaeoBS#QD@8RM0hvU^6 z-AgOsJV||3p8WMx#Izjz`$1B3=d0)nd`Hj4oKJ@;JqN!ab1i8*E&v$=4(eM2v%mKE zUyr8%KxWw3E<>^C#DnoIw3;Ty*?!<0d}~hOg1@4Y*K2-1=x~(&?d~f?KVd#CjQ3VM z;pPQRC;0y0k-Fl-!PV{hYJ_w>k8>D_{6b>8bX`pPt1)R&^)!ptO)NC-I0Y%_v0U!~oJ-TOlhu-=SD;JI8 z13-F${(wNQq4mwsX@YPfuT5|d$|mat9~n{?5gzAU$}qrLms|^gG1L!dv31(|Fdun4ZPy@i2@mkZ2XlZwd}&2z|cy03q_ zzX5-Gjeq_apOo|y|Kj{S;tMF=?E(O!nT&Q~oDx0L^LIaB0EaF%PMI%G{k0l@228p0 z&|~B&Vd~kRAdVI56+Ut^S}aFx6FEdrCj0((aZprK#~_pA?Zq@l^{hki`M?85Q&ZP{ z+f_v%2IY|wE2`wReCaPaCVI!)vnjXJ;zP{sHKP4=Y0V+#9%d8~z&hL&{ok$V?0gMI zCIIZq3LrF;e&zY08DE;u2P;8Yj^|QA_^MBbmi8bEXz@TH5f6bPU1<$j&_aU!B(3#h zW8c%Rb%w=TLa*-+0)(y*Jr(VMrwqr(%94Y?zm2y3UijawDqa;fl$BRI@n@MCyI6JI zbcUCg)(qE`SxYUkuTvNusEPre$ViuR@h2oLXIn9$IaiM7{GJNLP^Z^EwtIMR(2R#suwXM4oUho3oxkJ8d}y z#Qg4P966`l^rR(0^u?=chy(?2l|5rWrtH>$IN*~{RzpL>*w`506K`%@fH=so2coZI zrt{AdkKcgd+o+cS*9D5i?({>k04M-vL(9i0zPbcrtOYS-EnoT>bC~d zA1zxI-1PKp4NIJ}7sHXXRy`2X$yQi})87Q`(;#oMn#7;x3p2AZo!k-CH0BL1qiVI| z)ixeE`XokJN@yn~P1b+*9~)SR?dKVZ6LSd~!{%`UaJ)L`MF745dT`F0|2hHjX8es@ zma&a@Oh}8lTA4ocP2;pIeh!vY%(B=MRSJ+0CUbd7myg_6UwKQ(8k)ElL`?Wk_rm+5YP4;rQAW;#U~&)!dEZkzR+SwE|t>P5F(l6>4; z+fuQN6;-0EU~OokSvnurBzBRG zB)a93$qmJSzOqFWJi8evRiHL^a&NOF8?Csk1nHIHx4N62iA$+nE%kPjAwEc{R`=FF zB;l}3cq@w#DeAskfGeWEd|z}+c0J<2!i>ZhiSkkiKGIq-0g88@KOdeX_xz&G-%kNM z3WekMy|X|VS>MuZRlX~X&2<#F({5H5e?AkS^TY+HL_$Kky6sSC${pVS$NZ8$?zQZhDw--*^F(EDmd-)^%J zzT8NJl!s`~cxR1^s2||ayj}q$U%b;xlck;zePN%>z>ze=#YB6@(PX-+-7{FnwER?G z7hGw86bJZcQ&XyQHkfnupZ_@@;-6sEx&hpk%|f;3eW~+WuW^9+#sg!AfAbpL<9YToJ4Ms_#Kh=Oj-^4z!@}qvbaZH_qevHa$`fE*#hQT-EvG z64wYgu!rfJ3yA|w=(J5CaL`l-Ft+)$ecKeXz`$-yw(5Sq z72x-7pshKGL~AzT+M_@rWnnS50Pkq0xbdwvF_p^?QXv}&!ydPaBc&)J1c z$0aB4qG*adOIA>iSW)2FWklAXN0weI}Nq; zXRv#i2M=d7NZ)048FEMLUfg|US92TjUmwbPN6k|p)*t`j8FkDM@`OG4i zO-%k>-?S38y}P`weddEJ3g7Mda9OvvYVy6?<5q~)_TNzE73M#2+s3V*4k{4S#8SwB zZ)0Zpft0EXWW4YPeuAJ8DJD+&i3Ma88OAct={jRl^8Gda-I9CDQd1DG=`g?XMtURx zt899CagX@(NFkKaiC)>1QMX>@dp9st%a{)-_d>9#Su?C6MN*E%{n%PLZ|tHeSKOEU 
zoI<;-EPb-6=ufh6@c!jaCAa+|&!6d`AMJm+&< z!<>zzq)dS911Hg=Kge@bPX*CVS&-kdv*9`R6jOGY9i)4*-1-=I`cFP>>VsQO+3tGntG#W2{nG9I`N)L z&VZLm4m*13j&(I+&yRP?C&ip1;rF$-SxrsgB-HUYl$Ddyh3pNcyqlN64k|Kgacib+ zo%AdLX6Z%~Y5oJ}<5WKp(?sKQULD!stJsuupT|jyx?&~`!5=Wk9l(QcDx7()C68y) zX?6k%mYoN=2t9yc!@skJe=STzNrt3Nt)2c}Qg&l~@H%ba_rPm5FpOKQAV~}hbe&Ef zKKg)`3ZFQ`CgO@YEAe^PL(KTG$aU3zj_%`xa8{-OIY8NcOMym-&Dmp=f^6dNs5Oh#J7M_)wC&b`V@J4scYeii81jRm}DZEbBpOm7eRzk)5aDLWva zBNs&`C99=_jSB;QDkKGRa|>VMfI0xWzO(Ci{(KtAf@ZIGI2~oOwzXZ=W7y&?$>Po$ zN#N!3Qy%006VKDq(E;Gro45M7gHK{!1{B9pEzX+ zwz!iPivlOw0D*(2#KaW*0Ja?|j8i$)ZaB8urorSoH7Rz}H4|hLE92pQ%WMyLm$k!oR=)oQM|Mm0S zK2N~ZiP-Hh&eORj<6TNwdX4sAz_^D5Ix~2+=^y|QTWpRE`BUWp(sDBX)dp5(5qG+a2uAXx8Z9pmY>&1~VJmXc}i! z6C)0zcB@-8pd}BeGTJYbqXl;Yl=gpR&rbqXa3v5(hwp29eSHF8o_~02eE*(3ih6c& zQBz%A4UkX1gbSCIm+K>coGplo0(1-A-MKyLSzuV2{+sm_wYBjBr8hvJH81Zcke+<` zgw_o}bpDl90>2~l=m8L~|Ns6v*Hf2i?97}lf4=Py{;mj0;hzEXf{_RJV%kX=}3 z9D*U1_YXM7xGxsb_$Y)x>h!wc_(7g2i{HDGw0^5_yteX%e<9RRTh6$Wc&^o8WjLe8 z{#q|Miri|Hg>OjE^uhn<=|=+1*IWtAhF$y&jAUNJPNC_$OylRgx?TLRvUGT?(w=E= zVt(ZJ=2?0m-#&lX!DAFbp z9&D_wR2V<*unUlJF;Q-FZYu&Rw4otd#4?$(X@**d%Wwhosa!{^sAG=3LLzxiz8d zlB%XKW*e4Uc|z@e-SE)NGeN{tPRJmj{LTWq^n6Q(L~P1sQN~oxzkVXDsD=`Kb>#C+ zT+9@Tb%HTThf{1i40yYSlzuj7^w{@PzW}XlonoRtVz-Q-FA;wR>_mfVj1T3bKv|tJ zX2CMN^|ZT`KF5Y7m)XF8Iv>jG9UIcmf(GSYCg*`7!R9IJ>HFyTo`v^LTU$dUfeD-u zh@dB>lsL39&&;R@aJWO7fw(H$|6RZvr+N+*Rgf3n@9C|IK07S;e$*_9O|e9ipi90F zn3(w%!IoitesyB%h5-j!{S)Z+A!QJnjSQHr3F%ZMdnixy%dgM_i%_qky>AoY>Uqn7DfCmAuL{T=4A*E&Uni!`n0V3>QVphxNi#>tj-Mx!X=3mrDgUzv zR&&e8b|x9?@ES&NWmEOE zL|>c|XU?yfIY>8CK$3&o92=auX*gilyL%M)Sx((9&w6A1C(>&*>& zK&i;o6<`s|dVl>gtDA;(^u727P-IYdD1C>be~+x!atJma3OjF@P+~`9w0%k5xYt#c zf0xM6QvEDrEtFmv(GXjqcb;@}171SYA}Zjt-I^ZuVzWH6o}E`gTQ@4Eebf}6{{`AN ztRx^mugCX>fGXRI)X2)Q1_ac21|2H(251Sm0^2&c($;xFpCdDS>ctEX8@ts^4SfQ_ zLcqBnY^(<$1l-_TdbB!`x`HdImjQMoOe8VqV{JaW==35myLq)8jm z9=euD*R?GBT<$I_V(FBwiT#5_p3GIV+7Zr@{$EYC}{p z_Vyq8NU<377E33e%j>X=jpDD;H-@+A zgMzleqgk}Et;3*|c0Zzo67|7;pRJjBZNfw2kkzpV$w4hI`$qf*mBI(jkFgHn49_p( z{UhRYKU2-uJ-FMak3}30JyKCu-2|HYkRHauEnB}3=ul{w@lo$A)}Zgy%yf=UFQ~=M z?`n%K96EHRuWmM6s5{U;xOC2gKJz!$_LVi_3%^D=a{84&f+|o`7qvu4=wDwN5PO*Y z|CRqekvn-0UaPb@KEG&r%fL&}W$aRM((AQI$j(+PzQp2VQ1@UmQ4+W*b7i?3~n647?#GVgoDknpGFjf2khUb3#9&Q_Z{hNJXkVS?8y1+E|?xcNf?fwlzS70i=7 z@HT_Bjt;~9y1e*Au0Nf7k^QMsctm|%hr7e;h;vwxqr<2srBgp8K?m%s60X6+&B%$n z)7Iv1QxdPj;@rZD?f5B#pxFvAJA5FO!s>zM+`N`qTT|6f-`+xzkIm~?wWA&>GV}x%ApZ!qtDU{yh-|H;T-oUDjl($^ z*iY6-I~-i)_jH@|G6~Jpr`AE97eet+7_q`g7EE{y79f;H@SVG7(LNoiUY4){B-m=| zr;^YYdzq^4=a7>4ym;%d!KpM$%WPT1h^fwN`&!0k@csGk(z(7!M@lQ$)65G41VUbW znvlbQPWz6>IscqmR^ihV`(o*pU$I{k{*h9B+bpo{4q5GR*VCQtTIrO&?M?afbm*d| zbT;yOb?vBF4Bc36yTr4D9{CFYrl@YGB}zQpPdTpAR*u|DU~h17u2(C($6pbwdEXrV z%R~|Dw8voM6kO4VYTk}WwwNti6u`>tEh+W762c!HE5JxGKP21~pUJz+A+p%GK?~&7 zZ^$Vsy83$%4{0ZV5?*IzF1I^$PSFH8hvBzh(VrycemM?OuMkvYt*U8cn~cR?Y;-?- z8Da&5nW&bWyx+llzaC83>=7%!-R>qo#{4W#f~MVJ^bY5?tf1w4Ag4fEVdF$v_yQZo^}z zOkRaB`^Ww&2Yg)MGq{mNp=a3|)Yf#~LPaJF!G1r~??cF6B>d{~pHSh)Na!*eLdeTu%Ct@Os#9^sQUR5J3f3zd?Y~a`NcDqR zLs{fL*KgRMTX*C2@&7f@g4m~q{m#OHl@{A?T#?kFBax zxgXOyYNFio!!T9ln@~#mM{#u-cX6xi6Ak7x`*{CU5&=K_2GyOOz=fr){`Y<9&$n69 z9n;J;HMNZ@0v+jru?}4`jnz&4&2_U0J4RHNgz1tZ^3_ij5>^i4CTjM}vm=p%*^Q>C z!GpY_yR$wo3z~YE)(+-07|28EZ8e<+tk)S@oTvJ!uIN?ss9+&9vaKUjtJ4zyuq#D##l>xmdP{k;wo}cab z03jz97yrzQ+`Ex50R=k8?dNQm?Y`W zD`OE=ln$RD-DFI!Ti`U72|f&xHXuA*S{k+=Z;K62T`4!VA(q+zQ!sPglfyFo4N~%D z*jbWxYKG@NJzpZ_tbRRFAy3v+x75iTy|hlR+uBSHWUs-Ow285L?^&06nlCOY#rAnV zv%=G1;B9lD0f#w>;HFY&Jk~~T(Kb(hobGEuT-8~sMAJDIj|9aAomC*>W_DNau*xQl zYb)KJOIgK^N}Lw&-Ar(?XOSAaul1|87Md|*b!M3LV5p((vwczcLa75rmlcIttx{af 
z=!)lV^^_JeKtAJ@NeH9kHY?`h7z$8tT#ITce}8&&Spl`cQlGf0&nrI{2|Wzcscmcv zft*~TGw4`5-?uCAHtqlJXFFCJ`qVb|!u@o;o1DMUx2ai&ZJqOQdj3|%@`KD$`MaezT7^1A1gEM?bL(s5 zF?{B9s8!JMQ5k7$f*vFJC{oPUOvc8Iu8`mF^=dhb$Y_0=l7Uc8DA`+i5NY1O5|dAA z5aqbLKlbHKG$_J3SCO03S9LVRM1p4*1?4BxxNUZ_xi6VmSw5%*_`;}E z-6QAtS0sj(HGPLw6i{1IqT8LSb8-eH7WO{0@p5OSgoaD4HT4jcxJIXzLm#~Lb=52o zANF}Op*S2bUr=~0$?7YvK2M`aack@K$@eoG4}0uiEK0(EU;XAct)HZJ7Z#XD0Ofo7Hsf7sC zdzUap);3jpkdYsj=}fHj%2_w%1N{KY*go`jXLVU}Bz+MpOIjIcU#RQLje5*>1R#%! z@UL1*cjvBS%}k7Y*(KxLt}oD*P!EjwZS~H`CAXGC+hgc6En?&`Z3yzJ z%c3=2sjv)+c|o-7ftW;f^C!aa5R2ffscAEJgO4ZK&f3$U!lw7GVzm%%T@U_~@1$j@ z5*%GtSwx~uW&<=c1J_HIbBQIcPMsT$iO@67yZ5ioAh+dT8t;$&wTKhkC@dQIhvX!L zIIgqBjpkj4Zsz5zC}H95>&ORB#^LTP6LrdsN7S)dlwzx`qM2``X@jtPjbjU|9AS{T zyXCJ5h1R4RXlz{(R%OqmA*E!`+&A(GG}O7OJ$B5Ebhobg%gKu<80fKLy2|I1 zKAxJGKHTVu zyvTYT#iZ>p!q-x?-Gbk0yKWU?@*>yhaEJUQSsGELK0PAQJ4@b%yt*{{(aVl2m1$p3 ztoT5t^duw@V<+sJu`|x%_4vsK3^pxd1t8US246iYpUW2>A8)zp7-W-msuBooRix zoG~kdQ9cN9w)N#iw>$DwbbB&o1LZ&e-X+BTk9*R$!D9c zLSm$>MwxO@)vG4Ij7Yj%=D=nZlT)7tewh#dkg_|~F@ zY|1gCd>1nUx&JhHFXd7WTK#k}T*yng_O)4liY@d7+g|wBnwWz`t4~XvnrZ@mg8eV! zHAvetny2j^03mSw`#a0Ej0k(%Y#8Ys?*;C9QPbDTT2WI$PI7vXF7kQYg7S8wd|{Ch zT2tm1`)U`03I|Yzx+d{i5&*%SS@HYl=T>M@o`|?DgO~NI7KUoR);KB(5;f zIUpaw;y3xzAP+l9$5ckdIV&$0UUBlRh4FOV+MqR(Z-WUd`6F?CoyF7ct7dm*S*oN2 z=oBnr4(oF|Ic?;sk4*=<9~QZ2Snuh3jQ`vi7ZwvMCCShhdS=RuW5dNhOkqlp7zNB$ z@vNwv@7qnRd5W`Jre;@t+6OeUU=tWb2#UMvh`>aeu* zFqGxWL`{(;5dUo)go+q+O~~PqAANpq*ed+7tqiUnS5jg@>C#g?DGRTjaWmb^TAdi5 zU-^c4XmjQqI(e{|-6mV-#vjl1ILG;*u_%?1)Kjs3*5 zqt1RuBpFD^1>ZqaC_*?l^-b1t<*-S1zVhU-Axc4AU*g9!B&6gL z?x-{@XDx=vQJW-FZ{JGNy1?`Gkh*+}ISo9^yaJQGBqVDDOS+NEXYx+-?|&wlTCUS) z_sfB&OZ6;bmS6O`M>m&l*;V*el%G7YE4edL%B3Z%+TwirZ@8{Hs!Zw=5T9_t(l zJQi}F4b$5qLrd+y3#^wq0>rx^XQPiqy{-U}Q3862C165Jd-9WK`MvYZ-O6Hv1kV}l zxhZnKQBk?gUeYAsrQ51;l{{K4d5GY8omg--LP|)A_0|78=B2NFSM}Is0vO&gyG39x`d1f1BZsUTM(J3lc{a`E%l7DGF$Y2^Z z)K6_z+bWRyf~~8~Lzf4;X{q_hY6C;G%{ZG_J@M6%WK5dO#-yt1w(&Fpg_?r#{g|qy z!-qOQQDk1o#<4TI9cOpLy5^^qp{;=vyfJTgMs9Dj&-=3ZPFr}ySwnA!EOhUz#e@_S zA`Ot%C2%L3lLC}o^t3C7-;Ce=@2Q}fjd%}EU9l`8g%7a4I5)t*8GTUlJ-!p9r>;QWaYarioQVWra*JRzk3lJ*{? 
z!LZXi@4a)AZwK7bd-(fvgCzR|*wwNBq5Eql&#*syCQy zW_SE@nY__dW*jps@E+iJk$X#b6}ROAjDJFRHg}py-X^RUU8O?E)lbi?8ukgqPWfZV zY)Y95dT$w1hx>m2YM0~KA!p26v6x$P@jic>NCO#dXTBKvk@b`j?Idd_JiU98g%`XI zXs`oq46ZpRHC&=}NjYw@Dd>*OEeeu^gym3@EqwM3lb*n=3A{#WyB945B=`B?j2@qr z`JYi+i#X`u#f2*Io%*DIaX{d)$dC#y?1n4%=bE60 z+n`_V?dl??is?sXJ{eEtsZ1wH_Wh!7N&;-?jtjt9+Klo=MH+6vAo0GH&Y0k~h`^MR zk`kyV+4{|^+U*b<-r4r|wa@X>WZLe8h!i9^=)rifKKsH-rO; zdbUx#y03-&3gP7Ez*BmX2UKcGk$h@=$dKV4DN}-Jm++QHlv-rc zB3r{Kpj0_j6!o(K@sT6289UIuCr8jz<;oEk&You@zKiN&=&(s{ezN1_Bl&tNY0QYh zv3L3v=4Q(6ArPDu(XGq|I%h?lj zXr&ciyw>BsjFu_Mt?bp9D$a$|rjh2iIZk~Ndo9sj_?Drxs8aDwK__Kf7yn0IP*43A zy1Fkm!4VIKyGk{Knw61%OmlP7DG%<*K}V$xEzIyoE+fdJrwa5g*oTB`CIQA+Grc&g z2QJQx-xdaO##iZatx!0N-R?ILp0YP?_((+lxDHBxF4mM#_cn(x^MZbcfFHwE8)?z+ zDwwX8fO@1bbo!FA0_KG(0P$=pEh`mqNVF6@55Ob19x~llP*t`6`KR)ml&FwO1uE*u zfct!Lzp`ztOvTI9a$i_p?ZnfSEVkweTjFXMIVH_T8#tdlo>cLvc)MR8tH-=Y4YhvS zO~mYavz2}-esW~dnaLLNcn$Z-8)lTO`Sq=O-P?-2ZB&sK=j!RrcTDiZ*H`_H-WN^K z;GcfC`MEi8`wbRSb84G(XPc@XdS6e95uyd5Rv-B{^W7nVBP+`6sJ|<0UX~sv1%h#v z{++%5l#`fQl#`9$I_e)5yzv&ni@X`+vP?PL6+In1(DW2H!h{!GH+>8HtCUns&tmW8 zC)1CL9XB{3M*P76-h@rNSwl;2+e%&6{=@!m;@~TFWd4fbw~Fh&0MziPl!NEDkjNkS zitFe5U#Diy9T!Lz-;6&E3oXhZQ=D1234YjhYSN?{Q@Z5;9Bg3AbD`2u`_q%j{YO?u z5J?H?Nf%uReuH}7;@~nSdK+Dy;Wm1VF6NUJ7U8IGDOhK^FNkV<{7c|nt@d+a8*8W9 z%g%k6t8ra9UzfdVxyjE`#*b4~#3^)HY^th;bUkxGI~p%00IA!Ix%v%9G06`D~aRw;shU~o#B>#`r@ zV#Hj~SI1FZre;up_?>iLLF2MaYDdEkZsro1LHdKYy7gW;_*gaM25|$2am72&c%L{^ z4fo05`7KYWIgU>;mw{4*`=l7R1VwP2dVxa25RR$;(p>{FKLa@AD{cx5B(`@)3;KlVEr>v2KQ z)2ZbXBm$thQ_o2DtF9}YdP_(|9@sFxqiy*4;LeW&@tiEee^|4&p6?zzF+Wz7LJ5t{ zO*(0vsk1|pVfc^;4lQX{?j~rP9+Lo)cw#tVT#?SI|rqmQY6nCi#`R&{DLc< z0O%{^ghyf2S`Q(OhwawCzDwPRC>GK|5*>vr{@*Q`bKqh^%Q{Gb+I`S$OL$aR z-cXp62Z2y@AB?ONTsBH3gK^k5Y%(pQd`W+a-alF(fG{=FGbAJi>W(T*=ZjeThhVg_ zf>hAKVy^+6%H2G*c^Kw2^DdSqSNPp?^WW4JaNn~5>QSaHRv(Ed(xTt3`-5oiG#4kg zIe{uc#4dIM4{2 zR2;Jik8-Bf30)Hswton{c{#A`m_n1-EeZ`saK&&x|4G-hN4*bKOP>iYec2z)Zh)y} zyG$uDGs$2ZEe%#dTOmD(4$}~9n>uq$T+S1LL26uLM;NZ?Eiv`nNQ=M#a9+Kdk6d`v zeYFUOJLo6(hlSJX4lGZS`ty}ZVHg!Y#p|<1mpHdQm3rjsg|X`9m%GB{-r659&>b5vP+*B7x-=u^JMH?b_vkK|zDe<3Z_zfVqYd6-iY-E%z@H1ZF%$JLA= z+Uh{!6D$AZC>>HNPL?G~|1e~DNh1bb-RXdOmXWW%V-aBe3Eu?!c=RVB4fIbLz}0j9 zECj#SgIBb=M8yR3hYr6leZqT^b!$%DM85WdoZtEb)S(TFZNt71_aD`~TcB$MO{Iln;}ss*a!Obw_%0x*9$YP+LO<1s($(<`S9WdJ>@P~_ z`&;ThJ(ccP9@-_FwReejCkd(@?NVncBm)_)uilYBE=tRPJXjP%q7_Wz49S^f(*62^ z>E_B!@W<@}I?F%UKGfE`eL)Cl$U9f&A<)i|fUFpXhpq2IZD()#Vn6G7+kcb7l!dGX zjVbhByLdTu)ca?ELmi{S7p~=6GVc?=*CRr}S>#3y=gbooULSQM396B=igUVAnW%jc zn;ZQz5vEWyOaBbYnFFKkom)zk6-ngduNFp4t$fR zUe&utV;p!*S;yb=Au?K%HFz}QFQ7Ewzs%yFJN^0WffyMv+KSjt;I@2CGcYw**^1Yg zbWjd17t?S@w!VAxTqKZ*d;VZ+n*j^}sk;by?ayq%NAE`b7cRnXY@fcCo0otuz@l`` zakuQ)60=}aaRR!hRzJk0UyzNv@^By+K3{d7Q&O}_CLfe++qPH)ZA~IaQ zPd2!% zwA+$)mne6pP2m`M*j?1J#gZCvq5XBPfg7?9O!4}MuOcrv=Kvx0V-Xn1bJUzN@TqYhnUC8fOZkOT^X zOsco}X;cF&bvyEL!5!78BC`^C-EsD+QdA!cDr#pQXOv86Luqgb1vjiqYlr3IlR_-` zrsIjCjwjD{tA5<1VlM9Dkj!mus88bnVeXpiyVu%eTN606X*&rfAe4j%JoOez)nU@t z?l57-M|}<_qZ+#&zoW)^nx-vrGtiHYeH;9_H~tc3JD|wp>9$wZiV_1$1vjDutrEQ0 z5|raK{Z3$>IVCY2e4FY>dahvav{i~IZ?c80a)75>Z4`lc%c4Gz_HT{k!Zf*3DfFGMfhdor+@ZAQI7 z?Dr&0Wl<{`-)wNS-q>V>wo_G^I?<(+$-!?`j17~Wl@c_erjB!@bKUOU@zsivn_~Sr z&a}g@5eN8ciZXVic;yEED*1zU_xd)DY2AYhkwf|}!wk)~vs*02Q)Oh4j)9NBzgo9V zMe9m3w0M3T%Y@JjcMn%T#JIk{Df^T;;?Pg$)kJC?9j~f%dyObLDD;5HNb@u^eh}zO zFzCuOc&ONowp3Rk9j82*gNncx@p)%XW7pGlT2xc|q*hJL`7P*a!-t6TSHA5&(urC( zlc?X*fVNJ(z^$4>E_qQ#^=71^sMKwATSHV#^G9{8p9rw8EvsMvvzr&o{Gnh*RlbkE zWA_#SRAsJ&aML?YiX8)S;`>RoZU9mQAieC)_RBu_w1vTo2BbRn`dKo?*IzqL8J%XW zwrt*6uA=Jk@DLD=DCAL)j5ZuJSGa0hq<0jQPEA`+7XR*JJM|(!+d9BoU0FHT3;T({ 
z$-q5-bhuLGOMxA}8>Dx2R^Ou=kApI(!WR@#7Pu zh7$!N?r(m#U>6}x?ot27SwY_SN&l{uy#*sKT_e@9`q0w@H=pdj>s=b^;p!xS(d$sSl9VzVg6D13~)4UI&uVF_|ED-Tf> z92t_8+#e>AltNW%_B0{MkC*&v^x^AtfkyaUX&s+I>?Tavs+8U;3#=N)r3)U3Z*ggP zc{#0$%c_-l==oG!(rL?+tfCCg!^}^lF5ak)3tF=9uQo`{Edhb4Ub)d_>XHf*nrQG z+|YJ@(_J*Rg6-w{o=&AnW`>RSox1)Bj&>l4Qvg*bYRGlXa;4gd#YkOiePmc-qQqXk z0_i#FO4SbL?MO=Pv!5Zyd4QaH+xC7*)6f%YcYDU0r=!pok{AQO01{oGbcRo?SN$3& z5=G?d!!F4G(;1)RRbcNSy~6(cJ8}x{U4e)m+YW^0{io~}U)HP6YgU>&q(U8JM)fb! z1K;j@ruim5?O+cc*iqPv8W~c*e()Y4u2_`B|9VGUN{NX>E68@YZyO(90%H)Xj$0 z#((YQHphW;i&(@?jW{ulC+WTkU$twy#a1 zB5z8};n>K6Rl0#2(s(N1^-|HAGoQzBv_?%h6uF}4EJq)DY5$eCHrdZ$>07gVkS;El zIM#-q*Q{FR)^4#+!z~UjkEKXf`EGWa<49rltQ0!Ep7h5jE4|Y+ArS!^zTFX1O2%<9 zIb#|>##8y>ygXdaOv7#>*!~}NgN3m@UpO<6WIEgXSSs6dHgVH+ZtH6ywZkVD25Qm{ zGtfgRZ4_>hM?BjIg0(>i#Iht&URT-HdN91(Z}1iw_BBvYL`M3oA*KBl_IV zS<)EK8TCx%Kl+IJn}lV%8L1)ItoIKrBdt-If6nT=99%%gEdya?8Nv$k1AGy-%QwrO zIqSxsT^%OAMzb(-U-$i{co7lNZ|ztn&|f6cyeZGM`}%bOY{kRS{2%Jx z@~`SA+yWJ(yHUD3rMpX78lOd7C%4B zxBRy;;_aXLX3th#Yr#(M`)NB=!{sd+C*@o@bTh?0%7^TQBl_b+TVOTk+hfP6qKXKHQC~veiWg2iJVZKWeGUyt7{_na zS3eo-Lmk+!P(XH_#nBms3;9~rQf0rL_1?fR69*c%;#Zf3zm5ES6k!C`OX3gcKYJT) ze%Ya)6H}aJbhx=<&cs26h8|L~ZFE~bJo(cYmicQu?{ut|f@4K6BwnN>p1!nwm)~4y zBu_>4(HGVpk({1O{{1_txWvb84ea%+Hwj%DA2V_fT9dJ22LwddOSLT!Z5Kj71@Q`; zZ}Ok0W94&H>z<_@oRfQCB4xzsZM-ZBZE$$&DdFTCR{V%7&VFxFrukT!79LPVU~%Kf z+|_s)8rqqfpGxUup z{Pew!7=0DN=>F6=GD^5qzj2I5Jty@C>jqT*W$zZ!=f?EFL|J9| zmvm|}W6R7;!cD(j^qq7os-wy6K)bsjznF{HrUaqp!aTipy6qNtz zjQLIFl>~1>v0E*oQ5VTT9%enaTPsQW*PXSGHA~3n_PzoBEY$w%m*A%J3*F$<%(wRx zu3SEYVd!02xv01>eN_0fK6VUaN)e7tU9`mx`y_ZN8jqz7MQmLCV@E>v(!{1-PpPhh zFwc8Rkw?1K;+0h^4Oo`tqX_kyDe?ZtW8o{Fm4Aqm@U0wo`yaHdj}O~>)!q{5mLa_> z5M(|@Mzh02vOE1!S6)s(x3(8uxTmwcIO!V3WP6?X=TdWXk~&-qZq=#}*^_wY2U;0A z>$8H0`&)=&+QSmIo zeRp}Nnkh;4<%Mb568=~=F0NnR28OD?yxb`_J9STahJbZE@!ChNSE|#?kFZ*K_+Q2A zjv4NC6Nq;{l$@M*PiY}pw}$o~|Fmrni{%-ZnN>+A*|o5#i2Y+uqa?;&N`1jR6S98_ zg?8*T<+>^W*I(#od8>u3Ir?uCrDhy0a< zCPLXhOuV?mdp9YWSsVFuKt)1Mm&ukc7>Vd*ethI_VGUe!Di}Ad7j|hnkELJA*q9fG z7ZG?+o2I`mBsH5;8K{`AxGBp^=*z3=>px`G2G%y>zGB+=xhPUp-a<9nE4lyUXLcd9 z*q4*T^Q}T6MHEOpgU#dOL?ZqN26wFkqWW8xk8L&IT8jjglf1P0s0u#{#Sb&&Dqp+f zZ{fVkdiQ?roqjN5f1*lMke{pWk57U19)5RM&GaJK<@K17uio6Dbm~@e8LW0NIp5X~ z&fL{PPKIkf*HkV3k)JgER(bjdDV>g&`{k^>j6_&K1y%?85CfeUsdsUi&0)&B)ju;M zVa9-~5^OeI@9y0$s_P_Xq^0R=Pt1qD{=7J&s~4Y14k(F>eCr(=$IYk6GIEqVZTaHhg{Q{q~Y<71S(NBEb z#Q2W52=#yB-kgZ!J3pjW&9=TBIu3y3v@kCTTI#$Ud^@|~D@r=yAbI6H9QQt6 z*?Vx9rvAlItfQU-Nxi5@BWt`FGwFKmqllvh&hEx7wClDn>(oa@095-Hwpd9i<*sf-K_C)BW4UepK&}a`Q6>tT4?R*%>3BsK0`*4qV~M9 z)*Bbo!-073w5*xB>6^PDtsr-JWsThiR$q3K!s+9{CC9b z5x3srwIS|ejoZVej||{0bbI`2<&FE}rv(BG^eLn3^zE`ix0r-dn-c&1%hP}ugucE1 z$VaZV##MKUy&tYuSMHSGdlnhv8_WmWL&N~b%BH*y@Iy)W(UfiO_G`ZC@4hcnbC&PW zQhvb<`;7Q6M?N{uEpI3y<6>c9Z?74X*&)Y3kbJt<)l1BNMTq-ajEsSXybk3`kZ^Mj zp^xr=4`hPVR4|*Y|5;+#^VzEZ&pw&*&eW8bBU@KiR0Ka^&~DtE@2g2kNx8UioVxI6 zXlSqzbGX>pFyc(k&&%E~E85vHk1VgQD&0rz{+X+0bJ5o)E{Y2cg-gA8CR!Qu;1>`u zHZQ^9;GlxzkcfZ970hSsJ2Ij|qhM=o-J?)lS{fkE&)?yt=Afsim$N@JH`hB-QwV;y z?qIz0`}c3*DH|&*a^EAk#TIupi8B?1dqi`7oFxGvaW#oIscGcKsjuT{efwWBqjU7? 
zc8#DYLUEX;6YK<}dGT;@o+(cT;Y!{|eHx#xVPNOLhCo+9HZ25$&mlZqu2iY7r>29m z$1T))U1hO2+Q+g*Ev@M~$IfpLO4>8h$5rnZuyad|`OjfST*Z~U>Pg7kecjZ>mG@Sr zgJTvgl>MFAI3(1%TuV}1`R_@cgW&oK;pL$?p3IfWT-6nn{bN;3k7M4-qDN37}^ zX zFaxy8b`~3KQmL=b&jmft3hU}PXwaRUfGTIKDZkG9_sjcJ#TOSB?DCI0KQpjtm4KRV z9E+!u6Z;4gBV*+LECd2kG4=Kqw2B_LwiwG!N=)o41oB?4zo+S0Sy|r#p&hPokP8MT zw4#KNPoE^Bi;9X;&keqLK<$gUBow72B)p`iqo+5v1_#VPolj0q%1#AkL+uNBUt_2h zL`S0*;$vX=m#V6(p95Kf)Y+f~GNEg1g&@(daU|+u_ffQ5T)5WLw|k zr)&XtG(7AC5>D7c_@CnGB==AEH-7JKZ6BE-?TLA({R^klazC|jc6@}CcvF&$Thn(L(Jh1PF zhlh(P`}^RXa%6XPiOP;GE)vvrkcjxAD584FO@apzg^V+^|B;uHf}I>58R+gS!8D8>52$`fR{mRG5;J zqnu814l~aQzCW9imq%WRkBu#nPBQXxUkqGxcXJbZzoo6sbTUXEPvMi0FnFr4Fa&MG z(T-8`kZYn5(OlS`g*qB=oXD%u`F|M0V%|T5Xw#XJD8d~T% z6B84dMuLO(f3E`CLX0PaI-vHuo1RBUj4$<^iJ6&PXfFV~SGB+W3nN|hWN7e>Xpm~^ zj6-sH0V?MDkO^5bDnM#tLt@givZ5DgyNaM}pb|(b=;t$ym6aA%^F#+xXIYCbj2vFv z3C(jDxy9EE2buwFYDb>rc=M*`KpRk*k+lZ~LjN1ilx}k%H~VaV-QcmjN63nD?R{bNb$2WWJz93rXCoJut7nwBF4FtV@{Ud@>Uz4o;u#uNt+`^YyM!)o0#1CWCLUrp9Yx{ees;9nlz>P*h7#49Q#nN}MS& zF#93r`T6-^u9M*Ri3L3{60*z67z*JDQHrFYpaPN@wM=x;&4b_qM7lW@J5hp!V>tMuEz zjWPUp#35rjB4GB*2`786-)d^k0+uD?u}QCL_&>vAW3lzeVp71@eR|7I2|8OUg#JSK0TD}~C~XO4?XM=V&jwVZn|C3K!M~4?+2uPE_z-bz!n$igG$68 zHGilS(I3&V}_s z4&=K)g9m}85#lwHAZWJ(ruyu6_7KOHHsS!x0gi|`lK%I30F>O1mb>do z?(;*>^9u@|FI5A%u--!iuAxuPW3|HvIRoweIQ66IhWomi9>$|Sa8SQ!mD3Duw~KF3<`;H83O+U zbkfT>l6rFUrf+7$cyvU!=%*Yc*VJ(P0fFE$T=QNju+A^30aBa`&C7QwVcjAY(N-9C zk_>@`(~hQ#2qCkAj33lU0Hm=Ja@3SXpT@3R12|c~`U#!%8ENlCAx6>QJ=S4~xyZ?( zrocpLBZKG62svk4MTZrK089gL(hhZSB|68pf4EQwuL%W4SHFh3CsslMLiS-?mE9sF zm$qvt1lWS}#yormAJI2i0fvLtynTov{_Nyr;5nerV9($?sLjnI6MDxL{5(`{5q#F? zpP&p>Lf5ow{!;7PzVMyxg?j1sxljn*-^|U;jYdD@1rr}md}d}8JQZ_K6Knv)9RHjv9fc?+#pZH6* z4VbD(6l~*DcCH>ggneVTj&bZV#l$KSfk|7ed$3o z_@65f>7GV7gW5Naafy2|9mb~$)rGE^`uei?pJVFy#3z6tswN7SUEzNB6FE;*98^sU zFy<|i1d@Z(JleP;Bq(j4IFg+%^sZP&S@f8SXFp(!rvD<37Vxbl6v69=LB_G!p1s`p z+1VKzR}Twv5pcBNTLF0{)-YS2&PAdAZ%u;plki|!=={6+v-O&XN)Q)94LE~XR3C37 z_EZWWxq-AaoVgCLDl(yw0GQ|HE&|6k46Oz}vLHjC)d8wZkpb5lM{=*!e-5)(^1t_o zT)}8t|G&I&A^a#37%iP~V6yYT1Ix?F1)L8J4YB%VyoJq31?9{`+2R zu;~Q1h+r*&o6*68U+~MKKd-HmPvfXBz|`iD3u#mQLoEW}bQj?#gDT+?NNjc3IO>`I zg->|E@1_S9g&wAG3ejOW2a%Gep68;2IEfVeBeNz7e0q+Wrobl_RhscHr;IIABKQ!+-4TBC4je^L|#PM2* zJ~NQ~`^&3~|DnkE_wReN|9^jyM9&fSVPIf*tE?Og;%h|2)3Y;YCnqOI$C`$Q20#?j z2+p2HsVc+{}5D7Heue5^T2E-;6 zfOPia_Y4RTgQ-B!&_6cT>2|mXq6Y*ZdI5Y?BZy6~u&}BP+9xL_pw^;eVnR*f;-z3V zt6~0^Yemf;aeI4vVPWCN@bFse$3N4hY69*@Fi${ll7N_)7@6=cY5~ZyR%(BRJOGN? 
zCL}~;B+>n$`rm0J(#a2N3g8%sw!JsoDAU%fr*t%G!Fh#bviI9DBVW*9YjSX7M^vpfWQf zGv1x+xZGT8RqJ+|0p&KrMkK(8_RJzm?1d&eDh?lWyzGupRI%D@cQe!wY`Z|Kke`=l zW^NuD0=u@-;d2LaJ+IoIqoa?v7mQyaFwpEn|4Ys3N8m{57z2{Gva&Le+w}MMvl@1w zoc>@asmjgm>h9k7kw6RFB1r7m2Z@-8g@6>MJnZbjyvxb-_dkI1=Pl!Y$t!J}jJ8*c zo*6B0OCBQu*8#y|6DbsSWOhl@9goeif!h(1l29 z3YjlaY}*kQI(#io_8QJE?#7hE4b~c-2HylUB(!vzwfQ|gg4~gifIwPCrgw-Sb%FvT z3={f4`jvoJI9*g-_rQR>eS8iN52K@^fFPq`hxZNOrLD;XT*3O^JWdZ5>cEs?8B0;#mn@Rl` z1j2Spjf#LD8?fb4QkvGxLIcWdlmE(p2z_09PGS}o79T%;j1y!N5fkg?vYGV{2r%w4 z0Ld}pGD2|B=d=)0z~Wyjh=EPxR9oZ3#`b1na-vu3#RxDwxW8jr0`4Hv296L=BX@v> z4W3#|1+a0Mn3&{N8467x2uQL`s}(CT>$fHXcuMeEEF$(|kr#}gd-L@&0zr{gna;k) zaw=Eyy>XDAcn2_nTQB%<+G_U$KHfANBmgT+1eRZcY3nvAP0XOK?wJmb4SmH$`RnE5szk`FGFN|cU<5K|J+C5Ht z1_p{-T3%;KN|H$fxs-q0!TC@Aq=`vMo*o{SmX?AZC+lF~*8>6V?AFc>9BgLak9Lo= z7khKr60|WnZQdKVD;eltj^uubB&+)5!41@O(;(Q$8#*$HSI0@ML%9zbJm zO6TOiN!0vL zF!E;8QLaoYqMez6SnZ~@fC27qi-*kmE9}S5d>Z6dq|20(%tF1n7@o{(7z2OYbBOU< zP_|jIwR`*7KZk@Q#m1svx`IrU3plv&@O>Y7B5xE##1=q!2AmGiahkPVbUP0DmS!`a zBOo(Bh9JwpS##lg{niTLwkt{$09+xbO(0Dx!pwXa_$2@ZzhApvgqp}D40?=Si8`?C*~fp6K-^GUQJDO=9?kwRv2GF^G|Lvn;Z{Gwfxk#SJdK3C%3xX`uG^Yz*3j?3a5j2hY4p_~=2R-h^$ibKd zV-pco6ciLZIyj#FlAG6{;35L&Z!yD!>FMbw=H@^F=d4IE!-&w>#CY4VZX;@**~*Mq zZCA82I8X%g@_i0wx`Fv2ZeF1U`RR2MAiqSu(Hx)PaCCA)L_!kwx*QoA`VG7vkY6%4 z+v0V4jHpWTlAImrGZs>t7a)3x4F+4-7>l;=tBc8Tlx>ij{HH5db3bSp)tT{~)Il>% zH?%lsIk_JoknxRaktUNyIProLv|6mUs+qdRC!?eU@?#@kM|k7G>bfwGazYv+SLTTC{9y#4GhRg$hSE^>;Uj! z66!JW+fwzDz&6rn#o(X10t)s04Wir5kPz5ZYFo6(6vX#!cO!${JPb_)lrWSWwm$hF2r&O(^7-93 z!PnwVQPc}NK?mWuvoKjwef-aUI7(vS@XBHS#LwZdyLiDn3`$baiVp;NpxFn(vZA7; z72xM*pa)daPFw5UL48^|6O*;IP9y1UqDF@_-9Dd`-#0@ZR{zRWCw)nB{l%rY`g#G7 z#kaekHJu^3qbN8c%DExSe?(UP8#ZzhHFOs7<1GB=X~d{As-hD@Slwj=n+xij+N+5M zXDyD#1PvZviaVwIiQ&I<_N4qDeKW=%am(kFH`2r;@?Xp`y-APPY0!+-3`TC9~)w z=}OBei?|#GMlXdZ88p+J{%T0fZ;S|N$ZL;nH>_}Yzq+1%8o^wGuEcy1Cfjrx16&BC z-0pV$%Vx+QcQ;bO0FLig93@tk-6|j|WTc|%>*?v~8mxbs*TMLy3N$HZS#L*Obb!Fe zb^e4;k{f4eatjCTm3sF6)P$K?-{3O^>N5f6Jsw~~-q_dxfR@6dzxd6QqZyx>_~BcK zeB-e&nX|^(1|w)~7`xwmIw&8zjRMN^arNk6iUE_pLxaL`N1O$=EUI%VpfmaVXH@?)kV{x-ut@>X6CJH+WMpJ+PII6- zL2ceA`@Ea^kC8JDrLR8FB<+Pt&B=Kh&y(2P+ys&I>gp;G6>zkr zvFAhh*9d9!Tt>J>mNj;DJqIHKBGd1ZpW{8k8BIs*}dTRswTC zTsH=^H9%Hv&|p0S@EZv51_lPEXJkxloA-@_Zp{B4Ldw`U4}=<{SpsQkX)Q$xOFk#p zfRJ^^p1fc?fc1&z9vjKEav2Q#B1QAqDC^F#$0|~U2o88EtqOyuoh-32l`>#H{(a)| zu`1L;!@%=_mELMU9D|s^yRg8=ZNKaRP#9>EY;Kxd`$#S4V}yyIgTx33rUYDzaB^~L z>*{6;dSbQy?ECQ^BxN0{9Ii-ji4M`(k2pX!;W%c&B3g{1dd~#Hw~bFQ_e;BEPS^Sy zggF*F1m|xxj(>%eBi|%XO-_K4MK-UKxSSk44GmnoGAuigf+BkL9E6Zg&ddOl0J&g? 
(base85-encoded GIT binary patch data omitted)

From 5180d1f987f11a27471a86a0ee061b333f9d7fba Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 27 Jun 2023 07:51:41 +0100
Subject: [PATCH 515/628] Change end2end tests to use board type as opposed to
 kind to allow for more boards to be parameterized

Signed-off-by: Fionn O'Donohoe
---
 src/finn/util/test.py                    | 22 +++----
 tests/end2end/test_end2end_bnn_pynq.py   | 84 +++++++++++-------------
 tests/end2end/test_end2end_cybsec_mlp.py |  6 +-
 tests/end2end/test_ext_weights.py        |  6 +-
 4 files changed, 54 insertions(+), 64 deletions(-)

diff --git a/src/finn/util/test.py b/src/finn/util/test.py
index 4250079ef3..3545e2be8e 100644
--- a/src/finn/util/test.py
+++ b/src/finn/util/test.py
@@ -106,26 +106,26 @@ def load_test_checkpoint_or_skip(filename):
         pytest.skip(filename + " not found from previous test step, skipping")


-def get_build_env(kind, target_clk_ns):
+def get_build_env(board, target_clk_ns):
     """Get board-related build environment for testing.
-    - kind = either zynq or alveo.
+ - board = any from pynq_part_map or alveo_part_map """ ret = {} - if kind == "zynq": - ret["board"] = os.getenv("PYNQ_BOARD", default="Pynq-Z1") - ret["part"] = pynq_part_map[ret["board"]] - ret["build_fxn"] = ZynqBuild(ret["board"], target_clk_ns) - elif kind == "alveo": - ret["board"] = os.getenv("ALVEO_BOARD", default="U250") - ret["part"] = alveo_part_map[ret["board"]] + if board in pynq_part_map: + ret["kind"] = "zynq" + ret["part"] = pynq_part_map[board] + ret["build_fxn"] = ZynqBuild(board, target_clk_ns) + elif board in alveo_part_map: + ret["kind"] = "alveo" + ret["part"] = alveo_part_map[board] ret["build_fxn"] = VitisBuild( ret["part"], target_clk_ns, - alveo_default_platform[ret["board"]], + alveo_default_platform[board], strategy=VitisOptStrategy.BUILD_SPEED, ) else: - raise Exception("Unknown test build environment spec") + raise Exception("Unknown board specified") return ret diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 27aaa1986d..5274d923c1 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -296,7 +296,7 @@ def topology2dataset(topology): @pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.end2end class TestEnd2End: - def test_export(self, topology, wbits, abits, QONNX_export): + def test_export(self, topology, wbits, abits, QONNX_export, board): if wbits > abits: pytest.skip("No wbits > abits end2end network configs for now") if topology == "lfc" and not (wbits == 1 and abits == 1): @@ -313,7 +313,7 @@ def test_export(self, topology, wbits, abits, QONNX_export): export_finn_onnx(model, torch.randn(ishape), chkpt_name) assert os.path.isfile(chkpt_name) - def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): + def test_import_and_tidy(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "export" ) @@ -329,7 +329,7 @@ def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): ) model.save(chkpt) - def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): + def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "import_and_tidy" ) @@ -366,7 +366,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): model.save(chkpt_name) assert os.path.isfile(chkpt_name) - def test_streamline(self, topology, wbits, abits, QONNX_export): + def test_streamline(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "pre_post" ) @@ -389,7 +389,7 @@ def test_streamline(self, topology, wbits, abits, QONNX_export): get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") ) - def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): + def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "streamline" ) @@ -455,7 +455,7 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): for (op_type, exp_count) in exp_layer_counts: assert len(model.get_nodes_by_op_type(op_type)) == exp_count - def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): + def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, 
abits, QONNX_export, "convert_to_hls_layers" ) @@ -474,7 +474,7 @@ def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): ) dataflow_model.save(dataflow_model_chkpt) - def test_fold(self, topology, wbits, abits, QONNX_export): + def test_fold(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "dataflow_model" ) @@ -483,7 +483,7 @@ def test_fold(self, topology, wbits, abits, QONNX_export): model = folding_fxn(model) model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold")) - def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): + def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "fold" ) @@ -497,7 +497,7 @@ def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): @pytest.mark.slow @pytest.mark.vivado - def test_cppsim(self, topology, wbits, abits, QONNX_export): + def test_cppsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "minimize_bit_width" ) @@ -520,49 +520,46 @@ def test_cppsim(self, topology, wbits, abits, QONNX_export): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_ipgen(self, topology, wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_ipgen(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "fold" ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) + model = model.transform(PrepareIP(build_data["part"], target_clk_ns)) model = model.transform(HLSSynthIP()) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + board) ) @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, kind): + def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipgen_" + kind + topology, wbits, abits, QONNX_export, "ipgen_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] + test_fpga_part = get_build_env(board, target_clk_ns)["part"] model = model.transform(InsertAndSetFIFODepths(test_fpga_part, target_clk_ns)) fifo_layers = model.get_nodes_by_op_type("StreamingFIFO") assert len(fifo_layers) > 0 model.save( get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) ) @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): + def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, 
QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] + test_fpga_part = get_build_env(board, target_clk_ns)["part"] model = model.transform(InsertDWC()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(AnnotateCycles()) @@ -582,7 +579,7 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): ) os.environ["RTLSIM_TRACE_DEPTH"] = "3" rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) model.save(rtlsim_chkpt) parent_chkpt = get_checkpoint_name( @@ -596,10 +593,9 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): + def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) n_nodes = len(model.graph.node) @@ -615,8 +611,7 @@ def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): + def test_validate_top1(self, topology, wbits, abits, QONNX_export, board): if "TEST_END2END_VALIDATE_TOP1" not in os.environ: pytest.skip("TEST_END2END_VALIDATE_TOP1 not set") prepostproc_chkpt = get_checkpoint_name( @@ -632,7 +627,7 @@ def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): topology, wbits, abits, QONNX_export, "cppsim" ) rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) dataset = topology2dataset(topology) assert measure_top1_accuracy(prepostproc_chkpt, dataset) > 80 @@ -643,34 +638,33 @@ def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_build(self, topology, wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_build(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - cfg = get_build_env(kind, target_clk_ns) - model = model.transform(cfg["build_fxn"]) + model = model.transform(build_data["build_fxn"]) model = model.transform(AnnotateResources("synth")) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + board) ) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_make_pynq_driver(self, topology, 
wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "build_" + kind + topology, wbits, abits, QONNX_export, "build_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - kind_to_driver_platform = {"zynq": "zynq-iodma", "alveo": "alveo"} - model = model.transform(MakePYNQDriver(kind_to_driver_platform[kind])) + board_to_driver_platform = "alveo" if build_data["kind"] == "alveo" else "zynq-iodma" + model = model.transform(MakePYNQDriver(board_to_driver_platform)) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + board) ) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 5e402bdeb4..ba1de29735 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -48,10 +48,9 @@ import finn.builder.build_dataflow_config as build_cfg from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.basic import make_build_dir -from finn.util.test import get_build_env, load_test_checkpoint_or_skip +from finn.util.test import load_test_checkpoint_or_skip target_clk_ns = 10 -build_kind = "zynq" build_dir = os.environ["FINN_BUILD_DIR"] @@ -183,14 +182,13 @@ def test_end2end_cybsec_mlp_export(QONNX_export): def test_end2end_cybsec_mlp_build(QONNX_export): model_file = get_checkpoint_name("export", QONNX_export) load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) output_dir = make_build_dir(f"test_end2end_cybsec_mlp_build_QONNX-{QONNX_export}") cfg = build.DataflowBuildConfig( output_dir=output_dir, target_fps=1000000, synth_clk_period_ns=target_clk_ns, - board=build_env["board"], + board="Pynq-Z1", shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index bef2e0ffa7..8bbfb4be9a 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -38,10 +38,9 @@ import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg from finn.util.basic import make_build_dir -from finn.util.test import get_build_env, load_test_checkpoint_or_skip +from finn.util.test import load_test_checkpoint_or_skip target_clk_ns = 10 -build_kind = "zynq" build_dir = os.environ["FINN_BUILD_DIR"] onnx_zip_url = "https://github.com/Xilinx/finn-examples" onnx_zip_url += "/releases/download/v0.0.1a/onnx-models-bnn-pynq.zip" @@ -83,7 +82,6 @@ def test_end2end_ext_weights_download(): def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) folding_config_file = pk.resource_filename( "finn.qnn-data", "test_ext_weights/tfc-w1a1-extw.json" ) @@ -93,7 +91,7 @@ def test_end2end_ext_weights_build(): verbose=True, folding_config_file=folding_config_file, synth_clk_period_ns=target_clk_ns, - board=build_env["board"], + board="Pynq-Z1", 
shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, From 56e43152931207189741034659b34e626da63705 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 07:56:08 +0100 Subject: [PATCH 516/628] Add test_deploy method for BNN end2end tests for Jenkins setup Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 52 +++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 5274d923c1..02ea7c24ff 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -38,6 +38,8 @@ import warnings from brevitas.export import export_finn_onnx, export_qonnx from dataset_loading import cifar, mnist +from distutils.dir_util import copy_tree +from shutil import copy from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -89,7 +91,7 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.util.basic import get_finn_root +from finn.util.basic import get_finn_root, make_build_dir from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -290,6 +292,42 @@ def topology2dataset(topology): raise Exception("Unrecognized topology") +def deploy_based_on_board(model, model_title, topology, wbits, abits, board): + if os.environ.get('FINN_DEPLOY_DIR') is not None: + deploy_dir_root = os.environ["FINN_DEPLOY_DIR"] + else: + deploy_dir_root = make_build_dir(prefix="hw_deployment_" + board + "_") + # Set it for the next round if multiple bitstreams are selected for generation + os.environ["FINN_DEPLOY_DIR"] = deploy_dir_root + + # create directory for deployment files + deployment_dir = deploy_dir_root + "/" + board + "/" + model_title + os.makedirs(deployment_dir) + model.set_metadata_prop("pynq_deployment_dir", deployment_dir) + + # get and copy necessary files + # .bit and .hwh file + bitfile = model.get_metadata_prop("bitfile") + hwh_file = model.get_metadata_prop("hw_handoff") + deploy_files = [bitfile, hwh_file] + + for dfile in deploy_files: + if dfile is not None: + copy(dfile, deployment_dir) + + # create input and output test files + (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( + topology, wbits, abits, return_topk=1 + ) + np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy) + np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy) + + # driver.py and python libraries + pynq_driver_dir = model.get_metadata_prop("pynq_driver_dir") + copy_tree(pynq_driver_dir, deployment_dir) + model.set_metadata_prop("pynq_deploy_dir", deployment_dir) + + @pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("abits", [1, 2]) @pytest.mark.parametrize("topology", ["lfc", "tfc", "cnv"]) @@ -668,3 +706,15 @@ def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, board): model.save( get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + board) ) + + def test_deploy(self, topology, wbits, abits, QONNX_export, board): + prev_chkpt_name = get_checkpoint_name( + topology, wbits, abits, QONNX_export, "driver_" + board + ) + model = load_test_checkpoint_or_skip(prev_chkpt_name) + model_title = "%s_w%d_a%d_%s_QE-%s" % ("bnn", wbits, abits, topology, QONNX_export) + deploy_based_on_board(model, model_title, topology, wbits, abits, board) + # save the model to be able to link 
it to the parent + model.save( + get_checkpoint_name(topology, wbits, abits, QONNX_export, "deploy_" + board) + ) From 5c03333b923194a1f3c4d2359b7f8701aa2f4410 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 07:59:58 +0100 Subject: [PATCH 517/628] Add parameterized tests for all supported boards. Split test matrix by board marker Signed-off-by: Fionn O'Donohoe --- src/finn/util/basic.py | 3 ++ tests/end2end/test_end2end_bnn_pynq.py | 69 ++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 3bc5b803db..abbf85d37d 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -31,6 +31,9 @@ import sys import tempfile +# supported boards +test_support_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] + # mapping from PYNQ board names to FPGA part names pynq_part_map = dict() pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e" diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 02ea7c24ff..30bbadb6fc 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -328,11 +328,70 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): model.set_metadata_prop("pynq_deploy_dir", deployment_dir) -@pytest.mark.parametrize("wbits", [1, 2]) -@pytest.mark.parametrize("abits", [1, 2]) -@pytest.mark.parametrize("topology", ["lfc", "tfc", "cnv"]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -@pytest.mark.end2end +# parameters that make up inputs to test case(s) +def get_full_parameterized_test_list(marker, wbits_list, abits_list, topology_list, QONNX_export_list, board_list): + test_cases = [ + (f'{marker}_w{param1}_a{param2}_{param3}_QE{param4}_{param5}', { + 'wbits': param1, + 'abits': param2, + 'topology': param3, + 'QONNX_export': param4, + 'board': param5 + }) + for param1, param2, param3, param4, param5 in itertools.product( + wbits_list, + abits_list, + topology_list, + QONNX_export_list, + board_list, + ) + ] + return test_cases + + +def pytest_generate_tests(metafunc): + idlist = [] + argvalues = [] + scenarios = [] + + # Full set of test parameters + wbits = [1, 2] + abits = [1, 2] + topology = ["lfc", "tfc", "cnv"] + QONNX_export = [False, True] + + # Separate the full list of markers used on command line. 
+ # This allows a user to select multiple markers + all_markers_used = metafunc.config.getoption("-m").split(" ") + + for marker in all_markers_used: + if "sanity_bnn" in marker: + # Define a set of sanity tests that target each of the supported boards with fixed parameters + scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[1], topology_list=["lfc"], QONNX_export_list=[False], board_list=[test_support_board_map[0]])) + scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[2], topology_list=["cnv"], QONNX_export_list=[True], board_list=[test_support_board_map[1]])) + scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["tfc"], QONNX_export_list=[False], board_list=[test_support_board_map[2]])) + scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["cnv"], QONNX_export_list=[True], board_list=[test_support_board_map[3]])) + + if "bnn_" in marker: + # Target the full set of parameters for a single board + # Extract the board name from the marker used, as it is in the form of 'bnn_' + bnn_board = next((element for element in test_support_board_map if marker.split("_")[1] in element.lower()), None) + test_cases = get_full_parameterized_test_list("bnn", wbits, abits, topology, QONNX_export, [bnn_board]) + scenarios.extend(test_cases) + + if len(scenarios) > 0: + for scenario in scenarios: + idlist.append(scenario[0]) + items = scenario[1].items() + argnames = [x[0] for x in items] + argvalues.append([x[1] for x in items]) + metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") + +@pytest.mark.sanity_bnn +@pytest.mark.bnn_pynq +@pytest.mark.bnn_zcu104 +@pytest.mark.bnn_kv260 +@pytest.mark.bnn_u250 class TestEnd2End: def test_export(self, topology, wbits, abits, QONNX_export, board): if wbits > abits: From 8c98882a1609f5c5cbd6aa853756806132ed545b Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 08:03:06 +0100 Subject: [PATCH 518/628] Add scripts used by Jenkins to test bnn end2end hardware tests Signed-off-by: Fionn O'Donohoe --- docker/jenkins/hack_driver_script.py | 53 ++++++++ docker/jenkins/test_bnn_hw_pytest.py | 177 +++++++++++++++++++++++++++ 2 files changed, 230 insertions(+) create mode 100755 docker/jenkins/hack_driver_script.py create mode 100755 docker/jenkins/test_bnn_hw_pytest.py diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py new file mode 100755 index 0000000000..cd3becf7cf --- /dev/null +++ b/docker/jenkins/hack_driver_script.py @@ -0,0 +1,53 @@ +import os + +def remove_cache_dirs(dir_list): + tmp_list = list(dir_list) + for i in range(len(tmp_list)-1, -1, -1): + if ".pytest_cache" in tmp_list[i]: + del tmp_list[i] + elif "__pycache__" in tmp_list[i]: + del tmp_list[i] + return tmp_list + +def hack_driver_script(board, test_dir): + test_script_file = "driver.py" + # Read the contents of the test script file + with open(test_script_file, "r") as f: + lines = f.readlines() + + # Specify the line to be replaced and the new line + line_to_replace = "ishape_normal" + if "cnv" in test_dir: + new_line = " \"ishape_normal\" : [(1, 32, 32, 3)]," + else: + # Usually a size of (1, 784) to being with + if board == "Pynq-Z1": + new_line = " \"ishape_normal\" : [(1, 28, 28, 1)]," + else: + new_line = " \"ishape_normal\" : [(1, 1, 28, 28)]," + + # Iterate over the lines and replace the specified line + for i in 
range(len(lines)): + if line_to_replace in lines[i]: + lines[i] = new_line + "\n" + break # Only replace the first occurrence + + # Write the modified contents back to the test script file + with open(test_script_file, "w") as f: + f.writelines(lines) + +if __name__ == "__main__": + current_dir = os.getcwd() + board = os.path.basename(current_dir) + + # Get list of local directories - removing the Python cache directories + local_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + local_dirs = remove_cache_dirs(local_dirs) + + # Now create the full paths for each relative path + local_dirs_full_path = [os.path.join(current_dir, name) for name in local_dirs if os.path.isdir(os.path.join(current_dir, name))] + + # Change the driver.py script for each of the test directories + for dir in local_dirs_full_path: + os.chdir(dir) + hack_driver_script(board, dir) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py new file mode 100755 index 0000000000..09e62fd1d9 --- /dev/null +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -0,0 +1,177 @@ +import os +import numpy as np +from scipy.stats import linregress +import subprocess +import pytest +import itertools +import logging + +# no __init__ constructors allowed in Pytest - so use global variables instead +base_dir_global = os.getcwd() +default_test_run_timeout = 30 # seconds +output_execute_results_file = "output.npy" +execute_results_reference_file = "output_reference.npy" +output_throughput_results_file = "nw_metrics.txt" +throughput_results_formatted_file = "throughput_metrics_formatted.txt" +logger = logging.getLogger(__name__) + + +def remove_cache_dirs(dir_list): + tmp_list = list(dir_list) + for i in range(len(tmp_list)-1, -1, -1): + if ".pytest_cache" in tmp_list[i]: + del tmp_list[i] + elif "__pycache__" in tmp_list[i]: + del tmp_list[i] + return tmp_list + +def remove_destructive_board_tests(board, test_list): + tmp_list = list(test_list) + if "Pynq" in board: + # both tests are destructive to the Pynq-Z1 board and require a board reboot + for i in range(len(tmp_list)-1, -1, -1): + if "bnn_w2_a2_cnv_QE-True" in tmp_list[i]: + del tmp_list[i] + elif "bnn_w1_a1_tfc_QE-True" in tmp_list[i]: + del tmp_list[i] + return tmp_list + +def delete_file(file_path): + # Check if the file exists before deleting it + if os.path.exists(file_path): + try: + os.remove(file_path) + logger.info(f"File '{file_path}' deleted successfully.") + except Exception as e: + logger.error(f"An error occurred while deleting the file: {e}") + else: + logger.info(f"File '{file_path}' does not exist. Continuing with the script.") + +def get_platform(board_str): + return "alveo" if "U250" in board_str else "zynq-iodma" + +def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, platform_list): + test_cases = [ + (f'{marker}_{param1}_batchSize-{param2}_platform-{param3}', { + 'test_dir': param1, + 'batch_size': param2, + 'platform': param3, + }) + for param1, param2, param3 in itertools.product( + test_dir_list, + batch_size_list, + platform_list, + ) + ] + return test_cases + +def pytest_generate_tests(metafunc): + idlist = [] + argvalues = [] + scenarios = [] + + # Separate the full list of markers used on command line. 
+ # This allows a user to select multiple markers + all_markers_used = metafunc.config.getoption("-m").split(" ") + current_dir = os.getcwd() + test_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + test_dirs = remove_cache_dirs(test_dirs) + + for marker in all_markers_used: + platform = get_platform(marker) + if "Pynq" in marker: + remove_destructive_board_tests("Pynq", test_dirs) + scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + elif "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: + scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + + if len(scenarios) > 0: + for scenario in scenarios: + idlist.append(scenario[0]) + items = scenario[1].items() + argnames = [x[0] for x in items] + argvalues.append([x[1] for x in items]) + metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") + + +@pytest.mark.Pynq +@pytest.mark.U250 +@pytest.mark.ZCU104 +@pytest.mark.KV260_SOM +class TestBnn: + def test_type_execute(self, test_dir, batch_size, platform): + # Enter into test directory and clean any files from a potential previous run + os.chdir(os.path.join(base_dir_global, test_dir)) + delete_file(output_execute_results_file) + + # Run test option: execute + result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + assert result.returncode == 0 + + # Load the output and reference arrays + output_array = np.load(output_execute_results_file) + reference_array = np.load(execute_results_reference_file) + + # Compare the arrays + try: + assert np.isclose(output_array, reference_array).all() + except AssertionError as e: + logger.error("AssertionError occurred: %s", e, exc_info=True) + raise + + def test_type_throughput(self, test_dir, batch_size, platform): + os.chdir(os.path.join(base_dir_global, test_dir)) + delete_file(output_throughput_results_file) + + result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + assert result.returncode == 0 + + # Check if nw_metrics.txt now exists after test run + assert os.path.exists(output_throughput_results_file) + + with open(output_throughput_results_file, "r") as file: + res = eval(file.read()) + + # try a range of batch sizes, some may fail due to insufficient DMA + # buffers + bsize_range_in = [8**i for i in range(5)] + bsize_range = [] + ret = dict() + for bsize in bsize_range_in: + if res is not None: + ret[bsize] = res + bsize_range.append(bsize) + else: + # assume we reached largest possible N + break + + y = [ret[key]["runtime[ms]"] for key in bsize_range] + lrret = linregress(bsize_range, y) + ret_str = "" + ret_str += "\n" + "%s Throughput Test Results" % test_dir + ret_str += "\n" + "-----------------------------" + ret_str += "\n" + "From linear regression:" + ret_str += "\n" + "Invocation overhead: %f ms" % lrret.intercept + ret_str += "\n" + "Time per sample: %f ms" % lrret.slope + ret_str += "\n" + "Raw data:" + + ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} 
{:<16}".format( + "N", "runtime[ms]", "fclk[mhz]", "fps", "DRAM rd[MB/s]", "DRAM wr[MB/s]" + ) + for k in bsize_range: + v = ret[k] + ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} {:<16}".format( + k, + np.round(v["runtime[ms]"], 4), + v["fclk[mhz]"], + np.round(v["throughput[images/s]"], 2), + np.round(v["DRAM_in_bandwidth[MB/s]"], 2), + np.round(v["DRAM_out_bandwidth[MB/s]"], 2), + ) + ret_str += "\n" + "-----------------------------" + largest_bsize = bsize_range[-1] + + # Dump the metrics to a text file + with open(throughput_results_formatted_file, "w") as f: + f.write(ret_str) + assert os.path.exists(throughput_results_formatted_file) \ No newline at end of file From b3166e4548253afa9b780d6643998e983a213b10 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 09:37:17 +0100 Subject: [PATCH 519/628] Add U250 xclbin for end2end bnn testing Signed-off-by: Fionn O'Donohoe --- docker/jenkins/test_bnn_hw_pytest.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index 09e62fd1d9..f2b437e800 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -105,7 +105,8 @@ def test_type_execute(self, test_dir, batch_size, platform): delete_file(output_execute_results_file) # Run test option: execute - result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" + result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) assert result.returncode == 0 # Load the output and reference arrays @@ -123,7 +124,9 @@ def test_type_throughput(self, test_dir, batch_size, platform): os.chdir(os.path.join(base_dir_global, test_dir)) delete_file(output_throughput_results_file) - result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + # Run test option: throughput + bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" + result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) assert result.returncode == 0 # Check if nw_metrics.txt now exists after test run From 74918647cfe44e66e917c8de4874008d2bedda42 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 27 Jun 2023 10:51:09 +0100 Subject: [PATCH 520/628] [notebooks/docs] Update second half of folding nb and update internals doc --- docs/finn/internals.rst | 38 + notebooks/advanced/3_folding.ipynb | 2133 ++++++---------------------- 2 files changed, 482 insertions(+), 1689 deletions(-) diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index d0c4cd2065..9c1ff626b2 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -206,6 +206,44 @@ How to set *mem_mode* 
--------------------- When the nodes in the network are converted to HLS layers, the *mem_mode* can be passed. More detailed information about the transformations that prepare the network and the transformation that performs the conversion to HLS layers can be found in chapter :ref:`nw_prep`. The *mem_mode* is passed as argument. Note that if no argument is passed, the default is *const*. + +.. _folding_factors: + +Constraints to folding factors per layer +========================================= ++------------------------------------+------------+----------------------------------------------------------------+ +| Layers | Attributes | Assertions | ++====================================+============+================================================================+ +| addstreams_batch | PE | inp_channels % PE == 0 | +| channelwise_op_batch | PE | channels % PE == 0 | +| checksum | - | - | +| concat | - | - | +| convolutioninputgenerator | SIMD | inp_channels % SIMD == 0 | +| convolutioninputgenerator1d | SIMD | inp_channels % SIMD == 0 | +| convolutioninputgenerator_rtl | SIMD | inp_channels % SIMD == 0 | +| downsampler | SIMD | inp_channels % SIMD == 0 | +| duplicatestreams_batch | PE | channels % PE == 0 | +| eltwise | PE | inp_channels % PE == 0 | +| fmpadding_batch | SIMD | inp_channels % SIMD == 0 | +| fmpadding_rtl | SIMD | inp_channels % SIMD == 0 | +| globalaccpool_batch | PE | channels % PE == 0 | +| iodma | - | - | +| labelselect_batch | PE | num_labels % PE == 0 | +| lookup | - | - | +| matrixvectoractivation | PE & SIMD | matrix_height % PE == 0 & matrix_width % SIMD == 0 | +| pool_batch | PE | inp_channels % PE == 0 | +| streamingdataflowpartition | - | - | +| streamingdatawidthconverter_batch | - | - | +| streamingfifo | - | - | +| streamingmaxpool_batch | - | - | +| templates | - | - | +| thresholding_batch | PE | matrix_height % PE == 0 | +| tlastmarker | - | - | +| upsampler | - | - | +| vectorvectoractivation | PE & SIMD | kernel_height * kernel_width % SIMD == 0 & channels % PE == 0 | ++------------------------------------+------------+----------------------------------------------------------------+ + + RTL ConvolutionInputGenerator ============================= diff --git a/notebooks/advanced/3_folding.ipynb b/notebooks/advanced/3_folding.ipynb index b1baf69cab..a411d3bc88 100644 --- a/notebooks/advanced/3_folding.ipynb +++ b/notebooks/advanced/3_folding.ipynb @@ -8,7 +8,9 @@ "--------------------------------------\n", "**Note: To run this notebook, you first need to run the build flow in the 3rd cybersecurity notebook as we utilize one of the intermediate models generated in that process in this notebook.** \n", "\n", - "This notebook describes the use of FINN parallelization parameters (PE & SIMD) to efficiently streamline models so as to extract the maximum performance out of them.\n", + "This notebook describes the use of FINN parallelization parameters (PE & SIMD) to efficiently streamline models so as to extract the maximum performance out of them. \n", + "\n", + "Please be aware that the folding factors can not be selected arbitrarily, each layer has constraints on which values the parallelization parameters can be set to, for more information see here: https://finn-dev.readthedocs.io/en/latest/internals.html#folding-factors\n", "\n", "We'll use the utility function `showInNetron()` to visualize and interact with our network in the Jupyter Notebook and `showSrc()` to show source code of FINN library calls." 
] @@ -84,7 +86,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Serving 'step_convert_to_hls.onnx' at http://0.0.0.0:5920\n" + "Serving 'cybsec_PE_SIMD.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -102,7 +104,7 @@ " " ], "text/plain": [ - "" + "" ] }, "execution_count": 2, @@ -113,9 +115,9 @@ "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", "model = ModelWrapper(\"../end2end_example/cybersecurity/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")\n", - "model.save(\"step_convert_to_hls.onnx\")\n", + "model.save(\"cybsec_PE_SIMD.onnx\")\n", "\n", - "showInNetron(\"step_convert_to_hls.onnx\")" + "showInNetron(\"cybsec_PE_SIMD.onnx\")" ] }, { @@ -168,7 +170,7 @@ "output_type": "stream", "text": [ "Stopping http://0.0.0.0:5920\n", - "Serving 'step_convert_to_hls.onnx' at http://0.0.0.0:5920\n" + "Serving 'cybsec_PE_SIMD.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -186,7 +188,7 @@ " " ], "text/plain": [ - "" + "" ] }, "execution_count": 3, @@ -195,7 +197,7 @@ } ], "source": [ - "showInNetron(\"step_convert_to_hls.onnx\")" + "showInNetron(\"cybsec_PE_SIMD.onnx\")" ] }, { @@ -250,7 +252,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -289,7 +291,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -321,7 +323,7 @@ " 'DSP': 0}}" ] }, - "execution_count": 8, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -347,7 +349,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -385,73 +387,75 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# QuickNote : StreamingDataWidthConverter Layer" + "## Modify Parameters\n", + "\n", + "We now modify the parallelization parameters of the first network layer to reduce its overall latency.\n", + "We individually extract the `MatrixVectorActivation` blocks from the `.onnx` file and set the config values manually (although this can be done automatically by the FINN compiler as mentioned in the introduction).\n", + "\n", + "In the first step, we left the `PE` & `SIMD` values for all the layers on default (=1) to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", + "\n", + "To set `PE` & `SIMD`, we will utilize functionality from the FINN compiler. Each layer type has a Python wrapper which can be instantiated using the `getCustomOp()` function. The wrapper offers several helper functions like `get_nodeattr()` and `set_nodeattr()` to access and set the attributes of a node." 
] }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Instream Width = 1 Outstream Width = 2\n", - "Instream Width = 2 Outstream Width = 2\n", - "Instream Width = 2 Outstream Width = 2\n", - "Instream Width = 2 Outstream Width = 1\n" + "The parallelization parameters of MatrixVectorActivation_0 were: \n", + "PE: 1\n", + "SIMD: 1\n", + "The parallelization parameters of MatrixVectorActivation_0 are updated to: \n", + "PE: 2\n", + "SIMD: 5\n" ] } ], "source": [ - "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", - "for fcl in fc_layers:\n", - " fcl_inst = getCustomOp(fcl)\n", - " print('Instream Width =',(fcl_inst.get_instream_width()),'Outstream Width =',int(fcl_inst.get_outstream_width()))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also view the `instream_width` and `outstream_width` of each layer using the `get_instream_width()` and `get_outstream_width()` helper functions. These widths are of particular importance as for a (balanced pipeline?) these width's should be the same.\n", + "from qonnx.custom_op.registry import getCustomOp\n", + "\n", + "list_of_mvaus = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "mvau0 = list_of_mvaus[0]\n", + "\n", + "mvau0_inst = getCustomOp(mvau0)\n", "\n", - "For example, the outwidth of a given layer of the network should match the inwidth of the next layer for the (pipeline to be stable?). If they are not the same then the FINN compiler adds an extra `streamingdatawidthconverter` (which increases the overall resource utilization of the design slightly) layer to make sure these widths match.\n", + "# Get the node attributes to check the current setting\n", + "print(\"The parallelization parameters of %s were: \" % mvau0.name)\n", + "print(\"PE: \" + str(mvau0_inst.get_nodeattr(\"PE\")))\n", + "print(\"SIMD: \" + str(mvau0_inst.get_nodeattr(\"SIMD\")))\n", "\n", - "Note, that if these widths are the same then even if we call the `InsertDWC()` transformation on our model (responsible for adding the above layer), the datawidth conversion layers will not be a part of our model as shown in the below cells. " + "# Set the new node attributes\n", + "mvau0_inst.set_nodeattr(\"PE\", 2)\n", + "mvau0_inst.set_nodeattr(\"SIMD\", 5)\n", + "\n", + "# Get the node attributes to check the updated setting\n", + "print(\"The parallelization parameters of %s are updated to: \" % mvau0.name)\n", + "print(\"PE: \" + str(mvau0_inst.get_nodeattr(\"PE\")))\n", + "print(\"SIMD: \" + str(mvau0_inst.get_nodeattr(\"SIMD\")))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "
    \n", - "Question in the first and the second line of the above cell.\n", - "
    " + "We save the model and view it. On expanding the first `MatrixVectorActivation` we can view the updated `PE` & `SIMD` parameters for that layer." ] }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "from finn.transformation.fpgadataflow.insert_dwc import InsertDWC\n", - "model = model.transform(InsertDWC())" - ] - }, - { - "cell_type": "code", - "execution_count": 12, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Stopping http://0.0.0.0:5901\n", - "Serving './cybsec_DWC_not_inserted.onnx' at http://0.0.0.0:5901\n" + "Stopping http://0.0.0.0:5920\n", + "Serving 'cybsec_PE_SIMD_modified.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -461,7 +465,7 @@ " " + "" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.save(\"cybsec_PE_SIMD_modified.onnx\")\n", + "showInNetron(\"cybsec_PE_SIMD_modified.onnx\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From the above total folding formula, we have reduced the total folding of our layer from `600 x 64` to `120 x 32`. Hence, resulting in an estimated `10x` decrease in the execution latency of our layer. \n", + "This can be observed in the new estimated clock cycles." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'MatrixVectorActivation_0': 3840,\n", + " 'MatrixVectorActivation_1': 4096,\n", + " 'MatrixVectorActivation_2': 4096,\n", + " 'MatrixVectorActivation_3': 64}" ] }, "execution_count": 12, @@ -478,8 +514,8 @@ } ], "source": [ - "model.save(\"./cybsec_DWC_not_inserted.onnx\")\n", - "showInNetron(\"./cybsec_DWC_not_inserted.onnx\",localhost_url='xirxlabs53')#localhost_url='xirxlabs60'" + "cycles_dict_updated = model.analysis(exp_cycles_per_layer)\n", + "cycles_dict_updated" ] }, { @@ -488,1665 +524,397 @@ "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "class MatrixVectorActivation(HLSCustomOp):\n", - " \"\"\"Class that corresponds to finn-hls Matrix_Vector_Activate(_Stream)_Batch\n", - " function.\"\"\"\n", - "\n", - " def __init__(self, onnx_node):\n", - " super().__init__(onnx_node)\n", - " self.decoupled_wrapper = templates.decoupled_wrapper\n", - "\n", - " def get_nodeattr_types(self):\n", - " my_attrs = {\n", - " \"PE\": (\"i\", True, 0),\n", - " \"SIMD\": (\"i\", True, 0),\n", - " \"MW\": (\"i\", True, 0),\n", - " \"MH\": (\"i\", True, 0),\n", - " \"resType\": (\"s\", False, \"lut\", {\"auto\", \"lut\", \"dsp\"}),\n", - " \"ActVal\": (\"i\", False, 0),\n", - " # FINN DataTypes for inputs, weights, outputs\n", - " \"inputDataType\": (\"s\", True, \"\"),\n", - " \"weightDataType\": (\"s\", True, \"\"),\n", - " \"outputDataType\": (\"s\", True, \"\"),\n", - " # FINN DataType for accumulator -- auto-computed and updated\n", - " \"accDataType\": (\"s\", False, \"INT32\"),\n", - " # use xnor-popcount for binary weights/inputs, thus treating them\n", - " # as bipolar\n", - " \"binaryXnorMode\": (\"i\", False, 0, {0, 1}),\n", - " # no-activation mode (produce accumulators)\n", - " \"noActivation\": (\"i\", False, 0, {0, 1}),\n", - " # number of input vectors, examples:\n", - " # [1] is a single vector (like a FC layer with batch=1)\n", - " # [4] is four vectors (like a FC layer with batch=4)\n", - " # [1, 4, 4] is four * four vectors (like a conv layer with batch=1)\n", - " \"numInputVectors\": (\"ints\", 
False, [1]),\n", - " # memory mode for the FC weights\n", - " # const -- embedded weights, default, long compile/synth times\n", - " # decoupled -- streaming weights with weight streamer packaged inside IP\n", - " # external -- streaming weights with external streamer\n", - " \"mem_mode\": (\"s\", False, \"const\", {\"const\", \"decoupled\", \"external\"}),\n", - " # FPGA resource type for memories in decoupled mode\n", - " # auto -- let Vivado decide\n", - " # block -- use BRAM\n", - " # distributed -- use LUTRAM\n", - " # ultra -- use UltraRAM (URAM), must have runtime_writeable_weights=1\n", - " # see also https://www.xilinx.com/support/answers/38070.html\n", - " \"ram_style\": (\n", - " \"s\",\n", - " False,\n", - " \"auto\",\n", - " {\"auto\", \"block\", \"distributed\", \"ultra\"},\n", - " ),\n", - " # FPGA resource type for threshold memories (if noActivation is False)\n", - " # auto -- let Vivado decide\n", - " # block -- use BRAM\n", - " # distributed -- use LUTRAM\n", - " \"ram_style_thresholds\": (\n", - " \"s\",\n", - " False,\n", - " \"auto\",\n", - " {\"auto\", \"block\", \"distributed\"},\n", - " ),\n", - " # (mem_mode = decoupled only) whether weights will be writable through\n", - " # an AXI-lite interface during runtime\n", - " # 1 for enabled, 0 for disabled.\n", - " # see finn-rtllib/memstream/doc/README for more about the memory\n", - " # address map used for writable weights\n", - " # IMPORTANT: After using AXI lite to either read or write the weights,\n", - " # always \"flush\" the accelerator by first passing a dummy input\n", - " # vector through the accelerator. This will get rid of any old\n", - " # weight data from the weight FIFOs.\n", - " \"runtime_writeable_weights\": (\"i\", False, 0, {0, 1}),\n", - " }\n", - " my_attrs.update(super().get_nodeattr_types())\n", - " return my_attrs\n", - "\n", - " def calc_wmem(self):\n", - " \"\"\"Calculates and returns WMEM.\"\"\"\n", - " mw = self.get_nodeattr(\"MW\")\n", - " mh = self.get_nodeattr(\"MH\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", - " assert mw % simd == 0, \"Requirement MW divisable by SIMD is violated.\"\n", - " wmem = mw * mh // (pe * simd)\n", - " return wmem\n", - "\n", - " def calc_tmem(self):\n", - " \"\"\"Calculates and returns TMEM.\"\"\"\n", - " if self.get_nodeattr(\"noActivation\") == 1:\n", - " return 0\n", - " else:\n", - " mh = self.get_nodeattr(\"MH\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " return mh // pe\n", - "\n", - " def make_shape_compatible_op(self, model):\n", - " oshape = self.get_normal_output_shape()\n", - " return super().make_const_shape_op(oshape)\n", - "\n", - " def infer_node_datatype(self, model):\n", - " node = self.onnx_node\n", - " idt = model.get_tensor_datatype(node.input[0])\n", - " if idt != self.get_input_datatype():\n", - " warn_str = \"inputDataType changing for %s: %s -> %s \" % (\n", - " node.name,\n", - " str(self.get_input_datatype()),\n", - " str(idt),\n", - " )\n", - " warnings.warn(warn_str)\n", - " self.set_nodeattr(\"inputDataType\", idt.name)\n", - " # set output datatype from property\n", - " odt = self.get_output_datatype()\n", - " model.set_tensor_datatype(node.output[0], odt)\n", - "\n", - " def verify_node(self):\n", - " info_messages = []\n", - " # verify that \"backend\" is set to \"fpgadataflow\"\n", - " backend_value = self.get_nodeattr(\"backend\")\n", - " if backend_value == \"fpgadataflow\":\n", - " 
info_messages.append(\"Attribute backend is set correctly\")\n", - " else:\n", - " info_messages.append('Attribute backend should be set to \"fpgadataflow\"')\n", - "\n", - " # verify that all necessary attributes exist\n", - " # TODO collect automatically from get_nodeattr_types\n", - " try:\n", - " self.get_nodeattr(\"code_gen_dir_cppsim\")\n", - " self.get_nodeattr(\"executable_path\")\n", - " self.get_nodeattr(\"resType\")\n", - " self.get_nodeattr(\"MW\")\n", - " self.get_nodeattr(\"MH\")\n", - " self.get_nodeattr(\"SIMD\")\n", - " self.get_nodeattr(\"PE\")\n", - " self.get_nodeattr(\"inputDataType\")\n", - " self.get_nodeattr(\"weightDataType\")\n", - " self.get_nodeattr(\"outputDataType\")\n", - " info_messages.append(\"All necessary attributes exist\")\n", - " except Exception:\n", - " info_messages.append(\n", - " \"\"\"The required MatrixVectorActivation attributes do not exist.\"\"\"\n", - " )\n", - "\n", - " # verify the number of inputs depending on noActivation value\n", - " # check noActivation value to determine the number of inputs\n", - " no_act = self.get_nodeattr(\"noActivation\")\n", - "\n", - " if no_act == 1:\n", - " if len(self.onnx_node.input) == 2:\n", - " info_messages.append(\"The number of inputs is correct\")\n", - " else:\n", - " info_messages.append(\n", - " \"\"\"MatrixVectorActivation needs in no\n", - " activation mode 2 inputs (data input and weights)\"\"\"\n", - " )\n", - " elif no_act == 0:\n", - " if len(self.onnx_node.input) == 3:\n", - " info_messages.append(\"The number of inputs is correct\")\n", - " else:\n", - " info_messages.append(\n", - " \"\"\"MatrixVectorActivation needs 3 inputs\n", - " (data input and weights and threshold values)\"\"\"\n", - " )\n", - " else:\n", - " info_messages.append(\n", - " \"\"\"noActivation attribute contains {} should\n", - " be 0 or 1\"\"\".format(\n", - " no_act\n", - " )\n", - " )\n", - "\n", - " return info_messages\n", - "\n", - " def uram_estimation(self):\n", - " P = self.get_nodeattr(\"PE\")\n", - " Q = self.get_nodeattr(\"SIMD\")\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " D_in = self.get_nodeattr(\"MW\")\n", - " D_out = self.get_nodeattr(\"MH\")\n", - " omega = (D_in * D_out) / (Q * P)\n", - " mem_width = Q * W * P\n", - " mmode = self.get_nodeattr(\"mem_mode\")\n", - " mstyle = self.get_nodeattr(\"ram_style\")\n", - " if (\n", - " (mmode == \"decoupled\" and mstyle != \"ultra\")\n", - " or (mmode == \"const\" and self.calc_wmem() <= 128)\n", - " or (mmode == \"external\")\n", - " ):\n", - " return 0\n", - " width_multiplier = math.ceil(mem_width / 72)\n", - " depth_multiplier = math.ceil(omega / 4096)\n", - " return width_multiplier * depth_multiplier\n", - "\n", - " def bram_estimation(self):\n", - " \"\"\"Calculates resource estimation for BRAM based on:\n", - " - FINN-R: An End-to-End Deep-Learning Framework for Fast\n", - " Exploration of Quantized Neural Networks\n", - " - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,\n", - " Y. Umuroglu, M. Leeser and K. Vissers\n", - " - 12. 
Sep 2018\n", - " \"\"\"\n", - " # TODO add in/out FIFO contributions\n", - " P = self.get_nodeattr(\"PE\")\n", - " Q = self.get_nodeattr(\"SIMD\")\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " D_in = self.get_nodeattr(\"MW\")\n", - " D_out = self.get_nodeattr(\"MH\")\n", - " omega = (D_in * D_out) / (Q * P)\n", - " mem_width = Q * W * P\n", - " mmode = self.get_nodeattr(\"mem_mode\")\n", - " mstyle = self.get_nodeattr(\"ram_style\")\n", - " if (\n", - " (mmode == \"decoupled\" and mstyle in [\"distributed\", \"ultra\"])\n", - " or (mmode == \"const\" and self.calc_wmem() <= 128)\n", - " or (mmode == \"external\")\n", - " ):\n", - " return 0\n", - " # assuming SDP mode RAMB18s (see UG573 Table 1-10)\n", - " # assuming decoupled (RTL) memory, which is more efficient than const (HLS)\n", - " if mem_width == 1:\n", - " return math.ceil(omega / 16384)\n", - " elif mem_width == 2:\n", - " return math.ceil(omega / 8192)\n", - " elif mem_width <= 4:\n", - " return (math.ceil(omega / 4096)) * (math.ceil(mem_width / 4))\n", - " elif mem_width <= 9:\n", - " return (math.ceil(omega / 2048)) * (math.ceil(mem_width / 9))\n", - " elif mem_width <= 18 or omega > 512:\n", - " return (math.ceil(omega / 1024)) * (math.ceil(mem_width / 18))\n", - " else:\n", - " return (math.ceil(omega / 512)) * (math.ceil(mem_width / 36))\n", - "\n", - " def bram_efficiency_estimation(self):\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " D_in = self.get_nodeattr(\"MW\")\n", - " D_out = self.get_nodeattr(\"MH\")\n", - " bram16_est = self.bram_estimation()\n", - " if bram16_est == 0:\n", - " return 1\n", - " wbits = W * D_in * D_out\n", - " bram16_est_capacity = bram16_est * 36 * 512\n", - " return wbits / bram16_est_capacity\n", - "\n", - " def uram_efficiency_estimation(self):\n", - " \"\"\"Function for URAM efficiency estimation: actual parameter storage\n", - " needed divided by the allocated URAM storage (from estimation)\"\"\"\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " D_in = self.get_nodeattr(\"MW\")\n", - " D_out = self.get_nodeattr(\"MH\")\n", - " uram_est = self.uram_estimation()\n", - " if uram_est == 0:\n", - " return 1\n", - " wbits = W * D_in * D_out\n", - " uram_est_capacity = uram_est * 72 * 4096\n", - " return wbits / uram_est_capacity\n", - "\n", - " def lut_estimation(self):\n", - " \"\"\"Calculates resource estimations for LUTs based on:\n", - " - FINN-R: An End-to-End Deep-Learning Framework for Fast\n", - " Exploration of Quantized Neural Networks\n", - " - M. Blott, T. B. Preusser, N. J. Fraser, G. Gambardella, K. O'Brien,\n", - " Y. Umuroglu, M. Leeser and K. Vissers\n", - " - 12. 
Sep 2018\n", - " \"\"\"\n", - " # TODO add in/out FIFO contributions\n", - " P = self.get_nodeattr(\"PE\")\n", - " Q = self.get_nodeattr(\"SIMD\")\n", - " MW = self.get_nodeattr(\"MW\")\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " # determine tdt with input and weight data types\n", - " idt = self.get_input_datatype()\n", - " A = idt.bitwidth()\n", - " # parameters from experiments in paper mentioned above\n", - " c0 = 300\n", - " c1 = 1.1\n", - " c2 = 0\n", - " mmode = self.get_nodeattr(\"mem_mode\")\n", - " mstyle = self.get_nodeattr(\"ram_style\")\n", - " if (mmode == \"decoupled\" and mstyle == \"distributed\") or (\n", - " mmode == \"const\" and self.calc_wmem() <= 128\n", - " ):\n", - " c2 = (P * Q * W) * math.ceil(self.calc_wmem() / 64)\n", - "\n", - " # multiplication\n", - " res_type = self.get_nodeattr(\"resType\")\n", - " if res_type == \"dsp\":\n", - " mult_luts = 0\n", - " else:\n", - " mult_luts = Q * (2 * math.ceil((W + A) / 6) - 1) * (W + A)\n", - " # adder tree\n", - " addertree_luts = (W + A) * (2 * Q - 1)\n", - " # accumulator\n", - " acc_bits = W + A + np.ceil(math.log(MW, 2))\n", - " acc_luts = acc_bits\n", - " # thresholds and threshold comparators\n", - " thr_luts = 0\n", - " comp_luts = 0\n", - " noact = self.get_nodeattr(\"noActivation\")\n", - " if noact == 0:\n", - " odt = self.get_output_datatype()\n", - " B = odt.bitwidth()\n", - " thr_luts = (2**B - 1) * acc_bits * math.ceil(self.calc_tmem() / 64)\n", - " comp_luts = (2**B - 1) * acc_bits\n", - "\n", - " return int(\n", - " c0\n", - " + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts))\n", - " + c2\n", - " )\n", - "\n", - " def dsp_estimation(self):\n", - " # multiplication\n", - " P = self.get_nodeattr(\"PE\")\n", - " res_type = self.get_nodeattr(\"resType\")\n", - " Q = self.get_nodeattr(\"SIMD\")\n", - " wdt = self.get_weight_datatype()\n", - " W = wdt.bitwidth()\n", - " idt = self.get_input_datatype()\n", - " A = idt.bitwidth()\n", - " if res_type == \"dsp\":\n", - " mult_dsp = P * Q * np.ceil((W + A) / 48) # TODO: more accurate modelling\n", - " else:\n", - " mult_dsp = 0\n", - " return int(mult_dsp)\n", - "\n", - " def get_exp_cycles(self):\n", - " pe = self.get_nodeattr(\"PE\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " num_inp_vec = self.get_nodeattr(\"numInputVectors\")\n", - " mh = self.get_nodeattr(\"MH\")\n", - " mw = self.get_nodeattr(\"MW\")\n", - " # since mmv != 1 is not supported yet, we set mmv for now to 1\n", - " mmv = 1\n", - " exp_cycles = (mh / pe) * (mw / simd) * np.prod(num_inp_vec) / mmv\n", - " return int(exp_cycles)\n", - "\n", - " def get_input_datatype(self, ind=0):\n", - " \"\"\"Returns FINN DataType of input.\"\"\"\n", - " # when performing FIFO insertion on an FC layer with ext weights, the ind\n", - " # parameter can be > 0 (referring to the weights) so handle that here\n", - " if ind == 0:\n", - " return DataType[self.get_nodeattr(\"inputDataType\")]\n", - " elif ind == 1:\n", - " return DataType[self.get_nodeattr(\"weightDataType\")]\n", - " else:\n", - " raise Exception(\"Undefined input ind for this layer type\")\n", - "\n", - " def get_weight_datatype(self):\n", - " \"\"\"Returns FINN DataType of weights.\"\"\"\n", - " return DataType[self.get_nodeattr(\"weightDataType\")]\n", - "\n", - " def get_output_datatype(self, ind=0):\n", - " \"\"\"Returns FINN DataType of output.\"\"\"\n", - " return DataType[self.get_nodeattr(\"outputDataType\")]\n", - "\n", - " def get_instream_width(self, ind=0):\n", - " i_bits = 
self.get_input_datatype().bitwidth()\n", - " in_width = i_bits * self.get_nodeattr(\"SIMD\")\n", - " return in_width\n", - "\n", - " def get_outstream_width(self, ind=0):\n", - " o_bits = self.get_output_datatype().bitwidth()\n", - " out_width = o_bits * self.get_nodeattr(\"PE\")\n", - " return out_width\n", - "\n", - " def get_weightstream_width(self):\n", - " \"\"\"Returns weight stream width. Used only in decoupled mode.\"\"\"\n", - " if (\n", - " self.get_nodeattr(\"mem_mode\") == \"decoupled\"\n", - " or self.get_nodeattr(\"mem_mode\") == \"external\"\n", - " ):\n", - " pe = self.get_nodeattr(\"PE\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " wp = self.get_weight_datatype().bitwidth()\n", - " w_width = pe * simd * wp\n", - " return w_width\n", - " else:\n", - " return 0\n", - "\n", - " def get_weightstream_width_padded(self):\n", - " \"\"\"Returns weight stream width padded to a multiple of 8. This is required\n", - " by the AXI Stream spec. Used in decoupled mode.\"\"\"\n", - " weight_width = self.get_weightstream_width()\n", - " return roundup_to_integer_multiple(weight_width, 8)\n", - "\n", - " def get_ap_int_max_w(self):\n", - " # base class impl (max of inp/out stream widths)\n", - " max_of_io = super().get_ap_int_max_w()\n", - " # decoupled mode weight stream\n", - " weightstream = self.get_weightstream_width()\n", - " # single PE weight entry\n", - " weight_bits = self.get_weight_datatype().bitwidth()\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " single_pe_w = simd * weight_bits\n", - " return max([weightstream, max_of_io, single_pe_w])\n", - "\n", - " def get_folded_input_shape(self, ind=0):\n", - " mw = self.get_nodeattr(\"MW\")\n", - " mh = self.get_nodeattr(\"MH\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " sf = mw // simd\n", - " nf = mh // pe\n", - " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", - "\n", - " if ind == 0:\n", - " # calculate shape of input 0\n", - " folded_input_shape = tuple(vecs + [sf, simd])\n", - " elif ind == 1 and self.get_nodeattr(\"mem_mode\") == \"external\":\n", - " # calculate shape of input 1 (weights)\n", - " folded_input_shape = tuple(vecs + [sf * nf, simd * pe])\n", - " else:\n", - " raise Exception(\"Undefined input shape for requested input\")\n", - "\n", - " return folded_input_shape\n", - "\n", - " def get_folded_output_shape(self, ind=0):\n", - " mh = self.get_nodeattr(\"MH\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " nf = mh // pe\n", - " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", - " folded_output_shape = tuple(vecs + [nf, pe])\n", - " return folded_output_shape\n", - "\n", - " def get_normal_input_shape(self, ind=0):\n", - " mw = self.get_nodeattr(\"MW\")\n", - " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", - " normal_input_shape = tuple(vecs + [mw])\n", - " return normal_input_shape\n", - "\n", - " def get_normal_output_shape(self, ind=0):\n", - " mh = self.get_nodeattr(\"MH\")\n", - " vecs = list(self.get_nodeattr(\"numInputVectors\"))\n", - " normal_output_shape = tuple(vecs + [mh])\n", - " return normal_output_shape\n", - "\n", - " def get_number_output_values(self):\n", - " nf = np.prod(self.get_folded_output_shape()[:-1])\n", - " return nf\n", - "\n", - " def get_template_param_values(self):\n", - " \"\"\"Returns the template parameter values according to input, output and weight\n", - " data types.\"\"\"\n", - " ret = dict()\n", - " inp_hls_str = self.get_input_datatype().get_hls_datatype_str()\n", - " out_hls_str = 
self.get_output_datatype().get_hls_datatype_str()\n", - " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", - " # out_is_binary = self.get_output_datatype() == DataType[\"BINARY\"]\n", - " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", - " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", - " if (inp_is_binary or wt_is_binary) and (not bin_xnor_mode):\n", - " raise Exception(\"True binary (non-bipolar) inputs not yet supported\")\n", - " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", - " # out_is_bipolar = self.get_output_datatype() == DataType[\"BIPOLAR\"]\n", - " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", - " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", - " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", - " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", - " # fill in TSrcI and TWeightI\n", - " # TODO check these with Giulio\n", - " # TODO handle non-bipolar binary inputs\n", - " if inp_is_bipolar and wt_is_bipolar:\n", - " ret[\"TSrcI\"] = \"Recast\"\n", - " ret[\"TWeightI\"] = \"Identity\"\n", - " elif (not inp_is_bipolar) and wt_is_bipolar:\n", - " ret[\"TSrcI\"] = \"Slice<%s>\" % inp_hls_str\n", - " ret[\"TWeightI\"] = \"Recast\"\n", - " elif inp_is_bipolar and (not wt_is_bipolar):\n", - " ret[\"TSrcI\"] = \"Recast\"\n", - " ret[\"TWeightI\"] = \"Identity\"\n", - " elif (not inp_is_bipolar) and (not wt_is_bipolar):\n", - " ret[\"TSrcI\"] = \"Slice<%s>\" % inp_hls_str\n", - " ret[\"TWeightI\"] = \"Identity\"\n", - "\n", - " # fill in TDstI\n", - " ret[\"TDstI\"] = \"Slice<%s>\" % out_hls_str\n", - "\n", - " return ret\n", - "\n", - " def get_hls_compatible_weight_tensor(self, orig_weight_matrix):\n", - " \"\"\"Convert the original numpy weight matrix orig_weight_matrix into\n", - " a form suitable for passing to the hlslib call:\n", - " * ensure MH % PE == 0 and MW % SIMD == 0\n", - " * for bipolar {-1,+1} weights, convert to binary {0, 1}\n", - " * interleave rows between PEs\n", - " * reshape into (1, PE, WMEM, SIMD) and return\n", - " \"\"\"\n", - " mw = self.get_nodeattr(\"MW\")\n", - " mh = self.get_nodeattr(\"MH\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " wmem = self.calc_wmem()\n", - " assert orig_weight_matrix.shape == (\n", - " mw,\n", - " mh,\n", - " ), \"\"\"Weights matrix doesn't\n", - " have expected shape (mw, mh)\"\"\"\n", - " assert mw % simd == 0, \"Requirement MH divisable by SIMD is violated.\"\n", - " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", - " # start by transposing the original weight matrix, since ONNX and\n", - " # finn-hlslib use different assumptions\n", - " # ONNX uses (in_features, out_features) and matmul(x, W)\n", - " # finn-hlslib uses (out_features, in_features) and matmul(W, x)\n", - " ret = orig_weight_matrix.T\n", - " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", - " # convert bipolar to binary\n", - " ret = (ret + 1) / 2\n", - " # interleave rows between PEs and reshape\n", - " # distribute rows between PEs\n", - " ret = interleave_matrix_outer_dim_from_partitions(ret, pe)\n", - " # create SIMD as innermost dimension and add a dummy outer dim\n", - " ret = ret.reshape(1, pe, wmem, simd)\n", - " # reverse the SIMD dimension\n", - " ret = np.flip(ret, axis=-1)\n", - " return ret\n", - "\n", - " def minimize_accumulator_width(self, model):\n", - " weights = 
model.get_initializer(self.onnx_node.input[1])\n", - " # since in the calculation the values of the weight matrix are used,\n", - " # for the bipolar case they need to be converted to bipolar\n", - " if self.get_nodeattr(\"binaryXnorMode\"):\n", - " weights = 2 * weights - 1\n", - " if len(self.onnx_node.input) > 2:\n", - " thresholds = model.get_initializer(self.onnx_node.input[2])\n", - " else:\n", - " thresholds = None\n", - " idt = self.get_input_datatype()\n", - " # calculate minimum and maximum values of accumulator\n", - " (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt)\n", - " if thresholds is not None:\n", - " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", - " # set threshold datatype (and accumulator datatype implicitly)\n", - " min_threshold = thresholds.min()\n", - " max_threshold = thresholds.max()\n", - " # clip threshold values\n", - " clip_upper = None\n", - " clip_lower = None\n", - " if max_threshold > acc_max + 1:\n", - " clip_upper = acc_max + 1\n", - " if min_threshold < acc_min:\n", - " clip_lower = acc_min\n", - " if (clip_lower is not None) or (clip_upper is not None):\n", - " warnings.warn(\"Clipping some thresholds in %s\" % self.onnx_node.name)\n", - " thresholds = np.clip(thresholds, clip_lower, clip_upper)\n", - " model.set_initializer(self.onnx_node.input[2], thresholds)\n", - " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", - " min_threshold = thresholds.min()\n", - " max_threshold = thresholds.max()\n", - " # get range required by threshold values\n", - " tdt_min = min(acc_min, min_threshold)\n", - " tdt_max = max(acc_max, max_threshold)\n", - " if tdt_min < 0:\n", - " if abs(tdt_min) > tdt_max:\n", - " tdt = DataType.get_smallest_possible(tdt_min)\n", - " else:\n", - " tdt = DataType.get_smallest_possible(-tdt_max - 1)\n", - " else:\n", - " tdt = DataType.get_smallest_possible(tdt_max)\n", - " assert np.vectorize(tdt.allowed)(\n", - " threshold_tensor\n", - " ).all(), \"Thresholds in %s can't be expressed with type %s\" % (\n", - " self.onnx_node.name,\n", - " str(tdt),\n", - " )\n", - " self.set_nodeattr(\"accDataType\", tdt.name)\n", - " else:\n", - " if acc_min < 0:\n", - " if abs(acc_min) > acc_max:\n", - " adt = DataType.get_smallest_possible(acc_min)\n", - " else:\n", - " adt = DataType.get_smallest_possible(-acc_max - 1)\n", - " else:\n", - " adt = DataType.get_smallest_possible(acc_max)\n", - " # ensure a datatype divisible by 8-bits in case this is the last node\n", - " bw = roundup_to_integer_multiple(adt.bitwidth(), 8)\n", - " new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw))\n", - " adt = DataType[new_adt_name]\n", - " self.set_nodeattr(\"accDataType\", adt.name)\n", - " # for no-activation nodes, output dt = acc dt\n", - " self.set_nodeattr(\"outputDataType\", adt.name)\n", - " return DataType[self.get_nodeattr(\"accDataType\")]\n", - "\n", - " def get_hls_compatible_threshold_tensor(self, orig_thres_matrix):\n", - " \"\"\"Convert the original numpy weight matrix orig_weight_matrix into\n", - " a form suitable for passing to the hlslib call:\n", - " * ensure MH % PE == 0\n", - " * for bipolar weights&inputs, ensure thresholds are positive\n", - " * interleave rows between PEs\n", - " * reshape into (PE, TMEM, n_thres_steps) and return\n", - " \"\"\"\n", - " mh = self.get_nodeattr(\"MH\")\n", - " pe = self.get_nodeattr(\"PE\")\n", - " tmem = mh // pe\n", - " assert mh % pe == 0, \"Requirement MH divisable by PE is violated.\"\n", - " assert (\n", - " 
orig_thres_matrix.ndim == 2\n", - " ), \"\"\"Threshold matrix dimension is\n", - " not as expected (2).\"\"\"\n", - " n_thres_steps = orig_thres_matrix.shape[1]\n", - " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", - " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", - " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", - " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", - " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", - " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", - " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", - " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", - " if inp_is_bipolar and wt_is_bipolar:\n", - " # ensure all thresholds are nonnegative\n", - " assert (orig_thres_matrix >= 0).all()\n", - " # ensure all thresholds are integer\n", - " assert (orig_thres_matrix.astype(np.int32) == orig_thres_matrix).all()\n", - " ret = orig_thres_matrix\n", - " # workaround for vivado_hls threshold bug\n", - " if ret[0][0] == 0 and n_thres_steps == 1:\n", - " ret = np.copy(ret)\n", - " ret[0][0] = 1\n", - " warnings.warn(\n", - " \"Setting 0-valued first threshold to 1 to avoid vivado_hls bug\"\n", - " )\n", - " # ensure channels = mh , duplicating if necessary\n", - " if ret.shape[0] == 1:\n", - " ret = np.tile(ret, (mh, 1))\n", - " assert (\n", - " ret.shape[0] == mh\n", - " ), \"Channels of threshold matrix are not as expected (mh)\"\n", - " # distribute rows between PEs\n", - " ret = interleave_matrix_outer_dim_from_partitions(ret, pe)\n", - " assert (\n", - " ret.shape[0] == pe\n", - " ), \"\"\"First dimension after distribution of the\n", - " rows between PEs is not as expected (pe)\"\"\"\n", - " assert (\n", - " ret.shape[1] == tmem\n", - " ), \"\"\"Second dimension after distribution of the\n", - " rows between PEs is not as expected (tmem)\"\"\"\n", - " assert (\n", - " ret.shape[2] == n_thres_steps\n", - " ), \"\"\"Third dimension after distribution of the\n", - " rows between PEs is not as expected (n_thres_steps)\"\"\"\n", - " return ret.reshape(1, pe, tmem, n_thres_steps)\n", - "\n", - " def make_weight_file(self, weights, weight_file_mode, weight_file_name):\n", - " \"\"\"Produce a file containing given weights in appropriate format for this\n", - " layer. 
This file can be used for either synthesis or run-time reconfig\n", - " of weights.\n", - "\n", - " Arguments:\n", - "\n", - " * weights : numpy array with weights to be put into the file\n", - " * weight_file_mode : one of {hls_header, decoupled_verilog_dat,\n", - " decoupled_runtime}\n", - " * weight_file_name : filename for the weight file to be generated\n", - "\n", - " \"\"\"\n", - " # convert weights into hlslib-compatible format\n", - " weight_tensor = self.get_hls_compatible_weight_tensor(weights)\n", - " export_wdt = self.get_weight_datatype()\n", - " # we have converted bipolar weights to binary for export,\n", - " # so use it as such for weight generation\n", - " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", - " export_wdt = DataType[\"BINARY\"]\n", - " if weight_file_mode == \"hls_header\":\n", - " weight_hls_code = numpy_to_hls_code(\n", - " weight_tensor, export_wdt, \"weights\", True, True\n", - " )\n", - " # write weights into C++ header file as dictated by finn-hlslib\n", - " f_weights = open(weight_file_name, \"w\")\n", - " if export_wdt.bitwidth() != 1:\n", - " f_weights.write(\n", - " \"const FixedPointWeights<{},{},{},{}> weights = \".format(\n", - " self.get_nodeattr(\"SIMD\"),\n", - " export_wdt.get_hls_datatype_str(),\n", - " self.get_nodeattr(\"PE\"),\n", - " self.calc_wmem(),\n", - " )\n", - " )\n", - " else:\n", - " f_weights.write(\n", - " \"const BinaryWeights<{},{},{}> weights = \".format(\n", - " self.get_nodeattr(\"SIMD\"),\n", - " self.get_nodeattr(\"PE\"),\n", - " self.calc_wmem(),\n", - " )\n", - " )\n", - " f_weights.write(weight_hls_code)\n", - " f_weights.close()\n", - " elif \"decoupled\" in weight_file_mode:\n", - " # create a weight stream for various flavors of decoupled mode:\n", - " # transpose weight tensor from (1, PE, WMEM, SIMD) to (1, WMEM, PE, SIMD)\n", - " weight_tensor_unflipped = np.transpose(weight_tensor, (0, 2, 1, 3))\n", - " # reverse SIMD flip for saving weights in .npy\n", - " weight_tensor_simd_flipped = np.flip(weight_tensor_unflipped, axis=-1)\n", - " # PE flip for saving weights in .dat\n", - " weight_tensor_pe_flipped = np.flip(weight_tensor_unflipped, axis=-2)\n", - " # reshape weight tensor (simd_flipped and pe_flipped) to desired shape\n", - " pe = self.get_nodeattr(\"PE\")\n", - " simd = self.get_nodeattr(\"SIMD\")\n", - " # simd_flipped\n", - " weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape(\n", - " 1, -1, pe * simd\n", - " )\n", - " weight_tensor_simd_flipped = weight_tensor_simd_flipped.copy()\n", - " # flipped\n", - " weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape(\n", - " 1, -1, pe * simd\n", - " )\n", - " weight_tensor_pe_flipped = weight_tensor_pe_flipped.copy()\n", - " if weight_file_mode == \"decoupled_npy\":\n", - " # save weight stream into npy for cppsim\n", - " np.save(weight_file_name, weight_tensor_simd_flipped)\n", - " elif weight_file_mode == \"decoupled_verilog_dat\":\n", - " # convert weight values into hexstring\n", - " weight_width = self.get_weightstream_width()\n", - " # pad to nearest 4 bits to get hex strings\n", - " weight_width_padded = roundup_to_integer_multiple(weight_width, 4)\n", - " weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string(\n", - " weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix=\"\"\n", - " )\n", - " # add zeroes to pad out file to 1024 entries\n", - " weight_stream = weight_tensor_pe_flipped.flatten()\n", - " weight_stream = weight_stream.copy()\n", - " with open(weight_file_name, \"w\") as f:\n", - " for val 
in weight_stream:\n", - " f.write(val + \"\\n\")\n", - " elif weight_file_mode == \"decoupled_runtime\":\n", - " # memstream axi-lite interface will map each mem line to\n", - " # one or multiple 32-bit words\n", - " weight_width = self.get_weightstream_width()\n", - " words_per_memwidth = 2 ** math.ceil(math.log2(weight_width / 32))\n", - " if words_per_memwidth < 1:\n", - " words_per_memwidth = 1\n", - " weight_width_padded = words_per_memwidth * 32\n", - " # first, pack and ensure padding to 32 bits\n", - " weight_tensor_pe_flipped = pack_innermost_dim_as_hex_string(\n", - " weight_tensor_pe_flipped, export_wdt, weight_width_padded, prefix=\"\"\n", - " )\n", - " weight_stream = weight_tensor_pe_flipped.flatten()\n", - " weight_stream = weight_stream.copy()\n", - " with open(weight_file_name, \"w\") as f:\n", - " for val in weight_stream:\n", - " # split into groups of 8 hex digits (= 32 bits)\n", - " words_32b = textwrap.wrap(val, 8)\n", - " words_32b.reverse()\n", - " for word_32b in words_32b:\n", - " f.write(word_32b + \"\\n\")\n", - " else:\n", - " raise Exception(\"Unknown weight_file_mode\")\n", - "\n", - " else:\n", - " raise Exception(\"Unknown weight_file_mode\")\n", - "\n", - " def generate_params(self, model, path):\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " code_gen_dir = path\n", - " # weights, if not external\n", - " weights = model.get_initializer(self.onnx_node.input[1])\n", - " if mem_mode == \"const\":\n", - " # save hlslib-compatible weights in params.h\n", - " weight_filename = \"{}/params.h\".format(code_gen_dir)\n", - " self.make_weight_file(weights, \"hls_header\", weight_filename)\n", - " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " weight_filename_sim = \"{}/weights.npy\".format(code_gen_dir)\n", - " # save decoupled weights for cppsim\n", - " self.make_weight_file(weights, \"decoupled_npy\", weight_filename_sim)\n", - " if mem_mode == \"decoupled\":\n", - " # also save weights as Verilog .dat file\n", - " # note that we provide two different .dat files, one for synth\n", - " # and one for synthesis. 
this is because URAM-based weights always\n", - " # need zero weights for synthesis, otherwise they get inferred\n", - " # as BRAM\n", - " weight_filename_rtl_synth = \"{}/memblock_synth_0.dat\".format(\n", - " code_gen_dir\n", - " )\n", - " weight_filename_rtl_sim = \"{}/memblock_sim_0.dat\".format(code_gen_dir)\n", - " # sim weights are always the true weights\n", - " self.make_weight_file(\n", - " weights, \"decoupled_verilog_dat\", weight_filename_rtl_sim\n", - " )\n", - " ram_style = self.get_nodeattr(\"ram_style\")\n", - " if ram_style == \"ultra\":\n", - " # UltraRAM must have no memory initializer, or only zeroes\n", - " # otherwise BRAM will be inferred instead of URAM\n", - " # as a workaround we provide a zero-weight init here\n", - " synth_weights = np.zeros_like(weights, dtype=np.float32)\n", - " else:\n", - " synth_weights = weights\n", - " self.make_weight_file(\n", - " synth_weights, \"decoupled_verilog_dat\", weight_filename_rtl_synth\n", - " )\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", - " currently no other parameter value is supported!\"\"\"\n", - " )\n", - "\n", - " # save thresholds in thresh.h\n", - " if len(self.onnx_node.input) > 2:\n", - " thresholds = model.get_initializer(self.onnx_node.input[2])\n", - " if thresholds is not None:\n", - " threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds)\n", - " # use UINT32 threshold export for bipolar times bipolar\n", - " inp_is_bipolar = self.get_input_datatype() == DataType[\"BIPOLAR\"]\n", - " wt_is_bipolar = self.get_weight_datatype() == DataType[\"BIPOLAR\"]\n", - " # reinterpret inp/wt as bipolar if bin_xnor_mode is iset\n", - " inp_is_binary = self.get_input_datatype() == DataType[\"BINARY\"]\n", - " wt_is_binary = self.get_weight_datatype() == DataType[\"BINARY\"]\n", - " bin_xnor_mode = self.get_nodeattr(\"binaryXnorMode\") == 1\n", - " inp_is_bipolar = inp_is_bipolar or (inp_is_binary and bin_xnor_mode)\n", - " wt_is_bipolar = wt_is_bipolar or (wt_is_binary and bin_xnor_mode)\n", - " # get computed threshold datatype from attribute\n", - " tdt = DataType[self.get_nodeattr(\"accDataType\")]\n", - "\n", - " assert np.vectorize(tdt.allowed)(\n", - " threshold_tensor\n", - " ).all(), \"Thresholds in %s can't be expressed with type %s\" % (\n", - " self.onnx_node.name,\n", - " str(tdt),\n", - " )\n", - " thresholds_hls_code = numpy_to_hls_code(\n", - " threshold_tensor, tdt, \"thresholds\", False, True\n", - " )\n", - " # write thresholds into thresh.h\n", - " f_thresh = open(\"{}/thresh.h\".format(code_gen_dir), \"w\")\n", - " tdt_hls = tdt.get_hls_datatype_str()\n", - " # use binary to export bipolar activations\n", - " export_odt = self.get_output_datatype()\n", - " if self.get_output_datatype() == DataType[\"BIPOLAR\"]:\n", - " export_odt = DataType[\"BINARY\"]\n", - " odt_hls = export_odt.get_hls_datatype_str()\n", - " f_thresh.write(\n", - " \"static ThresholdsActivation<{},{},{},{},{},{},{}> threshs \\\n", - " = \".format(\n", - " self.calc_tmem(),\n", - " self.get_nodeattr(\"PE\"),\n", - " threshold_tensor.shape[-1],\n", - " tdt_hls,\n", - " odt_hls,\n", - " self.get_nodeattr(\"ActVal\"),\n", - " \"comp::less_equal<%s, %s>\" % (tdt_hls, tdt_hls),\n", - " )\n", - " )\n", - " f_thresh.write(thresholds_hls_code)\n", - " f_thresh.close()\n", - "\n", - " def execute_node(self, context, graph):\n", - " mode = self.get_nodeattr(\"exec_mode\")\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " node = 
self.onnx_node\n", - "\n", - " # TODO ensure codegen dir exists\n", - " if mode == \"cppsim\":\n", - " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", - " elif mode == \"rtlsim\":\n", - " code_gen_dir = self.get_nodeattr(\"code_gen_dir_ipgen\")\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Invalid value for attribute exec_mode! Is currently set to: {}\n", - " has to be set to one of the following value (\"cppsim\", \"rtlsim\")\"\"\".format(\n", - " mode\n", - " )\n", - " )\n", - "\n", - " # create a npy file fore each input of the node (in_ind is input index)\n", - " in_ind = 0\n", - " for inputs in node.input:\n", - " # it is assumed that the first input of the node is the data input\n", - " # the second input are the weights\n", - " # the third input are the thresholds\n", - " if in_ind == 0:\n", - " assert (\n", - " str(context[inputs].dtype) == \"float32\"\n", - " ), \"\"\"Input datatype is\n", - " not float32 as expected.\"\"\"\n", - " expected_inp_shape = self.get_folded_input_shape()\n", - " reshaped_input = context[inputs].reshape(expected_inp_shape)\n", - " if self.get_input_datatype() == DataType[\"BIPOLAR\"]:\n", - " # store bipolar activations as binary\n", - " reshaped_input = (reshaped_input + 1) / 2\n", - " export_idt = DataType[\"BINARY\"]\n", - " else:\n", - " export_idt = self.get_input_datatype()\n", - " # make copy before saving the array\n", - " reshaped_input = reshaped_input.copy()\n", - " np.save(\n", - " os.path.join(code_gen_dir, \"input_{}.npy\".format(in_ind)),\n", - " reshaped_input,\n", - " )\n", - " elif in_ind > 2:\n", - " raise Exception(\"Unexpected input found for MatrixVectorActivation\")\n", - " in_ind += 1\n", - "\n", - " if mode == \"cppsim\":\n", - " # execute the precompiled model\n", - " super().exec_precompiled_singlenode_model()\n", - " # load output npy file\n", - " super().npy_to_dynamic_output(context)\n", - " # reinterpret binary output as bipolar where needed\n", - " if self.get_output_datatype() == DataType[\"BIPOLAR\"]:\n", - " out = context[node.output[0]]\n", - " out = 2 * out - 1\n", - " context[node.output[0]] = out\n", - " assert (\n", - " context[node.output[0]].shape == self.get_normal_output_shape()\n", - " ), \"cppsim did not produce expected output shape\"\n", - " elif mode == \"rtlsim\":\n", - " sim = self.get_rtlsim()\n", - " nbits = self.get_instream_width()\n", - " inp = npy_to_rtlsim_input(\n", - " \"{}/input_0.npy\".format(code_gen_dir), export_idt, nbits\n", - " )\n", - " super().reset_rtlsim(sim)\n", - " super().toggle_clk(sim)\n", - " if mem_mode == \"external\" or mem_mode == \"decoupled\":\n", - " wnbits = self.get_weightstream_width()\n", - " export_wdt = self.get_weight_datatype()\n", - " # we have converted bipolar weights to binary for export,\n", - " # so use it as such for weight generation\n", - " if self.get_weight_datatype() == DataType[\"BIPOLAR\"]:\n", - " export_wdt = DataType[\"BINARY\"]\n", - " wei = npy_to_rtlsim_input(\n", - " \"{}/weights.npy\".format(code_gen_dir), export_wdt, wnbits\n", - " )\n", - " num_w_reps = np.prod(self.get_nodeattr(\"numInputVectors\"))\n", - " io_dict = {\n", - " \"inputs\": {\"in0\": inp, \"weights\": wei * num_w_reps},\n", - " \"outputs\": {\"out\": []},\n", - " }\n", - " self.rtlsim_multi_io(sim, io_dict)\n", - " output = io_dict[\"outputs\"][\"out\"]\n", - " else:\n", - " output = self.rtlsim(sim, inp)\n", - " odt = self.get_output_datatype()\n", - " target_bits = odt.bitwidth()\n", - " packed_bits = self.get_outstream_width()\n", - " out_npy_path = 
\"{}/output.npy\".format(code_gen_dir)\n", - " out_shape = self.get_folded_output_shape()\n", - " rtlsim_output_to_npy(\n", - " output, out_npy_path, odt, out_shape, packed_bits, target_bits\n", - " )\n", - "\n", - " # load and reshape output\n", - " output = np.load(out_npy_path)\n", - " oshape = self.get_normal_output_shape()\n", - " output = np.asarray([output], dtype=np.float32).reshape(*oshape)\n", - " context[node.output[0]] = output\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Invalid value for attribute exec_mode! Is currently set to: {}\n", - " has to be set to one of the following value (\"cppsim\", \"rtlsim\")\"\"\".format(\n", - " mode\n", - " )\n", - " )\n", - "\n", - " def global_includes(self):\n", - " self.code_gen_dict[\"$GLOBALS$\"] = ['#include \"weights.hpp\"']\n", - " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"activations.hpp\"']\n", - "\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " if mem_mode not in [\"const\", \"decoupled\", \"external\"]:\n", - " raise Exception(\n", - " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", - " currently no other parameter value is supported!\"\"\"\n", - " )\n", - " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"mvau.hpp\"']\n", - " if self.calc_tmem() != 0:\n", - " # TODO find a better way of checking for no pregenerated thresholds\n", - " self.code_gen_dict[\"$GLOBALS$\"] += ['#include \"thresh.h\"']\n", - "\n", - " def defines(self, var):\n", - " # Only ipgen mode: Make sure that SIMD parameter satisfies minimum requirements.\n", - " if var == \"ipgen\":\n", - " SIMD = self.get_nodeattr(\"SIMD\")\n", - " MW = self.get_nodeattr(\"MW\")\n", - " condition = SIMD >= (MW / 1024)\n", - " msg = (\n", - " f\"HLS synthesis of MatrixVectorActivation requires: \"\n", - " f\"SIMD >= MW / 1024. 
This is not fulfilled with: SIMD={SIMD} \"\n", - " f\"and MW={MW} for node: {self.onnx_node.name}.\"\n", - " )\n", - " assert condition, msg\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " numInputVectors = list(self.get_nodeattr(\"numInputVectors\"))\n", - " numReps = np.prod(numInputVectors)\n", - " self.code_gen_dict[\"$DEFINES$\"] = [\n", - " \"\"\"#define MW1 {}\\n #define MH1 {}\\n\n", - " #define SIMD1 {}\\n #define PE1 {}\\n #define WMEM1 {}\\n\n", - " #define TMEM1 {}\\n #define numReps {}\"\"\".format(\n", - " self.get_nodeattr(\"MW\"),\n", - " self.get_nodeattr(\"MH\"),\n", - " self.get_nodeattr(\"SIMD\"),\n", - " self.get_nodeattr(\"PE\"),\n", - " self.calc_wmem(),\n", - " self.calc_tmem(),\n", - " numReps,\n", - " )\n", - " ]\n", - " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " wdt = self.get_weight_datatype()\n", - " self.code_gen_dict[\"$DEFINES$\"].append(\n", - " \"#define WP1 {}\\n\".format(wdt.bitwidth())\n", - " )\n", - "\n", - " def read_npy_data(self):\n", - " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", - " dtype = self.get_input_datatype()\n", - " if dtype == DataType[\"BIPOLAR\"]:\n", - " # use binary for bipolar storage\n", - " dtype = DataType[\"BINARY\"]\n", - " elem_bits = dtype.bitwidth()\n", - " packed_bits = self.get_instream_width()\n", - " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", - " elem_hls_type = dtype.get_hls_datatype_str()\n", - " npy_type = \"float\"\n", - " npy_in = \"%s/input_0.npy\" % code_gen_dir\n", - " self.code_gen_dict[\"$READNPYDATA$\"] = []\n", - " # note: the innermost dim is reversed for the input\n", - " self.code_gen_dict[\"$READNPYDATA$\"].append(\n", - " 'npy2apintstream<%s, %s, %d, %s>(\"%s\", in0, false);'\n", - " % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in)\n", - " )\n", - "\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " wdt = self.get_weight_datatype()\n", - " elem_bits = wdt.bitwidth()\n", - " packed_bits = self.get_weightstream_width()\n", - " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", - " elem_hls_type = wdt.get_hls_datatype_str()\n", - " npy_type = \"float\"\n", - " npy_in = \"%s/weights.npy\" % code_gen_dir\n", - "\n", - " self.code_gen_dict[\"$READNPYDATA$\"].append(\n", - " 'npy2apintstream<%s, %s, %d, %s>(\"%s\", weights, false, numReps);'\n", - " % (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in)\n", - " )\n", - "\n", - " def strm_decl(self):\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " self.code_gen_dict[\"$STREAMDECLARATIONS$\"] = []\n", - " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", - " 'hls::stream> in0 (\"in0\");'.format(self.get_instream_width())\n", - " )\n", - " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", - " 'hls::stream> out (\"out\");'.format(self.get_outstream_width())\n", - " )\n", - "\n", - " if mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " self.code_gen_dict[\"$STREAMDECLARATIONS$\"].append(\n", - " 'hls::stream> weights (\"weights\");'.format(\n", - " self.get_weightstream_width()\n", - " )\n", - " )\n", - "\n", - " def docompute(self):\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " map_to_hls_mult_style = {\n", - " \"auto\": \"ap_resource_dflt()\",\n", - " \"lut\": \"ap_resource_lut()\",\n", - " \"dsp\": \"ap_resource_dsp()\",\n", - " }\n", - " tmpl_args = self.get_template_param_values()\n", - " if self.calc_tmem() == 0:\n", - " odtype_hls_str = 
self.get_output_datatype().get_hls_datatype_str()\n", - " threshs = \"PassThroughActivation<%s>()\" % odtype_hls_str\n", - " else:\n", - " threshs = \"threshs\"\n", - " if mem_mode == \"const\":\n", - " self.code_gen_dict[\"$DOCOMPUTE$\"] = [\n", - " \"\"\"Matrix_Vector_Activate_Batch\n", - " (in0, out, weights, {}, numReps, {});\"\"\".format(\n", - " tmpl_args[\"TSrcI\"],\n", - " tmpl_args[\"TDstI\"],\n", - " tmpl_args[\"TWeightI\"],\n", - " threshs,\n", - " map_to_hls_mult_style[self.get_nodeattr(\"resType\")],\n", - " )\n", - " ]\n", - " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " wdt = self.get_weight_datatype()\n", - " if wdt == DataType[\"BIPOLAR\"]:\n", - " export_wdt = DataType[\"BINARY\"]\n", - " else:\n", - " export_wdt = wdt\n", - " wdtype_hls_str = export_wdt.get_hls_datatype_str()\n", - " self.code_gen_dict[\"$DOCOMPUTE$\"] = [\n", - " \"\"\"Matrix_Vector_Activate_Stream_Batch\n", - " (in0, out, weights, {}, numReps, {});\"\"\".format(\n", - " tmpl_args[\"TSrcI\"],\n", - " tmpl_args[\"TDstI\"],\n", - " tmpl_args[\"TWeightI\"],\n", - " wdtype_hls_str,\n", - " threshs,\n", - " map_to_hls_mult_style[self.get_nodeattr(\"resType\")],\n", - " )\n", - " ]\n", - "\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or \"external\",\n", - " currently no other parameter value is supported!\"\"\"\n", - " )\n", - "\n", - " def dataoutstrm(self):\n", - " code_gen_dir = self.get_nodeattr(\"code_gen_dir_cppsim\")\n", - " dtype = self.get_output_datatype()\n", - " if dtype == DataType[\"BIPOLAR\"]:\n", - " # use binary for bipolar storage\n", - " dtype = DataType[\"BINARY\"]\n", - " elem_bits = dtype.bitwidth()\n", - " packed_bits = self.get_outstream_width()\n", - " packed_hls_type = \"ap_uint<%d>\" % packed_bits\n", - " elem_hls_type = dtype.get_hls_datatype_str()\n", - " npy_type = \"float\"\n", - " npy_out = \"%s/output.npy\" % code_gen_dir\n", - " shape = self.get_folded_output_shape()\n", - " shape_cpp_str = str(shape).replace(\"(\", \"{\").replace(\")\", \"}\")\n", - "\n", - " # note: the innermost dim is not reversed for the output\n", - " self.code_gen_dict[\"$DATAOUTSTREAM$\"] = [\n", - " 'apintstream2npy<%s, %s, %d, %s>(out, %s, \"%s\", false);'\n", - " % (\n", - " packed_hls_type,\n", - " elem_hls_type,\n", - " elem_bits,\n", - " npy_type,\n", - " shape_cpp_str,\n", - " npy_out,\n", - " )\n", - " ]\n", - "\n", - " def save_as_npy(self):\n", - " self.code_gen_dict[\"$SAVEASCNPY$\"] = []\n", - "\n", - " def blackboxfunction(self):\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " if mem_mode == \"const\":\n", - " self.code_gen_dict[\"$BLACKBOXFUNCTION$\"] = [\n", - " \"\"\"void {}(hls::stream> &in0,\n", - " hls::stream> &out\n", - " )\"\"\".format(\n", - " self.onnx_node.name,\n", - " self.get_instream_width(),\n", - " self.get_outstream_width(),\n", - " )\n", - " ]\n", - " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " self.code_gen_dict[\"$BLACKBOXFUNCTION$\"] = [\n", - " \"\"\"void {}(\n", - " hls::stream> &in0,\n", - " hls::stream> &weights,\n", - " hls::stream> &out\n", - " )\"\"\".format(\n", - " self.onnx_node.name,\n", - " self.get_instream_width(),\n", - " self.get_weightstream_width(),\n", - " self.get_outstream_width(),\n", - " )\n", - " ]\n", - "\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Please set mem_mode to \"const\" or \"decoupled\", currently no other\n", - " parameter value is supported!\"\"\"\n", - " )\n", - "\n", - " def pragmas(self):\n", - " 
mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " ram_style_thresholds = self.get_nodeattr(\"ram_style_thresholds\")\n", - " self.code_gen_dict[\"$PRAGMAS$\"] = [\n", - " \"#pragma HLS INTERFACE axis port=in0 name=in0_\" + self.hls_sname()\n", - " ]\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " \"#pragma HLS INTERFACE axis port=out name=out_\" + self.hls_sname()\n", - " )\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " \"#pragma HLS INTERFACE ap_ctrl_none port=return\"\n", - " )\n", - "\n", - " if mem_mode == \"const\":\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append('#include \"params.h\"')\n", - " # the weight tensor is ap_uint [PE][WMEM]\n", - " # partition for parallel access along the PE dimension (dim 1)\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " (\n", - " \"#pragma HLS ARRAY_PARTITION variable=weights.m_weights \"\n", - " \"complete dim=1\"\n", - " )\n", - " )\n", - " elif mem_mode == \"decoupled\" or mem_mode == \"external\":\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " \"#pragma HLS INTERFACE axis port=weights name=weights_\"\n", - " + self.hls_sname()\n", - " )\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " \"#pragma HLS stream depth=8 variable=weights\"\n", - " )\n", - "\n", - " else:\n", - " raise Exception(\n", - " \"\"\"Please set mem_mode to \"const\", \"decoupled\", or external,\n", - " currently no other parameter value is supported!\"\"\"\n", - " )\n", - "\n", - " # the threshold tensor is acc_type [PE][TMEM][N_THRES]\n", - " # partition for parallel access along PE and N_THRES\n", - " # dimensions (dims 1 and 3)\n", - " if self.calc_tmem() != 0:\n", - " # TODO find a better way of checking for no pregenerated thresholds\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " (\n", - " \"#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds \"\n", - " \"complete dim=1\"\n", - " )\n", - " )\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " (\n", - " \"#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds \"\n", - " \"complete dim=3\"\n", - " )\n", - " )\n", - " # add resource pragma for thresholds if set\n", - " if ram_style_thresholds == \"distributed\":\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " (\n", - " \"#pragma HLS RESOURCE variable=threshs.m_thresholds \"\n", - " \"core=ROM_2P_LUTRAM\"\n", - " )\n", - " )\n", - " elif ram_style_thresholds == \"block\":\n", - " self.code_gen_dict[\"$PRAGMAS$\"].append(\n", - " (\n", - " \"#pragma HLS RESOURCE variable=threshs.m_thresholds \"\n", - " \"core=ROM_2P_BRAM\"\n", - " )\n", - " )\n", - " elif ram_style_thresholds == \"auto\":\n", - " # no pragma needed\n", - " pass\n", - " else:\n", - " raise Exception(\n", - " \"Unrecognized ram_style_thresholds value:\" + ram_style_thresholds\n", - " )\n", - "\n", - " def code_generation_ipi(self):\n", - " cmd = []\n", - " # add streamer if needed\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " if mem_mode == \"decoupled\":\n", - " runtime_writable = self.get_nodeattr(\"runtime_writeable_weights\") == 1\n", - " if self.get_nodeattr(\"ram_style\") == \"ultra\":\n", - " assert (\n", - " runtime_writable == 1\n", - " ), \"Layer with URAM weights must have runtime_writeable_weights=1\"\n", - " node_name = self.onnx_node.name\n", - " sname = self.hls_sname()\n", - " # create a hierarchy for this layer, with the same port names\n", - " clk_name = self.get_verilog_top_module_intf_names()[\"clk\"][0]\n", - " rst_name = self.get_verilog_top_module_intf_names()[\"rst\"][0]\n", - " dout_name = 
self.get_verilog_top_module_intf_names()[\"m_axis\"][0][0]\n", - " din_name = self.get_verilog_top_module_intf_names()[\"s_axis\"][0][0]\n", - " cmd.append(\"create_bd_cell -type hier %s\" % node_name)\n", - " cmd.append(\"create_bd_pin -dir I -type clk /%s/%s\" % (node_name, clk_name))\n", - " cmd.append(\"create_bd_pin -dir I -type rst /%s/%s\" % (node_name, rst_name))\n", - " cmd.append(\n", - " \"create_bd_intf_pin -mode Master \"\n", - " \"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s\"\n", - " % (node_name, dout_name)\n", - " )\n", - " cmd.append(\n", - " \"create_bd_intf_pin -mode Slave \"\n", - " \"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s\" % (node_name, din_name)\n", - " )\n", - " # instantiate the hls ip\n", - " cmd.append(\n", - " \"create_bd_cell -type ip -vlnv %s /%s/%s\"\n", - " % (self.get_nodeattr(\"ip_vlnv\"), node_name, node_name)\n", - " )\n", - " # instantiate a streamer and connect it to the HLS IP\n", - " strm_vlnv = \"xilinx.com:user:memstream:1.0\"\n", - " strm_inst = node_name + \"_wstrm\"\n", - " cmd.append(\n", - " \"create_bd_cell -type ip -vlnv %s /%s/%s\"\n", - " % (strm_vlnv, node_name, strm_inst)\n", - " )\n", - " cmd.append(\n", - " \"set_property -dict [list \"\n", - " \"CONFIG.NSTREAMS {1} \"\n", - " \"CONFIG.MEM_DEPTH {%d} \"\n", - " \"CONFIG.MEM_WIDTH {%d} \"\n", - " \"CONFIG.MEM_INIT {%s} \"\n", - " \"CONFIG.RAM_STYLE {%s} \"\n", - " \"CONFIG.STRM0_DEPTH {%d} \"\n", - " \"CONFIG.STRM0_WIDTH {%d} \"\n", - " \"CONFIG.STRM0_OFFSET {0} \"\n", - " \"] [get_bd_cells /%s/%s]\"\n", - " % (\n", - " self.calc_wmem(),\n", - " self.get_weightstream_width_padded(),\n", - " self.get_nodeattr(\"code_gen_dir_ipgen\") + \"/\",\n", - " self.get_nodeattr(\"ram_style\"),\n", - " self.calc_wmem(),\n", - " self.get_weightstream_width_padded(),\n", - " node_name,\n", - " strm_inst,\n", - " )\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_intf_net [get_bd_intf_pins %s/%s/m_axis_0] \"\n", - " \"[get_bd_intf_pins %s/%s/weights_%s]\"\n", - " % (node_name, strm_inst, node_name, node_name, sname)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aresetn]\"\n", - " % (node_name, rst_name, node_name, strm_inst)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/aclk]\"\n", - " % (node_name, clk_name, node_name, strm_inst)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]\"\n", - " % (node_name, rst_name, node_name, node_name, rst_name)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_net [get_bd_pins %s/%s] [get_bd_pins %s/%s/%s]\"\n", - " % (node_name, clk_name, node_name, node_name, clk_name)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", - " \"[get_bd_intf_pins %s/%s/%s]\"\n", - " % (node_name, din_name, node_name, node_name, din_name)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", - " \"[get_bd_intf_pins %s/%s/%s]\"\n", - " % (node_name, dout_name, node_name, node_name, dout_name)\n", - " )\n", - " if runtime_writable:\n", - " # expose axi lite interface for writeable weights\n", - " axilite_name = self.get_verilog_top_module_intf_names()[\"axilite\"][0]\n", - " cmd.append(\n", - " \"create_bd_intf_pin -mode Slave \"\n", - " \"-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s\"\n", - " % (node_name, axilite_name)\n", - " )\n", - " cmd.append(\n", - " \"connect_bd_intf_net [get_bd_intf_pins %s/%s] \"\n", - " \"[get_bd_intf_pins %s/%s/%s]\"\n", - " 
% (node_name, axilite_name, node_name, strm_inst, axilite_name)\n", - " )\n", - " # TODO calculate and pass in segment size here\n", - " cmd.append(\"assign_bd_address\")\n", - " cmd.append(\"save_bd_design\")\n", - " elif mem_mode == \"const\" or mem_mode == \"external\":\n", - " # base class impl sufficient for const/external modes\n", - " return super().code_generation_ipi()\n", - " else:\n", - " raise Exception(\"Unrecognized mem_mode for MatrixVectorActivation\")\n", - " return cmd\n", - "\n", - " def get_verilog_top_module_intf_names(self):\n", - " intf_names = super().get_verilog_top_module_intf_names()\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " sname = self.hls_sname()\n", - " if mem_mode == \"external\":\n", - " intf_names[\"s_axis\"].append(\n", - " (\"weights_\" + sname, self.get_weightstream_width_padded())\n", - " )\n", - " if mem_mode == \"decoupled\":\n", - " # only expose axilite interface if attribute is set\n", - " runtime_writable = self.get_nodeattr(\"runtime_writeable_weights\") == 1\n", - " if runtime_writable:\n", - " intf_names[\"axilite\"] = [\"s_axilite\"]\n", - " return intf_names\n", - "\n", - " def get_op_and_param_counts(self):\n", - " in_features = self.get_nodeattr(\"MW\")\n", - " out_features = self.get_nodeattr(\"MH\")\n", - " weight_bits = self.get_weight_datatype().bitwidth()\n", - " inp_bits = self.get_input_datatype().bitwidth()\n", - " num_inp_vec = self.get_nodeattr(\"numInputVectors\")\n", - " num_repetitions = int(np.prod(num_inp_vec))\n", - " mac_count = in_features * out_features * num_repetitions\n", - " # cannonicalize op type: highest bitwidth operand first s.t.\n", - " # e.g. mac_8bx4b and mac_4bx8b don't appear as two different op types\n", - " bw1 = min(inp_bits, weight_bits)\n", - " bw2 = max(inp_bits, weight_bits)\n", - " mac_op_type = \"op_mac_%dbx%db\" % (bw1, bw2)\n", - " weight_param_type = \"param_weight_%db\" % (weight_bits)\n", - " weight_count = in_features * out_features\n", - " ret_dict = {mac_op_type: mac_count, weight_param_type: weight_count}\n", - " if self.get_nodeattr(\"noActivation\") == 0:\n", - " tdt = DataType[self.get_nodeattr(\"accDataType\")]\n", - " thres_bits = tdt.bitwidth()\n", - " thres_param_type = \"param_threshold_%db\" % (thres_bits)\n", - " thres_count = out_features\n", - " ret_dict[thres_param_type] = thres_count\n", - " return ret_dict\n", - "\n", - " def derive_characteristic_fxns(self, period):\n", - " n_inps = np.prod(self.get_folded_input_shape()[:-1])\n", - " io_dict = {\n", - " \"inputs\": {\n", - " \"in0\": [0 for i in range(n_inps)],\n", - " },\n", - " \"outputs\": {\"out\": []},\n", - " }\n", - " mem_mode = self.get_nodeattr(\"mem_mode\")\n", - " if mem_mode in [\"decoupled\", \"external\"]:\n", - " n_weight_inps = self.calc_wmem()\n", - " num_w_reps = np.prod(self.get_nodeattr(\"numInputVectors\"))\n", - " io_dict[\"inputs\"][\"weights\"] = [\n", - " 0 for i in range(num_w_reps * n_weight_inps)\n", - " ]\n", - " super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict)\n", - "\n" - ] + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA28AAAHWCAYAAADglbFoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABr3UlEQVR4nO3de3zO9f/H8ee1sRmzOc1mmTkVhhEVS0ORpZFCpcRESOQYWvV16uDQwSGhE+P7JYWiyGFOU4iS5SzkVMxktjnObO/fH267fi4bdnHNtYvH/Xa7bnW9P+/P+/P6XNf1uux1fT6f98dijDECAAAAAORrbs4OAAAAAABwfRRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8Abljjxo3VuHFjZ4fhUAcOHJDFYlFMTIyzQ3GIPXv2qFmzZvL19ZXFYtH8+fNvajyLxaJhw4Y5JLYrrV69WhaLRXPnzs2T8R0tL1+Lqzl27Jjatm2rkiVLymKxaNy4cbd0+7dCp06d5O3t7eww8j2LxaJevXrd0Lrly5dXp06dHBsQgFuC4g24DcXExMhisVz18csvv+R6rB07dmjYsGE6cOBA3gV8AyZNmnTbFFh5KSoqSlu3btW7776r//73v7rvvvucHRJuQr9+/bR06VJFR0frv//9rx577DFnh3TbO3v2rIYNG6bVq1c7OxQAUAFnBwAg74wYMUIVKlTI1l65cuVcj7Fjxw4NHz5cjRs3Vvny5W2WLVu27GZDvGGTJk1SqVKl+PX4Gs6dO6f169frzTffvOFf6JG/rFy5Uq1atdJrr73m7FDuGGfPntXw4cMl6bY70wCA66F4A25jzZs3z9MjLR4eHnk2Nm7e8ePHJUnFihVzbiBwmMTERIe+n+fPn5eHh4fc3DgRxxUYY3T+/Hl5eXk5O5Q8c/HiRWVmZvLvC3AVfFsDd7jZs2erbt26Klq0qHx8fFSzZk2NHz9e0qXTL59++mlJ0sMPP2w97TLr9KErr3nLumbpm2++0fDhw3XXXXepaNGiatu2rVJSUpSWlqa+ffuqdOnS8vb21osvvqi0tDSbeKZNm6ZHHnlEpUuXlqenp0JCQjR58mSbPuXLl9f27dsVFxdnjenyOJKTk9W3b18FBQXJ09NTlStX1ujRo5WZmWkzTnJysjp16iRfX18VK1ZMUVFRSk5OztXrlnVq6tq1a9W/f3/5+fmpSJEieuqpp6xF0+UmTZqk6tWry9PTU4GBgerZs2eut5WTzZs3q3nz5vLx8ZG3t7eaNGliczrssGHDFBwcLEkaOHCgLBZLtiOnVzp//ryGDRume+65R4UKFVKZMmXUunVr7du376ZiyZKcnKx+/fqpfPny8vT0VNmyZdWxY0f9+++/Vx07LS1NLVq0kK+vr9atW3fD8RtjVL58ebVq1SrH9Xx9fdW9e/ebfi3++ecfde7cWf7+/vL09FT16tU1derUbP0+/vhjVa9eXYULF1bx4sV13333adasWVcdN+vzZozRJ598Yv3cZ/nrr7/09NNPq0SJEipcuLDq16+vRYsW2YyRlZ+zZ8/WW2+9pbvuukuFCxdWamrqVbebmZmpcePGqXr16ipUqJD8/f3VvXt3nTx50qbfggULFBkZqcDAQHl6eqpSpUp6++23lZGRkW3MDRs26PHHH1fx4sVVpEgRhYaGWr9zrnwtn3zySXl7e8vPz0+vvfZajuNdqXz58mrRooV+/vlnPfDAAypUqJAqVqyoGTNmZOt7ve+KAwcOyM/PT5I0fPhw6+s+bNgwff/997JYLNqyZYt1vHnz5slisah169Y226lWrZqeffZZ6/OLFy/q7bffVqVKleTp6any5cvrjTfeyPZ9mLUvS5cu1X333ScvLy99+umnV933d955R25ubvr444+v+zpdLikpSa+99ppq1qwpb29v+fj4qHnz5vrjjz+sfU6fPq0iRYqoT58+2db/+++/5e7urpEjR1rbcvM9nHWN8QcffKBx48ZZX48dO3bYFT9wJ+HIG3AbS0lJyfaHscViUcmSJSVJsbGxeu6559SkSRONHj1akrRz506tXbtWffr0UcOGDdW7d29NmDBBb7zxhqpVqyZJ1v9ezciRI+Xl5aXXX39de/fu1ccff6yCBQvKzc1NJ0+e1LBhw/TLL78oJiZGFSpU0JAhQ6zrTp48WdWrV9cTTzyhAgUK6IcfftArr7yizMxM9ezZU5I0btw4vfrqq/L29tabb74pSfL395d06RSnRo0a6Z9//lH37t1Vrlw5rVu3TtHR0Tp69Kh1ggdjjFq1aqWff/5ZL7/8sqpVq6bvvvtOUVFRdr3Gr776qooXL66hQ4fqwIEDGjdunHr16qWvv/7a2mfYsGEaPny4mjZtqh49emj37t2aPHmyfv31V61du1YFCxa0a5vbt29XeHi4fHx8NGjQIBUsWFCffvqpGjdurLi4ONWrV0+tW7dWsWLF1K9fPz333HN6/PHHrzkJREZGhlq0aKEVK1aoXbt26tOnj06dOqXY2Fht27ZNlSpVuuFYpEt/+IWHh2vnzp3q3Lmz6tSpo3///Vfff/+9/v77b5UqVSrb2OfOnVOrVq3022+/afny5br//vtvKv4XXnhBY8aMUVJSkkqUKGFd94cfflBqaqpeeOGFm3otjh07pvr161snkvDz89PixYvVpUsXpaamqm/fvpKkzz//XL1791bbtm3Vp08fnT9/Xlu2bNGGDRv0/PPP5zh2w4YN9d///lcdOnTQo48+qo4dO9ps98EHH9TZs2fVu3dvlSxZUtOnT9cTTzyhuXPn6qmnnrIZ6+2335aHh4dee+01paWlXfMIR/fu3RUTE6MXX3xRvXv31v79+zVx4kRt3rzZ5rMbExMjb29v9e/fX97e3lq5cqWGDBmi1NRUvf/++9bxYmNj1aJFC5UpU0Z9+vRRQECAdu7cqYULF9oUBRkZGYqIiFC9evX0wQcfaPny5frwww9VqVIl9ejR46rxZtm7d6/atm2rLl26KCoqSlOnTlWnTp1Ut25dVa9eXVLuviv8/Pw0efJk9ejRQ0899ZS1KAsNDVXZsmVlsVi0Zs0ahYaGSpJ++uknubm56eeff7bGcvz4ce3atcvm1OWXXnpJ06dPV9u2bTVgwABt2LBBI0eO1M6dO/Xdd9/Z7Mvu3bv13HPPqXv37uratauqVKmS4z6/9dZbeu+99/Tpp5+qa9eu132NLvfXX39p/vz5evrpp1WhQgUdO3ZMn376qRo1aqQdO3YoMDBQ3t7eeuqpp/T111/ro48+kru7u3X9r776SsYYtW/fPtev7eWmTZum8+fPq1u3bvL09LTJTwBXMABuO9OmTTOScnx4enpa+/Xp08f4+PiYixcvXnWsOXPmGElm1apV2ZY1atTINGrUyPp81apVRpKpUaOGuXDhgrX9ueeeMxaLxTRv3txm/bCwMBMcHGzTdvbs2WzbiYiIMBUrVrRpq169us22s7z99tumSJEi5s
8//7Rpf/311427u7s5dOiQMcaY+fPnG0lmzJgx1j4XL1404eHhRpKZNm1atrEvl/UaN23a1GRmZlrb+/XrZ9zd3U1ycrIxxpjExETj4eFhmjVrZjIyMqz9Jk6caCSZqVOnXnM7OXnyySeNh4eH2bdvn7XtyJEjpmjRoqZhw4bWtv379xtJ5v3337/umFOnTjWSzEcffZRt2eX7J8kMHTrU7liGDBliJJlvv/32quNnfX7mzJljTp06ZRo1amRKlSplNm/e7JD4d+/ebSSZyZMn2yx/4oknTPny5a39bvS16NKliylTpoz5999/bdZp166d8fX1tX62W7VqZapXr37dfcqJJNOzZ0+btr59+xpJ5qeffrK2nTp1ylSoUMGUL1/e+rnLen0rVqyYY55d6aeffjKSzMyZM23alyxZkq09p/G6d+9uChcubM6fP2+MuZRfFSpUMMHBwebkyZM2fS9/XaOioowkM2LECJs+9957r6lbt+514w4ODjaSzJo1a6xtiYmJxtPT0wwYMMDaltvviuPHj2d7r7NUr17dPPPMM9bnderUMU8//bSRZHbu3GmMMebbb781kswff/xhjDEmPj7eSDIvvfSSzVivvfaakWRWrlyZbV+WLFmSbduXfxYGDBhg3NzcTExMzHVfn6xxo6KirM/Pnz9v8/1kzKXvD09PT5v3YenSpUaSWbx4sU3f0NBQm+/j3L62Wd9RPj4+JjExMVexA3c6TpsEbmOffPKJYmNjbR6LFy+2Li9WrJjOnDmj2NhYh263Y8eONkeT6tWrJ2OMOnfubNOvXr16Onz4sC5evGhtu/xajqwjh40aNdJff/2llJSU6257zpw5Cg8PV/HixfXvv/9aH02bNlVGRobWrFkjSfrxxx9VoEABm1/x3d3d9eqrr9q1r926dbM5fS08PFwZGRk6ePCgJGn58uW6cOGC+vbta3NdUdeuXeXj45Pt1LbrycjI0LJly/Tkk0+qYsWK1vYyZcro+eef188//3zN0+CuZt68eSpVqlSO+3/5/t1oLPPmzVOtWrWyHQXKafyUlBQ1a9ZMu3bt0urVq1W7dm2HxH/PPfeoXr16mjlzpnVZUlKSFi9erPbt21v73chrYYzRvHnz1LJlSxljbD57ERERSklJ0e+//y7pUt79/fff+vXXX6+7X7nx448/6oEHHtBDDz1kbfP29la3bt104MCBbKegRUVF5eqaqTlz5sjX11ePPvqozf7UrVtX3t7eWrVqlbXv5eOdOnVK//77r8LDw3X27Fnt2rVL0qXTa/fv36++fftmu24vp9f15ZdftnkeHh6uv/7667pxS1JISIjCw8Otz/38/FSlShWb9XP7XXEt4eHh+umnn6z7/ccff6hbt24qVaqUtf2nn35SsWLFVKNGDUmX3i9J6t+/v81YAwYMkKRs3wkVKlRQREREjts3xqhXr14aP368/ve//9l95kAWT09P6/dTRkaGTpw4IW9vb1WpUsX6uZWkpk2bKjAw0CaHtm3bpi1btliPXEv2v7Zt2rSxnp4K4No4bRK4jT3wwAPXnLDklVde0TfffKPmzZvrrrvuUrNmzfTMM8/c9PTj5cqVs3nu6+srSQoKCsrWnpmZqZSUFOupnGvXrtXQoUO1fv16nT171qZ/SkqKdayr2bNnj7Zs2XLVPwQSExMlSQcPHlSZMmWynUp4tVOSrubKfS1evLgkWa8JyirirhzXw8NDFStWtC7PrePHj+vs2bM5xlmtWjVlZmbq8OHD1lPDcmvfvn2qUqWKChTI/T8L9sSyb98+tWnTJlfj9u3bV+fPn9fmzZtzvR+5jb9jx47q1auXDh48qODgYM2ZM0fp6enq0KGD3WNd7vjx40pOTtZnn32mzz77LMc+WZ+9wYMHa/ny5XrggQdUuXJlNWvWTM8//7waNGiQ6+1d7uDBg9bTUy+XdXrzwYMHrYWDpBxnoM3Jnj17lJKSotKlS+e4PGt/pEunz7711ltauXJlth8Psn50ybpe8PJYrqZQoULZcrh48eLZrrW7mivzMqf1c/tdcS3h4eGaMmWK9u7dq3379slisSgsLMxa1HXt2lU//fSTGjRoYC2ODh48KDc3t2yz/gYEBKhYsWLZvhOu9X7NmDFDp0+f1uTJk/Xcc89dN96ryczM1Pjx4zVp0iTt37/f5trCrO9mSXJzc1P79u01efJknT17VoULF9bMmTNVqFAh6/XRkv2vbW4/kwAo3oA7WunSpRUfH6+lS5dq8eLFWrx4saZNm6aOHTtq+vTpNzzu5ddC5KbdGCPp0h93TZo0UdWqVfXRRx8pKChIHh4e+vHHHzV27NhsE47kJDMzU48++qgGDRqU4/J77rknl3uRO9fbJ9ivVatWmj17tkaNGqUZM2Y4dCbEdu3aqV+/fpo5c6beeOMN/e9//9N9991nd9F+pazP5gsvvHDVox9Z10VVq1ZNu3fv1sKFC7VkyRLNmzdPkyZN0pAhQ6xT0uel3M5UmJmZqdKlS9scZblc1h/mycnJatSokXx8fDRixAhVqlRJhQoV0u+//67BgwfnKm+vdLW8utn1L89LR3xXZB3tXLNmjf766y/VqVNHRYoUUXh4uCZMmKDTp09r8+bNevfdd7Ote7WjuFe61vvVoEEDxcfHa+LEiXrmmWdu+Fqx9957T//5z3/UuXNnvf322ypRooTc3NzUt2/fbO9fx44d9f7772v+/Pl67rnnNGvWLOukQlnsfW1v59kzAUejeAPucB4eHmrZsqVatmypzMxMvfLKK/r000/1n//8R5UrV871HxiO8MMPPygtLU3ff/+9zS/nl5+eleVqcVWqVEmnT59W06ZNr7mt4OBgrVixQqdPn7Y5+rZ79+4bjP7q28ka9/JTCy9cuKD9+/dfN84r+fn5qXDhwjnGuWvXLrm5uWU7wpkblSpV0oYNG5Senp7rCVTsiaVSpUratm1brsZ98skn1axZM3Xq1ElFixbNNtvozcRfokQJRUZGaubMmWrfvr3Wrl2bbfKEG30tihYtqoyMjFy9p0WKFNGzzz6rZ599VhcuXFDr1q317rvvKjo6WoUKFcrVNrMEBwdf9T3IWn4jKlWqpOXLl6tBgwbX/ON69erVOnHihL799ls1bNjQ2r5///5s40mXTrOz93OfF3L7XXGt78By5cqpXLly+umnn/TXX39ZT9Vs2LCh+vfvrzlz5igjI8PmdQkODlZmZqb27NljM/nTsWPHlJycbNf7VblyZY0ZM0aNGzfWY489phUrVqho0aK5Xj/L3Llz9fDDD+vLL7+0aU9OTs42mVCNGjV07733aubMmSpbtqwOHTqUbXbL3L62AOzHNW/AHezEiRM2z93c3KxHB7KmrC5SpIgk3dS09rmV9Wv55b+Op6SkaNq0adn6FilSJMeYnnnmGa1fv15Lly7Ntiw5Odl6fd3jjz+uixcv2hQGGRkZdk+xfT1NmzaVh4eHJkyYYLNfX375pVJSUhQZGWltO3TokPUP7qtxd3dXs2bNtGDBAh04cMDafuzYMc2aNUsPPfSQfHx87
I6zTZs2+vfffzVx4sRsy652FNGeWNq0aaM//vgj20x6Vxu/Y8eOmjBhgqZMmaLBgwc7NP4OHTpox44dGjhwoNzd3dWuXbsbHiuLu7u72rRpo3nz5uVYpF5++4gr887Dw0MhISEyxig9Pf3qO3kVjz/+uDZu3Kj169db286cOaPPPvtM5cuXV0hIiN1jSpdyKSMjQ2+//Xa2ZRcvXrTmX055e+HCBU2aNMlmnTp16qhChQoaN25cttx1xpHq3H5XFC5c2NqWk/DwcK1cuVIbN260Fm+1a9dW0aJFNWrUKHl5ealu3brW/o8//rgkZfvR4KOPPpIkm++E3AgNDdWPP/6onTt3qmXLljp37pxd60uX3sMr34M5c+bon3/+ybF/hw4dtGzZMo0bN04lS5ZU8+bNbZbn9rUFYD+OvAG3scWLF+dYDDz44IOqWLGiXnrpJSUlJemRRx5R2bJldfDgQX388ceqXbu29Rfh2rVry93dXaNHj1ZKSoo8PT2t92FztGbNmlmPBHbv3l2nT5/W559/rtKlS+vo0aM2fevWravJkyfrnXfeUeXKlVW6dGk98sgjGjhwoL7//nu1aNHCOjX4mTNntHXrVs2dO1cHDhxQqVKl1LJlSzVo0ECvv/66Dhw4oJCQEH377be5mhTFHn5+foqOjtbw4cP12GOP6YknntDu3bs1adIk3X///TYX+Xfs2FFxcXHX/UP2nXfeUWxsrB566CG98sorKlCggD799FOlpaVpzJgxNxRnx44dNWPGDPXv39/6R+iZM2e0fPlyvfLKKzneH82eWAYOHKi5c+fq6aefVufOnVW3bl0lJSXp+++/15QpU1SrVq1sY/fq1Uupqal688035evrqzfeeMMh8UdGRqpkyZKaM2eOmjdvnu2zfKOvxahRo7Rq1SrVq1dPXbt2VUhIiJKSkvT7779r+fLlSkpKknTpcx4QEKAGDRrI399fO3fu1MSJExUZGXlDR01ef/11ffXVV2revLl69+6tEiVKaPr06dq/f7/mzZt3w6edNmrUSN27d9fIkSMVHx+vZs2aqWDBgtqzZ4/mzJmj8ePHq23btnrwwQdVvHhxRUVFqXfv3rJYLPrvf/+b7XPs5uamyZMnq2XLlqpdu7ZefPFFlSlTRrt27dL27dtz/EM/L+X2u8LLy0shISH6+uuvdc8996hEiRKqUaOG9dq98PBwzZw5UxaLxXoapbu7ux588EEtXbpUjRs3trkdQ61atRQVFaXPPvvMesrpxo0bNX36dD355JN6+OGH7d6X+vXra8GCBXr88cfVtm1bzZ8/365bkLRo0UIjRozQiy++qAcffFBbt27VzJkzbc4WuNzzzz+vQYMG6bvvvlOPHj2ybSu3ry2AG3CLZ7cEcAtc61YBumwa/Llz55pmzZqZ0qVLGw8PD1OuXDnTvXt3c/ToUZvxPv/8c1OxYkXj7u5uc9uAq90qYM6cOTnG8+uvv9q0Dx061Egyx48ft7Z9//33JjQ01BQqVMiUL1/ejB492jp1+/79+639EhISTGRkpClatKiRZBPHqVOnTHR0tKlcubLx8PAwpUqVMg8++KD54IMPbG5hcOLECdOhQwfj4+NjfH19TYcOHczmzZvtulXAlfuU9RpceWuFiRMnmqpVq5qCBQsaf39/06NHj2zTpTdq1Mjk9mv5999/NxEREcbb29sULlzYPPzww2bdunU2fey5VYAxl6Z7f/PNN02FChVMwYIFTUBAgGnbtq3NbQCUw5TpuYnFmEuvd69evcxdd91lPDw8TNmyZU1UVJR1av2rfX4GDRpkJJmJEyfedPxZXnnlFSPJzJo1y6GvxbFjx0zPnj1NUFCQdb0mTZqYzz77zNrn008/NQ0bNjQlS5Y0np6eplKlSmbgwIEmJSXlmvuXtc0rbxVgjDH79u0zbdu2NcWKFTOFChUyDzzwgFm4cKFNn6u9vtfz2Wefmbp16xovLy9TtGhRU7NmTTNo0CBz5MgRa5+1a9ea+vXrGy8vLxMYGGgGDRpknVb+ylz4+eefzaOPPmqKFi1qihQpYkJDQ83HH39sXR4VFWWKFCmSLY6s74vrCQ4ONpGRkdnar/y+Mib33xXr1q0zdevWNR4eHtne9+3btxtJplq1ajZjv/POO0aS+c9//pMtlvT0dDN8+HDr5ysoKMhER0dbb6twvX0xJufPwoIFC0yBAgXMs88+m23q/yvHvfJWAQMGDDBlypQxXl5epkGDBmb9+vU5vmZZHn/8cSMpx1w3Jnevrb3fUQCMsRjDVfUAgDtLv3799OWXXyohIcF6WhyA3Hvqqae0detW7d2719mhAHcUrnkDANxRzp8/r//9739q06YNhRtwA44ePapFixbZ3GIDwK3BNW8AgDtCYmKili9frrlz5+rEiRPq06ePs0MCXMr+/fu1du1affHFFypYsKC6d+/u7JCAOw7FGwDgjrBjxw61b99epUuX1oQJE1S7dm1nhwS4lLi4OL344osqV66cpk+froCAAGeHBNxxuOYNAAAAAFwA17wBAAAAgAugeAMAAAAAF8A1b7mQmZmpI0eOqGjRorJYLM4OBwAAAICTGGN06tQpBQYGys3t1h4Lo3jLhSNHjigoKMjZYQAAAADIJw4fPqyyZcve0m1SvOVC0aJFJV16g3x8fJwcDQAAAABnSU1NVVBQkLVGuJUo3nIh61RJHx8fijcAAAAATrmciglLAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABBZwdAADkBxaLsyNwLmOcHQFuR+SVsyMAcLvhyBsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAXkm+Jt1KhRslgs6tu3r7Xt/Pnz6tmzp0qWLClvb2+1adNGx44ds1nv0KFDioyMVOHChVW6dGkNHDhQFy9etOmzevVq1alTR56enqpcubJiYmJuwR4BAAAAgOPki+Lt119/1aeffqrQ0FCb9n79+umHH37QnDlzFBcXpyNHjqh169bW5RkZGYqMjNSFCxe0bt06TZ8+XTExMRoyZIi1z/79+xUZGamHH35Y8fHx6tu3r1566SUtXbr0lu0fAAAAANwsizHOncj29OnTqlOnjiZNmqR33nlHtWvX1rhx45SSkiI/Pz/NmjVLbdu2lSTt2rVL1apV0/r161W/fn0tXrxYLVq00JEjR+Tv7y9JmjJligYPHqzjx4/Lw8NDgwcP1qJFi7Rt2zbrNtu1a6fk5GQtWbIkVzGmpqbK19dXKSkp8vHxcfyLAMDpmNLc2RHgdkReOTsCAHnBmbWB04+89ezZU5GRkWratKlN+6ZNm5Senm7TXrVqVZUrV07r16+XJK1fv141a9a0Fm6SFBERodTUVG3fvt3a58qxIyIirGPkJC0tTampqTYPAAAAAHAmp96ke/bs2fr999/166+/
ZluWkJAgDw8PFStWzKbd399fCQkJ1j6XF25Zy7OWXatPamqqzp07Jy8vr2zbHjlypIYPH37D+wUAAAAAjua0I2+HDx9Wnz59NHPmTBUqVMhZYeQoOjpaKSkp1sfhw4edHRIAAACAO5zTirdNmzYpMTFRderUUYECBVSgQAHFxcVpwoQJKlCggPz9/XXhwgUlJyfbrHfs2DEFBARIkgICArLNPpn1/Hp9fHx8cjzqJkmenp7y8fGxeQAAAACAMzmteGvSpIm2bt2q+Ph46+O+++5T+/btrf9fsGBBrVixwrrO7t27dejQIYWFhUmSwsLCtHXrViUmJlr7xMbGysfHRyEhIdY+l4+R1SdrDAAAAABwBU675q1o0aKqUaOGTVuRIkVUsmRJa3uXLl3Uv39/lShRQj4+Pnr11VcVFham+vXrS5KaNWumkJAQdejQQWPGjFFCQoLeeust9ezZU56enpKkl19+WRMnTtSgQYPUuXNnrVy5Ut98840WLVp0a3cYAAAAAG6CUycsuZ6xY8fKzc1Nbdq0UVpamiIiIjRp0iTrcnd3dy1cuFA9evRQWFiYihQpoqioKI0YMcLap0KFClq0aJH69eun8ePHq2zZsvriiy8UERHhjF0CAAAAgBvi9Pu8uQLu8wbc/rgflbMjwO2IvHJ2BADywh19nzcAAAAAwPVRvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHAB+fom3bg67p3j7AgAAACAW4sjbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFOLV4mzx5skJDQ+Xj4yMfHx+FhYVp8eLF1uWNGzeWxWKxebz88ss2Yxw6dEiRkZEqXLiwSpcurYEDB+rixYs2fVavXq06derI09NTlStXVkxMzK3YPQAAAABwmALO3HjZsmU1atQo3X333TLGaPr06WrVqpU2b96s6tWrS5K6du2qESNGWNcpXLiw9f8zMjIUGRmpgIAArVu3TkePHlXHjh1VsGBBvffee5Kk/fv3KzIyUi+//LJmzpypFStW6KWXXlKZMmUUERFxa3cYAAAAAG6QxRhjnB3E5UqUKKH3339fXbp0UePGjVW7dm2NGzcux76LFy9WixYtdOTIEfn7+0uSpkyZosGDB+v48ePy8PDQ4MGDtWjRIm3bts26Xrt27ZScnKwlS5bkOG5aWprS0tKsz1NTUxUUFKSUlBT5+Pg4bmdvgsXi7AicK399anE7IKecHQFuR+SVsyMAkBdSU1Pl6+vrlNog31zzlpGRodmzZ+vMmTMKCwuzts+cOVOlSpVSjRo1FB0drbNnz1qXrV+/XjVr1rQWbpIUERGh1NRUbd++3dqnadOmNtuKiIjQ+vXrrxrLyJEj5evra30EBQU5ajcBAAAA4IY49bRJSdq6davCwsJ0/vx5eXt767vvvlNISIgk6fnnn1dwcLACAwO1ZcsWDR48WLt379a3334rSUpISLAp3CRZnyckJFyzT2pqqs6dOycvL69sMUVHR6t///7W51lH3gAAAADAWZxevFWpUkXx8fFKSUnR3LlzFRUVpbi4OIWEhKhbt27WfjVr1lSZMmXUpEkT7du3T5UqVcqzmDw9PeXp6Zln4wMAAACAvZx+2qSHh4cqV66sunXrauTIkapVq5bGjx+fY9969epJkvbu3StJCggI0LFjx2z6ZD0PCAi4Zh8fH58cj7oBAAAAQH7k9OLtSpmZmTaThVwuPj5eklSmTBlJUlhYmLZu3arExERrn9jYWPn4+FhPvQwLC9OKFStsxomNjbW5rg4AAAAA8junnjYZHR2t5s2bq1y5cjp16pRmzZql1atXa+nSpdq3b59mzZqlxx9/XCVLltSWLVvUr18/NWzYUKGhoZKkZs2aKSQkRB06dNCYMWOUkJCgt956Sz179rSe9vjyyy9r4sSJGjRokDp37qyVK1fqm2++0aJFi5y56wAAAABgF6cWb4mJierYsaOOHj0qX19fhYaGaunSpXr00Ud1+PBhLV++XOPGjdOZM2cUFBSkNm3a6K233rKu7+7uroULF6pHjx4KCwtTkSJFFBUVZXNfuAoVKmjRokXq16+fxo8fr7Jly+qLL77gHm8AAAAAXEq+u89bfuTMezlcDffOcXYEuN2QU86OALcj8srZEQDIC9znDQAAAABwTRRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABTi3eJk+erNDQUPn4+MjHx0dhYWFavHixdfn58+fVs2dPlSxZUt7e3mrTpo2OHTtmM8ahQ4cUGRmpwoULq3Tp0ho4cKAuXrxo02f16tWqU6eOPD09VblyZcXExNyK3QMAAAAAh7G7eDt37pzOnj1rfX7w4EGNGzdOy5Yts3vjZcuW1ahRo7Rp0yb99ttveuSRR9SqVStt375dktSvXz/98MMPmjNnjuLi4nTkyBG1bt3aun5GRoYiIyN14cIFrVu3TtOnT1dMTIyGDBli7bN//35FRkbq4YcfVnx8vPr27auXXnpJS5cutTteAAAAAHAWizHG2LNCs2bN1Lp1a7388stKTk5W1apVVbBgQf3777/66KOP1KNHj5sKqESJEnr//ffVtm1b+fn5adasWWrbtq0kadeuXapWrZrWr1+v+vXra/HixWrRooWOHDkif39/SdKUKVM0ePBgHT9+XB4eHho8eLAWLVqkbdu2WbfRrl07JScna8mSJbmKKTU1Vb6+vkpJSZGPj89N7Z+jWCzOjsC57PvUAtdHTjk7AtyOyCtnRwAgLzizNrD7yNvvv/+u8PBwSdLcuXPl7++vgwcPasaMGZowYcINB5KRkaHZs2frzJkzCgsL06ZNm5Senq6mTZta+1StWlXlypXT+vXrJUnr169XzZo1rYWbJEVERCg1NdV69G79+vU2Y2T1yRojJ2lpaUpNTbV5AAAAAIAz2V28nT17VkWLFpUkLVu2TK1bt5abm5vq16+vgwcP2h3
A1q1b5e3tLU9PT7388sv67rvvFBISooSEBHl4eKhYsWI2/f39/ZWQkCBJSkhIsCncspZnLbtWn9TUVJ07dy7HmEaOHClfX1/rIygoyO79AgAAAABHsrt4q1y5subPn6/Dhw9r6dKlatasmSQpMTHxhg4bVqlSRfHx8dqwYYN69OihqKgo7dixw+5xHCk6OlopKSnWx+HDh50aDwAAAADYXbwNGTJEr732msqXL68HHnhAYWFhki4dhbv33nvtDsDDw0OVK1dW3bp1NXLkSNWqVUvjx49XQECALly4oOTkZJv+x44dU0BAgCQpICAg2+yTWc+v18fHx0deXl45xuTp6WmdATPrAQAAAADOZHfx1rZtWx06dEi//fabzYyNTZo00dixY286oMzMTKWlpalu3boqWLCgVqxYYV22e/duHTp0yFowhoWFaevWrUpMTLT2iY2NlY+Pj0JCQqx9Lh8jq0/WGAAAAADgCgrcyEoBAQE6ffq0YmNj1bBhQ3l5een++++Xxc5ppaKjo9W8eXOVK1dOp06d0qxZs7R69WotXbpUvr6+6tKli/r3768SJUrIx8dHr776qsLCwlS/fn1Jl2a+DAkJUYcOHTRmzBglJCTorbfeUs+ePeXp6SlJevnllzVx4kQNGjRInTt31sqVK/XNN99o0aJFN7LrAAAAAOAUdhdvJ06c0DPPPKNVq1bJYrFoz549qlixorp06aLixYvrww8/zPVYiYmJ6tixo44ePSpfX1+FhoZq6dKlevTRRyVJY8eOlZubm9q0aaO0tDRFRERo0qRJ1vXd3d21cOFC9ejRQ2FhYSpSpIiioqI0YsQIa58KFSpo0aJF6tevn8aPH6+yZcvqiy++UEREhL27DgAAAABOY/d93jp27KjExER98cUXqlatmv744w9VrFhRS5cuVf/+/a1T9N9OuM9b/sO9c+Bo5JSzI8DtiLxydgQA8oIzawO7j7wtW7ZMS5cuVdmyZW3a77777hu6VQAAAAAA4PrsnrDkzJkzKly4cLb2pKQk63VmAAAAAADHsrt4Cw8P14wZM6zPLRaLMjMzNWbMGD388MMODQ4AAAAAcIndp02OGTNGTZo00W+//aYLFy5o0KBB2r59u5KSkrR27dq8iBEAAAAA7nh2H3mrUaOG/vzzTz300ENq1aqVzpw5o9atW2vz5s2qVKlSXsQIAAAAAHc8u2ebvBMx22T+w6cWjkZOOTsC3I7IK2dHACAv5PvZJrds2ZLrAUNDQ284GAAAAABAznJVvNWuXVsWi0XXO0hnsViUkZHhkMAAAAAAAP8vV8Xb/v378zoOAAAAAMA15Kp4Cw4Ozus4AAAAAADXYPdskyNHjtTUqVOztU+dOlWjR492SFAAAAAAAFt2F2+ffvqpqlatmq29evXqmjJlikOCAgAAAADYsrt4S0hIUJkyZbK1+/n56ejRow4JCgAAAABgy+7iLSgoSGvXrs3WvnbtWgUGBjokKAAAAACArVxNWHK5rl27qm/fvkpPT9cjjzwiSVqxYoUGDRqkAQMGODxAAAAAAMANFG8DBw7UiRMn9Morr+jChQuSpEKFCmnw4MGKjo52eIAAAAAAAMlirnfn7as4ffq0du7cKS8vL919993y9PR0dGz5Rmpqqnx9fZWSkiIfHx9nhyNJslicHYFz3dinFrg6csrZEeB2RF45OwIAecGZtYHd17xNmzZN586dk7e3t+6//37VqFHjti7cAAAAACA/sLt4e/311+Xv768uXbpo3bp1eRETAAAAAOAKdhdv//zzj6ZPn65///1XjRs3VtWqVTV69GglJCTkRXwAAAAAAN1A8VagQAE99dRTWrBggQ4fPqyuXbtq5syZKleunJ544gktWLBAmZmZeRErAAAAANyx7C7eLufv76+HHnpIYWFhcnNz09atWxUVFaVKlSpp9erVDgoRAAAAAHBDxduxY8f0wQcfqHr16mrcuLFSU1O1cOFC7d+/X//884+eeeYZRUVFOTpWAAAAALhj2X2rgJYtW2rp0qW655579NJLL6ljx44qUaKETZ/ExEQFBATcNqdPcquA/Ifpl+Fo5JSzI8DtiLxydgQA8oIzawO7b9JdunRpxcXFKSws7Kp9/Pz8tH///psKDAAAAADw/274Jt13Eo685T98auFo5JSzI8DtiLxydgQA8oJL3aS7d+/emjBhQrb2iRMnqm/fvo6ICQAAAABwBbuLt3nz5qlBgwbZ2h988EHNnTvXIUEBAAAAAGzZXbydOHFCvr6+2dp9fHz077//OiQoAAAAAIAtu4u3ypUra8mSJdnaFy9erIoVKzokKAAAAACALbtnm+zfv7969eql48eP65FHHpEkrVixQh9++KHGjRvn6PgAAAAAALqB4q1z585KS0vTu+++q7fffluSVL58eU2ePFkdO3Z0eIAAAAAAgJu8VcDx48fl5eUlb29vR8aU73CrgPyH6ZfhaOSUsyPA7Yi8cnYEAPKCS92k+3J+fn6OigMAAAAAcA12T1jiSCNHjtT999+vokWLqnTp0nryySe1e/dumz6NGzeWxWKxebz88ss2fQ4dOqTIyEgVLlxYpUuX1sCBA3Xx4kWbPqtXr1adOnXk6empypUrKyYmJq93DwAAAAAcxqnFW1xcnHr27KlffvlFsbGxSk9PV7NmzXTmzBmbfl27dtXRo0etjzFjxliXZWRkKDIyUhcuXNC6des0ffp0xcTEaMiQIdY++/fvV2RkpB5++GHFx8erb9++eumll7R06dJbtq8AAAAAcDNu6po3Rzt+/LhKly6tuLg4NWzYUNKlI2+1a9e+6kyWixcvVosWLXTkyBH5+/tLkqZMmaLBgwfr+PHj8vDw0ODBg7Vo0SJt27bNul67du2UnJyc420PrsQ1b/lP/vnU4nZBTjk7AtyOyCtnRwAgLzizNrD7yNvff/991WW//PLLTQWTkpIiSSpRooRN+8yZM1WqVCnVqFFD0dHROnv2rHXZ+vXrVbNmTWvhJkkRERFKTU3V9u3brX2aNm1qM2ZERITWr1+fYxxpaWlKTU21eQAAAACAM9ldvDVr1kxJSUnZ2teuXavHHnvshgPJzMxU37591aBBA9WoUcPa/vzzz+t///ufVq1apejoaP33v//VCy+8YF2ekJBgU7hJsj5PSEi4Zp/U1FSdO3cuWywjR46Ur6+v9REUFHTD+wUAAAAAjmD3bJP169dXs2bNtGrVKhUtWlSStGbNGrVs2VLDhg274UB69uypbdu26eeff7Zp79atm/X/a9asqTJlyqhJkybat2+fKlWqdMPbu5bo6Gj179/f+jw1NZUCDgAAAIBT2X3k7YsvvlC5cuXUsmVLpaWladWqVYqMjNSIESPUr1+/GwqiV69eWrhwoVatWqWyZctes2+9evUkSXv37pUkBQQE6NixYzZ9sp4HBARcs4+Pj4+8vLyybcPT01M+Pj42DwAAAABwJruLNzc3N82ePVsFCxbUI488oieeeEIjR45Unz597N64MUa9evXSd999p5UrV6pChQrXXSc+Pl6SVKZMGU
lSWFiYtm7dqsTERGuf2NhY+fj4KCQkxNpnxYoVNuPExsYqLCzM7pgBAAAAwBlyNdvkli1bsrWdOnVKzz33nCIjI9WjRw9re2hoaK43/sorr2jWrFlasGCBqlSpYm339fWVl5eX9u3bp1mzZunxxx9XyZIltWXLFvXr109ly5ZVXFycpEu3Cqhdu7YCAwM1ZswYJSQkqEOHDnrppZf03nvvSbp0q4AaNWqoZ8+e6ty5s1auXKnevXtr0aJFioiIuG6czDaZ/zCDFxyNnHJ2BLgdkVfOjgBAXnBmbZCr4s3NzU0Wi0WXd738edb/WywWZWRk5H7jV/lWnzZtmjp16qTDhw/rhRde0LZt23TmzBkFBQXpqaee0ltvvWXzQh08eFA9evTQ6tWrVaRIEUVFRWnUqFEqUOD/L+lbvXq1+vXrpx07dqhs2bL6z3/+o06dOuUqToq3/Id/EOFo5JSzI8DtiLxydgQA8kK+L94OHjyY6wGDg4NvKqD8iOIt/+EfRDgaOeXsCHA7Iq+cHQGAvODM2iBXs03ejgUZAAAAALgSuycsGTlypKZOnZqtferUqRo9erRDggIAAAAA2LK7ePv0009VtWrVbO3Vq1fXlClTHBIUAAAAAMCW3cVbQkKCdZr+y/n5+eno0aMOCQoAAAAAYMvu4i0oKEhr167N1r527VoFBgY6JCgAAAAAgK1cTVhyua5du6pv375KT0/XI488IklasWKFBg0apAEDBjg8QAAAAADADRRvAwcO1IkTJ/TKK6/owoULkqRChQpp8ODBio6OdniAAAAAAIBc3uctJ6dPn9bOnTvl5eWlu+++W56eno6OLd/gPm/5D/fOgaORU86OALcj8srZEQDIC/n+Pm858fb2tk5ccjsXbgAAAACQH9g9YUlmZqZGjBghX19fBQcHKzg4WMWKFdPbb7+tzMzMvIgRAAAAAO54dh95e/PNN/Xll19q1KhRatCggSTp559/1rBhw3T+/Hm9++67Dg8SAAAAAO50dl/zFhgYqClTpuiJJ56waV+wYIFeeeUV/fPPPw4NMD/gmrf8h+sI4GjklLMjwO2IvHJ2BADygjNrA7tPm0xKSlLVqlWztVetWlVJSUkOCQoAAAAAYMvu4q1WrVqaOHFitvaJEyeqVq1aDgkKAAAAAGDL7mvexowZo8jISC1fvlxhYWGSpPXr1+vw4cP68ccfHR4gAAAAAOAGjrw1atRIf/75p5566iklJycrOTlZrVu31u7duxUeHp4XMQIAAADAHe+Gb9J9J2HCkvyHTy0cjZxydgS4HZFXzo4AQF7I9zfp3rJlS64HDA0NveFgAAAAAAA5y1XxVrt2bVksFl3vIJ3FYlFGRoZDAgMAAAAA/L9cFW/79+/P6zgAAAAAANeQq+ItODg4r+MAAAAAAFyD3bNNjhw5UlOnTs3WPnXqVI0ePdohQQEAAAAAbNldvH366aeqWrVqtvbq1atrypQpDgkKAAAAAGDL7uItISFBZcqUydbu5+eno0ePOiQoAAAAAIAtu4u3oKAgrV27Nlv72rVrFRgY6JCgAAAAAAC2cjVhyeW6du2qvn37Kj09XY888ogkacWKFRo0aJAGDBjg8AABAAAAADdQvA0cOFAnTpzQK6+8ogsXLkiSChUqpMGDBys6OtrhAQIAAAAAJIu53p23r+L06dPauXOnvLy8dPfdd8vT09PRseUbqamp8vX1VUpKinx8fJwdjiTJYnF2BM51Y59a4OrIKWdHgNsReeXsCADkBWfWBnYfecvi7e2t+++/35GxAAAAAACuwu4JSwAAAAAAtx7FGwAAAAC4AIo3AAAAAHABuSre6tSpo5MnT0qSRowYobNnz+ZpUAAAAAAAW7kq3nbu3KkzZ85IkoYPH67Tp0/naVAAAAAAAFu5Kt5q166tF198UcOHD5cxRh988IFGjBiR48MeI0eO1P3336+iRYuqdOnSevLJJ7V7926bPufPn1fPnj1VsmRJeXt7q02bNjp27JhNn0OHDikyMlKFCxdW6dKlNXDgQF28eNGmz+rVq1WnTh15enqqcuXKiomJsStWAAAAAHCmXN0qICYmRkOHDtXChQtlsVi0ePFiFSiQfVWLxaIhQ4bkeuNxcXHq2bOn7r//fl28eFFvvPGGmjVrph07dqhIkSKSpH79+mnRokWaM2eOfH191atXL7Vu3Vpr166VJGVkZCgyMlIBAQFat26djh49qo4dO6pgwYJ67733JEn79+9XZGSkXn75Zc2cOVMrVqzQSy+9pDJlyigiIiLX8QIAAACAs9h9k243NzclJCSodOnSDg/m+PHjKl26tOLi4tSwYUOlpKTIz89Ps2bNUtu2bSVJu3btUrVq1bR+/XrVr19fixcvVosWLXTkyBH5+/tLkqZMmaLBgwfr+PHj8vDw0ODBg7Vo0SJt27bNuq127dopOTlZS5YsuW5c3KQ7/+HGp3A0csrZEeB2RF45OwIAecGZtYHds01mZmbmSeEmSSkpKZKkEiVKSJI2bdqk9PR0NW3a1NqnatWqKleunNavXy9JWr9+vWrWrGkt3CQpIiJCqamp2r59u7XP5WNk9cka40ppaWlKTU21eQAAAACAM93QrQL27dunV199VU2bNlXTpk3Vu3dv7du376YCyczMVN++fdWgQQPVqFFDkpSQkCAPDw8VK1bMpq+/v78SEhKsfS4v3LKWZy27Vp/U1FSdO3cuWywjR46Ur6+v9REUFHRT+wYAAAAAN8vu4m3p0qUKCQnRxo0bFRoaqtDQUG3YsEHVq1dXbGzsDQfSs2dPbdu2TbNnz77hMRwlOjpaKSkp1sfhw4edHRIAAACAO1yuJiy53Ouvv65+/fpp1KhR2doHDx6sRx991O4gevXqpYULF2rNmjUqW7astT0gIEAXLlxQcnKyzdG3Y8eOKSAgwNpn48aNNuNlzUZ5eZ8rZ6g8duyYfHx85OXllS0eT09PeXp62r0fAAAAAJBX7D7ytnPnTnXp0iVbe+fOnbVjxw67xjLGqFevXvruu++0cuVKVahQwWZ53bp1VbBgQa1YscLatnv3bh06dEhhYWGSpLCwMG3dulWJiYnWPrGxsfLx8VFISIi1z+VjZPXJGgMAAAAA8ju7izc/Pz/Fx8dna4+Pj7d7IpOePXvqf//7n2bNmqWiRYsqISFBCQkJ1uvQfH191aVLF/Xv31+rVq3Spk2b9OKLLyosLEz169eXJDVr1kwhISHq0KGD/vjjDy1dulRvvfWWevbsaT169vLLL+uvv/7SoEGDtGvXLk2aNEnffPON+vXrZ+/uAwAAAIBT2H3aZNeuXdWtWzf99ddfevDBByVJa9eu1ejRo9W/f3+7xpo8ebIkqXHjxjbt06ZNU6dOnSRJY8eOlZubm9q0aaO0tDRFRERo0qRJ1r7u7u5auHChevToobCwMBUpUkRRUVE2NwyvUKGCFi1apH79+mn8+PEqW7asvvjiC+7xBgAAAMBl2H2fN2OMxo0bpw8//FBHjhyRJAUGBmrgwIHq3bu3LLfhTV24z1v+w71z4GjklLMjw
O2IvHJ2BADygjNrA7uLt8udOnVKklS0aFGHBZQfUbzlP/yDCEcjp5wdAW5H5JWzIwCQF5xZG9h92uTlbveiDQAAAADyixu6STcAAAAA4NaieAMAAAAAF0DxBgAAAAAuwK7iLT09XU2aNNGePXvyKh4AAAAAQA7sKt4KFiyoLVu25FUsAAAAAICrsPu0yRdeeEFffvllXsQCAAAAALgKu28VcPHiRU2dOlXLly9X3bp1VaRIEZvlH330kcOCAwAAAABcYnfxtm3bNtWpU0eS9Oeff9oss9zpd+MEAAAAgDxid/G2atWqvIgDAAAAAHANN3yrgL1792rp0qU6d+6cJMkY47CgAAAAAAC27C7eTpw4oSZNmuiee+7R448/rqNHj0qSunTpogEDBjg8QAAAAADADRRv/fr1U8GCBXXo0CEVLlzY2v7ss89qyZIlDg0OAAAAAHCJ3de8LVu2TEuXLlXZsmVt2u+++24dPHjQYYEBAAAAAP6f3Ufezpw5Y3PELUtSUpI8PT0dEhQAAAAAwJbdxVt4eLhmzJhhfW6xWJSZmakxY8bo4YcfdmhwAAAAAIBL7D5tcsyYMWrSpIl+++03XbhwQYMGDdL27duVlJSktWvX5kWMAAAAAHDHs/vIW40aNfTnn3/qoYceUqtWrXTmzBm1bt1amzdvVqVKlfIiRgAAAAC441kMN2i7rtTUVPn6+iolJUU+Pj7ODkeSZLE4OwLn4lMLRyOnnB0BbkfklbMjAJAXnFkb2H3apCSdPHlSX375pXbu3ClJCgkJ0YsvvqgSJUo4NDgAAAAAwCV2nza5Zs0alS9fXhMmTNDJkyd18uRJTZgwQRUqVNCaNWvyIkYAAAAAuOPZfdpkzZo1FRYWpsmTJ8vd3V2SlJGRoVdeeUXr1q3T1q1b8yRQZ+K0yfyHU1HgaOSUsyPA7Yi8cnYEAPKCM2sDu4+87d27VwMGDLAWbpLk7u6u/v37a+/evQ4NDgAAAABwid3FW506dazXul1u586dqlWrlkOCAgAAAADYytWEJVu2bLH+f+/evdWnTx/t3btX9evXlyT98ssv+uSTTzRq1Ki8iRIAAAAA7nC5uubNzc1NFotF1+tqsViUkZHhsODyC655y3+4jgCORk45OwLcjsgrZ0cAIC/k+1sF7N+/P6/jAAAAAABcQ66Kt+Dg4LyOAwAAAABwDTd0k+4jR47o559/VmJiojIzM22W9e7d2yGBAQAAAAD+n93FW0xMjLp37y4PDw+VLFlSlstOaLdYLBRvAAAAAJAH7C7e/vOf/2jIkCGKjo6Wm5vddxoAAAAAANwAu6uvs2fPql27dhRuAAAAAHAL2V2BdenSRXPmzMmLWAAAAAAAV2F38TZy5EjFxcWpcePGevXVV9W/f3+bhz3WrFmjli1bKjAwUBaLRfPnz7dZ3qlTJ1ksFpvHY489ZtMnKSlJ7du3l4+Pj4oVK6YuXbro9OnTNn22bNmi8PBwFSpUSEFBQRozZoy9uw0AAAAATmX3NW8jR47U0qVLVaVKFUnKNmGJPc6cOaNatWqpc+fOat26dY59HnvsMU2bNs363NPT02Z5+/btdfToUcXGxio9PV0vvviiunXrplmzZkm6dBO9Zs2aqWnTppoyZYq2bt2qzp07q1ixYurWrZtd8QIAAACAs9hdvH344YeaOnWqOnXqdNMbb968uZo3b37NPp6engoICMhx2c6dO7VkyRL9+uuvuu+++yRJH3/8sR5//HF98MEHCgwM1MyZM3XhwgVNnTpVHh4eql69uuLj4/XRRx9dtXhLS0tTWlqa9XlqauoN7iEAAAAAOIbdp016enqqQYMGeRFLjlavXq3SpUurSpUq6tGjh06cOGFdtn79ehUrVsxauElS06ZN5ebmpg0bNlj7NGzYUB4eHtY+ERER2r17t06ePJnjNkeOHClfX1/rIygoKI/2DgAAAAByx+7irU+fPvr444/zIpZsHnvsMc2YMUMrVqzQ6NGjFRcXp+bNmysjI0OSlJCQoNKlS9usU6BAAZUoUUIJCQnWPv7+/jZ9sp5n9blSdHS0UlJSrI/Dhw87etcAAAAAwC52nza5ceNGrVy5UgsXLlT16tVVsGBBm+Xffvutw4Jr166d9f9r1qyp0NBQVapUSatXr1aTJk0ctp0reXp6Zru2DgAAAACcye7irVixYledXCSvVaxYUaVKldLevXvVpEkTBQQEKDEx0abPxYsXlZSUZL1OLiAgQMeOHbPpk/X8atfSAQAAAEB+Y3fxdvnMj7fa33//rRMnTqhMmTKSpLCwMCUnJ2vTpk2qW7euJGnlypXKzMxUvXr1rH3efPNNpaenW48SxsbGqkqVKipevLhzdgQAAAAA7GT3NW+OdPr0acXHxys+Pl6StH//fsXHx+vQoUM6ffq0Bg4cqF9++UUHDhzQihUr1KpVK1WuXFkRERGSpGrVqumxxx5T165dtXHjRq1du1a9evVSu3btFBgYKEl6/vnn5eHhoS5dumj79u36+uuvNX78eLvvSQcAAAAAzmQxxhh7VqhQocI17+f2119/5Xqs1atX6+GHH87WHhUVpcmTJ+vJJ5/U5s2blZycrMDAQDVr1kxvv/22zQQkSUlJ6tWrl3744Qe5ubmpTZs2mjBhgry9va19tmzZop49e+rXX39VqVKl9Oqrr2rw4MG5jjM1NVW+vr5KSUmRj49PrtfLS3beUu+2Y9+nFrg+csrZEeB2RF45OwIAecGZtYHdxdv48eNtnqenp2vz5s1asmSJBg4cqNdff92hAeYHFG/5D/8gwtHIKWdHgNsReeXsCADkBWfWBnZf89anT58c2z/55BP99ttvNx0QAAAAACA7h13z1rx5c82bN89RwwEAAAAALuOw4m3u3LkqUaKEo4YDAAAAAFzG7tMm7733XpsJS4wxSkhI0PHjxzVp0iSHBgcAAAAAuMTu4u3JJ5+0ee7m5iY/Pz81btxYVatWdVRcAAAAAIDL2D3b5J2I2SbzHz61cDRyytkR4HZEXjk7AgB5wZm1gVNv0g0AAAAAyJ1cnzbp5uZ2zZtzS5LFYtHFixdvOigAAAAAgK1cF2/ffffdVZetX79eEyZMUGZmpkOCAgAAAADYynXx1qpVq2xtu3fv1uuvv64ffvhB7du314gRIxwaHAAAAADgkhu65u3IkSPq2rWratasqYsXLyo+Pl7Tp09XcHCwo+MDAAAAAMjO4i0lJUWDBw9W5cqVtX37dq1YsUI//PCDatSokVfxAQAAAABkx2mTY8aM0ejRoxUQEKCvvvoqx9MoAQAAAAB5I9f3eXNzc5OXl5eaNm0qd3f3q/b79ttvHRZcfsF93vIf7p0DRyOnnB0BbkfklbMjAJAXnFkb5PrIW8eOHa97qwAAAAAAQN7IdfEWExOTh2EAAAAAAK7lhmabBAAAAADcWhRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIAL
oHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAFOLd7WrFmjli1bKjAwUBaLRfPnz7dZbozRkCFDVKZMGXl5ealp06bas2ePTZ+kpCS1b99ePj4+KlasmLp06aLTp0/b9NmyZYvCw8NVqFAhBQUFacyYMXm9awAAAADgUE4t3s6cOaNatWrpk08+yXH5mDFjNGHCBE2ZMkUbNmxQkSJFFBERofPnz1v7tG/fXtu3b1dsbKwWLlyoNWvWqFu3btblqampatasmYKDg7Vp0ya9//77GjZsmD777LM83z8AAAAAcBSLMcY4OwhJslgs+u677/Tkk09KunTULTAwUAMGDNBrr70mSUpJSZG/v79iYmLUrl077dy5UyEhIfr111913333SZKWLFmixx9/XH///bcCAwM1efJkvfnmm0pISJCHh4ck6fXXX9f8+fO1a9euXMWWmpoqX19fpaSkyMfHx/E7fwMsFmdH4Fz541OL2wk55ewIcDsir5wdAYC84MzaIN9e87Z//34lJCSoadOm1jZfX1/Vq1dP69evlyStX79exYoVsxZuktS0aVO5ublpw4YN1j4NGza0Fm6SFBERod27d+vkyZM5bjstLU2pqak2DwAAAABwpnxbvCUkJEiS/P39bdr9/f2tyxISElS6dGmb5QUKFFCJEiVs+uQ0xuXbuNLIkSPl6+trfQQFBd38DgEAAADATci3xZszRUdHKyUlxfo4fPiws0MCAAAAcIfLt8VbQECAJOnYsWM27ceOHbMuCwgIUGJios3yixcvKikpyaZPTmNcvo0reXp6ysfHx+YBAAAAAM6Ub4u3ChUqKCAgQCtWrLC2paamasOGDQoLC5MkhYWFKTk5WZs2bbL2WblypTIzM1WvXj1rnzVr1ig9Pd3aJzY2VlWqVFHx4sVv0d4AAAAAwM1xavF2+vRpxcfHKz4+XtKlSUri4+N16NAhWSwW9e3bV++8846+//57bd26VR07dlRgYKB1Rspq1arpscceU9euXbVx40atXbtWvXr1Urt27RQYGChJev755+Xh4aEuXbpo+/bt+vrrrzV+/Hj179/fSXsNAAAAAPZz6q0CVq9erYcffjhbe1RUlGJiYmSM0dChQ/XZZ58pOTlZDz30kCZNmqR77rnH2jcpKUm9evXSDz/8IDc3N7Vp00YTJkyQt7e3tc+WLVvUs2dP/frrrypVqpReffVVDR48ONdxcquA/Ifpl+Fo5JSzI8DtiLxydgQA8oIza4N8c5+3/IziLf/hUwtHI6ecHQFuR+SVsyMAkBe4zxsAAAAA4Joo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAF5OvibdiwYbJYLDaPqlWrWpefP39ePXv2VMmSJeXt7a02bdro2LFjNmMcOnRIkZGRKly4sEqXLq2BAwfq4sWLt3pXAAAAAOCmFHB2ANdTvXp1LV++3Pq8QIH/D7lfv35atGiR5syZI19fX/Xq1UutW7fW2rVrJUkZGRmKjIxUQECA1q1bp6NHj6pjx44qWLCg3nvvvVu+LwAAAABwo/J98VagQAEFBARka09JSdGXX36pWbNm6ZFHHpEkTZs2TdWqVdMvv/yi+vXra9myZdqxY4eWL18uf39/1a5dW2+//bYGDx6sYcOGycPD41bvDgAAAADckHx92qQk7dmzR4GBgapYsaLat2+vQ4cOSZI2bdqk9PR0NW3a1Nq3atWqKleunNavXy9JWr9+vWrWrCl/f39rn4iICKWmpmr79u1X3WZaWppSU1NtHgAAAADgTPm6eKtXr55iYmK0ZMkSTZ48Wfv371d4eLhOnTqlhIQEeXh4qFixYjbr+Pv7KyEhQZKUkJBgU7hlLc9adjUjR46Ur6+v9REUFOTYHQMAAAAAO+Xr0yabN29u/f/Q0FDVq1dPwcHB+uabb+Tl5ZVn242Ojlb//v2tz1NTUyngAAAAADhVvj7ydqVixYrpnnvu0d69exUQEKALFy4oOTnZps+xY8es18gFBARkm30y63lO19Fl8fT0lI+Pj80DAAAAAJzJpYq306dPa9++fSpTpozq1q2rggULasWKFdblu3fv1qFDhxQWFiZJCgsL09atW5WYmGjtExsbKx8fH4WEhNzy+AEAAADgRuXr0yZfe+01tWzZUsHBwTpy5IiGDh0qd3d3Pffcc/L19VWXLl3Uv39/lShRQj4+Pnr11VcVFham+vXrS5KaNWumkJAQdejQQWPGjFFCQoLeeust9ezZU56enk7eOwAAAADIvXxdvP3999967rnndOLECfn5+emhhx7SL7/8Ij8/P0nS2LFj5ebmpjZt2igtLU0RERGaNGmSdX13d3ctXLhQPXr0UFhYmIoUKaKoqCiNGDHCWbsEAAAAADfEYowxzg4iv0tNTZWvr69SUlLyzfVvFouzI3AuPrVwNHLK2RHgdkReOTsCAHnBmbWBS13zBgAAAAB3Koo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcQAFnBwAAAADg+iwWZ0fgXMY4OwLn48gbAAAAALiAO6p4++STT1S+fHkVKlRI9erV08aNG50dEgAAAADkyh1TvH399dfq37+/hg4dqt9//121atVSRESEEhMTnR0aAAAAAFzXHVO8ffTRR+ratatefPFFhYSEaMqUKSpcuLCmTp3q7NAAAAAA4LruiAlLLly4oE2bNik6Otra5ubmpqZNm2r9+vXZ+qelpSktLc36PCUlRZKUmpqa98EiV3grAMcipwDHI68Ax8ovOZVVExgnzKByRxRv//77rzIyMuTv72/T7u/vr127dmXrP3LkSA0fPjxbe1BQUJ7FCPv4+jo7AuD2Qk4BjkdeAY6V33Lq1KlT8r3FQd0RxZu9oqOj1b9/f+vzzMxMJSUlqWTJkrLc6XO06tKvDUF
BQTp8+LB8fHycHQ7g8sgpwPHIK8CxyKn/Z4zRqVOnFBgYeMu3fUcUb6VKlZK7u7uOHTtm037s2DEFBARk6+/p6SlPT0+btmLFiuVliC7Jx8fnjk9ewJHIKcDxyCvAscipS271Ebcsd8SEJR4eHqpbt65WrFhhbcvMzNSKFSsUFhbmxMgAAAAAIHfuiCNvktS/f39FRUXpvvvu0wMPPKBx48bpzJkzevHFF50dGgAAAABc1x1TvD377LM6fvy4hgwZooSEBNWuXVtLlizJNokJrs/T01NDhw7NdmopgBtDTgGOR14BjkVO5Q8W44w5LgEAAAAAdrkjrnkDAAAAAFdH8QYAAAAALoDiDQAAAABcAMVbHipfvrzGjRvn7DBczoEDB2SxWBQfH5/n2+I9cj28ZzeGvMLV8H7dGHIK18J7dmPIq1wwt7moqCgjyXTv3j3bsldeecVIMlFRUbkaa//+/UaS2bx5c676JyYmmjNnzuSqb4sWLUxERESOy9asWWMkmT/++CNXY13NqlWrjCRz8uTJmxrnSmfPnjXFixc3JUuWNOfPn7dr3aioKNOqVSubtosXL5qjR4+a9PR0h8U4bdo04+vrm63dnvfIUSZOnGiCg4ONp6eneeCBB8yGDRtu6fYdgbz6f+SVb7b2W51XcXFxpkWLFqZMmTJGkvnuu+9u2bYdhZz6f+SUb7b2W51T7733nrnvvvuMt7e38fPzM61atTK7du26Zdt3FPLq/5FXvtnab3VeTZo0ydSsWdMULVrUFC1a1NSvX9/8+OOPdo9zRxx5CwoK0uzZs3Xu3Dlr2/nz5zVr1iyVK1fO4du7cOGCJMnPz0+FCxfO1TpdunRRbGys/v7772zLpk2bpvvuu0+hoaEOjfNGGWN08eJF6/N58+apevXqqlq1qubPn3/T47u7uysgIEAFCuT9nSzseY8c4euvv1b//v01dOhQ/f7776pVq5YiIiKUmJh4y2JwFPLKscirG3fmzBnVqlVLn3zyyS3bZl4gpxyLnLpxcXFx6tmzp3755RfFxsYqPT1dzZo105kzZ25ZDI5CXjkWeXXjypYtq1GjRmnTpk367bff9Mgjj6hVq1bavn27fQM5uKjMd7Kq+ho1apj//e9/1vaZM2ea0NBQ06pVK+uvLosXLzYNGjQwvr6+pkSJEiYyMtLs3bvXuo4km0ejRo1stvHOO++YMmXKmPLlyxtjjAkODjZjx441xlz6xaNgwYJmzZo11vFGjx5t/Pz8TEJCgklPTzf+/v7m7bffton/1KlTxtvb20yePNkYY8xPP/1kHnroIVOoUCFTtmxZ8+qrr5rTp09b+58/f94MGjTIlC1b1nh4eJhKlSqZL774wvqL0eWPrP0+f/68efXVV42fn5/x9PQ0DRo0MBs3brSOmfVrzY8//mjq1KljChYsaFatWmVd3rhxYzNlyhQzefJk8+ijj2Z7D7Zt22YiIyNN0aJFjbe3t3nooYfM3r17zdChQ7PFtGrVKptftzIyMsxdd91lJk2aZDPm77//biwWizlw4IAxxpgPP/zQ1KhRwxQuXNiULVvW9OjRw5w6dcom/ssfQ4cOzfYeGWPMwYMHzRNPPGGKFCliihYtap5++mmTkJBgXT506FBTq1YtM2PGDBMcHGx8fHzMs88+a1JTU7Ptd04eeOAB07NnT+vzjIwMExgYaEaOHJmr9fML8oq8yk95dTm58JE3coqcyo85ZcylIxSSTFxc3A2t7yzkFXmVn/PKGGOKFy9uvvjiC7vWuWOKt48++sg0adLE2t6kSRMzduxYm8SdO3eumTdvntmzZ4/ZvHmzadmypalZs6bJyMgwxhizceNGI8ksX77cHD161Jw4ccK6DW9vb9OhQwezbds2s23bNmNM9g/FwIEDTXBwsElOTja///678fDwMAsWLLBZXqlSJZOZmWltmzp1qvHy8jLJyclm7969pkiRImbs2LHmzz//NGvXrjX33nuv6dSpk7X/M888Y4KCgsy3335r9u3bZ5YvX25mz55tLl68aObNm2ckmd27d5ujR4+a5ORkY4wxvXv3NoGBgebHH38027dvN1FRUaZ48eLW/cv64IeGhpply5aZvXv3Wpft3bvXeHp6mqSkJHPixAlTqFAhazIZY8zff/9tSpQoYVq3bm1+/fVXs3v3bjN16lSza9cuc+rUKfPMM8+Yxx57zBw9etQcPXrUpKWlZTs14bXXXjMPPfSQzfs6YMAAm7axY8ealStXmv3795sVK1aYKlWqmB49ehhjjElLSzPjxo0zPj4+1u1kJfXl71FGRoapXbu2eeihh8xvv/1mfvnlF1O3bl3rF7QxlxLX29vbtG7d2mzdutWsWbPGBAQEmDfeeOOqn8EsaWlpxt3dPdsflh07djRPPPHEddfPT8gr8iq/5NWVXL14I6fIqfyWU8YYs2fPHiPJbN269YbWdxbyirzKr3l18eJF89VXXxkPDw+zfft2u9a9Y4q3xMRE4+npaQ4cOGAOHDhgChUqZI4fP26TuFc6fvy4zZfV1c53joqKMv7+/iYtLc2m/crETUtLM7Vr1zbPPPOMCQkJMV27drXpv3PnTusvD1nCw8PNCy+8YIwxpkuXLqZbt2426/z000/Gzc3NnDt3zuzevdtIMrGxsTnuT07nO58+fdoULFjQzJw509p24cIFExgYaMaMGWOz3vz587ON+cYbb5gnn3zS+rxVq1bWXzSMMSY6OtpUqFDBXLhwIceYcjrf+crXefPmzcZisZiDBw8aY4z1l5isX6JyMmfOHFOyZEnr86ud73z5e7Rs2TLj7u5uDh06ZF2+fft2I8n6K9TQoUNN4cKFbX5lGThwoKlXr95VY8nyzz//GElm3bp1Nu0DBw40DzzwwHXXz0/Iq/9HXvlm63cr8+pKrl68kVPkVH7LqYyMDBMZGWkaNGhg97rORl79P/LKN1s/Z+TVli1bTJEiRYy7u7vx9fU1ixYtyvW6We6Ia96kS+e1RkZGKiYmRtOmTVNkZKRKlSpl02fPnj167rnnVLFiRfn4+Kh8+fKSpEOHDl13/Jo1a8rDw+OafTw8PDRz5kzNmzdP58+f19ixY22WV61aVQ8++KCmTp0qSdq7d69++ukndenSRZL0xx9/KCYmRt7e3tZHRESEMjMztX//fsXHx8vd3V2NGjXK7cuiffv2KT09XQ0aNLC2FSxYUA888IB27txp0/e+++6zeZ6RkaHp06frhRdesLa98MILiomJUWZmpiQpPj5e4eHhKliwYK5julLt2rVVrVo1zZo1S9Klc/ETExP19NNPW/ssX75cTZo00V133aWiRYuqQ4cOOnHihM6ePZvr7ezcuVNBQUEKCgqytoWEhKhYsWI2r0X58uVVtGhR6/MyZcq45DVrjkBe5Yy8+n/klX3IqZyRU//vVudUz549tW3bNs2ePdvudfML8ipn5NX/u1V5VaVKFcXHx2vDhg3q0aOHoqKitGPHjlyvL91htwro3LmzYmJiNH36dHXu3Dnb8pYtWyopKUmff/65NmzYoA0bNkj6/4tPr6
VIkSK5imHdunWSpKSkJCUlJWVb3qVLF82bN0+nTp3StGnTVKlSJWsinj59Wt27d1d8fLz18ccff2jPnj2qVKmSvLy8chXDjbpyH5cuXap//vlHzz77rAoUKKACBQqoXbt2OnjwoFasWCFJDoupffv21sSdNWuWHnvsMZUsWVLSpWllW7RoodDQUM2bN0+bNm2yTlyQm/fOXld+CVksFusX1bWUKlVK7u7uOnbsmE37sWPHFBAQ4NAYbyXy6uaQV5fcaF7djsipm0NOXeKInOrVq5cWLlyoVatWqWzZso4M75Yjr24OeXXJzeaVh4eHKleurLp162rkyJGqVauWxo8fb1cMd1Tx9thjj+nChQtKT09XRESEzbITJ05o9+7deuutt9SkSRNVq1ZNJ0+etOmT9atKRkbGDW1/37596tevnz7//HPVq1dPUVFR2d7wZ555Rm5ubpo1a5ZmzJihzp07y2KxSJLq1KmjHTt2qHLlytkeHh4eqlmzpjIzMxUXF5fj9nOKv1KlSvLw8NDatWutbenp6fr1118VEhJyzf358ssv1a5dO5svkvj4eLVr105ffvmlJCk0NFQ//fST0tPTrxpTbl7P559/Xtu2bdOmTZs0d+5ctW/f3rps06ZNyszM1Icffqj69evrnnvu0ZEjR+zeTrVq1XT48GEdPnzY2rZjxw4lJydf97XIDQ8PD9WtW9f6pSZJmZmZWrFihcLCwm56fGchr8ira8nrvLodkVPk1LXcipwyxqhXr1767rvvtHLlSlWoUMEh4zoTeUVeXYuz/q3KzMxUWlqafSvZfaKli7nyfNqUlBSTkpJifZ51vnNGRoYpWbKkeeGFF8yePXvMihUrzP33329z/UR6errx8vIy77zzjklISLBe7JnTObvG2J5Le/HiRVO/fn3Tpk0bY4wxR44cMSVLlrSeU3y5Ll26mOLFixt3d3fzzz//WNv/+OMP4+XlZXr27Gk2b95s/vzzTzN//nyb2Qs7depkgoKCzHfffWf++usvs2rVKvP1118bYy5dOGqxWExMTIxJTEy0XrDZp08fExgYaBYvXmxzsWpSUpIxJufzpBMTE03BggXN4sWLs8X/448/Gk9PT3PixAnz77//mpIlS1ovVv3zzz/NjBkzrPeLeffdd025cuXMrl27zPHjx82FCxeuel55gwYNTK1atUzRokXN2bNnre3x8fFGkhk3bpzZt2+fmTFjhrnrrrtsYl67dq31QuPjx49b7+tx+XuUmZlpateubcLDw82mTZvMhg0bcrxYtVatWjZxjR071gQHB2d7HXIye/Zs4+npaWJiYsyOHTtMt27dTLFixWxmM3IF5BV5ZUz+yatTp06ZzZs3m82bNxtJ5qOPPjKbN2+2XiPhCsgpcsqY/JNTPXr0ML6+vmb16tXWSR6OHj1qsz+ugLwir4zJP3n1+uuvm7i4OLN//36zZcsW8/rrrxuLxWKWLVuWq/Wz3HHF25Uuv1g1NjbWVKtWzXh6eprQ0FCzevXqbBe/f/755yYoKMi4ubllmyb2Spd/KIYPH27KlClj/v33X+vyefPmGQ8PDxMfH2+z3rp164wk8/jjj2cbc+PGjebRRx813t7epkiRIiY0NNS8++671uXnzp0z/fr1M2XKlDEeHh6mcuXKZurUqdblI0aMMAEBAcZisVj3+9y5c+bVV181pUqVuuY0sZcn7gcffGCKFSuW40WoaWlpplixYmb8+PHGmEtfOM2aNTOFCxc2RYsWNeHh4Wbfvn3GmEtfAFn7oxymib3cpEmTjCTTsWPHbNv86KOPTJkyZYyXl5eJiIgwM2bMyBbzyy+/bEqWLOmQaWIvZ0/iGmPMxx9/bMqVK2c8PDzMAw88YH755Zdcr5tfkFfkVZb8kFc5TQUt5f7mu/kBOUVOZckPOZVTPkky06ZNy9X6+QV5RV5lyQ951blzZxMcHGw8PDyMn5+fadKkid2FmzHGWIwxxr5jdQAAAACAW+2OuuYNAAAAAFwVxRvgIIcOHbKZwvfKR26mGwZgi7wCHIucAhzvVuYVp00CDnLx4kUdOHDgqsvLly+vAgUK3LqAgNsAeQU4FjkFON6tzCuKNwAAAABwAZw2CQAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAADkUuPGjdW3b99c91+9erUsFouSk5PzLCYAwJ2D4g0AkOc6deoki8WiUaNG2bTPnz9fFovFrrHKly+vcePGOTA6AABcA8UbAOCWKFSokEaPHq2TJ086OxS7Xbhwwdkh3JT09HRnhwAAcACKNwDALdG0aVMFBARo5MiR1+z3888/Kzw8XF5eXgoKClLv3r115swZSZdOWzx48KD69esni8Uii8UiY4z8/Pw0d+5c6xi1a9dWmTJlbMb09PTU2bNnJUmHDh1Sq1at5O3tLR8fHz3zzDM6duyYtf+wYcNUu3ZtffHFF6pQoYIKFSqUY6yLFi2Sr6+vZs6cmavX4MSJE3ruued01113qXDhwqpZs6a++uor6/IZM2aoZMmSSktLs1nvySefVIcOHazPFyxYoDp16qhQoUKqWLGihg8frosXL1qXWywWTZ48WU888YSKFCmid999VydPnlT79u3l5+cnLy8v3X333Zo2bVqu4gYA5A8UbwCAW8Ld3V3vvfeePv74Y/3999859tm3b58ee+wxtWnTRlu2bNHXX3+tn3/+Wb169ZIkffvttypbtqxGjBiho0eP6ujRo7JYLGrYsKFWr14tSTp58qR27typc+fOadeuXZKkuLg43X///SpcuLAyMzPVqlUrJSUlKS4uTrGxsfrrr7/07LPP2sSyd+9ezZs3T99++63i4+OzxTpr1iw999xzmjlzptq3b5+r1+D8+fOqW7euFi1apG3btqlbt27q0KGDNm7cKEl6+umnlZGRoe+//966TmJiohYtWqTOnTtLkn766Sd17NhRffr00Y4dO/Tpp58qJiZG7777rs22hg0bpqeeekpbt25V586d9Z///Ec7duzQ4sWLtXPnTk2ePFmlSpXKVdwAgHzCAACQx6KiokyrVq2MMcbUr1/fdO7c2RhjzHfffWcu/6eoS5cuplu3bjbr/vTTT8bNzc2cO3fOGGNMcHCwGTt2rE2fCRMmmOrVqxtjjJk/f76pV6+eadWqlZk8ebIxxpimTZuaN954wxhjzLJly4y7u7s5dOiQdf3t27cbSWbjxo3GGGOGDh1qChYsaBITE22206hRI9OnTx8zceJE4+vra1avXn3N/V61apWRZE6ePHnVPpGRkWbAgAHW5z169DDNmze3Pv/www9NxYoVTWZmpjHGmCZNmpj33nvPZoz//ve/pkyZMtbnkkzfvn1t+rRs2dK8+OKL14wXAJC/ceQNAHBLjR49WtOnT9fOnTuzLfvjjz8UExMjb29v6yMiIkKZmZnav3//Vcds1KiRduzYoePHjysuLk6NGzdW48aNtXr1aqWnp2vdunVq3LixJGnnzp0KCgpSUFCQdf2QkBAVK1bMJqbg4GD5+fll29bcuXPVr18/xcbGq
lGjRnbte0ZGht5++23VrFlTJUqUkLe3t5YuXapDhw5Z+3Tt2lXLli3TP//8I0mKiYmxTviS9RqNGDHC5jXq2rWrjh49aj0tVJLuu+8+m2336NFDs2fPVu3atTVo0CCtW7fOrtgBAM5H8QYAuKUaNmyoiIgIRUdHZ1t2+vRpde/eXfHx8dbHH3/8oT179qhSpUpXHTOrGIqLi7Mp3uLi4vTrr78qPT1dDz74oF1xFilSJMf2e++9V35+fpo6daqMMXaN+f7772v8+PEaPHiwVq1apfj4eEVERNhMiHLvvfeqVq1amjFjhjZt2qTt27erU6dO1uWnT5/W8OHDbV6jrVu3as+ePTbX5l0Zf/Pmza3XCx45ckRNmjTRa6+9Zlf8AADnKuDsAAAAd55Ro0apdu3aqlKlik17nTp1tGPHDlWuXPmq63p4eCgjI8OmzWKxKDw8XAsWLND27dv10EMPqXDhwkpLS9Onn36q++67z1rMVKtWTYcPH9bhw4etR9927Nih5ORkhYSEXDf2SpUq6cMPP1Tjxo3l7u6uiRMn5nq/165dq1atWumFF16QJGVmZurPP//Mtt2XXnpJ48aN0z///KOmTZvaHCWsU6eOdu/efc3X6Gr8/PwUFRWlqKgohYeHa+DAgfrggw/sHgcA4BwceQMA3HI1a9ZU+/btNWHCBJv2wYMHa926derVq5fi4+O1Z88eLViwwDphiXTpPm9r1qzRP//8o3///dfa3rhxY3311VeqXbu2vL295ebmpoYNG2rmzJk2pzc2bdrUuv3ff/9dGzduVMeOHdWoUaNspxpezT333KNVq1Zp3rx5dt20++6771ZsbKzWrVunnTt3qnv37jazXGZ5/vnn9ffff+vzzz+3TlSSZciQIZoxY4aGDx+u7du3a+fOnZo9e7beeuuta257yJAhWrBggfbu3avt27dr4cKFqlatWq5jBwA4H8UbAMApRowYoczMTJu20NBQxcXF6c8//1R4eLjuvfdeDRkyRIGBgTbrHThwQJUqVbK5Jq1Ro0bKyMiwXtsmXSrormyzWCxasGCBihcvroYNG6pp06aqWLGivv76a7vir1KlilauXKmvvvpKAwYMyNU6b731lurUqaOIiAg1btxYAQEBevLJJ7P18/X1VZs2beTt7Z1teUREhBYuXKhly5bp/vvvV/369TV27FgFBwdfc9seHh6Kjo5WaGioGjZsKHd3d82ePTu3uwsAyAcsxt4T9gEAQJ5r0qSJqlevnu3oJADgzkXxBgBAPnLy5EmtXr1abdu21Y4dO7JdFwgAuHMxYQkAAPnIvffeq5MnT2r06NEUbgAAGxx5AwAAAAAXwIQlAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHAB/wdjk8kS8Ch0wAAAAABJRU5ErkJggg==", + "text/plain": [ + "
    " + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ - "#To view the source code of the matrix vector activation function\n", - "from finn.custom_op.fpgadataflow.matrixvectoractivation import MatrixVectorActivation\n", - "showSrc(MatrixVectorActivation)" + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(cycles_dict_updated.keys(), cycles_dict_updated.values(), color ='blue', width = 0.3)\n", + "plt.xlabel(\"Network layers\")\n", + "plt.ylabel(\"Number of clock cycles\")\n", + "plt.title(\"Estimated no. of clock cycles for each network layer\")\n", + "plt.show()" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'MatrixVectorActivation_0': {'BRAM_18K': 8,\n", + " 'BRAM_efficiency': 0.5208333333333334,\n", + " 'LUT': 418,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.006944444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0}}" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "res_dict_updated = model.analysis(res_estimation)\n", + "res_dict_updated" + ] + }, + { + "cell_type": "code", + "execution_count": 16, "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABerUlEQVR4nO3deZxO9f//8edldrMazIxl7IlhxogwtlGWsaYoKTGWJA0VX4o+IaRpp/qgtKBPpKyVT5IsQ7askd1EFIMsM5YMM/P+/eE35+Myg5kxHMvjfrudW3O9z/uc8zrXdb2vPK+zXA5jjBEAAAAAwDYF7C4AAAAAAO50BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwB50qhRIzVq1MjuMvLV3r175XA4NGnSJLtLsRXPQ85NmjRJDodDe/fuvWrfH374QZGRkfL09JTD4dCJEyeue303msPhUJ8+fewu46aW+Z5Zu3ZtrpddsmSJHA6HlixZkv+FAbAdwQy4zWT+T/9y06pVq3K8rq1bt+qVV17J0T86b6Rx48bZGhoy/3E0Y8aMy/a50j9QZ8yYYf3jKnNdOZlw6zp69Kg6dOggLy8vjR07Vv/5z3/k7e1td1m3vRUrVuiVV165LUMwgNuPq90FALg+RowYobJly2Zpr1ChQo7XsXXrVg0fPlyNGjVSmTJlnOb9+OOP11pino0bN05FihRR165dbashv1SuXFn/+c9/nNoGDx4sHx8f/etf/7KpKuS3NWvW6OTJkxo5cqSaNGlidzl3jBUrVmj48OHq2rWrAgIC7C4HAK6IYAbcplq0aKGaNWtet/W7u7tft3XfSYKDg/XEE084tb3++usqUqRIlnbcug4fPixJ+RoOTp8+zVG3W8jZs2dv+89N3pPAteFURuAONm3aNNWoUUO+vr7y8/NTeHi43nvvPUkXTol85JFHJEn33XefdTpd5rUNl15jlnlK3tdff63hw4erRIkS8vX11cMPP6zk5GSlpqbq+eefV1BQkHx8fNStWzelpqY61TNx4kTdf//9CgoKkoeHh8LCwjR+/HinPmXKlNGWLVuUkJBg1XRxHSdOnNDzzz+v0NBQeXh4qEKFCnrjjTeUkZHhtJ4TJ06oa9eu8vf3V0BAgGJjY2/J050OHTokV1dXDR8+PMu8HTt2yOFw6N///rck6dixYxowYIDCw8Pl4+MjPz8/tWjRQr/++utVt3O5awq7du2a5WhqRkaGxowZoypVqsjT01PBwcHq1auXjh8/7tRv7dq1iomJUZEiReTl5aWyZcuqe/fuV63F4XDolVdeydJepkwZp6Oo58+f1/Dhw3XXXXfJ09NThQsXVv369bVgwQKn5bZv366HH35YgYGB8vT0VM2aNfXtt99mWf+WLVt0//33y8vLSyVLltSrr76a5X2VnUaNGik2NlaSdO+998rhcDjVOX36dNWoUUNeXl5WIP/rr7+c1tG1a1f5+PgoMTFRLVu2lK+vrzp16nTF7f7111/q3r27goOD5eHhoSpVquizzz5z6nPu3DkNHTpUNWrUkL+/v7y9vdWgQQMtXrw4y/oyMjL03nvvKTw8XJ6enipatKiaN2+e7bVSc+bMUdWqVa3t/vDDD1d9ni7+DBk1apRKliwpT09PNW7cWLt3787Sf/Xq1WrevLn8/f1VsGBBRUdHa/ny5db8V155RQMHDpQklS1b1vq82Lt3r9q1a6d77rnHaX1t2rSRw+Fweu1Xr14th8OhefPmWW2///67HnnkEQUGBqpgwYKqU6eO/vvf/2a7L9OmTdPLL7+sEiVKqGDBgkpJScl2348fP65atWqpZMmS2rFjx1Wfq4stW7ZMjzzyiEqVKiUPDw+FhoaqX79++ueff6w+EydOlMPh0IYNG7Is/9prr8nFxcXpPXe151a68Pw6HA5t3bpVjz/+uAoVKqT69evnqnYAzjhiBtymkpOT9ffffzu1ORwOFS5cWJK0YMECPfbYY2rcuLHeeOMNSdK2bdu0fPlyPffcc2rYsKGeffZZvf/++3rppZdUuXJlSbL+eznx8fHy8vLSoEGDtHv3bn3wwQdyc3NTgQIFdPz4cb3yyitatWqVJk2apLJly2ro0KHWsuPHj1eVKlX0wAMPyNXVVd99952eeeYZZWRkKC4uTpI0ZswY9e3b1+lUv+DgYEnSmTNnFB0drb/++ku9evVSqVKltGLFCg0ePFgHDx7UmDFjJEnGGLVt21Y///yznn76aVWuXFmzZ8+2/vF8KwkODlZ0dLS+/vprDRs2zGneV199JRcXFytg//7775ozZ44eeeQRlS1bVocOHdJHH32k6Ohobd26VcWLF8+Xmnr16qVJkyapW7duevbZZ7Vnzx79+9//1oYNG7R8+XK5ubnp8OHDatasmYoWLapBgwYpICBAe/fu1axZs/KlBunCPxzj4+P15JNPqlatWkpJSdHatWu1fv16NW3aVNKFsFWvXj2VKFFCgwYNkre3t77++ms9+OCDmjlzph566CFJUlJSku677z6lpaVZ/SZMmCAvL6+r1vGvf/1Ld999tyZMmGCdYly+fHlJsp6ne++9V/Hx8Tp06JDee+89LV++XBs2bHA6wpaWlqaYmBjVr19fb7/9tgoWLHjZbR46dEh16tSxrnUsWrSo5s2bpx49eiglJUXPP/+8JCklJUWffPKJHnvsMfXs2VMnT57Up59+qpiYGP3yyy+KjIy01tmjRw9NmjRJLVq00JNPPqm0tDQtW7ZMq1atcjo6//PPP2vWrFl65pln5Ovrq/fff1/t27fXvn37rM+fK3n99ddVoEABDRgwQMnJyXrzzTfVqVMnrV692uqzaNEitWjRQjVq1NCwYcNUoEAB64udZcuWqVatWmrXrp127typL7/8UqNHj1aRIkUkSUWLFlWDBg30zTffKCUlRX5+fjLGaPny5SpQoICWLVumBx54QNKF0FOgQAHVq1fPel7r1q2rM2fO6Nlnn1XhwoU1efJkPfDAA5oxY4b1fsk0cuRIubu7a8CAAUpNTc32iNnff/+tpk2b6tixY0pISLDeGzk1ffp0nTlzRr1791bhwoX1yy+/6IMPPtCff/6p6dOnS5IefvhhxcXFacqUKapevbrT8lOmTFGjRo1UokSJHD+3F3vkkUd011136bXXXpMxJle1A7iEAXBbmThxopGU7eTh4WH1e+6554yfn59JS0u77LqmT59uJJnFixdnmRcdHW2io6Otx4sXLzaSTNWqVc25c+es9scee8w4HA7TokULp+WjoqJM6dKlndrOnDmTZTsxMTGmXLlyTm1VqlRx2namkSNHGm9vb7Nz506n9kGDBhkXFxezb98+Y4wxc+bMMZLMm2++afVJS0szDRo0MJLMxIkTs6z7Ypn7On369Mv2kWTi4uKynXel5/VK+3c5H330kZFkNm/e7NQeFhZm7r//fuvx2bNnTXp6ulOfPXv2GA8PDzNixAintkufh0tf70yxsbFOr+
OyZcuMJDNlyhSnfj/88INT++zZs40ks2bNmhzvZyZJZtiwYVnaS5cubWJjY63H1apVM61atbriuho3bmzCw8PN2bNnrbaMjAxTt25dc9ddd1ltzz//vJFkVq9ebbUdPnzY+Pv7G0lmz549V9xO5ri8eH/PnTtngoKCTNWqVc0///xjtc+dO9dIMkOHDrXaYmNjjSQzaNCgK24nU48ePUyxYsXM33//7dTesWNH4+/vb421tLQ0k5qa6tTn+PHjJjg42HTv3t1qW7RokZFknn322SzbysjIsP6WZNzd3c3u3buttl9//dVIMh988MEVa84cV5UrV3aq6b333nN6f2dkZJi77rrLxMTEOG37zJkzpmzZsqZp06ZW21tvvZXt67NmzRojyXz//ffGGGM2bdpkJJlHHnnE1K5d2+r3wAMPmOrVq1uPM98Hy5Yts9pOnjxpypYta8qUKWONr8x9KVeuXJbPtYvfCwcPHjRVqlQx5cqVM3v37r3i83Pxei/+7MjuczM+Pt44HA7zxx9/WG2PPfaYKV68uNNnwPr1653Gem6e22HDhhlJ5rHHHrtq3QByhlMZgdvU2LFjtWDBAqfp4tNxAgICdPr06SyndV2rLl26yM3NzXpcu3ZtGWOynKJWu3Zt7d+/X2lpaVbbxUcfMo/4RUdH6/fff1dycvJVtz19+nQ1aNBAhQoV0t9//21NTZo0UXp6upYuXSpJ+v777+Xq6qrevXtby7q4uKhv37553m87tWvXTq6urvrqq6+stt9++01bt27Vo48+arV5eHioQIELH/vp6ek6evSofHx8dPfdd2v9+vX5Usv06dPl7++vpk2bOr0GNWrUkI+Pj3WKXOaRoLlz5+r8+fP5su1LBQQEaMuWLdq1a1e2848dO6ZFixapQ4cOOnnypFXr0aNHFRMTo127dlmnd33//feqU6eO09GCokWLXvV0witZu3atDh8+rGeeeUaenp5We6tWrVSpUqUsp8dJcnrPXo4xRjNnzlSbNm1kjHF6HWJiYpScnGy93i4uLtZRnIyMDB07dkxpaWmqWbOm03ti5syZcjgcWY7KSspyx9AmTZo4HfWJiIiQn5+ffv/996vWLkndunVzOrLUoEEDSbKW37hxo3bt2qXHH39cR48etfbt9OnTaty4sZYuXXrVU0yrV68uHx8f6zNh2bJlKlmypLp06aL169frzJkzMsbo559/trYvXXgf1KpVy+mUPR8fHz311FPau3evtm7d6rSd2NjYyx5V/fPPPxUdHa3z589r6dKlKl26dI6en0tdvP7Tp0/r77//Vt26dWWMcTp1sUuXLjpw4IDTaapTpkyRl5eX2rdvLylvz+3TTz+dp7oBZMWpjMBtqlatWle8+cczzzyjr7/+Wi1atFCJEiXUrFkzdejQQc2bN7+m7ZYqVcrpsb+/vyQpNDQ0S3tGRoaSk5Ot05uWL1+uYcOGaeXKlTpz5oxT/+TkZGtdl7Nr1y5t2rRJRYsWzXZ+5g0Y/vjjDxUrVkw+Pj5O8+++++6r7F3+yq9b4BcpUkSNGzfW119/rZEjR0q6cBqjq6ur2rVrZ/XLvEZo3Lhx2rNnj9LT0615OTnFLCd27dql5ORkBQUFZTs/8zWIjo5W+/btNXz4cI0ePVqNGjXSgw8+qMcff1weHh75UsuIESPUtm1bVaxYUVWrVlXz5s3VuXNnRURESJJ2794tY4yGDBmiIUOGXLbeEiVK6I8//lDt2rWzzL+W98wff/xx2XVUqlRJP//8s1Obq6urSpYsedX1HjlyRCdOnNCECRM0YcKEbPtkvg6SNHnyZL3zzjvavn27U0i++K6uiYmJKl68uAIDA6+6/Us/AySpUKFCWa4xzOnyhQoVkiRr+cygfaVTj5OTk63lsuPi4qKoqCgtW7ZM0oVg1qBBA9WvX1/p6elatWqVgoODdezYMadgdrn3QeYp3n/88YeqVq1qtWd3Z9xMnTt3lqurq7Zt26aQkJDL9ruaffv2aejQofr222+zPMcXf6HVtGlTFStWTFOmTFHjxo2VkZGhL7/8Um3btpWvr6+kvD23V9pHALlDMAPuUEFBQdq4caPmz5+vefPmad68eZo4caK6dOmiyZMn53m9Li4uuWo3//+ahMTERDVu3FiVKlXSu+++q9DQULm7u+v777/X6NGjc3SThYyMDDVt2lQvvPBCtvMrVqyYw724dh4eHk4X318sM3RefJTkWnXs2FHdunXTxo0bFRkZqa+//lqNGze2rquRLlzkP2TIEHXv3l0jR45UYGCgChQooOeff/6qz6/D4cj2+pGLw5104TUICgrSlClTsl1PZmjO/B24VatW6bvvvtP8+fPVvXt3vfPOO1q1alWW0JwTl9bSsGFDJSYm6ptvvtGPP/6oTz75RKNHj9aHH36oJ5980trnAQMGKCYmJtt15ubnJa63i494Xknmfj3xxBOX/Qd2Zjj94osv1LVrVz344IMaOHCggoKC5OLiovj4eCUmJuapzquN9WtdPnP/3nrrLadr4C6Wk/dP/fr1NWrUKJ09e1bLli3Tv/71LwUEBKhq1apatmyZde3qxcEst650DWK7du30+eef67333lN8fHye1p+enm5dn/biiy+qUqVK8vb21l9//aWuXbs6jWsXFxc9/vjj+vjjjzVu3DgtX75cBw4ccLr7a16e25xcZwkgZwhmwB3M3d1dbdq0UZs2bZSRkaFnnnlGH330kYYMGaIKFSrc0B81/u6775Samqpvv/3W6Rvz7O4Od7m6ypcvr1OnTl31d6JKly6thQsX6tSpU07/yMjt3dCuto3LrS+zPa+nLmXnwQcfVK9evazTGXfu3KnBgwc79ZkxY4buu+8+ffrpp07tJ06ccApw2SlUqFC2p6JlHvXJVL58ef3000+qV69ejv7BVqdOHdWpU0ejRo3S1KlT1alTJ02bNk1PPvnkFWu59A6a586d08GDB7P0DQwMVLdu3dStWzedOnVKDRs21CuvvKInn3xS5cqVkyS5ubnl6D2T3SmR1/KeyXz9d+zYofvvvz/LevP6/ihatKh8fX2Vnp5+1f2aMWOGypUrp1mzZjmNq0tPWSxfvrzmz5+vY8eO5eio2fWUeZqkn5/fVffvSp9hDRo00Llz5/Tll1/qr7/+sgJYw4YNrWBWsWJFK6BJlx/X27dvt+bnVN++fVWhQgUNHTpU/v7+GjRoUI6XzbR582bt3LlTkydPVpcuXaz2y52i3qVLF73zzjv67rvvNG/ePBUtWtTpS4ncPLcA8h/XmAF3qKNHjzo9LlCggPUteuZt7DN/j+ZG3EY+81vyi79VT05O1sSJE7P09fb2zramDh06aOXKlZo/f36WeSdOnLCuZ2vZsqXS0tKcbsWfnp6uDz744Fp3w9KyZUutWrVK69aty1LHlClTFBkZeU2nL10qICBAMTEx+vrrrzVt2jS5u7vrwQcfdOrj4uKS5ajF9OnTs9yaPTvly5fX9u3bdeTIEavt119/zXIL7Q4dOig9Pd06pfJiaWlp1ut2/PjxLLVkfkN/6c8oZ
FdL5rVBmSZMmJDliNml73EfHx9VqFDBWn9QUJAaNWqkjz76KNtQd/G+Zr6ev/zyi9P8yx0ZzImaNWsqKChIH374odM+z5s3T9u2bVOrVq3ytF4XFxe1b99eM2fO1G+//ZZl/sX7ld24W716tVauXOm0TPv27WWMyfZnGXJ6JCy/1KhRQ+XLl9fbb7+tU6dOZZl/8f5d6TOsdu3acnNz0xtvvKHAwEBVqVJF0oXAtmrVKiUkJGQ5WtayZUv98ssvTs/P6dOnNWHCBJUpU0ZhYWG52pchQ4ZowIABGjx4cJafBsmJ7F4/Y4z1syeXioiIUEREhD755BPNnDlTHTt2lKvr/76jz81zCyD/ccQMuE3NmzfP+hb3YnXr1lW5cuX05JNP6tixY7r//vtVsmRJ/fHHH/rggw8UGRlpXS8RGRkpFxcXvfHGG0pOTpaHh4f1O2P5rVmzZtYRvF69eunUqVP6+OOPFRQUlOUfzTVq1ND48eP16quvqkKFCgoKCtL999+vgQMH6ttvv1Xr1q3VtWtX1ahRQ6dPn9bmzZs1Y8YM7d27V0WKFFGbNm1Ur149DRo0SHv37lVYWJhmzZqVoxuMXGzmzJnZPsexsbEaNGiQpk+froYNG6pXr16qVKmSDhw4oEmTJungwYPZBs5r9eijj+qJJ57QuHHjFBMTk+XHjFu3bq0RI0aoW7duqlu3rjZv3qwpU6ZYR46upHv37nr33XcVExOjHj166PDhw/rwww9VpUoVp99mio6OVq9evRQfH6+NGzeqWbNmcnNz065duzR9+nS99957evjhhzV58mSNGzdODz30kMqXL6+TJ0/q448/lp+fn1q2bHnFWp588kk9/fTTat++vZo2bapff/1V8+fPz3LULywsTI0aNVKNGjUUGBiotWvXasaMGerTp4/VZ+zYsapfv77Cw8PVs2dPlStXTocOHdLKlSv1559/Wr/x9sILL+g///mPmjdvrueee866XX7p0qW1adOmqz5/2ckMBd26dVN0dLQee+wx63b5ZcqUUb9+/fK0XunCLecXL16s2rVrq2fPngoLC9OxY8e0fv16/fTTTzp27JikC++JWbNm6aGHHlKrVq20Z88effjhhwoLC3P6h/l9992nzp076/3339euXbvUvHlzZWRkaNmyZbrvvvucntPrrUCBAvrkk0/UokULValSRd26dVOJEiX0119/afHixfLz89N3330n6cJnhXThJws6duwoNzc3tWnTRt7e3ipYsKBq1KihVatWWb9hJl04Ynb69GmdPn06SzAbNGiQvvzyS7Vo0ULPPvusAgMDNXnyZO3Zs0czZ87M0amml3rrrbeUnJysuLg4+fr65uqH5StVqqTy5ctrwIAB+uuvv+Tn56eZM2de8Xq+Ll26aMCAAZKUZVu5eW4BXAc3+jaQAK6vK90uXxfdFnnGjBmmWbNmJigoyLi7u5tSpUqZXr16mYMHDzqt7+OPPzblypUzLi4uTrdpvtzt8i+9hXx2twk35n+3Wj5y5IjV9u2335qIiAjj6elpypQpY9544w3z2WefZbnddVJSkmnVqpXx9fU1kpzqOHnypBk8eLCpUKGCcXd3N0WKFDF169Y1b7/9ttNt/I8ePWo6d+5s/Pz8jL+/v+ncubPZsGFDrm6Xf7kp81baf/75p3nyySdNiRIljKurqwkMDDStW7c2q1atuuL6c3u7/EwpKSnGy8vLSDJffPFFlvlnz541//d//2eKFStmvLy8TL169czKlSuzvJbZ3S7fGGO++OILU65cOePu7m4iIyPN/Pnzs9wuP9OECRNMjRo1jJeXl/H19TXh4eHmhRdeMAcOHDDGXLhN92OPPWZKlSplPDw8TFBQkGndurVZu3btVfczPT3dvPjii6ZIkSKmYMGCJiYmxuzevTvL7fJfffVVU6tWLRMQEGC8vLxMpUqVzKhRo5zeB8YYk5iYaLp06WJCQkKMm5ubKVGihGndurWZMWOGU79NmzaZ6Oho4+npaUqUKGFGjhxpPv300zzfLj/TV199ZapXr248PDxMYGCg6dSpk/nzzz+d+sTGxhpvb++rPjcXO3TokImLizOhoaHGzc3NhISEmMaNG5sJEyZYfTIyMsxrr71mSpcubTw8PEz16tXN3Llzs31d09LSzFtvvWUqVapk3N3dTdGiRU2LFi3MunXrrD66zM9EXPraZOdynyGXez9u2LDBtGvXzhQuXNh4eHiY0qVLmw4dOpiFCxc69Rs5cqQpUaKEKVCgQJbXauDAgUaSeeONN5yWqVChgpFkEhMTs9SZmJhoHn74YRMQEGA8PT1NrVq1zNy5c3O0L8Zk/15IT083jz32mHF1dTVz5sy56nN08e3yt27dapo0aWJ8fHxMkSJFTM+ePa2fKMjus+zgwYPGxcXFVKxY8bLbyclzm91nOIBr4zCGXwMEAAC4E/z9998qVqyYhg4detm7kQKwB9eYAQAA3CEmTZqk9PR0de7c2e5SAFyCa8wAAABuc4sWLdLWrVs1atQoPfjggypTpozdJQG4BKcyAgAA3OYaNWqkFStWqF69evriiy9UokQJu0sCcAmCGQAAAADYjGvMAAAAAMBmBDMAAAAAsBk3/5CUkZGhAwcOyNfX1/qBSQAAAAB3HmOMTp48qeLFi+fph+PzimAm6cCBAwoNDbW7DAAAAAA3if3796tkyZI3bHsEM0m+vr6SLjz5fn5+NlcDAAAAwC4pKSkKDQ21MsKNQjCTrNMX/fz8CGYAAAAAbvglTtz8AwAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZq52F4CsHMMddpdgKzPM2F0CAAAAcENxxAwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZjdNMHv99dflcDj0/PPPW21nz55VXFycChcuLB8fH7Vv316HDh1yWm7fvn1q1aqVChYsqKCgIA0cOFBpaWk3uHoAAAAAyLubIpitWbNGH330kSIiIpza+/Xrp++++07Tp09XQkKCDhw4oHbt2lnz09PT1apVK507d04rVqzQ5MmTNWnSJA0dOvRG7wIAAAAA5JntwezUqVPq1KmTPv74YxUqVMhqT05O1qeffqp3331X999/v2rUqKGJEydqxYoVWrVqlSTpxx9/1NatW/XFF18oMjJSLVq00MiRIzV27FidO3fOrl0CAAAAgFyxPZjFxcWpVatWatKkiVP7unXrdP78eaf2SpUqqVSpUlq5cqUkaeXKlQoPD1dwcLDVJyYmRikpKdqyZctlt5mamqqUlBSnCQAAAADs4mrnxqdNm6b169drzZo1WeYlJSXJ3d1dAQEBTu3BwcFKSkqy+lwcyjLnZ867nPj4eA0f
PvwaqwcAAACA/GHbEbP9+/frueee05QpU+Tp6XlDtz148GAlJydb0/79+2/o9gEAAADgYrYFs3Xr1unw4cO655575OrqKldXVyUkJOj999+Xq6urgoODde7cOZ04ccJpuUOHDikkJESSFBISkuUujZmPM/tkx8PDQ35+fk4TAAAAANjFtmDWuHFjbd68WRs3brSmmjVrqlOnTtbfbm5uWrhwobXMjh07tG/fPkVFRUmSoqKitHnzZh0+fNjqs2DBAvn5+SksLOyG7xMAAAAA5IVt15j5+vqqatWqTm3e3t4qXLiw1d6jRw/1799fgYGB8vPzU9++fRUVFaU6depIkpo1a6awsDB17txZb775ppKSkvTyyy8rLi5OHh4eN3yfAAAAACAvbL35x9WMHj1aBQoUUPv27ZWamqqYmBiNGzfOmu/i4qK5c+eqd+/eioqKkre3t2JjYzVixAgbqwYAAACA3HEYY4zdRdgtJSVF/v7+Sk5OvimuN3MMd9hdgq3MsDv+LQkAAACb2JUNbP8dMwAAAAC40xHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACb2RrMxo8fr4iICPn5+cnPz09RUVGaN2+eNb9Ro0ZyOBxO09NPP+20jn379qlVq1YqWLCggoKCNHDgQKWlpd3oXQEAAACAPHO1c+MlS5bU66+/rrvuukvGGE2ePFlt27bVhg0bVKVKFUlSz549NWLECGuZggULWn+np6erVatWCgkJ0YoVK3Tw4EF16dJFbm5ueu211274/gAAAABAXtgazNq0aeP0eNSoURo/frxWrVplBbOCBQsqJCQk2+V//PFHbd26VT/99JOCg4MVGRmpkSNH6sUXX9Qrr7wid3f3674PAAAAAHCtbpprzNLT0zVt2jSdPn1aUVFRVvuUKVNUpEgRVa1aVYMHD9aZM2eseStXrlR4eLiCg4OttpiYGKWkpGjLli2X3VZqaqpSUlKcJgAAAACwi61HzCRp8+bNioqK0tmzZ+Xj46PZs2crLCxMkvT444+rdOnSKl68uDZt2qQXX3xRO3bs0KxZsyRJSUlJTqFMkvU4KSnpstuMj4/X8OHDr9MeAQAAAEDu2B7M7r77bm3cuFHJycmaMWOGYmNjlZCQoLCwMD311FNWv/DwcBUrVkyNGzdWYmKiypcvn+dtDh48WP3797cep6SkKDQ09Jr2AwAAAADyyvZTGd3d3VWhQgXVqFFD8fHxqlatmt57771s+9auXVuStHv3bklSSEiIDh065NQn8/HlrkuTJA8PD+tOkJkTAAAAANjF9mB2qYyMDKWmpmY7b+PGjZKkYsWKSZKioqK0efNmHT582OqzYMEC+fn5WadDAgAAAMDNztZTGQcPHqwWLVqoVKlSOnnypKZOnaolS5Zo/vz5SkxM1NSpU9WyZUsVLlxYmzZtUr9+/dSwYUNFRERIkpo1a6awsDB17txZb775ppKSkvTyyy8rLi5OHh4edu4aAAAAAOSYrcHs8OHD6tKliw4ePCh/f39FRERo/vz5atq0qfbv36+ffvpJY8aM0enTpxUaGqr27dvr5ZdftpZ3cXHR3Llz1bt3b0VFRcnb21uxsbFOv3sGAAAAADc7hzHG2F2E3VJSUuTv76/k5OSb4nozx3CH3SXYygy749+SAAAAsIld2eCmu8YMAAAAAO40BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbOZqdwEAcL05hjvsLsFWZpixuwTchhhXjCvkL8YUY4ojZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM1uD2fjx4xURESE/Pz/5+fkpKipK8+bNs+afPXtWcXFxKly4sHx8fNS+fXsdOnTIaR379u1Tq1atVLBgQQUFBWngwIFKS0u70bsCAAAAAHlmazArWbKkXn/9da1bt05r167V/fffr7Zt22rLli2SpH79+um7777T9OnTlZCQoAMHDqhdu3bW8unp6WrVqpXOnTunFStWaPLkyZo0aZKGDh1q1y4BAAAAQK45jDHG7iIuFhgYqLfeeksPP/ywihYtqqlTp+rhhx+WJG3fvl2VK1fWypUrVadOHc2bN0+tW7fWgQMHFBwcLEn68MMP9eKLL+rIkSNyd3fPdhupqalKTU21HqekpCg0NFTJycny8/O7/jt5FY7hDrtLsJUZdlO9JXEbYEwxppD/GFeMK+QvxtTNM6ZSUlLk7+9/w7PBTXONWXp6uqZNm6bTp08rKipK69at0/nz59WkSROrT6VKlVSqVCmtXLlSkrRy5UqFh4dboUySYmJilJKSYh11y058fLz8/f2tKTQ09PrtGAAAAABche3BbPPmzfLx8ZGHh4eefvppzZ49W2FhYUpKSpK7u7sCAgKc+gcHByspKUmSlJSU5BTKMudnzrucwYMHKzk52Zr279+fvzsFAAAAALngancBd999tzZu3Kjk5GTNmDFDsbGxSkhIuK7b9PDwkIeHx3XdBgAAAADklO3BzN3dXRUqVJAk1ahRQ2vWrNF7772nRx99VOfOndOJEyecjpodOnRIISEhkqSQkBD98ssvTuvLvGtjZh8AAAAAuNnZfirjpTIyMpSamqoaNWrIzc1NCxcutObt2LFD+/btU1RUlCQpKipKmzdv1uHDh60+CxYskJ+fn8LCwm547QAAAACQF7YeMRs8eLBatGihUqVK6eTJk5o6daqWLFmi+fPny9/fXz169FD//v0VGBgoPz8/9e3bV1FRUapTp44kqVmzZgoLC1Pnzp315ptvKikpSS+//LLi4uI4VREAAADALcPWYHb48GF16dJFBw8elL+/vyIiIjR//nw1bdpUkjR69GgVKFBA7du3V2pqqmJiYjRu3DhreRcXF82dO1e9e/dWVFSUvL29FRsbqxEjRti1SwAAAACQa7YGs08//fSK8z09PTV27FiNHTv2sn1Kly6t77//Pr9LAwAAAIAb5qa7xgwAAAAA7jQEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxH
MAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm7nmZaFdu3Zp8eLFOnz4sDIyMpzmDR06NF8KAwAAAIA7Ra6D2ccff6zevXurSJEiCgkJkcPhsOY5HA6CGQAAAADkUq6D2auvvqpRo0bpxRdfvB71AAAAAMAdJ9fXmB0/flyPPPLI9agFAAAAAO5IuQ5mjzzyiH788cfrUQsAAAAA3JFyfSpjhQoVNGTIEK1atUrh4eFyc3Nzmv/ss8/mW3EAAAAAcCfIdTCbMGGCfHx8lJCQoISEBKd5DoeDYAYAAAAAuZTrYLZnz57rUQcAAAAA3LGu6QemjTEyxuRXLQAAAABwR8pTMPv8888VHh4uLy8veXl5KSIiQv/5z3/yuzYAAAAAuCPk+lTGd999V0OGDFGfPn1Ur149SdLPP/+sp59+Wn///bf69euX70UCAAAAwO0s18Hsgw8+0Pjx49WlSxer7YEHHlCVKlX0yiuvEMwAAAAAIJdyfSrjwYMHVbdu3SztdevW1cGDB/OlKAAAAAC4k+Q6mFWoUEFff/11lvavvvpKd911V74UBQAAAAB3klyfyjh8+HA9+uijWrp0qXWN2fLly7Vw4cJsAxsAAAAA4MpyfcSsffv2Wr16tYoUKaI5c+Zozpw5KlKkiH755Rc99NBD16NGAAAAALit5fqImSTVqFFDX3zxRX7XAgAAAAB3pBwdMUtJSXH6+0pTbsTHx+vee++Vr6+vgoKC9OCDD2rHjh1OfRo1aiSHw+E0Pf3000599u3bp1atWqlgwYIKCgrSwIEDlZaWlqtaAAAAAMAuOTpiVqhQIR08eFBBQUEKCAiQw+HI0scYI4fDofT09BxvPCEhQXFxcbr33nuVlpaml156Sc2aNdPWrVvl7e1t9evZs6dGjBhhPS5YsKD1d3p6ulq1aqWQkBCtWLFCBw8eVJcuXeTm5qbXXnstx7UAAAAAgF1yFMwWLVqkwMBASdLixYvzbeM//PCD0+NJkyYpKChI69atU8OGDa32ggULKiQkJNt1/Pjjj9q6dat++uknBQcHKzIyUiNHjtSLL76oV155Re7u7lmWSU1NVWpqqvU4t0f6AAAAACA/5SiYRUdHW3+XLVtWoaGhWY6aGWO0f//+ayomOTlZkqwQmGnKlCn64osvFBISojZt2mjIkCHWUbOVK1cqPDxcwcHBVv+YmBj17t1bW7ZsUfXq1bNsJz4+XsOHD7+mWgEAAAAgv+T65h9ly5a1Tmu82LFjx1S2bNlcncp4sYyMDD3//POqV6+eqlatarU//vjjKl26tIoXL65NmzbpxRdf1I4dOzRr1ixJUlJSklMok2Q9TkpKynZbgwcPVv/+/a3HKSkpCg0NzVPdAAAAAHCtch3MMq8lu9SpU6fk6emZ50Li4uL022+/6eeff3Zqf+qpp6y/w8PDVaxYMTVu3FiJiYkqX758nrbl4eEhDw+PPNcKAAAAAPkpx8Es8wiTw+FwOpVQunADjtWrVysyMjJPRfTp00dz587V0qVLVbJkySv2rV27tiRp9+7dKl++vEJCQvTLL7849Tl06JAkXfa6NAAAAAC4meQ4mG3YsEHShSNmmzdvdrqphru7u6pVq6YBAwbkauPGGPXt21ezZ8/WkiVLVLZs2asus3HjRklSsWLFJElRUVEaNWqUDh8+bJ1euWDBAvn5+SksLCxX9QAAAACAHXIczDLvxtitWze999578vPzu+aNx8XFaerUqfrmm2/k6+trXRPm7+8vLy8vJSYmaurUqWrZsqUKFy6sTZs2qV+/fmrYsKEiIiIkSc2aNVNYWJg6d+6sN998U0lJSXr55ZcVFxfH6YoAAAAAbgm5vsZs4sSJ+bbx8ePHS7rwI9KXbqNr165yd3fXTz/9pDFjxuj06dMKDQ1V+/bt9fLLL1t9XVxcNHfuXPXu3VtRUVHy9vZWbGys0++eAQAAAMDNLNfBTJLWrl2rr7/+Wvv27dO5c+ec5mXeLTEnjDFXnB8aGqqEhISrrqd06dL6/vvvc7xdAAAAALiZFMjtAtOmTVPdunW1bds2zZ49W+fPn9eWLVu0aNEi+fv7X48aAQAAAOC2lutg9tprr2n06NH67rvv5O7urvfee0/bt29Xhw4dVKpUqetRIwAAAADc1nIdzBITE9WqVStJF+7GePr0aTkcDvXr108TJkzI9wIBAAAA4HaX62BWqFAhnTx5UpJUokQJ/fbbb5KkEydO6MyZM/lbHQAAAADcAXJ984+GDRtqwYIFCg8P1yOPPKLnnntOixYt0oIFC9S4cePrUSMAAAAA3NZyHcz+/e9/6+zZs5Kkf/3rX3Jzc9OKFSuy3MYeAAAAAJAzuQ5mgYGB1t8FChTQoEGD8rUgAAAAALjT5Poas/Xr12vz5s3W42+++UYPPvigXnrppSy/aQYAAAAAuLpcB7NevXpp586dkqTff/9djz76qAoWLKjp06frhRdeyPcCAQAAAOB2l+tgtnPnTkVGRkqSpk+frujoaE2dOlWTJk3SzJkz87s+AAAAALjt5TqYGWOUkZEhSfrpp5/UsmVLSVJoaKj+/vvv/K0OAAAAAO4AuQ5mNWvW1Kuvvqr//Oc/SkhIsH5ses+ePQoODs73AgEAAADgdpfrYDZmzBitX79effr00b/+9S9VqFBBkjRjxgzVrVs33wsEAAAAgNtdrm+XHxER4XRXxkxvvfWWXFxc8qUoAAAAALiT5DqYXY6np2d+rQoAAAAA7ig5CmaBgYHauXOnihQpokKFCsnhcFy277Fjx/KtOAAAAAC4E+QomI0ePVq+vr6SLlxjBgAAAADIPzkKZrGxsdn+DQAAAAC4djkKZikpKTleoZ+fX56LAQAAAIA7UY6CWUBAwBWvK5Mu/PC0w+FQenp6vhQGAAAAAHeKHAWzxYsXX+86AAAAAOCOlaNgFh0dfb3rAAAAAIA7Vo6C2aZNm1S1alUVKFBAmzZtumLfiIiIfCkMAAAAAO4UOQpmkZGRSkpKUlBQkCIjI+VwOGSMydKPa8wAAAAAIPdyFMz27NmjokWLWn8DAAAAAPJPjoJZ6dKlrb//+OMP1a1bV66uzoumpaVpxYoVTn0BAAAAAFdXILcL3HfffTp27FiW9uTkZN133335UhQAAAAA3ElyHcwyf6/sUkePHpW3t3e+FAUAAAAAd5IcncooSe3atZN04QYfXbt2lYeHhzUvPT1dmzZtUt26dfO/QgAAAAC4zeU4mPn7+0u6cMTM19dXXl5e1jx3d3fVqVNHPXv2zP8KAQAAAOA2l+NgNnHiRElSmTJlNGDAAE5bBAAAAIB8kuNglmnYsGHXow4AAAAAuGPl+OYfhQoVUmBgYJapbNmyiomJ0YIFC3K98fj4eN17773y9fVVUFCQHnzwQe3YscOpz9mzZxUXF6fChQvLx8dH7du316FDh5z67Nu3T61atVLBggUVFBSkgQMHKi0tLdf1AA
AAAIAdcnzEbMyYMdm2nzhxQuvWrVPr1q01Y8YMtWnTJscbT0hIUFxcnO69916lpaXppZdeUrNmzbR161brVMl+/frpv//9r6ZPny5/f3/16dNH7dq10/LlyyVduPFIq1atFBISohUrVujgwYPq0qWL3Nzc9Nprr+W4FgAAAACwi8MYY/JjRe+++65mzJihFStW5HkdR44cUVBQkBISEtSwYUMlJyeraNGimjp1qh5++GFJ0vbt21W5cmWtXLlSderU0bx589S6dWsdOHBAwcHBkqQPP/xQL774oo4cOSJ3d/erbjclJUX+/v5KTk6Wn59fnuvPL47hWX+O4E5ihuXLWxKwMKYYU8h/jCvGFfIXY+rmGVN2ZYNc/47Z5bRu3Vrbt2+/pnUkJydLkgIDAyVJ69at0/nz59WkSROrT6VKlVSqVCmtXLlSkrRy5UqFh4dboUySYmJilJKSoi1btmS7ndTUVKWkpDhNAAAAAGCXfAtmqampOTo6dTkZGRl6/vnnVa9ePVWtWlWSlJSUJHd3dwUEBDj1DQ4OVlJSktXn4lCWOT9zXnbi4+Pl7+9vTaGhoXmuGwAAAACuVb4Fs08//VSRkZF5Xj4uLk6//fabpk2bll8lXdbgwYOVnJxsTfv377/u2wQAAACAy8nxzT/69++fbXtycrLWr1+vnTt3aunSpXkqok+fPpo7d66WLl2qkiVLWu0hISE6d+6cTpw44XTU7NChQwoJCbH6/PLLL07ry7xrY2afS3l4eMjDwyNPtQIAAABAfstxMNuwYUO27X5+fmratKlmzZqlsmXL5mrjxhj17dtXs2fP1pIlS7IsX6NGDbm5uWnhwoVq3769JGnHjh3at2+foqKiJElRUVEaNWqUDh8+rKCgIEnSggUL5Ofnp7CwsFzVAwAAAAB2yHEwW7x4cb5vPC4uTlOnTtU333wjX19f65owf39/eXl5yd/fXz169FD//v0VGBgoPz8/9e3bV1FRUapTp44kqVmzZgoLC1Pnzp315ptvKikpSS+//LLi4uI4KgYAAADglpDjYHY9jB8/XpLUqFEjp/aJEyeqa9eukqTRo0erQIECat++vVJTUxUTE6Nx48ZZfV1cXDR37lz17t1bUVFR8vb2VmxsrEaMGHGjdgMAAAAAromtwSwnP6Hm6empsWPHauzYsZftU7p0aX3//ff5WRoAAAAA3DD5dldGAAAAAEDeEMwAAAAAwGY5Dmbdu3fXyZMnr2ctAAAAAHBHynEwmzx5sv7555/rWQsAAAAA3JFyHMxycqMOAAAAAEDu5equjCdPnpSnp+cV+/j5+V1TQQAAAABwp8lVMKtYseJl5xlj5HA4lJ6efs1FAQAAAMCdJFfBbMaMGQoMDLxetQAAAADAHSlXwaxevXoKCgq6XrUAAAAAwB2J3zEDAAAAAJvlOJiVLl1aLi4u17MWAAAAALgj5fhUxj179lzPOgAAAADgjpXjYFaoUCE5HI4s7f7+/qpYsaIGDBigpk2b5mtxAAAAAHAnyHEwGz16dLbB7MSJE1q3bp1at26tGTNmqE2bNvlaIAAAAADc7nIczLp27XrF+ZGRkYqPjyeYAQAAAEAu5dtdGVu3bq3t27fn1+oAAAAA4I6Rb8EsNTVV7u7u+bU6AAAAALhj5Fsw+/TTTxUZGZlfqwMAAACAO0aOrzHr379/tu3Jyclav369du7cqaVLl+ZbYQAAAABwp8hxMNuwYUO27X5+fmratKlmzZqlsmXL5lthAAAAAHCnyHEwW7x48RXn//nnn3rqqac0YcKEay4KAAAAAO4k+XaN2dGjR/Xpp5/m1+oAAAAA4I6Rb8EMAAAAAJA3BDMAAAAAsBnBDAAAAABsluObf7Rr1+6K80+cOHGttQAAAADAHSnHwczf3/+q87t06XLNBQEAAADAnSbHwWzixInXsw4AAAAAuGNxjRkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANjM1mC2dOlStWnTRsWLF5fD4dCcOXOc5nft2lUOh8Npat68uVOfY8eOqVOnTvLz81NAQIB69OihU6dO3cC9AAAAAIBrY2swO336tKpVq6axY8detk/z5s118OBBa/ryyy+d5nfq1ElbtmzRggULNHfuXC1dulRPPfXU9S4dAAAAAPJNjn/H7Hpo0aKFWrRoccU+Hh4eCgkJyXbetm3b9MMPP2jNmjWqWbOmJOmDDz5Qy5Yt9fbbb6t48eL5XjMAAAAA5Leb/hqzJUuWKCgoSHfffbd69+6to0ePWvNWrlypgIAAK5RJUpMmTVSgQAGtXr36sutMTU1VSkqK0wQAAAAAdrmpg1nz5s31+eefa+HChXrjjTeUkJCgFi1aKD09XZKUlJSkoKAgp2VcXV0VGBiopKSky643Pj5e/v7+1hQaGnpd9wMAAAAArsTWUxmvpmPHjtbf4eHhioiIUPny5bVkyRI1btw4z+sdPHiw+vfvbz1OSUkhnAEAAACwzU19xOxS5cqVU5EiRbR7925JUkhIiA4fPuzUJy0tTceOHbvsdWnShevW/Pz8nCYAAAAAsMstFcz+/PNPHT16VMWKFZMkRUVF6cSJE1q3bp3VZ9GiRcrIyFDt2rXtKhMAAAAAcsXWUxlPnTplHf2SpD179mjjxo0KDAxUYGCghg8frvbt2yskJESJiYl64YUXVKFCBcXExEiSKleurObNm6tnz5768MMPdf78efXp00cdO3bkjowAAAAAbhm2HjFbu3atqlevrurVq0uS+vfvr+rVq2vo0KFycXHRpk2b9MADD6hixYrq0aOHatSooWXLlsnDw8Nax5QpU1SpUiU1btxYLVu2VP369TVhwgS7dgkAAAAAcs3WI2aNGjWSMeay8+fPn3/VdQQGBmrq1Kn5WRYAAAAA3FC31DVmAAAAAHA7IpgBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADazNZgtXbpUbdq0UfHixeVwODRnzhyn+cYYDR06VMWKFZOXl5eaNGmiXbt2OfU5duyYOnXqJD8/PwUEBKhHjx46derUDdwLAAAAALg2tgaz06dPq1q1aho7dmy289988029//77+vDDD7V69Wp5e3srJiZGZ8+etfp06tRJW7Zs0YIFCzR37lwtXbpUTz311I3aBQAAAAC4Zq52brxFixZq0aJFtvOMMRozZoxefvlltW3bVpL0+eefKzg4WHPmzFHHjh21bds2/fDDD1qzZo1q1qwpSfrggw/UsmVLv
f322ypevPgN2xcAAAAAyKub9hqzPXv2KCkpSU2aNLHa/P39Vbt2ba1cuVKStHLlSgUEBFihTJKaNGmiAgUKaPXq1Zddd2pqqlJSUpwmAAAAALDLTRvMkpKSJEnBwcFO7cHBwda8pKQkBQUFOc13dXVVYGCg1Sc78fHx8vf3t6bQ0NB8rh4AAAAAcu6mDWbX0+DBg5WcnGxN+/fvt7skAAAAAHewmzaYhYSESJIOHTrk1H7o0CFrXkhIiA4fPuw0Py0tTceOHbP6ZMfDw0N+fn5OEwAAAADY5aYNZmXLllVISIgWLlxotaWkpGj16tWKioqSJEVFRenEiRNat26d1WfRokXKyMhQ7dq1b3jNAAAAAJAXtt6V8dSpU9q9e7f1eM+ePdq4caMCAwNVqlQpPf/883r11Vd11113qWzZshoyZIiKFy+uBx98UJJUuXJlNW/eXD179tSHH36o8+fPq0+fPurYsSN3ZAQAAABwy7A1mK1du1b33Xef9bh///6SpNjYWE2aNEkvvPCCTp8+raeeekonTpxQ/fr19cMPP8jT09NaZsqUKerTp48aN26sAgUKqH379nr//fdv+L4AAAAAQF7ZGswaNWokY8xl5zscDo0YMUIjRoy4bJ/AwEBNnTr1epQHAAAAADfETXuNGQAAAADcKQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNbupg9sorr8jhcDhNlSpVsuafPXtWcXFxKly4sHx8fNS+fXsdOnTIxooBAAAAIPdu6mAmSVWqVNHBgwet6eeff7bm9evXT999952mT5+uhIQEHThwQO3atbOxWgAAAADIPVe7C7gaV1dXhYSEZGlPTk7Wp59+qqlTp+r++++XJE2cOFGVK1fWqlWrVKdOnRtdKgAAAADkyU1/xGzXrl0qXry4ypUrp06dOmnfvn2SpHXr1un8+fNq0qSJ1bdSpUoqVaqUVq5cecV1pqamKiUlxWkCAAAAALvc1MGsdu3amjRpkn744QeNHz9ee/bsUYMGDXTy5EklJSXJ3d1dAQEBTssEBwcrKSnpiuuNj4+Xv7+/NYWGhl7HvQAAAACAK7upT2Vs0aKF9XdERIRq166t0qVL6+uvv5aXl1ee1zt48GD179/fepySkkI4AwAAAGCbm/qI2aUCAgJUsWJF7d69WyEhITp37pxOnDjh1OfQoUPZXpN2MQ8PD/n5+TlNAAAAAGCXWyqYnTp1SomJiSpWrJhq1KghNzc3LVy40Jq/Y8cO7du3T1FRUTZWCQAAAAC5c1OfyjhgwAC1adNGpUuX1oEDBzRs2DC5uLjosccek7+/v3r06KH+/fsrMDBQfn5+6tu3r6KiorgjIwAAAIBbyk0dzP7880899thjOnr0qIoWLar69etr1apVKlq0qCRp9OjRKlCggNq3b6/U1FTFxMRo3LhxNlcNAAAAALlzUwezadOmXXG+p6enxo4dq7Fjx96gigAAAAAg/91S15gBAAAAwO2IYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2Oy2CWZjx45VmTJl5Onpqdq1a+uXX36xuyQAAAAAyJHbIph99dVX6t+/v4YNG6b169erWrVqiomJ0eHDh+0uDQAAAACu6rYIZu+++6569uypbt26KSwsTB9++KEKFiyozz77zO7SAAAAAOCqXO0u4FqdO3dO69at0+DBg622AgUKqEmTJlq5cmW2y6Smpio1NdV6nJycLElKSUm5vsXm1Fm7C7DXTfM64PbBmLK7BNyOGFd2l4DbDWPK7hIsmbUYY27odm/5YPb3338rPT1dwcHBTu3BwcHavn17tsvEx8dr+PDhWdpDQ0OvS43IHf/X/e0uAbitMKaA/Me4AvLXzTimTp48KX//G1fXLR/M8mLw4MHq37+/9TgjI0PHjh1T4cKF5XA4bKzMfikpKQoNDdX+/fvl5+dndznALY8xBeQ/xhWQvxhTzowxOnnypIoXL35Dt3vLB7MiRYrIxcVFhw4dcmo/dOiQQkJCsl3Gw8NDHh4eTm0BAQHXq8Rbkp+fHwMTyEeMKSD/Ma6A/MWY+p8beaQs0y1/8w93d3fVqFFDCxcutNoyMjK0cOFCRUVF2VgZAAAAAOTMLX/ETJL69++v2NhY1axZU7Vq1dKYMWN0+vRpdevWze7SAAAAAOCqbotg9uijj+rIkSMaOnSokpKSFBkZqR9++CHLDUFwdR4eHho2bFiWUz0B5A1jCsh/jCsgfzGmbg4Oc6PvAwkAAAAAcHLLX2MGAAAAALc6ghkAAAAA2IxgBgAAAAA2I5jlUZkyZTRmzBi7y7jl7N27Vw6HQxs3brzu2+I1uvXwmuUN4wqXw+uVN4wpXAmvWd4wrnLA3MJiY2ONJNOrV68s85555hkjycTGxuZoXXv27DGSzIYNG3LU//Dhw+b06dM56tu6dWsTExOT7bylS5caSebXX3/N0bouZ/HixUaSOX78+DWt51JnzpwxhQoVMoULFzZnz57N1bKxsbGmbdu2Tm1paWnm4MGD5vz58/lW48SJE42/v3+W9ty8Rvnl3//+tyldurTx8PAwtWrVMqtXr76h288PjKv/YVz5Z2m/0eMqISHBtG7d2hQrVsxIMrNnz75h284vjKn/YUz5Z2m/0WPqtddeMzVr1jQ+Pj6maNGipm3btmb79u03bPv5hXH1P4wr/yztN3pcjRs3zoSHhxtfX1/j6+tr6tSpY77//vtcr+eWP2IWGhqqadOm6Z9//rHazp49q6lTp6pUqVL5vr1z585JkooWLaqCBQvmaJkePXpowYIF+vPPP7PMmzhxomrWrKmIiIh8rTOvjDFKS0uzHs+cOVNVqlRRpUqVNGfOnGtev4uLi0JCQuTqev1/qSE3r1F++Oqrr9S/f38NGzZM69evV7Vq1RQTE6PDhw/fsBryC+MqfzGu8u706dOqVq2axo4de8O2eT0wpvIXYyrv
EhISFBcXp1WrVmnBggU6f/68mjVrptOnT9+wGvIL4yp/Ma7yrmTJknr99de1bt06rV27Vvfff7/atm2rLVu25G5F+RwYb6jMNF61alXzxRdfWO1TpkwxERERpm3btta3JfPmzTP16tUz/v7+JjAw0LRq1crs3r3bWkaS0xQdHe20jVdffdUUK1bMlClTxhhjTOnSpc3o0aONMRe+qXBzczNLly611vfGG2+YokWLmqSkJHP+/HkTHBxsRo4c6VT/yZMnjY+Pjxk/frwxxphly5aZ+vXrG09PT1OyZEnTt29fc+rUKav/2bNnzQsvvGBKlixp3N3dTfny5c0nn3xifdNz8ZS532fPnjV9+/Y1RYsWNR4eHqZevXrml19+sdaZ+S3L999/b+655x7j5uZmFi9ebM1v1KiR+fDDD8348eNN06ZNs7wGv/32m2nVqpXx9fU1Pj4+pn79+mb37t1m2LBhWWpavHix07dS6enppkSJEmbcuHFO61y/fr1xOBxm7969xhhj3nnnHVO1alVTsGBBU7JkSdO7d29z8uRJp/ovnoYNG5blNTLGmD/++MM88MADxtvb2/j6+ppHHnnEJCUlWfOHDRtmqlWrZj7//HNTunRp4+fnZx599FGTkpKSZb+zU6tWLRMXF2c9Tk9PN8WLFzfx8fE5Wv5mwbhiXN1M4+piuoWPmDGmGFM345gy5sKRBUkmISEhT8vbhXHFuLqZx5UxxhQqVMh88sknuVrmtghm7777rmncuLHV3rhxYzN69GinQTljxgwzc+ZMs2vXLrNhwwbTpk0bEx4ebtLT040xxvzyyy9Gkvnpp5/MwYMHzdGjR61t+Pj4mM6dO5vffvvN/Pbbb8aYrC/4wIEDTenSpc2JEyfM+vXrjbu7u/nmm2+c5pcvX95kZGRYbZ999pnx8vIyJ06cMLt37zbe3t5m9OjRZufOnWb58uWmevXqpmvXrlb/Dh06mNDQUDNr1iyTmJhofvrpJzNt2jSTlpZmZs6caSSZHTt2mIMHD5oTJ04YY4x59tlnTfHixc33339vtmzZYmJjY02hQoWs/ct8U0dERJgff/zR7N6925q3e/du4+HhYY4dO2aOHj1qPD09rYFijDF//vmnCQwMNO3atTNr1qwxO3bsMJ999pnZvn27OXnypOnQoYNp3ry5OXjwoDl48KBJTU3NcrrAgAEDTP369Z1e1//7v/9zahs9erRZtGiR2bNnj1m4cKG5++67Te/evY0xxqSmppoxY8YYPz8/azuZA/bi1yg9Pd1ERkaa+vXrm7Vr15pVq1aZGjVqWB++xlwYlD4+PqZdu3Zm8+bNZunSpSYkJMS89NJLl30PZkpNTTUuLi5Z/tHYpUsX88ADD1x1+ZsJ44pxdbOMq0vd6sGMMcWYutnGlDHG7Nq1y0gymzdvztPydmFcMa5u1nGVlpZmvvzyS+Pu7m62bNmSq2Vvi2B2+PBh4+HhYfbu3Wv27t1rPD09zZEjR5wG5aWOHDni9EF0ufOLY2NjTXBwsElNTXVqv3RQpqammsjISNOhQwcTFhZmevbs6dR/27Zt1jcGmRo0aGCeeOIJY4wxPXr0ME899ZTTMsuWLTMFChQw//zzj9mxY4eRZBYsWJDt/mR3fvGpU6eMm5ubmTJlitV27tw5U7x4cfPmm286LTdnzpws63zppZfMgw8+aD1u27at9U2EMcYMHjzYlC1b1pw7dy7bmrI7v/jS53nDhg3G4XCYP/74wxhjrG9QMr9Bys706dNN4cKFrceXO7/44tfoxx9/NC4uLmbfvn3W/C1bthhJ1rdHw4YNMwULFnT6dmTgwIGmdu3al60l019//WUkmRUrVji1Dxw40NSqVeuqy99MGFf/w7jyz9LvRo6rS93qwYwxxZi62cZUenq6adWqlalXr16ul7Ub4+p/GFf+WfrZMa42bdpkvL29jYuLi/H39zf//e9/c7xsplv+GjPpwnmkrVq10qRJkzRx4kS1atVKRYoUceqza9cuPfbYYypXrpz8/PxUpkwZSdK+ffuuuv7w8HC5u7tfsY+7u7umTJmimTNn6uzZsxo9erTT/EqVKqlu3br67LPPJEm7d+/WsmXL1KNHD0nSr7/+qkmTJsnHx8eaYmJilJGRoT179mjjxo1ycXFRdHR0Tp8WJSYm6vz586pXr57V5ubmplq1amnbtm1OfWvWrOn0OD09XZMnT9YTTzxhtT3xxBOaNGmSMjIyJEkbN25UgwYN5ObmluOaLhUZGanKlStr6tSpki6c+3748GE98sgjVp+ffvpJjRs3VokSJeTr66vOnTvr6NGjOnPmTI63s23bNoWGhio0NNRqCwsLU0BAgNNzUaZMGfn6+lqPixUrdkteI5YfGFfZY1z9D+MqdxhT2WNM/c+NHlNxcXH67bffNG3atFwve7NgXGWPcfU/N2pc3X333dq4caNWr16t3r17KzY2Vlu3bs3x8tJtdLv87t27a9KkSZo8ebK6d++eZX6bNm107Ngxffzxx1q9erVWr14t6X8Xcl6Jt7d3jmpYsWKFJOnYsWM6duxYlvk9evTQzJkzdfLkSU2cOFHly5e3BtmpU6fUq1cvbdy40Zp+/fVX7dq1S+XLl5eXl1eOasirS/dx/vz5+uuvv/Too4/K1dVVrq6u6tixo/744w8tXLhQkvKtpk6dOlmDcurUqWrevLkKFy4s6cKtVVu3bq2IiAjNnDlT69ats24CkJPXLrcu/YBxOBzWh9CVFClSRC4uLjp06JBT+6FDhxQSEpKvNd5IjKtrw7i6IK/j6nbEmLo2jKkL8mNM9enTR3PnztXixYtVsmTJ/CzvhmNcXRvG1QXXOq7c3d1VoUIF1ahRQ/Hx8apWrZree++9XNVw2wSz5s2b69y5czp//rxiYmKc5h09elQ7duzQyy+/rMaNG6ty5co6fvy4U5/Mb0PS09PztP3ExET169dPH3/8sWrXrq3Y2NgsL2aHDh1UoEABTZ06VZ9//rm6d+8uh8MhSbrnnnu0detWVahQIcvk7u6u8PBwZWRkKCEhIdvtZ1d/+fLl5e7uruXLl1tt58+f15o1axQWFnbF/fn000/VsWNHpw+JjRs3qmPHjvr0008lSREREVq2bJnOnz9/2Zpy8nw+/vjj+u2337Ru3TrNmDFDnTp1suatW7dOGRkZeuedd1SnTh1VrFhRBw4cyPV2KleurP3792v//v1W29atW3XixImrPhc54e7urho1algfWJKUkZGhhQsXKioq6prXbxfGFePqSq73uLodMaYYU1dyI8aUMUZ9+vTR7NmztWjRIpUtWzZf1msnxhXj6krs+n9VRkaGUlNTc7dQrk9+vIlcev5qcnKySU5Oth5nnl+cnp5uChcubJ544gmza9cus3DhQnPvvfc6Xa9w/vx54+XlZV599VWTlJRkXTiZ3Tmyxjifu5qWlmbq1Klj2rdvb4wx5sCBA6Zw4cLWObwX69GjhylUqJBxcXExf/31l9X+66+/Gi8vLxMXF2c2bNhgdu7caebMmeN0l7+uXbua0NBQM3v2bPP
777+bxYsXm6+++soYc+EiTIfDYSZNmmQOHz5sXfz43HPPmeLFi5t58+Y5Xfh57NgxY0z25yUfPnzYuLm5mXnz5mWp//vvvzceHh7m6NGj5u+//zaFCxe2LvzcuXOn+fzzz63fQxk1apQpVaqU2b59uzly5Ig5d+7cZc/jrlevnqlWrZrx9fU1Z86csdo3btxoJJkxY8aYxMRE8/nnn5sSJUo41bx8+XLrot0jR45Yv1tx8WuUkZFhIiMjTYMGDcy6devM6tWrs73ws1q1ak51jR492pQuXTrL85CdadOmGQ8PDzNp0iSzdetW89RTT5mAgACnu/7cChhXjCtjbp5xdfLkSbNhwwazYcMGI8m8++67ZsOGDdY1CbcCxhRjypibZ0z17t3b+Pv7myVLllg3TDh48KDT/twKGFeMK2NunnE1aNAgk5CQYPbs2WM2bdpkBg0aZBwOh/nxxx9ztHym2yqYXeriCz8XLFhgKleubDw8PExERIRZsmRJlgvJP/74YxMaGmoKFCiQ5Vapl7r4BR8+fLgpVqyY+fvvv635M2fONO7u7mbjxo1Oy61YscJIMi1btsyyzl9++cU0bdrU+Pj4GG9vbxMREWFGjRplzf/nn39Mv379TLFixYy7u7upUKGC+eyzz6z5I0aMMCEhIcbhcFj7/c8//5i+ffuaIkWKXPFWqRcPyrffftsEBARke0FnamqqCQgIMO+9954x5sKHSbNmzUzBggWNr6+vadCggUlMTDTGXBjcmfujbG6VerFx48YZSaZLly5Ztvnuu++aYsWKGS8vLxMTE2M+//zzLDU//fTTpnDhwvlyq9SL5WZQGmPMBx98YEqVKmXc3d1NrVq1zKpVq3K87M2CccW4ynQzjKvsbocs5fyHY28GjCnGVKabYUxlN54kmYkTJ+Zo+ZsF44pxlelmGFfdu3c3pUuXNu7u7qZo0aKmcePGuQ5lxhjjMMaY3B1jAwAAAADkp9vmGjMAAAAAuFURzIAc2Ldvn9NtbC+dcnLLXQDOGFdA/mJMAfnvRo4rTmUEciAtLU179+697PwyZcrI1dX1xhUE3AYYV0D+YkwB+e9GjiuCGQAAAADYjFMZAQAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAd7RGjRrp+eeft7sMAMAdjmAGAMiTrl27yuFw6PXXX3dqnzNnjhwOR67WVaZMGY0ZMyYfq7t+9u7dK4fDoY0bN9pdCgDgNkIwAwDkmaenp9544w0dP37c7lJy7dy5c3aXkK/Onz9vdwkAgGtAMAMA5FmTJk0UEhKi+Pj4K/b7+eef1aBBA3l5eSk0NFTPPvusTp8+LenCqYR//PGH+vXrJ4fDIYfDIWOMihYtqhkzZljriIyMVLFixZzW6eHhoTNnzkiS9u3bp7Zt28rHx0d+fn7q0KGDDh06ZPV/5ZVXFBkZqU8++URly5aVp6dntrX+97//lb+/v6ZMmZKn5yQxMVFt27ZVcHCwfHx8dO+99+qnn36y5o8YMUJVq1bNslxkZKSGDBliPf7kk09UuXJleXp6qlKlSho3bpw1L/Oo3VdffaXo6Gh5enpqypQp+uOPP9SmTRsVKlRI3t7eqlKlir7//vs87QcA4MYimAEA8szFxUWvvfaaPvjgA/3555/Z9klMTFTz5s3Vvn17bdq0SV999ZV+/vln9enTR5I0a9YslSxZUiNGjNDBgwd18OBBORwONWzYUEuWLJEkHT9+XNu2bdM///yj7du3S5ISEhJ07733qmDBgsrIyFDbtm117NgxJSQkaMGCBfr999/16KOPOtWye/duzZw5U7Nmzcr2VMSpU6fqscce05QpU9SpU6c8PSenTp1Sy5YttXDhQm3YsEHNmzdXmzZttG/fPklS9+7dtW3bNq1Zs8ZaZsOGDdq0aZO6desmSZoyZYqGDh2qUaNGadu2bXrttdc0ZMgQTZ482WlbgwYN0nPPPadt27YpJiZGcXFxSk1N1dKlS7V582a98cYb8vHxydN+AABuLFe7CwAA3NoeeughRUZGatiwYfr000+zzI+Pj1enTp2sG2zcddddev/99xUdHa3x48crMDBQLi4u8vX1VUhIiLVco0aN9NFHH0mSli5dqurVqyskJERLlixRpUqVtGTJEkVHR0uSFi5cqM2bN2vPnj0KDQ2VJH3++eeqUqWK1qxZo3vvvVfShdMXP//8cxUtWjRLnWPHjtW//vUvfffdd9Z686JatWqqVq2a9XjkyJGaPXu2vv32W/Xp00clS5ZUTEyMJk6caNU1ceJERUdHq1y5cpKkYcOG6Z133lG7du0kSWXLltXWrVv10UcfKTY21lr3888/b/WRLhw1bN++vcLDwyXJWh8A4ObHETMAwDV74403NHnyZG3bti3LvF9//VWTJk2Sj4+PNcXExCgjI0N79uy57Dqjo6O1detWHTlyRAkJCWrUqJEaNWqkJUuW6Pz581qxYoUaNWokSdq2bZtCQ0OtUCZJYWFhCggIcKqpdOnS2YayGTNmqF+/flqwYME1hTLpwhGzAQMGqHLlygoICJCPj4+2bdtmHTGTpJ49e+rLL7/U2bNnde7cOU2dOlXdu3eXJJ0+fVqJiYnq0aOH03P26quvKjEx0WlbNWvWdHr87LPP6tVXX1W9evU0bNgwbdq06Zr2BQBw4xDMAADXrGHDhoqJidHgwYOzzDt16pR69eqljRs3WtOvv/6qXbt2qXz58pddZ3h4uAIDA5WQkOAUzBISErRmzRqdP39edevWzVWd3t7e2bZXr15dRYsW1WeffSZjTK7WeakBAwZo9uzZeu2117Rs2TJt3LhR4eHhTjcbadOmjTw8PDR79mx99913On/+vB5++GFJF54vSfr444+dnrPffvtNq1atuuL+PPnkk/r999/VuXNnbd68WTVr1tQHH3xwTfsDALgxOJURAJAvXn/9dUVGRuruu+92ar/nnnu0detWVahQ4bLLuru7Kz093anN4XCoQYMG+uabb7RlyxbVr19fBQsWVGpqqj766CPVrFnTCiaVK1fW/v37tX//fuuo2datW3XixAmFhYVdtfby5cvrnXfeUaNGjeTi4qJ///vfud19y/Lly9W1a1c99NBDki4Erb179zr1cXV1VWxsrCZOnCh3d3d17NhRXl5ekqTg4GAVL15cv//+e56ucwsNDdXTTz+tp59+WoMHD9bHH3+svn375nl/AAA3BsEMAJAvwsPD1alTJ73//vtO7S+++KLq1KmjPn366Mknn5S3t7e2bt2qBQsWWAGoTJkyWrp0qTp27CgPDw8VKVJE0oXrzP7v//5PNWvWtG5i0bBhQ02ZMkUDBw60ttGkSRNr+2PGjFFaWpqeeeYZRUdHZznd73IqVqyoxYsXq1GjRnJ1db3q76rt2LEjS1uVKlV01113adasWWrTpo0cDoeGDBmijIyMLH2ffPJJVa5cWdKFMHex4cOH69lnn5W/v7+aN2+u1NRUrV27VsePH1f//v0vW9Pzzz+vFi1aqGLFijp+/LgWL15sbQMAcHPjVEYAQL
4ZMWJElhASERGhhIQE7dy5Uw0aNFD16tU1dOhQFS9e3Gm5vXv3qnz58k7XgEVHRys9Pd26lky6ENYubXM4HPrmm29UqFAhNWzYUE2aNFG5cuX01Vdf5ar+u+++W4sWLdKXX36p//u//7ti344dO6p69epO06FDh/Tuu++qUKFCqlu3rtq0aaOYmBjdc889WZa/6667VLduXVWqVEm1a9d2mvfkk0/qk08+0cSJExUeHq7o6GhNmjRJZcuWvWJN6enpiouLU+XKldW8eXNVrFjR6Tb7AICbl8Nc68n0AAAg14wxuuuuu/TMM89c8SgYAODOwKmMAADcYEeOHNG0adOUlJRk/XYZAODORjADAOAGCwoKUpEiRTRhwgQVKlTI7nIAADcBghkAADcYVxEAAC7FzT8AAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJv9P5KKOBMzm6wXAAAAAElFTkSuQmCC", + "text/plain": [ + "
    " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "## Modify Parameters\n", - "\n", - "We now modify the parallelization attributes of the first network layer to reduce its overall latency.\n", - "We now individually extract the `MatrixVectorActivation` blocks from the onnx file and set the config values manually (although this can be done automatically by Vivado tools also as mentioned in the introduction).\n", + "# Extracting LUTs from res_dict\n", + "LUTs_updated = [res_dict_updated[key][\"LUT\"] for key in res_dict_updated.keys()] \n", "\n", - "In the first step, we set the `PE` & `SIMD` values for all the layers to be '1' to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilization\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(res_dict_updated.keys(), LUTs_updated, color ='green', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"LUT Utilisation\")\n", + "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "From these numbers, we see that the first layer has been removed as the bottleneck and that the entire network can now perform one inference in ~4096 clock cycles (when the pipeline is full) as compared to the earlier configuration where it took ~38400 execution cycles.\n", "\n", - "We utilize from (`getCustomOp()`) as the helper function to set different properties of the node. The (`set_nodeattr()`) function within this function call helps us set these values." + "This decrease in execution latency of the network though comes at a cost of a 45% increase in LUT resource utilization for layer 1 of the network." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Important Note : StreamingDataWidthConverters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next to resources and performance, folding factors (or parallelization parameters) are influencing also other properties of the generated design. Since we are able to generate results in parallel, the data that gets feed into the layer needs to be packed in a specific format to provide the correct data at the correct time for the internal parallelism. Also, the data that comes out of a layer will be in a specific format depending on the internal parallelism." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To analyze the influence of the folding factors on the data streams between layers, we first will import the original model (with `PE=SIMD=1`) and then we will import the updated model, so that we can compare the two of them." 
] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ - "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\") \n", - "# (PE, SIMD, in_fifo_depth, out_fifo_depth, ramstyle) for each layer\n", - "config = [\n", - " (2, 5, [16], [64], \"block\"),\n", - " (1, 1, [64], [64], \"auto\"),#8,8\n", - " (1, 1, [64], [64], \"auto\"),#8,8\n", - " (1, 1, [64], [1], \"distributed\"),\n", - "]\n", - "for fcl, (pe, simd, ififo, ofifo, ramstyle) in zip(fc_layers, config):\n", - " fcl_inst = getCustomOp(fcl)\n", - " fcl_inst.set_nodeattr(\"PE\", pe)\n", - " fcl_inst.set_nodeattr(\"SIMD\", simd)\n", - " fcl_inst.set_nodeattr(\"inFIFODepths\", ififo)\n", - " fcl_inst.set_nodeattr(\"outFIFODepths\", ofifo)\n", - " fcl_inst.set_nodeattr(\"ram_style\", ramstyle)\n", - " num_inp_vec = fcl_inst.get_nodeattr(\"numInputVectors\")" + "model_orig = ModelWrapper(\"cybsec_PE_SIMD.onnx\")\n", + "model_updated = ModelWrapper(\"cybsec_PE_SIMD_modified.onnx\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We again save the model and view it. On expanding the first `MatrixVectorActivation` we can view the updated `PE` & `SIMD` parameters for that layer." + "In the next step we extract the information from all layers. For MVAUs the input shape is (1, MW/SIMD, SIMD) and the output shape is (1, MH/PE, PE)." ] }, { "cell_type": "code", - "execution_count": 16, - "metadata": { - "scrolled": true - }, + "execution_count": 25, + "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Stopping http://0.0.0.0:5901\n", - "Serving './cybsec_PE_SIMD_modified.onnx' at http://0.0.0.0:5901\n" + "In the original model (pe=simd=1): \n", + "Layer: MatrixVectorActivation_0\n", + "Input shape: (1, 600, 1)\n", + "Output shape: (1, 64, 1)\n", + "Layer: MatrixVectorActivation_1\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 64, 1)\n", + "Layer: MatrixVectorActivation_2\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 64, 1)\n", + "Layer: MatrixVectorActivation_3\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 1, 1)\n" ] - }, + } + ], + "source": [ + "# Original model\n", + "list_of_mvaus = model_orig.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "print(\"In the original model (pe=simd=1): \")\n", + "for mvau in list_of_mvaus:\n", + " mvau_inst = getCustomOp(mvau)\n", + " print(\"Layer: \" + mvau.name)\n", + " print(\"Input shape: \" + str(mvau_inst.get_folded_input_shape()))\n", + " print(\"Output shape: \" + str(mvau_inst.get_folded_output_shape()))" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "In the original model (pe=simd=1): \n", + "Layer: MatrixVectorActivation_0\n", + "Input shape: (1, 120, 5)\n", + "Output shape: (1, 32, 2)\n", + "Layer: MatrixVectorActivation_1\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 64, 1)\n", + "Layer: MatrixVectorActivation_2\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 64, 1)\n", + "Layer: MatrixVectorActivation_3\n", + "Input shape: (1, 64, 1)\n", + "Output shape: (1, 1, 1)\n" + ] } ], "source": [ - "model.save(\"./cybsec_PE_SIMD_modified.onnx\")\n", - "showInNetron(\"./cybsec_PE_SIMD_modified.onnx\",localhost_url='xirxlabs53')" + "# Updated model\n", + 
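As a quick cross-check of the shapes used in this notebook: the folded tensor shapes of an MVAU follow directly from PE and SIMD, with the input reshaped to (1, MW/SIMD, SIMD) and the output to (1, MH/PE, PE). The sketch below is plain Python only; it mirrors the arithmetic of `get_folded_input_shape()`/`get_folded_output_shape()` rather than calling FINN, and reproduces the shapes printed for the first layer.

```python
# Sketch of how PE/SIMD determine the folded MVAU shapes. This only mirrors
# the arithmetic of get_folded_input_shape()/get_folded_output_shape() and
# does not import FINN.

def folded_shapes(mw: int, mh: int, simd: int, pe: int):
    assert mw % simd == 0 and mh % pe == 0
    folded_input = (1, mw // simd, simd)
    folded_output = (1, mh // pe, pe)
    return folded_input, folded_output

# First MVAU of the example network (600 inputs, 64 outputs):
print(folded_shapes(600, 64, simd=1, pe=1))  # ((1, 600, 1), (1, 64, 1))
print(folded_shapes(600, 64, simd=5, pe=2))  # ((1, 120, 5), (1, 32, 2))
```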
"list_of_mvaus = model_updated.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "print(\"In the original model (pe=simd=1): \")\n", + "for mvau in list_of_mvaus:\n", + " mvau_inst = getCustomOp(mvau)\n", + " print(\"Layer: \" + mvau.name)\n", + " print(\"Input shape: \" + str(mvau_inst.get_folded_input_shape()))\n", + " print(\"Output shape: \" + str(mvau_inst.get_folded_output_shape()))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "From the above total folding formula, we have reduced the total folding of our layer from `600 x 64` to `120 x 32`. Hence, resulting in an estimated `10x` decrease in the execution latency of our layer. \n", - "This can be observed in the new estimated clock cycles." + "We can see that the input and output shape for MatrixVectorActivation_0 has changed after we have changed the folding factors. These changes have direct influence on the in/out stream width. We can have a closer look at the formula to calculate the stream width of an MVAU." ] }, { "cell_type": "code", - "execution_count": 17, - "metadata": { - "scrolled": true - }, - "outputs": [], + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " def get_instream_width(self, ind=0):\n", + " i_bits = self.get_input_datatype().bitwidth()\n", + " in_width = i_bits * self.get_nodeattr(\"SIMD\")\n", + " return in_width\n", + "\n" + ] + } + ], "source": [ - "cycles_dict_updated = []\n", - "cycles_dict_updated = exp_cycles_per_layer(model)" + "showSrc(mvau_inst.get_instream_width)" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 31, "metadata": {}, "outputs": [ { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA28AAAHWCAYAAADglbFoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABknklEQVR4nO3de3zO9f/H8edlszls1+a4mS2nFcYQwnIsMoz4pvqSmFBh9EVJvt9y6qD0LVLR6Zvp+yPH6CA0pyGrhDkTIsJG2OY4s71/f7jt83XZsM3m2sXjfrtdt7ren/fn83l9Ptf1nj33OdmMMUYAAAAAgEKtiLMLAAAAAADcGOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDUCh16pVK7Vq1crZZeSrAwcOyGazKTo6ulAtKzvR0dGy2Wz69ddfC2T5+amg98W17NmzR23btpWPj49sNpsWLlx4S9d/K7Rq1Uq1a9d2dhmFWub379///nee5rfZbBozZkz+FgXgtkJ4A5Bnmb/UX+v1008/5XhZO3bs0JgxY3TgwIGCKzgPpkyZcsuDAFxPZGSktm7dqtdff13//e9/1bBhQ2eXdNs7cuSIxowZo/j4eGeXAgC3jLuzCwDg+saNG6cqVapkaQ8ODs7xMnbs2KGxY8eqVatWqly5ssO0H3744WZLzLMpU6aobNmy6t27t9NqQOF2/vx5xcXF6V//+pcGDRrk7HLuGEeOHNHYsWNVuXJl1atXz9nlAMAtQXgDcNPat29foEcaPDw8CmzZwM06fvy4JMnX1zfflnn27FmVLFky35aHgpWRkaGLFy86u4wCdeHCBXl4eKhIEU7aApyJEQjglpg1a5YaNGggb29v2e12hYaG6r333pN0+fTLxx57TJL0wAMPWKddrlq1SlLWa95WrVolm82mOXPmaOzYsapYsaK8vb316KOPKjk5WampqRoyZIjKly8vLy8vPfXUU0pNTXWoZ9q0aXrwwQdVvnx5eXp6KiQkRFOnTnXoU7lyZW3fvl2xsbFWTVfWkZSUpCFDhigoKEienp4KDg7WW2+9pYyMDIflJCUlqXfv3vLx8ZGvr68iIyOVlJSU432XlJSkoUOHqnLlyvL09FRgYKB69eqlv/7667rzrVixQs2bN1fJkiXl6+urzp07a+fOnVn6HT58WH379lVAQIA8PT1VpUoVDRgw4Lq/jJ46dUqNGjVSYGCgdu/enef6z5w5o5IlS+of//hHlvn+/PNPubm5afz48Te9L3bt2qVHH31UpUuXVrFixdSwYUN98803Dn3S0tI0duxY3X333SpWrJjKlCmjZs2aKSYm5prLHTNmjCpVqiRJGj58uGw2m8OR402bNql9+/ay2+3y8vJS69ats5xOnHn6cWxsrAYOHKjy5csrMDDwutuTmpqq0aNHKzg4WJ6engoKCtKLL76Yp+95psWLF6tly5bWGL3vvvs0c+bMLP127NihBx54QCVKlFDFihU1YcKE69aayWazadCgQVq4cKFq164tT09P1apVS0uWLMnS9/Dhw+rTp4/8/Pysfp9//rk1fdWqVbrvvvskSU899ZQ1PqOjozV58mS5ubk5jLF33nlHNptNw4YNs9rS09Pl7e2tESNGWG1nz57V888/b43p6tWr69///reMMdluy4wZM1SrVi15enpmux2SZIzRM888Iw8PD3311Vc52leZ/vjjDw0cOFDVq1dX8eLFVaZMGT322GMOp5f//vvvstlsmjhxYpb5161bJ5vNpi+//NJqu9G+lf73M3bWr
Fl6+eWXVbFiRZUoUUIpKSm5qh9A/uPIG4CblpycnOWXZ5vNpjJlykiSYmJi1L17d7Vu3VpvvfWWJGnnzp368ccf9Y9//EMtWrTQc889p8mTJ+uf//ynatasKUnWf69l/PjxKl68uF566SXt3btX77//vooWLaoiRYro1KlTGjNmjH766SdFR0erSpUqGjVqlDXv1KlTVatWLT388MNyd3fXt99+q4EDByojI0NRUVGSpEmTJmnw4MHy8vLSv/71L0mSn5+fJOncuXNq2bKlDh8+rGeffVZ33XWX1q1bp5EjR+ro0aOaNGmSpMu/uHXu3Flr165V//79VbNmTS1YsECRkZE52rdnzpxR8+bNtXPnTvXp00f169fXX3/9pW+++UZ//vmnypYtm+18y5YtU/v27VW1alWNGTNG58+f1/vvv6+mTZtq48aNVsA4cuSIGjVqpKSkJD3zzDOqUaOGDh8+rHnz5uncuXPZHvX866+/9NBDD+nkyZOKjY1VtWrV8lx/vXr19Le//U2zZ8/Wu+++Kzc3N2veL7/8UsYY9ejR46b2xfbt29W0aVNVrFhRL730kkqWLKk5c+aoS5cumj9/vv72t79JuhzExo8fr379+qlRo0ZKSUnRr7/+qo0bN+qhhx7KdtmPPPKIfH19NXToUHXv3l0dOnSQl5eXtd7mzZvLbrfrxRdfVNGiRfXxxx+rVatWio2NVePGjR2WNXDgQJUrV06jRo3S2bNnr7lPMzIy9PDDD2vt2rV65plnVLNmTW3dulUTJ07Ub7/95nCzlJx8z6XLAbJPnz6qVauWRo4cKV9fX23atElLlizRE088YfU7deqU2rVrp0ceeUSPP/645s2bpxEjRig0NFTt27e/Zs2Z1q5dq6+++koDBw6Ut7e3Jk+erK5du+rgwYPWz4vExEQ1adLECkjlypXT4sWL1bdvX6WkpGjIkCGqWbOmxo0bp1GjRumZZ55R8+bNJUn333+/kpOTlZGRobVr16pjx46SpDVr1qhIkSJas2aNVcumTZt05swZtWjRQtLlsfrwww9r5cqV6tu3r+rVq6elS5dq+PDhOnz4cJZwtGLFCs2ZM0eDBg1S2bJls5zuLV0OiH369NHs2bO1YMECRURE3HAfXWn9+vVat26dunXrpsDAQB04cEBTp05Vq1attGPHDpUoUUJVq1ZV06ZNNWPGDA0dOtRh/hkzZsjb21udO3fO8b690quvvioPDw+98MILSk1N5SwIoDAwAJBH06ZNM5KyfXl6elr9/vGPfxi73W4uXbp0zWXNnTvXSDIrV67MMq1ly5amZcuW1vuVK1caSaZ27drm4sWLVnv37t2NzWYz7du3d5g/LCzMVKpUyaHt3LlzWdYTHh5uqlat6tBWq1Yth3VnevXVV03JkiXNb7/95tD+0ksvGTc3N3Pw4EFjjDELFy40ksyECROsPpcuXTLNmzc3ksy0adOyLPtKo0aNMpLMV199lWVaRkaGMcaY/fv3Z1lWvXr1TPny5c2JEyests2bN5siRYqYXr16WW29evUyRYoUMevXr7/m8jM/5/Xr15ujR4+aWrVqmapVq5oDBw5ct/ac1r906VIjySxevNhhep06dRz2fV73RevWrU1oaKi5cOGCQ//777/f3H333VZb3bp1TURExA236WqZ63z77bcd2rt06WI8PDzMvn37rLYjR44Yb29v06JFC6stc/82a9bsumMk03//+19TpEgRs2bNGof2jz76yEgyP/74o9WWk+95UlKS8fb2No0bNzbnz5936Ju5X425PA4lmS+++MJqS01NNf7+/qZr1643rFuS8fDwMHv37rXaNm/ebCSZ999/32rr27evqVChgvnrr78c5u/WrZvx8fGxtmn9+vXZjqH09HRjt9vNiy++aG1DmTJlzGOPPWbc3NzM6dOnjTHGvPvuu6ZIkSLm1KlTxpj/jdXXXnvNYXmPPvqosdlsDnVLMkWKFDHbt2936HvldyEtLc38/e9/N8WLFzdLly694f7JXO7o0aOt99l9fnFxcVk+h48//thIMjt37rTaLl68aMqWLWsiIyOttpzu28yfsVWrVs22BgDOw2mTAG7ahx9+qJiYGIfX4sWLrem+vr46e/bsdU8/y4tevXqpaNGi1vvGjRvLGKM+ffo49GvcuLEOHTqkS5cuWW3Fixe3/j/zyGHLli31+++/Kzk5+Ybrnjt3rpo3b65SpUrpr7/+sl5t2rRRenq6Vq9eLUn6/vvv5e7urgEDBljzurm5afDgwTnaxvnz56tu3brW0aEr2Wy2bOc5evSo4uPj1bt3b5UuXdpqr1Onjh566CF9//33ki4fwVm4cKE6deqU7TWLVy//zz//VMuWLZWWlqbVq1dbpwvebP1t2rRRQECAZsyYYU3btm2btmzZoieffDJXy7rayZMntWLFCj3++OM6ffq09TmdOHFC4eHh2rNnjw4fPizp8vd0+/bt2rNnzw2360bS09P1ww8/qEuXLqpatarVXqFCBT3xxBNau3ZtllPQnn76aYcjj9cyd+5c1axZUzVq1HD47j344IOSpJUrV1p9c/I9j4mJ0enTp/XSSy+pWLFiDuu6er96eXk5fCYeHh5q1KiRfv/99xvWLV3+rK88UlunTh3Z7XZrfmOM5s+fr06dOskY47B94eHhSk5O1saNG6+7jiJFiuj++++3xuDOnTt14sQJvfTSSzLGKC4uTtLlo3G1a9e2rlX8/vvv5ebmpueee85hec8//7yMMQ4/0ySpZcuWCgkJybaGixcv6rHHHtN3332n77//Xm3bts3R/rnalZ9fWlqaTpw4oeDgYPn6+jrsh8cff1zFihVzGENLly7VX3/9ZX1eedm3kZGRDjUAcD5OmwRw0xo1anTdG5YMHDhQc+bMUfv27VWxYkW1bdtWjz/+uNq1a3dT673rrrsc3vv4+EiSgoKCsrRnZGQoOTnZOjXrxx9/1OjRoxUXF6dz58459E9OTraWdS179uzRli1bVK5cuWynHzt2TNLla1YqVKhgnUqXqXr16jfYusv27dunrl275qhvpj/++OOa66hZs6aWLl2qs2fP6syZM0pJScnxs7t69uwpd3d37dy5U/7+/jmaJyf1FylSRD169NDUqVN17tw5lShRQjNmzFCxYsWsayFzuqyr7d27V8YYvfLKK3rllVey7XPs2DFVrFhR48aNU+fOnXXPPfeodu3aateunXr27Kk6derkap3S5ZuYnDt37pqfQUZGhg4dOqRatWpZ7dndsTU7e/bs0c6dO2/43ZNy9j3ft2+fJOXoexAYGJgl0JUqVUpbtmzJUe1Xj9nM+U+dOiXp8n5LSkrSJ598ok8++STbZVy5fdfSvHlz63ThNWvWqEKFCqpfv77q1q2rNWvW6KGHHtLatWv1+OOPW/P88ccfCggIkLe3t8OyMk/fzhxXma73eY0fP15nzpzR4sWLb+oZlefPn9f48eM1bdo0HT582OHauyv/yOTr66tOnTpp5syZevXVVyVdPmWyYsWKVqjPy77N6XcSwK1DeANQ4MqX
L6/4+HgtXbpUixcv1uLFizVt2jT16tVL06dPz/Nyr3WU4lrtmb/47Nu3T61bt1aNGjX07rvvKigoSB4eHvr+++81ceLELDccyU5GRoYeeughvfjii9lOv+eee3K4Fa7jkUce0RdffKH33nvP4SYi+aFXr156++23tXDhQnXv3l0zZ85Ux44dbxiibyTzs3zhhRcUHh6ebZ/MR1q0aNFC+/bt09dff60ffvhBn332mSZOnKiPPvpI/fr1u6k6ciKnRzgyMjIUGhqqd999N9vpmX+8yI/v+dVuNLZudv7Mmp588slrXheakzDdrFkzpaWlKS4uTmvWrLGuiWvevLnWrFmjXbt26fjx41Z7Xlzv8woPD9eSJUs0YcIEtWrVKssRzZwaPHiwpk2bpiFDhigsLMx6CHy3bt2yfH69evXS3LlztW7dOoWGhuqbb77RwIEDrbtD5mXfctQNKHwIbwBuCQ8PD3Xq1EmdOnVSRkaGBg4cqI8//livvPKKgoODr3naW0H49ttvlZqaqm+++cbhSMCVp5tlulZd1apV05kzZ9SmTZvrrqtSpUpavny5zpw543D07UZ3aLxyPdu2bctR3yvXea117Nq1S2XLllXJkiVVvHhx2e32HC9/8ODBCg4O1qhRo+Tj46OXXnop3+qvXbu27r33Xs2YMUOBgYE6ePCg3n///Twt60qZpywWLVr0hp+VJJUuXVpPPfWUnnrqKetmFmPGjMl1eCtXrpxKlChxzc+gSJEiWY4Q51S1atW0efNmtW7d+rrjJqff88zTGLdt25arZzMWhHLlysnb21vp6ek3/Lyut+2NGjWSh4eH1qxZozVr1mj48OGSLgf0Tz/9VMuXL7feZ6pUqZKWLVum06dPOxx927VrlzU9p5o0aaL+/furY8eOeuyxx7RgwQK5u+f+V6558+YpMjJS77zzjtV24cKFbO9W265dO5UrV04zZsxQ48aNde7cOfXs2dOanpt9C6Dw4po3AAXuxIkTDu+LFCli/YU389bmmc+0ys0t9PMq86//V5+CNG3atCx9S5YsmW1Njz/+uOLi4rR06dIs05KSkqzr6zp06KBLly453J49PT09SzC5lq5du2rz5s1asGBBlmnXOtpRoUIF1atXT9OnT3eofdu2bfrhhx/UoUMHSZc/hy5duujbb7/Vr7/+mqPlv/LKK3rhhRc0cuTIa95yPq/19+zZUz/88IMmTZqkMmXKZLl7YV72Rfny5dWqVSt9/PHHOnr0aJbpmc9ok7J+T728vBQcHJzl9vs54ebmprZt2+rrr792uK17YmKiZs6cqWbNmslut+d6udLl797hw4f16aefZpl2/vx5606VOf2et23bVt7e3ho/frwuXLjgMC2nR9Tyi5ubm7p27ar58+dnG9Sv/Lyu9zOjWLFiuu+++/Tll1/q4MGDDkfezp8/r8mTJ6tatWqqUKGCNU+HDh2Unp6uDz74wGFZEydOlM1my9HdNK/Upk0bzZo1S0uWLFHPnj3zfKTz6s/g/fffV3p6epa+7u7u6t69u+bMmaPo6GiFhoY6HEnLzb4FUHhx5A3ATVu8eLH11+kr3X///apatar69eunkydP6sEHH1RgYKD++OMPvf/++6pXr551PUm9evXk5uamt956S8nJyfL09LSeT5Xf2rZtax0JfPbZZ3XmzBl9+umnKl++fJZf8Bs0aKCpU6fqtddeU3BwsMqXL68HH3xQw4cP1zfffKOOHTuqd+/eatCggc6ePautW7dq3rx5OnDggMqWLatOnTqpadOmeumll3TgwAGFhIToq6++ytFNUaTLzw6bN2+eHnvsMfXp00cNGjTQyZMn9c033+ijjz5S3bp1s53v7bffVvv27RUWFqa+fftajwrw8fHRmDFjrH5vvPGGfvjhB7Vs2dK67fzRo0c1d+5crV27NtsHT7/99ttKTk5WVFSUvL29HW5gcTP1P/HEE3rxxRe1YMECDRgwwOFmNDezLz788EM1a9ZMoaGhevrpp1W1alUlJiYqLi5Of/75pzZv3ixJCgkJUatWrdSgQQOVLl1av/76q+bNm6dBgwZdc/uu57XXXlNMTIyaNWumgQMHyt3dXR9//LFSU1Nz/Gy07PTs2VNz5sxR//79tXLlSjVt2lTp6enatWuX5syZo6VLl6phw4Y5/p7b7XZNnDhR/fr103333acnnnhCpUqV0ubNm3Xu3LmbOrU5L958802tXLlSjRs31tNPP62QkBCdPHlSGzdu1LJly3Ty5ElJl48Y+vr66qOPPpK3t7dKliypxo0bW9dpNW/eXG+++aZ8fHwUGhoq6XKYr169unbv3q3evXs7rLdTp0564IEH9K9//UsHDhxQ3bp19cMPP+jrr7/WkCFDrvtIjGvp0qWLdYq43W7Xxx9/nKv5O3bsqP/+97/y8fFRSEiI4uLitGzZMuva3av16tVLkydP1sqVK63Hslwpp/sWQCF2a29uCeB2cr1HBeiKW3jPmzfPtG3b1pQvX954eHiYu+66yzz77LPm6NGjDsv79NNPTdWqVY2bm5vDYwOu9aiAuXPnZlvP1be9Hz16tJFkjh8/brV98803pk6dOqZYsWKmcuXK5q233jKff/65kWT2799v9UtISDARERHG29vbSHKo4/Tp02bkyJEmODjYeHh4mLJly5r777/f/Pvf/3Z4hMGJEydMz549jd1uNz4+PqZnz55m06ZNOXpUQOb8gwYNMhUrVjQeHh4mMDDQREZGWrf7zu72+MYYs2zZMtO0aVNTvHhxY7fbTadOncyOHTuyLP+PP/4wvXr1MuXKlTOenp6matWqJioqyqSmpl5zv6anp5vu3bsbd3d3s3Dhwpuq/0odOnQwksy6devydV/s27fP9OrVy/j7+5uiRYuaihUrmo4dO5p58+ZZfV577TXTqFEj4+vra4oXL25q1KhhXn/9dYfPMjvXelSAMcZs3LjRhIeHGy8vL1OiRAnzwAMPZNm2a31vr+fixYvmrbfeMrVq1TKenp6mVKlSpkGDBmbs2LEmOTnZ6pfT73lm3/vvv9/6vjRq1Mh8+eWX1vSWLVuaWrVqZaklMjIyy6M4siPJREVFZWmvVKmSw+3sjTEmMTHRREVFmaCgIFO0aFHj7+9vWrdubT755BOHfl9//bUJCQkx7u7uWT73RYsWGUlZHh3Sr18/I8n85z//yVLL6dOnzdChQ01AQIApWrSoufvuu83bb7/t8MiE623Ltb4LU6ZMMZLMCy+8kO2+uXK5Vz4q4NSpU+app54yZcuWNV5eXiY8PNzs2rUr232WqVatWqZIkSLmzz//zHZ6TvbttX7GAnA+mzG3+JwIAACu4W9/+5u2bt2qvXv3OrsUwCXde++9Kl26tHVdH4DbC9e8AQAKhaNHj2rRokUON1kAkHO//vqr4uPj1atXL2eXAqCAcOQNAOBU+/fv148//qjPPvtM69ev1759+3L8HDkAl29GtGHDBr3zzjv666+/9Pvvv+f58QQACjeOvAEAnCo2NlY9e/bU/v37NX36dIIbkEvz5s3TU089pbS
0NH355ZcEN+A2xpE3AAAAAHABHHkDAAAAABdAeAMAAAAAF8BDunMgIyNDR44ckbe3t2w2m7PLAQAAAOAkxhidPn1aAQEBKlLk1h4LI7zlwJEjRxQUFOTsMgAAAAAUEocOHVJgYOAtXSfhLQe8vb0lXf6A7Ha7k6sBAAAA4CwpKSkKCgqyMsKtRHjLgcxTJe12O+ENAAAAgFMup+KGJQAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4ALcnV0AABQGNpuzK3AuY5xdAW5HjCtnVwDgdsORNwAAAABwAYQ3AAAAAHABhDcAAAAAcAGENwAAAABwAYUmvL355puy2WwaMmSI1XbhwgVFRUWpTJky8vLyUteuXZWYmOgw38GDBxUREaESJUqofPnyGj58uC5duuTQZ9WqVapfv748PT0VHBys6OjoW7BFAAAAAJB/CkV4W79+vT7++GPVqVPHoX3o0KH69ttvNXfuXMXGxurIkSN65JFHrOnp6emKiIjQxYsXtW7dOk2fPl3R0dEaNWqU1Wf//v2KiIjQAw88oPj4eA0ZMkT9+vXT0qVLb9n2AQAAAMDNshnj3BvZnjlzRvXr19eUKVP02muvqV69epo0aZKSk5NVrlw5zZw5U48++qgkadeuXapZs6bi4uLUpEkTLV68WB07dtSRI0fk5+cnSfroo480YsQIHT9+XB4eHhoxYoQWLVqkbdu2Wevs1q2bkpKStGTJkhzVmJKSIh8fHyUnJ8tut+f/TgDgdNzS3NkV4HbEuHJ2BQAKgjOzgdOPvEVFRSkiIkJt2rRxaN+wYYPS0tIc2mvUqKG77rpLcXFxkqS4uDiFhoZawU2SwsPDlZKSou3bt1t9rl52eHi4tYzspKamKiUlxeEFAAAAAM7k1Id0z5o1Sxs3btT69euzTEtISJCHh4d8fX0d2v38/JSQkGD1uTK4ZU7PnHa9PikpKTp//ryKFy+eZd3jx4/X2LFj87xdAAAAAJDfnHbk7dChQ/rHP/6hGTNmqFixYs4qI1sjR45UcnKy9Tp06JCzSwIAAABwh3NaeNuwYYOOHTum+vXry93dXe7u7oqNjdXkyZPl7u4uPz8/Xbx4UUlJSQ7zJSYmyt/fX5Lk7++f5e6Tme9v1Mdut2d71E2SPD09ZbfbHV4AAAAA4ExOC2+tW7fW1q1bFR8fb70aNmyoHj16WP9ftGhRLV++3Jpn9+7dOnjwoMLCwiRJYWFh2rp1q44dO2b1iYmJkd1uV0hIiNXnymVk9slcBgAAAAC4Aqdd8+bt7a3atWs7tJUsWVJlypSx2vv27athw4apdOnSstvtGjx4sMLCwtSkSRNJUtu2bRUSEqKePXtqwoQJSkhI0Msvv6yoqCh5enpKkvr3768PPvhAL774ovr06aMVK1Zozpw5WrRo0a3dYAAAAAC4CU69YcmNTJw4UUWKFFHXrl2Vmpqq8PBwTZkyxZru5uam7777TgMGDFBYWJhKliypyMhIjRs3zupTpUoVLVq0SEOHDtV7772nwMBAffbZZwoPD3fGJgEAAABAnjj9OW+ugOe8Abc/nkfl7ApwO2JcObsCAAXhjn7OGwAAAADgxghvAAAAAOACCG8AAAAA4AIIbwAAAADgAghvAAAAAOACCG8AAAAA4AIIbwAAAADgAgr1Q7pxbTw7x9kVAAAAALcWR94AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AAAAAwAUQ3gAAAADABTg1vE2dOlV16tSR3W6X3W5XWFiYFi9ebE1v1aqVbDabw6t///4Oyzh48KAiIiJUokQJlS9fXsOHD9elS5cc+qxatUr169eXp6engoODFR0dfSs2DwAAAADyjbszVx4YGKg333xTd999t4wxmj59ujp37qxNmzapVq1akqSnn35a48aNs+YpUaKE9f/p6emKiIiQv7+/1q1bp6NHj6pXr14qWrSo3njjDUnS/v37FRERof79+2vGjBlavny5+vXrpwoVKig8PPzWbjAAAAAA5JHNGGOcXcSVSpcurbffflt9+/ZVq1atVK9ePU2aNCnbvosXL1bHjh115MgR+fn5SZI++ugjjRgxQsePH5eHh4dGjBihRYsWadu2bdZ83bp1U1JSkpYsWZLtclNTU5Wammq9T0lJUVBQkJKTk2W32/NvY2+CzebsCpyrcH1rcTtgTDm7AtyOGFfOrgBAQUhJSZGPj49TskGhueYtPT1ds2bN0tmzZxUWFma1z5gxQ2XLllXt2rU1cuRInTt3zpoWFxen0NBQK7hJUnh4uFJSUrR9+3arT5s2bRzWFR4erri4uGvWMn78ePn4+FivoKCg/NpMAAAAAMgTp542KUlbt25VWFiYLly4IC8vLy1YsEAhISGSpCeeeEKVKlVSQECAtmzZohEjRmj37t366quvJEkJCQkOwU2S9T4hIeG6fVJSUnT+/HkVL148S00jR47UsGHDrPeZR94AAAAAwFmcHt6qV6+u+Ph4JScna968eYqMjFRsbKxCQkL0zDPPWP1CQ0NVoUIFtW7dWvv27VO1atUKrCZPT095enoW2PIBAAAAILecftqkh4eHgoOD1aBBA40fP15169bVe++9l23fxo0bS5L27t0rSfL391diYqJDn8z3/v7+1+1jt9uzPeoGAAAAAIWR08Pb1TIyMhxuFnKl+Ph4SVKFChUkSWFhYdq6dauOHTtm9YmJiZHdbrdOvQwLC9Py5csdlhMTE+NwXR0AAAAAFHZOPW1y5MiRat++ve666y6dPn1aM2fO1KpVq7R06VLt27dPM2fOVIcOHVSmTBlt2bJFQ4cOVYsWLVSnTh1JUtu2bRUSEqKePXtqwoQJSkhI0Msvv6yoqCjrtMf+/fvrgw8+0Isvvqg+ffpoxYoVmjNnjhYtWuTMTQcAAACAXHFqeDt27Jh69eqlo0ePysfHR3Xq1NHSpUv10EMP6dChQ1q2bJkmTZqks2fPKigoSF27dtXLL79sze/m5qbvvvtOAwYMUFhYmEqWLKnIyEiH58JVqVJFixYt0tChQ/Xee+8pMDBQn332Gc94AwAAAOBSCt1z3gojZz7L4Vp4do6zK8DthjHl7ApwO2JcObsCAAWB57wBAAAAAK6L8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAA
AAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAugPAGAAAAAC6A8AYAAAAALoDwBgAAAAAuwKnhberUqapTp47sdrvsdrvCwsK0ePFia/qFCxcUFRWlMmXKyMvLS127dlViYqLDMg4ePKiIiAiVKFFC5cuX1/Dhw3Xp0iWHPqtWrVL9+vXl6emp4OBgRUdH34rNAwAAAIB849TwFhgYqDfffFMbNmzQr7/+qgcffFCdO3fW9u3bJUlDhw7Vt99+q7lz5yo2NlZHjhzRI488Ys2fnp6uiIgIXbx4UevWrdP06dMVHR2tUaNGWX3279+viIgIPfDAA4qPj9eQIUPUr18/LV269JZvLwAAAADklc0YY5xdxJVKly6tt99+W48++qjKlSunmTNn6tFHH5Uk7dq1SzVr1lRcXJyaNGmixYsXq2PHjjpy5Ij8/PwkSR999JFGjBih48ePy8PDQyNGjNCiRYu0bds2ax3dunVTUlKSlixZkqOaUlJS5OPjo+TkZNnt9vzf6Dyw2ZxdgXMVrm8tbgeMKWdXgNsR48rZFQAoCM7MBoXmmrf09HTNmjVLZ8+eVVhYmDZs2KC0tDS1adPG6lOjRg3dddddiouLkyTFxcUpNDTUCm6SFB4erpSUFOvoXVxcnMMyMvtkLiM7qampSklJcXgBAAAAgDM5Pbxt3bpVXl5e8vT0VP/+/bVgwQKFhIQoISFBHh4e8vX1dejv5+enhIQESVJCQoJDcMucnjnten1SUlJ0/vz5bGsaP368fHx8rFdQUFB+bCoAAAAA5JnTw1v16tUVHx+vn3/+WQMGDFBkZKR27Njh1JpGjhyp5ORk63Xo0CGn1gMAAAAA7s4uwMPDQ8HBwZKkBg0aaP369Xrvvff097//XRcvXlRSUpLD0bfExET5+/tLkvz9/fXLL784LC/zbpRX9rn6DpWJiYmy2+0qXrx4tjV5enrK09MzX7YPAAAAAPKD04+8XS0jI0Opqalq0KCBihYtquXLl1vTdu/erYMHDyosLEySFBYWpq1bt+rYsWNWn5iYGNntdoWEhFh9rlxGZp/MZQAAAACAK3DqkbeRI0eqffv2uuuuu3T69GnNnDlTq1at0tKlS+Xj46O+fftq2LBhKl26tOx2uwYPHqywsDA1adJEktS2bVuFhISoZ8+emjBhghISEvTyyy8rKirKOnLWv39/ffDBB3rxxRfVp08frVixQnPmzNGiRYucuekAAAAAkCtODW/Hjh1Tr169dPToUfn4+KhOnTpaunSpHnroIUnSxIkTVaRIEXXt2lWpqakKDw/XlClTrPnd3Nz03XffacCAAQoLC1PJkiUVGRmpcePGWX2qVKmiRYsWaejQoXrvvfcUGBiozz77TOHh4bd8ewEAAAAgrwrdc94KI57zVvjwrUV+Y0w5uwLcjhhXzq4AQEHgOW8AAAAAgOsivAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAvIdXjbuHGjtm7dar3/+uuv1aVLF/3zn//UxYsX87U4AAAAAMBluQ5vzz77rH777TdJ0u+//65u3bqpRIkSmjt3rl588cV8LxAAAAAAkIfw9ttvv6levXqSpLlz56pFixaaOXOmoqOjNX/+/PyuDwAAAACgPIQ3Y4wyMjIkScuWLVOHDh0kSUFBQfrrr7/ytzoAAAAAgKQ8hLeGDRvqtdde03//+1/FxsYqIiJCkrR//375+fnle4EAAAAAgDyEt0mTJmnjxo0aNGiQ/vWvfyk4OFiSNG/ePN1///35XiAAAAAAIA/hrU6dOtq6dauSk5M1evRoq/3tt9/W9OnTc7Ws8ePH67777pO3t7fKly+vLl26aPfu3Q59WrVqJZvN5vDq37+/Q5+DBw8qIiJCJUqUUPny5TV8+HBdunTJoc+qVatUv359eXp6Kjg4WNHR0bnbcAAAAABwojw95y0pKUmfffaZRo4cqZMnT0qSduzYoWPHjuVqObGxsYqKitJPP/2kmJgYpaWlqW3btjp79qxDv6efflpHjx61XhMmTLCmpaenKyIiQhcvXtS6des0ffp0RUdHa9SoUVaf/fv3KyIiQg888IDi4+M1ZMgQ9evXT0uXLs3L5gMAAADALWczxpjczLBlyxa1bt1avr6+OnDggHbv3q2qVavq5Zdf1sGDB/XFF1/kuZjjx4+rfPnyio2NVYsWLSRdPvJWr149TZo0Kdt5Fi9erI4dO+rIkSPWNXcfffSRRowYoePHj8vDw0MjRozQokWLtG3bNmu+bt26KSkpSUuWLLlhXSkpKfLx8VFycrLsdnuety8/2WzOrsC5cvetBW6MMeXsCnA7Ylw5uwIABcGZ2SDXR96GDRump556Snv27FGxYsWs9g4dOmj16tU3VUxycrIkqXTp0g7tM2bMUNmyZVW7dm2NHDlS586ds6bFxcUpNDTU4WYp4eHhSklJ0fbt260+bdq0cVhmeHi44uLisq0jNTVVKSkpDi8AAAAAcCb33M6wfv16ffzxx1naK1asqISEhDwXkpGRoSFDhqhp06aqXbu21f7EE0+oUqVKCggI0JYtWzRixAjt3r1bX331lSQpISEhy10uM99n1nOtPikpKTp//ryKFy/uMG38+PEaO3ZsnrcFAAAAAPJbrsObp6dntkeifvvtN5UrVy7PhURFRWnbtm1au3atQ/szzzxj/X9oaKgqVKig1q1ba9++fapWrVqe13c9I0eO1LBhw6z3KSkpCgoKKpB1AQAAAEBO5Pq0yYcffljjxo1TWlqaJMlms+ngwYMaMWKEunbtmqciBg0apO+++04rV65UYGDgdfs2btxYkrR3715Jkr+/vxITEx36ZL739/e/bh+73Z7lqJt0OaDa7XaHFwAAAAA4U67D2zvvvKMzZ86ofPnyOn/+vFq2bKng4GB5e3vr9ddfz9WyjDEaNGiQFixYoBUrVqhKlSo3nCc+Pl6SVKFCBUlSWFiYtm7d6nCny5iYGNntdoWEhFh9li9f7rCcmJgYhYWF5apeAAAAAHCWXN9tMtPatWu1ZcsWnTlzRvXr189yQ5CcGDhwoGbOnKmvv/5a1atXt9p9fHxUvHhx7du3TzNnzlSHDh1UpkwZbdmyRUOHDlVgYKBiY
2MlXX5UQL169RQQEKAJEyYoISFBPXv2VL9+/fTGG29IuvyogNq1aysqKkp9+vTRihUr9Nxzz2nRokUKDw+/YZ3cbbLw4Q5eyG+MKWdXgNsR48rZFQAoCM7MBnkOb/my8mv8VJ82bZp69+6tQ4cO6cknn9S2bdt09uxZBQUF6W9/+5tefvllhx31xx9/aMCAAVq1apVKliypyMhIvfnmm3J3/98lfatWrdLQoUO1Y8cOBQYG6pVXXlHv3r1zVCfhrfDhH0TkN8aUsyvA7Yhx5ewKABSEQh/eJk+enOMFPvfcczdVUGFEeCt8+AcR+Y0x5ewKcDtiXDm7AgAFodCHt5xciyZdPpL2+++/33RRhQ3hrfDhH0TkN8aUsyvA7Yhx5ewKABQEZ2aDHD0qYP/+/QVdBwAAAADgOnJ9t0kAAAAAwK2X6/DWtWtXvfXWW1naJ0yYoMceeyxfigIAAAAAOMp1eFu9erU6dOiQpb19+/ZavXp1vhQFAAAAAHCU6/B25swZeXh4ZGkvWrSoUlJS8qUoAAAAAICjXIe30NBQzZ49O0v7rFmzFBISki9FAQAAAAAc5ehuk1d65ZVX9Mgjj2jfvn168MEHJUnLly/Xl19+qblz5+Z7gQAAAACAPIS3Tp06aeHChXrjjTc0b948FS9eXHXq1NGyZcvUsmXLgqgRAAAAAO54OXpI952Oh3QXPnxrkd8YU86uALcjxpWzKwBQEJyZDXJ9zVtkZCR3lQQAAACAWyzX4S05OVlt2rTR3XffrTfeeEOHDx8uiLoAAAAAAFfIdXhbuHChDh8+rAEDBmj27NmqXLmy2rdvr3nz5iktLa0gagQAAACAO16uw5sklStXTsOGDdPmzZv1888/Kzg4WD179lRAQICGDh2qPXv25HedAAAAAHBHy1N4y3T06FHFxMQoJiZGbm5u6tChg7Zu3aqQkBBNnDgxv2oEAAAAgDtersNbWlqa5s+fr44dO6pSpUqaO3euhgwZoiNHjmj69OlatmyZ5syZo3HjxhVEvQAAAABwR8r1c94qVKigjIwMde/eXb/88ovq1auXpc8DDzwgX1/ffCgPAAAAACDlIbxNnDhRjz32mIoVK3bNPr6+vtq/f/9NFQYAAAAA+J8cnzaZnp6uLVu26NFHH80S3M6dO6ctW7YoIyMj3wsEAAAAAOQivP33v/9Vnz595OHhkWWah4eH+vTpo5kzZ+ZrcQAAAACAy3Ic3v7zn//ohRdekJubW5Zp7u7uevHFF/XJJ5/ka3EAAAAAgMtyHN52796tJk2aXHP6fffdp507d+ZLUQAAAAAARzkOb2fPnlVKSso1p58+fVrnzp3Ll6IAAAAAAI5yHN7uvvturVu37prT165dq7vvvjtfigIAAAAAOMpxeHviiSf08ssva8uWLVmmbd68WaNGjdITTzyRr8UBAAAAAC6zGWNMTjqmpaWpbdu2Wrt2rdq0aaMaNWpIknbt2qVly5apadOmiomJUdGiRQu0YGdISUmRj4+PkpOTZbfbnV2OJMlmc3YFzpWzby2Qc4wpZ1eA2xHjytkVACgIzswGOQ5v0uUAN3HiRM2cOVN79uyRMUb33HOPnnjiCQ0ZMiTbxwjcDghvhQ//ICK/MaacXQFuR4wrZ1cAoCC4THi7UxHeCh++tchvjClnV4DbEePK2RUAKAjOzAY5vuYNAAAAAOA8hDcAAAAAcAGENwAAAABwAYQ3AAAAAHABuQ5v27Ztu+a0hQsX3kwtAAAAAIBryHV4Cw8P1/79+7O0z58/Xz169MiXogAAAAAAjnId3vr166c2bdooISHBaps9e7Z69eql6OjoXC1r/Pjxuu++++Tt7a3y5curS5cu2r17t0OfCxcuKCoqSmXKlJGXl5e6du2qxMREhz4HDx5URESESpQoofLly2v48OG6dOmSQ59Vq1apfv368vT0VHBwcK5rBQAAAABnynV4Gzt2rDp06KA2bdro5MmTmjlzpp566il98cUXeuyxx3K1rNjYWEVFRemnn35STEyM0tLS1LZtW509e9bqM3ToUH377beaO3euYmNjdeTIET3yyCPW9PT0dEVEROjixYtat26dpk+frujoaI0aNcrqs3//fkVEROiBBx5QfHy8hgwZon79+mnp0qW53XwAAAAAcIo8P6S7R48eWr9+vQ4fPqyZM2eqc+fON13M8ePHVb58ecXGxqpFixZKTk5WuXLlNHPmTD366KOSpF27dqlmzZqKi4tTkyZNtHjxYnXs2FFHjhyRn5+fJOmjjz7SiBEjdPz4cXl4eGjEiBFatGiRw/V63bp1U1JSkpYsWXLDunhId+HDg0+R3xhTzq4AtyPGlbMrAFAQnJkN3HPS6ZtvvsnS9sgjj2jNmjXq3r27bDab1efhhx/OczHJycmSpNKlS0uSNmzYoLS0NLVp08bqU6NGDd11111WeIuLi1NoaKgV3KTL1+UNGDBA27dv17333qu4uDiHZWT2GTJkSLZ1pKamKjU11XqfkpKS520CAAAAgPyQo/DWpUuXa077/PPP9fnnn0uSbDab0tPT81RIRkaGhgwZoqZNm6p27dqSpISEBHl4eMjX19ehr5+fn3XNXUJCgkNwy5yeOe16fVJSUnT+/HkVL17cYdr48eM1duzYPG0HAAAAABSEHF3zlpGRkaNXXoObJEVFRWnbtm2aNWtWnpeRX0aOHKnk5GTrdejQIWeXBAAAAOAOl6MjbwVt0KBB+u6777R69WoFBgZa7f7+/rp48aKSkpIcjr4lJibK39/f6vPLL784LC/zbpRX9rn6DpWJiYmy2+1ZjrpJkqenpzw9PfNl2wAAAAAgP+T6bpPPPfecJk+enKX9gw8+uOY1ZNdijNGgQYO0YMECrVixQlWqVHGY3qBBAxUtWlTLly+32nbv3q2DBw8qLCxMkhQWFqatW7fq2LFjVp+YmBjZ7XaFhIRYfa5cRmafzGUAAAAAQGGX6/A2f/58NW3aNEv7/fffr3nz5uVqWVFRUfq///s/zZw5U97e3kpISFBCQoLOnz8vSfLx8VHfvn01bNgwrVy5Uhs2bNBTTz2lsLAwNWnSRJLUtm1bhYSEqGfPntq8ebOWLl2ql19+WVFRUdbRs/79++v333/Xiy++qF27dmnKlCmaM2eOhg4dmtvNBwAAAACnyPWjAooVK6Zt27YpODjYoX3v3r2qXbu2Lly4kPOVX+MewtOmTVPv3r0lXX5I9/PPP68vv/xSqampCg8P15QpU6xTIiXpjz/+0IABA7Rq1SqVLFlSkZGRevPNN+Xu/r+zQletWqWhQ4dqx44dCgwM1CuvvGKt40Z4VEDhw+2Xkd8YU86uALcjxpWzKwBQEJyZDXId3mrXrq3+/ftr0KBBDu3vv/++pk6dqh07duRrgYUB4a3w4R9E5DfGlLMrwO2IceXsCgAUhEL/nLcrDRs2TIMGDdLx48f14IMPSpKWL1+ud955R5MmTcrv+gAAAAAAykN469Onj1JTU/X666/r1VdflSRVrlxZU6dOVa9evfK9QAAAAABAHk6bvNLx48dVvHhx
eXl55WdNhQ6nTRY+nIqC/MaYcnYFuB0xrpxdAYCC4FKnTWY6fvy4du/eLUmqUaOGypYtm29FAQAAAAAc5fpRAWfPnlWfPn1UoUIFtWjRQi1atFCFChXUt29fnTt3riBqBAAAAIA7Xq7D27BhwxQbG6tvv/1WSUlJSkpK0tdff63Y2Fg9//zzBVEjAAAAANzxcn3NW9myZTVv3jy1atXKoX3lypV6/PHHdfz48fysr1DgmrfCh+sIkN8YU86uALcjxpWzKwBQEJyZDXJ95O3cuXPy8/PL0l6+fHlOmwQAAACAApLr8BYWFqbRo0frwoULVtv58+c1duxYhYWF5WtxAAAAAIDLcn23yffee0/h4eEKDAxU3bp1JUmbN29WsWLFtHTp0nwvEAAAAACQh/BWu3Zt7dmzRzNmzNCuXbskSd27d1ePHj1UvHjxfC8QAAAAAJDH57yVKFFCTz/9dH7XAgAAAAC4hhyFt2+++SbHC3z44YfzXAwAAAAAIHs5Cm9dunTJ0cJsNpvS09Nvph4AAAAAQDZyFN4yMjIKug4AAAAAwHXk+lEBAAAAAIBbL8fhbcWKFQoJCVFKSkqWacnJyapVq5ZWr16dr8UBAAAAAC7LcXibNGmSnn76adnt9izTfHx89Oyzz2rixIn5WhwAAAAA4LIch7fNmzerXbt215zetm1bbdiwIV+KAgAAAAA4ynF4S0xMVNGiRa853d3dXcePH8+XogAAAAAAjnIc3ipWrKht27Zdc/qWLVtUoUKFfCkKAAAAAOAox+GtQ4cOeuWVV3ThwoUs086fP6/Ro0erY8eO+VocAAAAAOAymzHG5KRjYmKi6tevLzc3Nw0aNEjVq1eXJO3atUsffvih0tPTtXHjRvn5+RVowc6QkpIiHx8fJScnZ3vDFmew2ZxdgXPl7FsL5BxjytkV4HbEuHJ2BQAKgjOzQY4e0i1Jfn5+WrdunQYMGKCRI0cqM/PZbDaFh4frww8/vC2DGwAAAAAUBjkOb5JUqVIlff/99zp16pT27t0rY4zuvvtulSpVqqDqAwAAAAAol+EtU6lSpXTffffldy0AAAAAgGvI8Q1LAAAAAADOQ3gDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABfg1PC2evVqderUSQEBAbLZbFq4cKHD9N69e8tmszm82rVr59Dn5MmT6tGjh+x2u3x9fdW3b1+dOXPGoc+WLVvUvHlzFStWTEFBQZowYUJBbxoAAAAA5CunhrezZ8+qbt26+vDDD6/Zp127djp69Kj1+vLLLx2m9+jRQ9u3b1dMTIy+++47rV69Ws8884w1PSUlRW3btlWlSpW0YcMGvf322xozZow++eSTAtsuAAAAAMhv7s5cefv27dW+ffvr9vH09JS/v3+203bu3KklS5Zo/fr1atiwoSTp/fffV4cOHfTvf/9bAQEBmjFjhi5evKjPP/9cHh4eqlWrluLj4/Xuu+86hLwrpaamKjU11XqfkpKSxy0EAAAAgPxR6K95W7VqlcqXL6/q1atrwIABOnHihDUtLi5Ovr6+VnCTpDZt2qhIkSL6+eefrT4tWrSQh4eH1Sc8PFy7d+/WqVOnsl3n+PHj5ePjY72CgoIKaOsAAAAAIGcKdXhr166dvvjiCy1fvlxvvfWWYmNj1b59e6Wnp0uSEhISVL58eYd53N3dVbp0aSUkJFh9/Pz8HPpkvs/sc7WRI0cqOTnZeh06dCi/Nw0AAAAAcsWpp03eSLdu3az/Dw0NVZ06dVStWjWtWrVKrVu3LrD1enp6ytPTs8CWDwAAAAC5VaiPvF2tatWqKlu2rPbu3StJ8vf317Fjxxz6XLp0SSdPnrSuk/P391diYqJDn8z317qWDgAAAAAKG5cKb3/++adOnDihChUqSJLCwsKUlJSkDRs2WH1WrFihjIwMNW7c2OqzevVqpaWlWX1iYmJUvXp1lSpV6tZuAAAAAADkkVPD25kzZxQfH6/4+HhJ0v79+xUfH6+DBw/qzJkzGj58uH766ScdOHBAy5cvV+fOnRUcHKzw8HBJUs2aNdWuXTs9/fTT+uWXX/Tjjz9q0KBB6tatmwICAiRJTzzxhDw8PNS3b19t375ds2fP1nvvvadhw4Y5a7MBAAAAINdsxhjjrJWvWrVKDzzwQJb2yMhITZ06VV26dNGmTZuUlJSkgIAAtW3bVq+++qrDDUhOnjypQYMG6dtvv1WRIkXUtWtXTZ48WV5eXlafLVu2KCoqSuvXr1fZsmU1ePBgjRgxIsd1pqSkyMfHR8nJybLb7Te30fnEZnN2Bc7lvG8tbleMKWdXgNsR48rZFQAoCM7MBk4Nb66C8Fb48K1FfmNMObsC3I4YV86uAEBBcGY2cKlr3gAAAADgTkV4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABdAeAMAAAAAF0B4AwAAAAAXQHgDAAAAABfg1PC2evVqderUSQEBAbLZbFq4cKHDdGOMRo0apQoVKqh48eJq06aN9uzZ49Dn5MmT6tGjh+x2u3x9fdW3b1+dOXPGoc+WLVvUvHlzFStWTEFBQZowYUJBbxoAAAAA5CunhrezZ8+qbt26+vDDD7OdPmHCBE2ePFkfffSRfv75Z5UsWVLh4eG6cOGC1adHjx7avn27YmJi9N1332n16tV65plnrOkpKSlq27atKlWqpA0bNujtt9/WmDFj9MknnxT49gEAAABAfrEZY4yzi5Akm82mBQsWqEuXLpIuH3ULCAjQ888/rxdeeEGSlJycLD8/P0VHR6tbt27auXOnQkJCtH79ejVs2FCStGTJEnXo0EF//vmnAgICNHXqVP3rX/9SQkKCPDw8JEkvvfSSFi5cqF27duWotpSUFPn4+Cg5OVl2uz3/Nz4PbDZnV+BcheNbi9sJY8rZFeB2xLhydgUACoIzs0GhveZt//79SkhIUJs2baw2Hx8fNW7cWHFxcZKkuLg4+fr6WsFNktq0aaMiRYro559/tvq0aNHCCm6SFB4ert27d+vUqVPZrjs1NVUpKSkOLwAAAABwpkIb3hISEiRJfn5+Du1+fn7WtISEBJUvX95huru7u0qXLu3QJ7tlXLmOq40fP14+Pj7WKygo6OY3CAAAAABuQqENb840cuRIJScnW69Dhw45uyQAAAAAd7hCG978/f0lSYmJiQ7tiYmJ1jR/f38dO3bMYfqlS5d08uRJhz7ZLePKdVzN09NTdrvd4QUAAAAAzlRow1uVKlX
k7++v5cuXW20pKSn6+eefFRYWJkkKCwtTUlKSNmzYYPVZsWKFMjIy1LhxY6vP6tWrlZaWZvWJiYlR9erVVapUqVu0NQAAAABwc5wa3s6cOaP4+HjFx8dLunyTkvj4eB08eFA2m01DhgzRa6+9pm+++UZbt25Vr169FBAQYN2RsmbNmmrXrp2efvpp/fLLL/rxxx81aNAgdevWTQEBAZKkJ554Qh4eHurbt6+2b9+u2bNn67333tOwYcOctNUAAAAAkHtOfVTAqlWr9MADD2Rpj4yMVHR0tIwxGj16tD755BMlJSWpWbNmmjJliu655x6r78mTJzVo0CB9++23KlKkiLp27arJkyfLy8vL6rNlyxZFRUVp/fr1Klu2rAYPHqwRI0bkuE4eFVD4cPtl5DfGlLMrwO2IceXsCgAUBGdmg0LznLfCjPBW+PCtRX5jTDm7AtyOGFfOrgBAQeA5bwAAAACA6yK8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyC8AQAAAIALILwBAAAAgAsgvAEAAACACyjU4W3MmDGy2WwOrxo1aljTL1y4oKioKJUpU0ZeXl7q2rWrEhMTHZZx8OBBRUREqESJEipfvryGDx+uS5cu3epNAQAAAICb4u7sAm6kVq1aWrZsmfXe3f1/JQ8dOlSLFi3S3Llz5ePjo0GDBumRRx7Rjz/+KElKT09XRESE/P39tW7dOh09elS9evVS0aJF9cYbb9zybQEAAACAvCr04c3d3V3+/v5Z2pOTk/Wf//xHM2fO1IMPPihJmjZtmmrWrKmffvpJTZo00Q8//KAdO3Zo2bJl8vPzU7169fTqq69qxIgRGjNmjDw8PG715gAAAABAnhTq0yYlac+ePQoICFDVqlXVo0cPHTx4UJK0YcMGpaWlqU2bNlbfGjVq6K677lJcXJwkKS4uTqGhofLz87P6hIeHKyUlRdu3b7/mOlNTU5WSkuLwAgAAAABnKtThrXHjxoqOjtaSJUs0depU7d+/X82bN9fp06eVkJAgDw8P+fr6Oszj5+enhIQESVJCQoJDcMucnjntWsaPHy8fHx/rFRQUlL8bBgAAAAC5VKhPm2zfvr31/3Xq1FHjxo1VqVIlzZkzR8WLFy+w9Y4cOVLDhg2z3qekpBDgAAAAADhVoT7ydjVfX1/dc8892rt3r/z9/XXx4kUlJSU59ElMTLSukfP3989y98nM99ldR5fJ09NTdrvd4QUAAAAAzuRS4e3MmTPat2+fKlSooAYNGqho0aJavny5NX337t06ePCgwsLCJElhYWHaunWrjh07ZvWJiYmR3W5XSEjILa8fAAAAAPKqUJ82+cILL6hTp06qVKmSjhw5otGjR8vNzU3du3eXj4+P+vbtq2HDhql06dKy2+0aPHiwwsLC1KRJE0lS27ZtFRISop49e2rChAlKSEjQyy+/rKioKHl6ejp56wAAAAAg5wp1ePvzzz/VvXt3nThxQuXKlVOzZs30008/qVy5cpKkiRMnqkiRIuratatSU1MVHh6uKVOmWPO7ubnpu+++04ABAxQWFqaSJUsqMjJS48aNc9YmAQAAAECe2IwxxtlFFHYpKSny8fFRcnJyobn+zWZzdgXOxbcW+Y0x5ewKcDtiXDm7AgAFwZnZwKWueQMAAACAOxXhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcgLuzCwAAAABwYzabsytwLmOcXYHzceQNAAAAAFzAHRXePvzwQ1WuXFnFihVT48aN9csvvzi7JAAAAADIkTsmvM2ePVvDhg3T6NGjtXHjRtWtW1fh4eE6duyYs0sDAAAAgBu6Y8Lbu+++q6efflpPPfWUQkJC9NFHH6lEiRL6/PPPnV0aAAAAANzQHXHDkosXL2rDhg0aOXKk1VakSBG1adNGcXFxWfqnpqYqNTXVep+cnCxJSklJKfhikSN8FED+YkwB+Y9xBeSvwjKmMjOBccIdVO6I8PbXX38pPT1dfn5+Du1+fn7atWtXlv7jx4/X2LFjs7QHBQUVWI3IHR8fZ1cA3F4YU0D+Y1wB+auwjanTp0/L5xYXdUeEt9waOXKkhg0bZr3PyMjQyZMnVaZMGdnu9Hu06vJfG4KCgnTo0CHZ7XZnlwO4PMYUkP8YV0D+Ykz9jzFGp0+fVkBAwC1f9x0R3sqWLSs3NzclJiY6tCcmJsrf3z9Lf09PT3l6ejq0+fr6FmSJLslut9/xgxfIT4wpIP8xroD8xZi67FYfcct0R9ywxMPDQw0aNNDy5cuttoyMDC1fvlxhYWFOrAwAAAAAcuaOOPImScOGDVNkZKQaNmyoRo0aadKkSTp79qyeeuopZ5cGAAAAADd0x4S3v//97zp+/LhGjRqlhIQE1atXT0uWLMlyExPcmKenp0aPHp3l1FIAecOYAvIf4wrIX4ypwsFmnHGPSwAAAABArtwR17wBAAAAgKsjvAEAAACACyC8AQAAAIALILwVoMqVK2vSpEnOLsPlHDhwQDabTfHx8QW+Lj4j18NnljeMK1wLn1feMKZwPXxmecO4ygFzm4uMjDSSzLPPPptl2sCBA40kExkZmaNl7d+/30gymzZtylH/Y8eOmbNnz+aob8eOHU14eHi201avXm0kmc2bN+doWdeycuVKI8mcOnXqppZztXPnzplSpUqZMmXKmAsXLuRq3sjISNO5c2eHtkuXLpmjR4+atLS0fKtx2rRpxsfHJ0t7bj6j/PLBBx+YSpUqGU9PT9OoUSPz888/39L15wfG1f8wrnyytN/qcRUbG2s6duxoKlSoYCSZBQsW3LJ15xfG1P8wpnyytN/qMfXGG2+Yhg0bGi8vL1OuXDnTuXNns2vXrlu2/vzCuPofxpVPlvZbPa6mTJliQkNDjbe3t/H29jZNmjQx33//fa6Xc0cceQsKCtKsWbN0/vx5q+3ChQuaOXOm7rrrrnxf38WLFyVJ5cqVU4kSJXI0T9++fR
UTE6M///wzy7Rp06apYcOGqlOnTr7WmVfGGF26dMl6P3/+fNWqVUs1atTQwoULb3r5bm5u8vf3l7t7wT/JIjefUX6YPXu2hg0bptGjR2vjxo2qW7euwsPDdezYsVtWQ35hXOUvxlXenT17VnXr1tWHH354y9ZZEBhT+YsxlXexsbGKiorSTz/9pJiYGKWlpalt27Y6e/bsLashvzCu8hfjKu8CAwP15ptvasOGDfr111/14IMPqnPnztq+fXvuFpTPobLQyUz1tWvXNv/3f/9ntc+YMcPUqVPHdO7c2fqry+LFi03Tpk2Nj4+PKV26tImIiDB79+615pHk8GrZsqXDOl577TVToUIFU7lyZWOMMZUqVTITJ040xlz+i0fRokXN6tWrreW99dZbply5ciYhIcGkpaUZPz8/8+qrrzrUf/r0aePl5WWmTp1qjDFmzZo1plmzZqZYsWImMDDQDB482Jw5c8bqf+HCBfPiiy+awMBA4+HhYapVq2Y+++wz6y9GV74yt/vChQtm8ODBply5csbT09M0bdrU/PLLL9YyM/9a8/3335v69eubokWLmpUrV1rTW7VqZT766CMzdepU89BDD2X5DLZt22YiIiKMt7e38fLyMs2aNTN79+41o0ePzlLTypUrHf66lZ6ebipWrGimTJnisMyNGzcam81mDhw4YIwx5p133jG1a9c2JUqUMIGBgWbAgAHm9OnTDvVf+Ro9enSWz8gYY/744w/z8MMPm5IlSxpvb2/z2GOPmYSEBGv66NGjTd26dc0XX3xhKlWqZOx2u/n73/9uUlJSsmx3dho1amSioqKs9+np6SYgIMCMHz8+R/MXFowrxlVhGldXkgsfeWNMMaYK45gy5vIRCkkmNjY2T/M7C+OKcVWYx5UxxpQqVcp89tlnuZrnjglv7777rmndurXV3rp1azNx4kSHgTtv3jwzf/58s2fPHrNp0ybTqVMnExoaatLT040xxvzyyy9Gklm2bJk5evSoOXHihLUOLy8v07NnT7Nt2zazbds2Y0zWL8Xw4cNNpUqVTFJSktm4caPx8PAwX3/9tcP0atWqmYyMDKvt888/N8WLFzdJSUlm7969pmTJkmbixInmt99+Mz/++KO59957Te/eva3+jz/+uAkKCjJfffWV2bdvn1m2bJmZNWuWuXTpkpk/f76RZHbv3m2OHj1qkpKSjDHGPPfccyYgIMB8//33Zvv27SYyMtKUKlXK2r7ML36dOnXMDz/8YPbu3WtN27t3r/H09DQnT540J06cMMWKFbMGkzHG/Pnnn6Z06dLmkUceMevXrze7d+82n3/+udm1a5c5ffq0efzxx027du3M0aNHzdGjR01qamqWUxNeeOEF06xZM4fP9fnnn3domzhxolmxYoXZv3+/Wb58ualevboZMGCAMcaY1NRUM2nSJGO32631ZA7qKz+j9PR0U69ePdOsWTPz66+/mp9++sk0aNDA+gFtzOWB6+XlZR555BGzdetWs3r1auPv72/++c9/XvM7mCk1NdW4ubll+cWyV69e5uGHH77h/IUJ44pxVVjG1dVcPbwxphhThW1MGWPMnj17jCSzdevWPM3vLIwrxlVhHVeXLl0yX375pfHw8DDbt2/P1bx3THg7duyY8fT0NAcOHDAHDhwwxYoVM8ePH3cYuFc7fvy4ww+ra53vHBkZafz8/ExqaqpD+9UDNzU11dSrV888/vjjJiQkxDz99NMO/Xfu3Gn95SFT8+bNzZNPPmmMMaZv377mmWeecZhnzZo1pkiRIub8+fNm9+7dRpKJiYnJdnuyO9/5zJkzpmjRombGjBlW28WLF01AQICZMGGCw3wLFy7Mssx//vOfpkuXLtb7zp07W3/RMMaYkSNHmipVqpiLFy9mW1N25ztfvZ83bdpkbDab+eOPP4wxxvpLTOZforIzd+5cU6ZMGev9tc53vvIz+uGHH4ybm5s5ePCgNX379u1GkvVXqNGjR5sSJUo4/JVl+PDhpnHjxtesJdPhw4eNJLNu3TqH9uHDh5tGjRrdcP7ChHH1P4wrnyz9buW4upqrhzfGFGOqsI2p9PR0ExERYZo2bZrreZ2NcfU/jCufLP2cMa62bNliSpYsadzc3IyPj49ZtGhRjufNdEdc8yZdPq81IiJC0dHRmjZtmiIiIlS2bFmHPnv27FH37t1VtWpV2e12Va5cWZJ08ODBGy4/NDRUHh4e1+3j4eGhGTNmaP78+bpw4YImTpzoML1GjRq6//779fnnn0uS9u7dqzVr1qhv376SpM2bNys6OlpeXl7WKzw8XBkZGdq/f7/i4+Pl5uamli1b5nS3aN++fUpLS1PTpk2ttqJFi6pRo0bauXOnQ9+GDRs6vE9PT9f06dP15JNPWm1PPvmkoqOjlZGRIUmKj49X8+bNVbRo0RzXdLV69eqpZs2amjlzpqTL5+IfO3ZMjz32mNVn2bJlat26tSpWrChvb2/17NlTJ06c0Llz53K8np07dyooKEhBQUFWW0hIiHx9fR32ReXKleXt7W29r1Chgktes5YfGFfZY1z9D+MqdxhT2WNM/c+tHlNRUVHatm2bZs2alet5CwvGVfYYV/9zq8ZV9erVFR8fr59//lkDBgxQZGSkduzYkeP5pTvsUQF9+vRRdHS0pk+frj59+mSZ3qlTJ508eVKffvqpfv75Z/3888+S/nfx6fWULFkyRzWsW7dOknTy5EmdPHkyy/S+fftq/vz5On36tKZNm6Zq1apZA/HMmTN69tlnFR8fb702b96sPXv2qFq1aipevHiOasirq7dx6dKlOnz4sP7+97/L3d1d7u7u6tatm/744w8tX75ckvKtph49elgDd+bMmWrXrp3KlCkj6fJtZTt27Kg6depo/vz52rBhg3Xjgpx8drl19Q8hm81m/aC6nrJly8rNzU2JiYkO7YmJifL398/XGm8lxtXNYVxdltdxdTtiTN0cxtRl+TGmBg0apO+++04rV65UYGBgfpZ3yzGubg7j6rKbHVceHh4KDg5WgwYNNH78eNWtW1fvvfdermq4o8Jbu3btdPHiRaWlpSk8PNxh2okTJ7R79269/PLLat26tWrWrKlTp0459Mn8q0p6enqe1r9v3z4NHTpUn376qRo3bqzIyMgsH/jjjz+uIkWKaObMmfriiy/Up08f2Ww2SVL9+vW1Y8cOBQcHZ3l5eHgoNDRUGRkZio2NzXb92dVfrVo1eXh46Mcff7Ta0tLStH79eoWEhFx3e/7zn/+oW7duDj9I4uPj1a1bN/3nP/+RJNWpU0dr1qxRWlraNWvKyf584okntG3bNm3YsEHz5s1Tjx49rGkbNmxQRkaG3nnnHTVp0kT33HOPjhw5kuv11KxZU4cOHdKhQ4esth07digpKemG+yInPDw81KBBA+uHmiRlZGRo+fLlCgsLu+nlOwvjinF1PQU9rm5HjCnG1PXcijFljNGgQYO0YMECrVixQlWqVMmX5ToT44pxdT3O+rcqIyNDqampuZsp1ydaupirz6dNTk42ycnJ1vvM853T09NNmTJlz
JNPPmn27Nljli9fbu677z6H6yfS0tJM8eLFzWuvvWYSEhKsiz2zO2fXGMdzaS9dumSaNGliunbtaowx5siRI6ZMmTLWOcVX6tu3rylVqpRxc3Mzhw8ftto3b95sihcvbqKiosymTZvMb7/9ZhYuXOhw98LevXuboKAgs2DBAvP777+blStXmtmzZxtjLl84arPZTHR0tDl27Jh1weY//vEPExAQYBYvXuxwserJkyeNMdmfJ33s2DFTtGhRs3jx4iz1f//998bT09OcOHHC/PXXX6ZMmTLWxaq//fab+eKLL6znxbz++uvmrrvuMrt27TLHjx83Fy9evOZ55U2bNjV169Y13t7e5ty5c1Z7fHy8kWQmTZpk9u3bZ7744gtTsWJFh5p//PFH60Lj48ePW8/1uPIzysjIMPXq1TPNmzc3GzZsMD///HO2F6vWrVvXoa6JEyeaSpUqZdkP2Zk1a5bx9PQ00dHRZseOHeaZZ54xvr6+DnczcgWMK8aVMYVnXJ0+fdps2rTJbNq0yUgy7777rtm0aZN1jYQrYEwxpowpPGNqwIABxsfHx6xatcq6ycPRo0cdtscVMK4YV8YUnnH10ksvmdjYWLN//36zZcsW89JLLxmbzWZ++OGHHM2f6Y4Lb1e78mLVmJgYU7NmTePp6Wnq1KljVq1aleXi908//dQEBQWZIkWKZLlN7NWu/FKMHTvWVKhQwfz111/W9Pnz5xsPDw8THx/vMN+6deuMJNOhQ4csy/zll1/MQw89ZLy8vEzJkiVNnTp1zOuvv25NP3/+vBk6dKipUKGC8fDwMMHBwebzzz+3po8bN874+/sbm81mbff58+fN4MGDTdmyZa97m9grB+6///1v4+vrm+1FqKmpqcbX19e89957xpjLP3Datm1rSpQoYby9vU3z5s3Nvn37jDGXfwBkbo+yuU3slaZMmWIkmV69emVZ57vvvmsqVKhgihcvbsLDw80XX3yRpeb+/fubMmXK5MttYq+Um4FrjDHvv/++ueuuu4yHh4dp1KiR+emnn3I8b2HBuGJcZSoM4yq7W0FLOX/4bmHAmGJMZSoMYyq78STJTJs2LUfzFxaMK8ZVpsIwrvr06WMqVapkPDw8TLly5Uzr1q1zHdyMMcZmjDG5O1YHAAAAALjV7qhr3gAAAADAVRHegHxy8OBBh1v4Xv3Kye2GAThiXAH5izEF5L9bOa44bRLIJ5cuXdKBAweuOb1y5cpyd3e/dQUBtwHGFZC/GFNA/ruV44rwBgAAAAAugNMmAQAAAMAFEN4AAAAAwAUQ3gAAAADABRDeAAAAAMAFEN4AALiBVq1aaciQIc4uAwBwhyO8AQAKTO/evWWz2fTmm286tC9cuFA2my1Xy6pcubImTZqUj9UVnAMHDshmsyk+Pt7ZpQAAbiOENwBAgSpWrJjeeustnTp1ytml5NrFixedXUK+SktLc3YJAICbQHgDABSoNm3ayN/fX+PHj79uv7Vr16p58+YqXry4goKC9Nxzz+ns2bOSLp+2+Mcff2jo0KGy2Wyy2WwyxqhcuXKaN2+etYx69eqpQoUKDsv09PTUuXPnJEkHDx5U586d5eXlJbvdrscff1yJiYlW/zFjxqhevXr67LPPVKVKFRUrVizbWhctWiQfHx/NmDEjT/tk37596ty5s/z8/OTl5aX77rtPy5Yts6aPGzdOtWvXzjJfvXr19Morr1jvP/vsM9WsWVPFihVTjRo1NGXKFGta5tG/2bNnq2XLlipWrJhmzJihP/74Q506dVKpUqVUsmRJ1apVS99//32etgMAcGsR3gAABcrNzU1vvPGG3n//ff3555/Z9tm3b5/atWunrl27asuWLZo9e7bWrl2rQYMGSZK++uorBQYGaty4cTp69KiOHj0qm82mFi1aaNWqVZKkU6dOaefOnTp//rx27dolSYqNjdV9992nEiVKKCMjQ507d9bJkycVGxurmJgY/f777/r73//uUMvevXs1f/58ffXVV9me9jhz5kx1795dM2bMUI8ePfK0T86cOaMOHTpo+fLl2rRpk9q1a6dOnTrp4MGDkqQ+ffpo586dWr9+vTXPpk2btGXLFj311FOSpBkzZmjUqFF6/fXXtXPnTr3xxht65ZVXNH36dId1vfTSS/rHP/6hnTt3Kjw8XFFRUUpNTdXq1au1detWvfXWW/Ly8srTdgAAbi13ZxcAALj9/e1vf1O9evU0evRo/ec//8kyffz48erRo4d1U5C7775bkydPVsuWLTV16lSVLl1abm5u8vb2lr+/vzVfq1at9PHHH0uSVq9erXvvvVf+/v5atWqVatSooVWrVqlly5aSpOXLl2vr1q3av3+/goKCJElffPGFatWqpfXr1+u+++6TdPlUyS+++ELlypXLUueHH36of/3rX/r222+t5eZF3bp1VbduXev9q6++qgULFuibb77RoEGDFBgYqPDwcE2bNs2qa9q0aWrZsqWqVq0qSRo9erTeeecdPfLII5KkKlWqaMeOHfr4448VGRlpLXvIkCFWH+ny0ceuXbsqNDRUkqzlAQAKP468AQBuibfeekvTp0/Xzp07s0zbvHmzoqOj5eXlZb3Cw8OVkZGh/fv3X3OZLVu21I4dO3T8+HHFxsaqVatWatWqlVatWqW0tDStW7dOrVq1kiTt3LlTQUFBVnCTpJCQEPn6+jrUVKlSpWyD27x58zR06FDFxMTcVHCTLh95e+GFF1SzZk35+vrKy8tLO3futI68SdLTTz+tL7/8UhcuXNDFixc1c+ZM9enTR5J09uxZ7du3T3379nXYZ6+99pr27dvnsK6GDRs6vH/uuef02muvqWnTpho9erS2bNlyU9sCALh1CG8AgFuiRYsWCg8P18iRI7NMO3PmjJ599lnFx8dbr82bN2vPnj2qVq3aNZcZGhqq0qVLKzY21iG8xcbGav369UpLS9P999+fqzpLliyZbfu9996rcuXK6fPPP5cxJlfLvNoLL7ygBQsW6I033tCaNWsUHx+v0NBQhxukdOrUSZ6enlqwYIG+/fZbpaWl6dFHH5V0eX9J0qeffuqwz7Zt26affvrputvTr18//f777+rZs6e2bt2qhg0b6v3337+p7QEA3BqcNgkAuGXefPNN1atXT9WrV3dor1+/vnbs2KHg4OBrzuvh4aH09HSHNpvNpubNm+vrr7/W9u3b1axZM5UoUUKpqan6+OOP1bBhQyu81KxZU4cOHdKhQ4eso287duxQUlKSQkJCblh7tWrV9M4776hVq1Zyc3PTBx98kNvNt/z444/q3bu3/va3v0m6HMYOHDjg0Mfd3V2RkZGaNm2aPDw81K1bNxUvXlyS5Ofnp4CAAP3+++95uu4uKChI/fv3V//+/TVy5Eh9+umnGjx4cJ63BwBwaxDeAAC3TGhoqHr06KHJkyc7tI8YMUJNmjTRoEGD1K9fP5UsWVI7duxQTEyMFZIqV66s1atXq1u3bvL09FTZsmUlXb7u7fnnn1fDhg2tG2+0aNFCM2bM0PDhw611tGnTxlr/pEmTdOnSJQ0c
OFAtW7bMcmrhtdxzzz1auXKlWrVqJXd39xs+d2737t1Z2mrVqqW7775bX331lTp16iSbzaZXXnlFGRkZWfr269dPNWvWlHQ58F1p7Nixeu655+Tj46N27dopNTVVv/76q06dOqVhw4Zds6YhQ4aoffv2uueee3Tq1CmtXLnSWgcAoHDjtEkAwC01bty4LEGlTp06io2N1W+//abmzZvr3nvv1ahRoxQQEOAw34EDB1StWjWHa9Jatmyp9PR069o26XKgu7rNZrPp66+/VqlSpdSiRQu1adNGVatW1ezZs3NVf/Xq1bVixQp9+eWXev7556/bt1u3brr33nsdXomJiXr33XdVqlQp3X///erUqZPCw8NVv379LPPffffduv/++1WjRg01btzYYVq/fv302Wefadq0aQoNDVXLli0VHR2tKlWqXLem9PR0RUVFqWbNmmrXrp3uueceh0cMAAAKL5u52RP3AQBAgTDG6O6779bAgQOvezQNAHBn4LRJAAAKoePHj2vWrFlKSEiwnu0GALizEd4AACiEypcvr7Jly+qTTz5RqVKlnF0OAKAQILwBAFAIcVUDAOBq3LAEAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXADhDQAAAABcAOENAAAAAFwA4Q0AAAAAXMD/A+rVYcrBq9R7AAAAAElFTkSuQmCC", - "text/plain": [ - "
    " - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + " def get_outstream_width(self, ind=0):\n", + " o_bits = self.get_output_datatype().bitwidth()\n", + " out_width = o_bits * self.get_nodeattr(\"PE\")\n", + " return out_width\n", + "\n" + ] } ], "source": [ - "layers_updated = list(cycles_dict_updated.keys())\n", - "cycles_updated = list(cycles_dict_updated.values())\n", - "fig = plt.figure(figsize = (10, 5))\n", - "plt.bar(layers_updated, cycles_updated, color ='blue', width = 0.3)\n", - "plt.xlabel(\"Network Layers\")\n", - "plt.ylabel(\"Clock Cycles\")\n", - "plt.title(\"Estimated clock cycles for each network layer\")\n", - "plt.show()" + "showSrc(mvau_inst.get_outstream_width)" ] }, { - "cell_type": "code", - "execution_count": 19, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "res_dict_updated = model.analysis(res_estimation)\n", - "res_dict_updated" + "The input stream width can be calculated by multiplying the input bit width with SIMD and the output stream width can be calculated by multiplying the output bit width with PE." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To connect two layers with each other for the final design, the input stream width of a node needs to match the output stream width of the preceding node. If that is not the case FINN inserts DataWidthConverters (DWCs) to resolve this mismatch. Let's have a look at the input/output stream width of the layers before updating the parallelization parameters." ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 32, "metadata": {}, "outputs": [ { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABaUElEQVR4nO3de3zP9f//8ft759nRsM1hmFNMYyLMoSmHEVJEfMScSpoKH4rvJ+ei+nyK6oMijT6Rckjlk0oIOR9L5pxTZVPG5pCx7fn7w2+vj7cNG+Mlu10vl/cl7+fz+Xq9Hq/3+/1cu+91eDuMMUYAAAAAANu42F0AAAAAABR2BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwDXpUmTJmrSpIndZRSogwcPyuFwaMaMGXaXYiteh7ybMWOGHA6HDh48eM2xX331laKiouTl5SWHw6GTJ0/e9PpuNYfDof79+9tdxm0t+zOzadOmfC/73XffyeFw6Lvvviv4wgDYjmAG3GGy/6d/pce6devyvK7ExESNGjUqT7903kqTJ0+2NTRk/3I0b968K4652i+o8+bNs365yl5XXh746zp+/Lg6deokb29vTZo0Sf/5z3/k4+Njd1l3vDVr1mjUqFF3ZAgGcOdxs7sAADfHmDFjFB4enqO9UqVKeV5HYmKiRo8erSZNmqh8+fJOfd98882NlnjdJk+erOLFi6tHjx621VBQqlWrpv/85z9ObcOGDZOvr6/+8Y9/2FQVCtrGjRt16tQpjR07Vs2aNbO7nEJjzZo1Gj16tHr06KHAwEC7ywGAqyKYAXeoVq1aqU6dOjdt/R4eHjdt3YVJSEiIHn/8cae2V155RcWLF8/Rjr+uY8eOSVKBhoMzZ85w1O0v5Ny5c3f8z00+k8CN4VRGoBCbM2eOateuLT8/P/n7+ysyMlJvvvmmpIunRHbs2FGSdP/991un02Vf23D5NWbZp+R98sknGj16tEqXLi0/Pz89+uijSk1NVXp6ugYMGKDg4GD5+vqqZ8+eSk9Pd6onISFBDzzwgIKDg+Xp6amIiAhNmTLFaUz58uW1Y8cOrVixwqrp0jpOnjypAQMGKCwsTJ6enqpUqZJeffVVZWVlOa3n5MmT6tGjhwICAhQYGKi4uLi/5OlOycnJcnNz0+jRo3P07d69Ww6HQ//+978lSSkpKRo8eLAiIyPl6+srf39/tWrVSj/88MM1t3Olawp79OiR42hqVlaWJk6cqOrVq8vLy0shISHq27evTpw44TRu06ZNio2NVfHixeXt7a3w8HD16tXrmrU4HA6NGjUqR3v58uWdjqJeuHBBo0ePVuXKleXl5aVixYqpUaNGWrJkidNyu3bt0qOPPqqgoCB5eXmpTp06+vzzz3Osf8eOHXrggQfk7e2tMmXK6KWXXsrxucpNkyZNFBcXJ0m699575XA4nOqcO3euateuLW9vbyuQ//rrr07r6NGjh3x9fbV//349+OCD8vPzU9euXa+63V9//VW9evVSSEiIPD09Vb16db3//vtOY86fP68RI0aodu3aCggIkI+Pjxo3bqzly5fnWF9WVpbefPNNRUZGysvLSyVKlFDLli1zvVZq4cKFuvvuu63tfvXVV9d8nS79GfLyyy+rTJky8vLyUtOmTbVv374c49evX6+WLVsqICBARYoUUUxMjFavXm31jxo1SkOGDJEkhYeHWz8vDh48qPbt2+uee+5xWl/btm3lcDic3vv169fL4XBo8eLFVtvPP/+sjh07KigoSEWKFFH
9+vX13//+N9d9mTNnjl588UWVLl1aRYoUUVpaWq77fuLECdWtW1dlypTR7t27r/laXWrVqlXq2LGjypYtK09PT4WFhWngwIH6888/rTEJCQlyOBzaunVrjuXHjRsnV1dXp8/ctV5b6eLr63A4lJiYqL/97W8qWrSoGjVqlK/aATjjiBlwh0pNTdUff/zh1OZwOFSsWDFJ0pIlS9SlSxc1bdpUr776qiRp586dWr16tZ577jndd999evbZZ/XWW2/p//7v/1StWjVJsv57JePHj5e3t7eGDh2qffv26e2335a7u7tcXFx04sQJjRo1SuvWrdOMGTMUHh6uESNGWMtOmTJF1atX10MPPSQ3Nzd98cUXevrpp5WVlaX4+HhJ0sSJE/XMM884neoXEhIiSTp79qxiYmL066+/qm/fvipbtqzWrFmjYcOG6ejRo5o4caIkyRijdu3a6fvvv9dTTz2latWq6dNPP7V+ef4rCQkJUUxMjD755BONHDnSqe/jjz+Wq6urFbB//vlnLVy4UB07dlR4eLiSk5P17rvvKiYmRomJiSpVqlSB1NS3b1/NmDFDPXv21LPPPqsDBw7o3//+t7Zu3arVq1fL3d1dx44dU4sWLVSiRAkNHTpUgYGBOnjwoBYsWFAgNUgXf3EcP368+vTpo7p16yotLU2bNm3Sli1b1Lx5c0kXw1bDhg1VunRpDR06VD4+Pvrkk0/08MMPa/78+XrkkUckSUlJSbr//vuVkZFhjZs6daq8vb2vWcc//vEP3XXXXZo6dap1inHFihUlyXqd7r33Xo0fP17Jycl68803tXr1am3dutXpCFtGRoZiY2PVqFEj/etf/1KRIkWuuM3k5GTVr1/futaxRIkSWrx4sXr37q20tDQNGDBAkpSWlqb33ntPXbp00RNPPKFTp05p+vTpio2N1YYNGxQVFWWts3fv3poxY4ZatWqlPn36KCMjQ6tWrdK6deucjs5///33WrBggZ5++mn5+fnprbfeUocOHXT48GHr58/VvPLKK3JxcdHgwYOVmpqq1157TV27dtX69eutMcuWLVOrVq1Uu3ZtjRw5Ui4uLtYfdlatWqW6deuqffv22rNnjz766CNNmDBBxYsXlySVKFFCjRs31meffaa0tDT5+/vLGKPVq1fLxcVFq1at0kMPPSTpYuhxcXFRw4YNrde1QYMGOnv2rJ599lkVK1ZMM2fO1EMPPaR58+ZZn5dsY8eOlYeHhwYPHqz09PRcj5j98ccfat68uVJSUrRixQrrs5FXc+fO1dmzZ9WvXz8VK1ZMGzZs0Ntvv61ffvlFc+fOlSQ9+uijio+P16xZs1SrVi2n5WfNmqUmTZqodOnSeX5tL9WxY0dVrlxZ48aNkzEmX7UDuIwBcEdJSEgwknJ9eHp6WuOee+454+/vbzIyMq64rrlz5xpJZvny5Tn6YmJiTExMjPV8+fLlRpK5++67zfnz5632Ll26GIfDYVq1auW0fHR0tClXrpxT29mzZ3NsJzY21lSoUMGprXr16k7bzjZ27Fjj4+Nj9uzZ49Q+dOhQ4+rqag4fPmyMMWbhwoVGknnttdesMRkZGaZx48ZGkklISMix7ktl7+vcuXOvOEaSiY+Pz7Xvaq/r1fbvSt59910jyWzfvt2pPSIiwjzwwAPW83PnzpnMzEynMQcOHDCenp5mzJgxTm2Xvw6Xv9/Z4uLinN7HVatWGUlm1qxZTuO++uorp/ZPP/3USDIbN27M835mk2RGjhyZo71cuXImLi7Oel6zZk3TunXrq66radOmJjIy0pw7d85qy8rKMg0aNDCVK1e22gYMGGAkmfXr11ttx44dMwEBAUaSOXDgwFW3kz0vL93f8+fPm+DgYHP33XebP//802pftGiRkWRGjBhhtcXFxRlJZujQoVfdTrbevXubkiVLmj/++MOpvXPnziYgIMCaaxkZGSY9Pd1pzIkTJ0xISIjp1auX1bZs2TIjyTz77LM5tpWVlWX9W5Lx8PAw+/bts9p++OEHI8m8/fbbV605e15Vq1bNqaY333zT6fOdlZVlKleubGJjY522ffbsWRMeHm6aN29utf3zn//M9f3ZuHGjkWS+/PJLY4wxP/74o5FkOnbsaOrVq2eNe+ihh0ytWrWs59mfg1WrVlltp06dMuHh4aZ8+fLW/MrelwoVKuT4uXbpZ+Ho0aOmevXqpkKFCubgwYNXfX0uXe+lPzty+7k5fvx443A4zKFDh6y2Ll26mFKlSjn9DNiyZYvTXM/Pazty5EgjyXTp0uWadQPIG05lBO5QkyZN0pIlS5wel56OExgYqDNnzuQ4retGde/eXe7u7tbzevXqyRiT4xS1evXq6ciRI8rIyLDaLj36kH3ELyYmRj///LNSU1Ovue25c+eqcePGKlq0qP744w/r0axZM2VmZmrlypWSpC+//FJubm7q16+ftayrq6ueeeaZ695vO7Vv315ubm76+OOPrbaffvpJiYmJeuyxx6w2T09Pubhc/LGfmZmp48ePy9fXV3fddZe2bNlSILXMnTtXAQEBat68udN7ULt2bfn6+lqnyGUfCVq0aJEuXLhQINu+XGBgoHbs2KG9e/fm2p+SkqJly5apU6dOOnXqlFXr8ePHFRsbq71791qnd3355ZeqX7++09GCEiVKXPN0wqvZtGmTjh07pqefflpeXl5We+vWrVW1atUcp8dJcvrMXokxRvPnz1fbtm1ljHF6H2JjY5Wammq9366urtZRnKysLKWkpCgjI0N16tRx+kzMnz9fDocjx1FZSTnuGNqsWTOnoz41atSQv7+/fv7552vWLkk9e/Z0OrLUuHFjSbKW37Ztm/bu3au//e1vOn78uLVvZ86cUdOmTbVy5cprnmJaq1Yt+fr6Wj8TVq1apTJlyqh79+7asmWLzp49K2OMvv/+e2v70sXPQd26dZ1O2fP19dWTTz6pgwcPKjEx0Wk7cXFxVzyq+ssvvygmJkYXLlzQypUrVa5cuTy9Ppe7dP1nzpzRH3/8oQYNGsgY43TqYvfu3fXbb785naY6a9YseXt7q0OHDpKu77V96qmnrqtuADlxKiNwh6pbt+5Vb/7x9NNP65NPPlGrVq1UunRptWjRQp06dVLLli1vaLtly5Z1eh4QECBJCgsLy9GelZWl1NRU6/Sm1atXa+TIkVq7dq3Onj3rND41NdVa15Xs3btXP/74o0qUKJFrf/YNGA4dOqSSJUvK19fXqf+uu+66xt4VrIK6BX7x4sXVtGlTffLJJxo7dqyki6cxurm5qX379ta47GuEJk+erAMHDigzM9Pqy8spZnmxd+9epaamKjg4ONf+7PcgJiZGHTp00OjRozVhwgQ1adJEDz/8sP72t7/J09OzQGoZM2aM2rVrpypVqujuu+9Wy5Yt1a1bN9WoUUOStG/fPhljNHz4cA0fPvyK9ZYuXVqHDh1SvXr1cvTfyGfm0KFDV1xH1apV9f333zu1ubm5qUyZMtdc7++//66TJ09q6tSpmjp1aq5jst8HSZo5c6Zef/117dq1yykkX3pX1/3796
tUqVIKCgq65vYv/xkgSUWLFs1xjWFely9atKgkWctnB+2rnXqcmppqLZcbV1dXRUdHa9WqVZIuBrPGjRurUaNGyszM1Lp16xQSEqKUlBSnYHalz0H2Kd6HDh3S3XffbbXndmfcbN26dZObm5t27typ0NDQK467lsOHD2vEiBH6/PPPc7zGl/5Bq3nz5ipZsqRmzZqlpk2bKisrSx999JHatWsnPz8/Sdf32l5tHwHkD8EMKKSCg4O1bds2ff3111q8eLEWL16shIQEde/eXTNnzrzu9bq6uuar3fz/axL279+vpk2bqmrVqnrjjTcUFhYmDw8Pffnll5owYUKebrKQlZWl5s2b6/nnn8+1v0qVKnncixvn6enpdPH9pbJD56VHSW5U586d1bNnT23btk1RUVH65JNP1LRpU+u6GuniRf7Dhw9Xr169NHbsWAUFBcnFxUUDBgy45uvrcDhyvX7k0nAnXXwPgoODNWvWrFzXkx2as78Hbt26dfriiy/09ddfq1evXnr99de1bt26HKE5Ly6v5b777tP+/fv12Wef6ZtvvtF7772nCRMm6J133lGfPn2sfR48eLBiY2NzXWd+vl7iZrv0iOfVZO/X448/fsVfsLPD6YcffqgePXro4Ycf1pAhQxQcHCxXV1eNHz9e+/fvv646rzXXb3T57P375z//6XQN3KXy8vlp1KiRXn75ZZ07d06rVq3SP/7xDwUGBuruu+/WqlWrrGtXLw1m+XW1axDbt2+vDz74QG+++abGjx9/XevPzMy0rk974YUXVLVqVfn4+OjXX39Vjx49nOa1q6ur/va3v2natGmaPHmyVq9erd9++83p7q/X89rm5TpLAHlDMAMKMQ8PD7Vt21Zt27ZVVlaWnn76ab377rsaPny4KlWqdEu/1PiLL75Qenq6Pv/8c6e/mOd2d7gr1VWxYkWdPn36mt8TVa5cOS1dulSnT592+iUjv3dDu9Y2rrS+7PbrPXUpNw8//LD69u1rnc64Z88eDRs2zGnMvHnzdP/992v69OlO7SdPnnQKcLkpWrRorqeiZR/1yVaxYkV9++23atiwYZ5+Yatfv77q16+vl19+WbNnz1bXrl01Z84c9enT56q1XH4HzfPnz+vo0aM5xgYFBalnz57q2bOnTp8+rfvuu0+jRo1Snz59VKFCBUmSu7t7nj4zuZ0SeSOfmez3f/fu3XrggQdyrPd6Px8lSpSQn5+fMjMzr7lf8+bNU4UKFbRgwQKneXX5KYsVK1bU119/rZSUlDwdNbuZsk+T9Pf3v+b+Xe1nWOPGjXX+/Hl99NFH+vXXX60Adt9991nBrEqVKlZAk648r3ft2mX159UzzzyjSpUqacSIEQoICNDQoUPzvGy27du3a8+ePZo5c6a6d+9utV/pFPXu3bvr9ddf1xdffKHFixerRIkSTn+UyM9rC6DgcY0ZUEgdP37c6bmLi4v1V/Ts29hnfx/NrbiNfPZfyS/9q3pqaqoSEhJyjPXx8cm1pk6dOmnt2rX6+uuvc/SdPHnSup7twQcfVEZGhtOt+DMzM/X222/f6G5YHnzwQa1bt06bN2/OUcesWbMUFRV1Q6cvXS4wMFCxsbH65JNPNGfOHHl4eOjhhx92GuPq6prjqMXcuXNz3Jo9NxUrVtSuXbv0+++/W20//PBDjltod+rUSZmZmdYplZfKyMiw3rcTJ07kqCX7L/SXf41CbrVkXxuUberUqTmOmF3+Gff19VWlSpWs9QcHB6tJkyZ69913cw11l+5r9vu5YcMGp/4rHRnMizp16ig4OFjvvPOO0z4vXrxYO3fuVOvWra9rva6ururQoYPmz5+vn376KUf/pfuV27xbv3691q5d67RMhw4dZIzJ9WsZ8nokrKDUrl1bFStW1L/+9S+dPn06R/+l+3e1n2H16tWTu7u7Xn31VQUFBal69eqSLga2devWacWKFTmOlj344IPasGGD0+tz5swZTZ06VeXLl1dERES+9mX48OEaPHiwhg0bluOrQfIit/fPGGN97cnlatSooRo1aui9997T/Pnz1blzZ7m5/e9v9Pl5bQEUPI6YAXeoxYsXW3/FvVSDBg1UoUIF9enTRykpKXrggQdUpkwZHTp0SG+//baioqKs6yWioqLk6uqqV199VampqfL09LS+Z6ygtWjRwjqC17dvX50+fVrTpk1TcHBwjl+aa9eurSlTpuill15SpUqVFBwcrAceeEBDhgzR559/rjZt2qhHjx6qXbu2zpw5o+3bt2vevHk6ePCgihcvrrZt26phw4YaOnSoDh48qIiICC1YsCBPNxi51Pz583N9jePi4jR06FDNnTtX9913n/r27auqVavqt99+04wZM3T06NFcA+eNeuyxx/T4449r8uTJio2NzfFlxm3atNGYMWPUs2dPNWjQQNu3b9esWbOsI0dX06tXL73xxhuKjY1V7969dezYMb3zzjuqXr2603czxcTEqG/fvho/fry2bdumFi1ayN3dXXv37tXcuXP15ptv6tFHH9XMmTM1efJkPfLII6pYsaJOnTqladOmyd/fXw8++OBVa+nTp4+eeuopdejQQc2bN9cPP/ygr7/+OsdRv4iICDVp0kS1a9dWUFCQNm3apHnz5ql///7WmEmTJqlRo0aKjIzUE088oQoVKig5OVlr167VL7/8Yn3H2/PPP6///Oc/atmypZ577jnrdvnlypXTjz/+eM3XLzfZoaBnz56KiYlRly5drNvlly9fXgMHDryu9UoXbzm/fPly1atXT0888YQiIiKUkpKiLVu26Ntvv1VKSoqki5+JBQsW6JFHHlHr1q114MABvfPOO4qIiHD6xfz+++9Xt27d9NZbb2nv3r1q2bKlsrKytGrVKt1///1Or+nN5uLiovfee0+tWrVS9erV1bNnT5UuXVq//vqrli9fLn9/f33xxReSLv6skC5+ZUHnzp3l7u6utm3bysfHR0WKFFHt2rW1bt066zvMpItHzM6cOaMzZ87kCGZDhw7VRx99pFatWunZZ59VUFCQZs6cqQMHDmj+/Pl5OtX0cv/85z+Vmpqq+Ph4+fn55euL5atWraqKFStq8ODB+vXXX+Xv76/58+df9Xq+7t27a/DgwZKUY1v5eW0B3AS3+jaQAG6uq90uX5fcFnnevHmmRYsWJjg42Hh4eJiyZcuavn37mqNHjzqtb9q0aaZChQrG1dXV6TbNV7pd/uW3kM/tNuHG/O9Wy7///rvV9vnnn5saNWoYLy8vU758efPqq6+a999/P8ftrpOSkkzr1q2Nn5+fkeRUx6lTp8ywYcNMpUqVjIeHhylevLhp0KCB+de//uV0G//jx4+bbt26GX9/fxMQEGC6detmtm7dmq/b5V/pkX0r7V9++cX06dPHlC5d2ri5uZmgoCDTpk0bs27duquuP7+3y8+WlpZmvL29jSTz4Ycf5ug/d+6c+fvf/25KlixpvL29TcOGDc3atWtzvJe53S7fGGM+/PBDU6FCBePh4WGioqLM119/neN2+dmmTp1qateubby9vY2fn5+JjIw0z
z//vPntt9+MMRdv092lSxdTtmxZ4+npaYKDg02bNm3Mpk2brrmfmZmZ5oUXXjDFixc3RYoUMbGxsWbfvn05bpf/0ksvmbp165rAwEDj7e1tqlatal5++WWnz4Exxuzfv990797dhIaGGnd3d1O6dGnTpk0bM2/ePKdxP/74o4mJiTFeXl6mdOnSZuzYsWb69OnXfbv8bB9//LGpVauW8fT0NEFBQaZr167ml19+cRoTFxdnfHx8rvnaXCo5OdnEx8ebsLAw4+7ubkJDQ03Tpk3N1KlTrTFZWVlm3Lhxply5csbT09PUqlXLLFq0KNf3NSMjw/zzn/80VatWNR4eHqZEiRKmVatWZvPmzdYYXeFrIi5/b3JzpZ8hV/o8bt261bRv394UK1bMeHp6mnLlyplOnTqZpUuXOo0bO3asKV26tHFxccnxXg0ZMsRIMq+++qrTMpUqVTKSzP79+3PUuX//fvPoo4+awMBA4+XlZerWrWsWLVqUp30xJvfPQmZmpunSpYtxc3MzCxcuvOZrdOnt8hMTE02zZs2Mr6+vKV68uHniiSesryjI7WfZ0aNHjaurq6lSpcoVt5OX1za3n+EAbozDGL4NEAAAoDD4448/VLJkSY0YMeKKdyMFYA+uMQMAACgkZsyYoczMTHXr1s3uUgBchmvMAAAA7nDLli1TYmKiXn75ZT388MMqX7683SUBuAynMgIAANzhmjRpojVr1qhhw4b68MMPVbp0abtLAnAZghkAAAAA2IxrzAAAAADAZgQzAAAAALAZN/+QlJWVpd9++01+fn7WF0wCAAAAKHyMMTp16pRKlSp1XV8cf70IZpJ+++03hYWF2V0GAAAAgNvEkSNHVKZMmVu2PYKZJD8/P0kXX3x/f3+bqwEAAABgl7S0NIWFhVkZ4VYhmEnW6Yv+/v4EMwAAAAC3/BInbv4BAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM1sD2a//vqrHn/8cRUrVkze3t6KjIzUpk2brH5jjEaMGKGSJUvK29tbzZo10969e53WkZKSoq5du8rf31+BgYHq3bu3Tp8+fat3BQAAAACui63B7MSJE2rYsKHc3d21ePFiJSYm6vXXX1fRokWtMa+99preeustvfPOO1q/fr18fHwUGxurc+fOWWO6du2qHTt2aMmSJVq0aJFWrlypJ5980o5dAgAAAIB8cxhjjF0bHzp0qFavXq1Vq1bl2m+MUalSpfT3v/9dgwcPliSlpqYqJCREM2bMUOfOnbVz505FRERo48aNqlOnjiTpq6++0oMPPqhffvlFpUqVumYdaWlpCggIUGpqKl8wDQAAABRidmUDW4+Yff7556pTp446duyo4OBg1apVS9OmTbP6Dxw4oKSkJDVr1sxqCwgIUL169bR27VpJ0tq1axUYGGiFMklq1qyZXFxctH79+ly3m56errS0NKcHAAAAANjF1mD2888/a8qUKapcubK+/vpr9evXT88++6xmzpwpSUpKSpIkhYSEOC0XEhJi9SUlJSk4ONip383NTUFBQdaYy40fP14BAQHWIywsrKB3DQAAAADyzNZglpWVpXvuuUfjxo1TrVq19OSTT+qJJ57QO++8c1O3O2zYMKWmplqPI0eO3NTtAQAAAMDV2BrMSpYsqYiICKe2atWq6fDhw5Kk0NBQSVJycrLTmOTkZKsvNDRUx44dc+rPyMhQSkqKNeZynp6e8vf3d3oAAAAAgF1sDWYNGzbU7t27ndr27NmjcuXKSZLCw8MVGhqqpUuXWv1paWlav369oqOjJUnR0dE6efKkNm/ebI1ZtmyZsrKyVK9evVuwFwAAAABwY9zs3PjAgQPVoEEDjRs3Tp06ddKGDRs0depUTZ06VZLkcDg0YMAAvfTSS6pcubLCw8M1fPhwlSpVSg8//LCki0fYWrZsaZ0CeeHCBfXv31+dO3fO0x0ZAQAAAMButt4uX5IWLVqkYcOGae/evQoPD9egQYP0xBNPWP3GGI0cOVJTp07VyZMn1ahRI02ePFlVqlSxxqSkpKh///764osv5OLiog4dOuitt96Sr69vnmq43W6X7xjtsLsEW5mRtn4kAQAAUIjZlQ1sD2a3A4LZ7YVgBgAAALsUyu8xAwAAAAAQzAAAAADAdgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALCZrcFs1KhRcjgcTo+qVata/efOnVN8fLyKFSsmX19fdejQQcnJyU7rOHz4sFq3bq0iRYooODhYQ4YMUUZGxq3eFQAAAAC4bm52F1C9enV9++231nM3t/+VNHDgQP33v//V3LlzFRAQoP79+6t9+/ZavXq1JCkzM1OtW7dWaGio1qxZo6NHj6p79+5yd3fXuHHjbvm+AAAAAMD1sD2Yubm5KTQ0NEd7amqqpk+frtmzZ+uBBx6QJCUkJKhatWpat26d6tevr2+++UaJiYn69ttvFRISoqioKI0dO1YvvPCCRo0aJQ8Pj1u9OwAAAACQb7ZfY7Z3716VKlVKFSpUUNeuXXX48GFJ0ubNm3XhwgU1a9bMGlu1alWVLVtWa9eulSStXbtWkZGRCgkJscbExsYqLS1NO3bsuOI209PTlZaW5vQAAAAAALvYGszq1aunGTNm6KuvvtKUKVN04MABNW7cWKdOnVJSUpI8PDwUGBjotExISIiSkpIkSUlJSU6hLLs/u+9Kxo8fr4CAAOsRFhZWsDsGAAAAAPlg66mMrVq1sv5do0YN1atXT+XKldMnn3wib2/vm7bdYcOGadCgQdbztLQ0whkAAAAA29h+KuOlAgMDVaVKFe3bt0+hoaE6f/68Tp486TQmOTnZuiYtNDQ0x10as5/ndt1aNk9PT/n7+zs9AAAAAMAut1UwO336tPbv36+SJUuqdu3acnd319KlS63+3bt36/Dhw4qOjpYkRUdHa/v27Tp27Jg1ZsmSJfL391dERMQtrx8AAAAAroetpzIOHjxYbdu2Vbly5fTbb79p5MiRcnV1VZcuXRQQEKDevXtr0KBBCgoKkr+/v5555hlFR0erfv36kqQWLVooIiJC
3bp102uvvaakpCS9+OKLio+Pl6enp527BgAAAAB5Zmsw++WXX9SlSxcdP35cJUqUUKNGjbRu3TqVKFFCkjRhwgS5uLioQ4cOSk9PV2xsrCZPnmwt7+rqqkWLFqlfv36Kjo6Wj4+P4uLiNGbMGLt2CQAAAADyzWGMMXYXYbe0tDQFBAQoNTX1trjezDHaYXcJtjIjC/1HEgAAADaxKxvcVteYAQAAAEBhRDADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZrdNMHvllVfkcDg0YMAAq+3cuXOKj49XsWLF5Ovrqw4dOig5OdlpucOHD6t169YqUqSIgoODNWTIEGVkZNzi6gEAAADg+t0WwWzjxo169913VaNGDaf2gQMH6osvvtDcuXO1YsUK/fbbb2rfvr3Vn5mZqdatW+v8+fNas2aNZs6cqRkzZmjEiBG3ehcAAAAA4LrZHsxOnz6trl27atq0aSpatKjVnpqaqunTp+uNN97QAw88oNq1ayshIUFr1qzRunXrJEnffPONEhMT9eGHHyoqKkqtWrXS2LFjNWnSJJ0/f96uXQIAAACAfLE9mMXHx6t169Zq1qyZU/vmzZt14cIFp/aqVauqbNmyWrt2rSRp7dq1ioyMVEhIiDUmNjZWaWlp2rFjxxW3mZ6errS0NKcHAAAAANjFzc6Nz5kzR1u2bNHGjRtz9CUlJcnDw0OBgYFO7SEhIUpKSrLGXBrKsvuz+65k/PjxGj169A1WDwAAAAAFw7YjZkeOHNFzzz2nWbNmycvL65Zue9iwYUpNTbUeR44cuaXbBwAAAIBL2RbMNm/erGPHjumee+6Rm5ub3NzctGLFCr311ltyc3NTSEiIzp8/r5MnTzotl5ycrNDQUElSaGhojrs0Zj/PHpMbT09P+fv7Oz0AAAAAwC62BbOmTZtq+/bt2rZtm/WoU6eOunbtav3b3d1dS5cutZbZvXu3Dh8+rOjoaElSdHS0tm/frmPHjlljlixZIn9/f0VERNzyfQIAAACA62HbNWZ+fn66++67ndp8fHxUrFgxq713794aNGiQgoKC5O/vr2eeeUbR0dGqX7++JKlFixaKiIhQt27d9NprrykpKUkvvvii4uPj5enpecv3CQAAAACuh603/7iWCRMmyMXFRR06dFB6erpiY2M1efJkq9/V1VWLFi1Sv379FB0dLR8fH8XFxWnMmDE2Vg0AAAAA+eMwxhi7i7BbWlqaAgIClJqaeltcb+YY7bC7BFuZkYX+IwkAAACb2JUNbP8eMwAAAAAo7K7rVMa9e/dq+fLlOnbsmLKyspz6RowYUSCFAQAAAEBhke9gNm3aNPXr10/FixdXaGioHI7/nXbncDgIZgBuO5wezOnBKHjMK+YVgIKV72D20ksv6eWXX9YLL7xwM+oBAAAAgEIn39eYnThxQh07drwZtQAAAABAoZTvYNaxY0d98803N6MWAAAAACiU8n0qY6VKlTR8+HCtW7dOkZGRcnd3d+p/9tlnC6w4AAAAACgM8h3Mpk6dKl9fX61YsUIrVqxw6nM4HAQzAAAAAMinfAezAwcO3Iw6AAAAAKDQuqEvmDbGyBhuFwsAAAAAN+K6gtkHH3ygyMhIeXt7y9vbWzVq1NB//vOfgq4NAAAAAAqFfJ/K+MYbb2j48OHq37+/GjZsKEn6/vvv9dRTT+mPP/7QwIEDC7xIAAAAALiT5TuYvf3225oyZYq6d+9utT300EOqXr26Ro0aRTADAAAAgHzK96mMR48eVYMGDXK0N2jQQEePHi2QogAAAACgMMl3MKtUqZI++eSTHO0ff/yxKleuXCBFAQAAAEBhku9TGUePHq3HHntMK1eutK4xW716tZYuXZprYAMAAAAAXF2+j5h16NBB69evV/HixbVw4UItXLhQxYsX14YNG/TII4/cjBoBAAAA4I6W7yNmklS7dm19+OGHBV0LAAAAABRKeQpmaWlp8vf3t/59NdnjAAAAAAB5k6dgVrRoUR09elTBwcEKDAyUw+HIMcYYI4fDoczMzAIvEgAAAADuZHkKZsuWLVNQUJAkafny5Te1IAAAAAAobPIUzGJiYqx/h4eHKywsLMdRM2OMjhw5UrDVAQAAAEAhkO+7MoaHh+v333/P0Z6SkqLw8PACKQoAAAAACpN8B7Psa8kud/r0aXl5eRVIUQAAAABQmOT5dvmDBg2SJDkcDg0fPlxFihSx+jIzM7V+/XpFRUUVeIEAAAAAcKfLczDbunWrpItHzLZv3y4PDw+rz8PDQzVr1tTgwYMLvkIAAAAAuMPlOZhl342xZ8+eevPNN/m+MgAAAAAoIHkOZtkSEhJuRh0AAAAAUGjlO5hJ0qZNm/TJJ5/o8OHDOn/+vFPfggULCqQwAAAAoLBwjM55c73CxIw0dpdgu3zflXHOnDlq0KCBdu7cqU8//VQXLlzQjh07tGzZMgUEBNyMGgEAAADgjpbvYDZu3DhNmDBBX3zxhTw8PPTmm29q165d6tSpk8qWLXszagQAAACAO1q+g9n+/fvVunVrSRfvxnjmzBk5HA4NHDhQU6dOLfACAQAAAOBOl+9gVrRoUZ06dUqSVLp0af3000+SpJMnT+rs2bMFWx0AAAAAFAL5vvnHfffdpyVLligyMlIdO3bUc889p2XLlmnJkiVq2rTpzagRAAAAAO5o+Q5m//73v3Xu3DlJ0j/+8Q+5u7trzZo16tChg1588cUCLxAAAAAA7nT5DmZBQUHWv11cXDR06NACLQgAAAAACpt8X2O2ZcsWbd++3Xr+2Wef6eGHH9b//d//5fhOMwAAAADAteU7mPXt21d79uyRJP3888967LHHVKRIEc2dO1fPP/98gRcIAAAAAHe6fAezPXv2KCoqSpI0d+5cxcTEaPbs2ZoxY4bmz59f0PUBAAAAwB0v38HMGKOsrCxJ0rfffqsHH3xQkhQWFqY//vijYKsDAAAAgEIg38GsTp06eumll/Sf//xHK1assL5s+sCBAwoJCSnwAgEAAADgTpfvYDZx4kRt2bJF/fv31z/+8Q9VqlRJkjRv3jw1aNCgwAsEAAAAgDtdvm+
XX6NGDae7Mmb75z//KVdX1wIpCgAAAAAKk3wHsyvx8vIqqFUBAAAAQKGSp2AWFBSkPXv2qHjx4ipatKgcDscVx6akpBRYcQAAAABQGOQpmE2YMEF+fn6SLl5jBgAAAAAoOHkKZnFxcbn+GwAAAABw4/IUzNLS0vK8Qn9//+suBgAAAAAKozwFs8DAwKteVyZd/OJph8OhzMzMAikMAAAAAAqLPAWz5cuX3+w6AAAAAKDQylMwi4mJudl1AAAAAECh5ZKXQT/++KOysrKsf1/tkR9TpkxRjRo15O/vL39/f0VHR2vx4sVW/7lz5xQfH69ixYrJ19dXHTp0UHJystM6Dh8+rNatW6tIkSIKDg7WkCFDlJGRka86AAAAAMBOeTpiFhUVpaSkJAUHBysqKkoOh0PGmBzj8nuNWZkyZfTKK6+ocuXKMsZo5syZateunbZu3arq1atr4MCB+u9//6u5c+cqICBA/fv3V/v27bV69WpJUmZmplq3bq3Q0FCtWbNGR48eVffu3eXu7q5x48bluQ4AAAAAsFOegtmBAwdUokQJ698FpW3btk7PX375ZU2ZMkXr1q1TmTJlNH36dM2ePVsPPPCAJCkhIUHVqlXTunXrVL9+fX3zzTdKTEzUt99+q5CQEEVFRWns2LF64YUXNGrUKHl4eBRYrQAAAABws+TpVMZy5cpZd2U8dOiQSpcurXLlyjk9SpcurUOHDl13IZmZmZozZ47OnDmj6Ohobd68WRcuXFCzZs2sMVWrVlXZsmW1du1aSdLatWsVGRmpkJAQa0xsbKzS0tK0Y8eOK24rPT1daWlpTg8AAAAAsEuegtml7r//fqWkpORoT01N1f3335/vArZv3y5fX195enrqqaee0qeffqqIiAglJSXJw8NDgYGBTuNDQkKUlJQkSUpKSnIKZdn92X1XMn78eAUEBFiPsLCwfNcNAAAAAAUl38Es+/vKLnf8+HH5+Pjku4C77rpL27Zt0/r169WvXz/FxcUpMTEx3+vJj2HDhik1NdV6HDly5KZuDwAAAACuJk/XmElS+/btJV28wUePHj3k6elp9WVmZurHH39UgwYN8l2Ah4eHKlWqJEmqXbu2Nm7cqDfffFOPPfaYzp8/r5MnTzodNUtOTlZoaKgkKTQ0VBs2bHBaX/ZdG7PH5MbT09OpfgAAAACwU56PmGWf9meMkZ+fn9OpgKGhoXryySf14Ycf3nBBWVlZSk9PV+3ateXu7q6lS5dafbt379bhw4cVHR0tSYqOjtb27dt17Ngxa8ySJUvk7++viIiIG64FAAAAAG6FPB8xS0hIkCSVL19egwcPvq7TFi83bNgwtWrVSmXLltWpU6c0e/Zsfffdd/r6668VEBCg3r17a9CgQQoKCpK/v7+eeeYZRUdHq379+pKkFi1aKCIiQt26ddNrr72mpKQkvfjii4qPj+eIGAAAAIC/jDwHs2wjR44ssI0fO3ZM3bt319GjRxUQEKAaNWro66+/VvPmzSVJEyZMkIuLizp06KD09HTFxsZq8uTJ1vKurq5atGiR+vXrp+joaPn4+CguLk5jxowpsBoBAAAA4GbLczArWrRorjf9CAgIUJUqVTR48GArUOXV9OnTr9rv5eWlSZMmadKkSVccU65cOX355Zf52i4AAAAA3E7yHMwmTpyYa/vJkye1efNmtWnTRvPmzcvxpdEAAAAAgKvLczCLi4u7an9UVJTGjx9PMAMAAACAfMr395hdSZs2bbRr166CWh0AAAAAFBoFFszS09Pl4eFRUKsDAAAAgEKjwILZ9OnTFRUVVVCrAwAAAIBCI8/XmA0aNCjX9tTUVG3ZskV79uzRypUrC6wwAAAAACgs8hzMtm7dmmu7v7+/mjdvrgULFig8PLzACgMAAACAwiLPwWz58uU3sw4AAAAAKLQK7BozAAAAAMD1IZgBAAAAgM0IZgAAAABgM4IZAAAAANgsz8GsV69eOnXq1M2sBQAAAAAKpTwHs5kzZ+rPP/+8mbUAAAAAQKGU52BmjLmZdQAAAABAoZXn7zGTpFOnTsnLy+uqY/z9/W+oIAAAAAAobPIVzKpUqXLFPmOMHA6HMjMzb7goAAAAAChM8hXM5s2bp6CgoJtVCwAAAAAUSvkKZg0bNlRwcPDNqgUAAAAACiW+xwwAAAAAbJbnYFauXDm5urrezFoAAAAAoFDK86mMBw4cuJl1AAAAAEChledgVrRoUTkcjhztAQEBqlKligYPHqzmzZsXaHEAAAAAUBjkOZhNmDAh12B28uRJbd68WW3atNG8efPUtm3bAi0QAAAAAO50eQ5mPXr0uGp/VFSUxo8fTzADAAAAgHwqsLsytmnTRrt27Sqo1QEAAABAoVFgwSw9PV0eHh4FtToAAAAAKDQKLJhNnz5dUVFRBbU6AAAAACg08nyN2aBBg3JtT01N1ZYtW7Rnzx6tXLmywAoDAAAAgMIiz8Fs69atubb7+/urefPmWrBggcLDwwusMAAAAAAoLPIczJYvX37V/l9++UVPPvmkpk6desNFAQAAAEBhUmDXmB0/flzTp08vqNUBAAAAQKFRYMEMAAAAAHB9CGYAAAAAYDOCGQAAAADYLM83/2jfvv1V+0+ePHmjtQAAAABAoZTnYBYQEHDN/u7du99wQQAAAABQ2OQ5mCUkJNzMOgAAAACg0OIaMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwma3BbPz48br33nvl5+en4OBgPfzww9q9e7fTmHPnzik+Pl7FihWTr6+vOnTooOTkZKcxhw8fVuvWrVWkSBEFBwdryJAhysjIuJW7AgAAAADXzdZgtmLFCsXHx2vdunVasmSJLly4oBYtWujMmTPWmIEDB+qLL77Q3LlztWLFCv32229q37691Z+ZmanWrVvr/PnzWrNmjWbOnKkZM2ZoxIgRduwSAAAAAOSbm50b/+qrr5yez5gxQ8HBwdq8ebPuu+8+paamavr06Zo9e7YeeOABSVJCQoKqVaumdevWqX79+vrmm2+UmJiob7/9ViEhIYqKitLYsWP1wgsvaNSoUfLw8LBj1wAAAAAgz26ra8xSU1MlSUFBQZKkzZs368KFC2rWrJk1pmrVqipbtqzWrl0rSVq7dq0iIyMVEhJijYmNjVVaWpp27NiR63bS09OVlpbm9AAAAAAAu9w2wSwrK0sDBgxQw4YNdffdd0uSkpKS5OHhocDAQKexISEhSkpKssZcGsqy+7P7cjN+/HgFBARYj7CwsALeGwAAAADIu9smmMXHx+unn37SnDlzbvq2hg0bptTUVOtx5MiRm75NAAAAALgSW68xy9a/f38tWrRIK1euVJkyZaz20NBQnT9/XidPnnQ6apacnKzQ0FBrzIYNG5zWl33Xxuwxl/P09JSnp2cB7wUAAAAAXB9bj5gZY9S/f399+umnWrZsmcLDw536a9euLXd3dy
1dutRq2717tw4fPqzo6GhJUnR0tLZv365jx45ZY5YsWSJ/f39FRETcmh0BAAAAgBtg6xGz+Ph4zZ49W5999pn8/Pysa8ICAgLk7e2tgIAA9e7dW4MGDVJQUJD8/f31zDPPKDo6WvXr15cktWjRQhEREerWrZtee+01JSUl6cUXX1R8fDxHxQAAAAD8JdgazKZMmSJJatKkiVN7QkKCevToIUmaMGGCXFxc1KFDB6Wnpys2NlaTJ0+2xrq6umrRokXq16+foqOj5ePjo7i4OI0ZM+ZW7QYAAAAA3BBbg5kx5ppjvLy8NGnSJE2aNOmKY8qVK6cvv/yyIEsDAAAAgFvmtrkrIwAAAAAUVgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbGZrMFu5cqXatm2rUqVKyeFwaOHChU79xhiNGDFCJUuWlLe3t5o1a6a9e/c6jUlJSVHXrl3l7++vwMBA9e7dW6dPn76FewEAAAAAN8bWYHbmzBnVrFlTkyZNyrX/tdde01tvvaV33nlH69evl4+Pj2JjY3Xu3DlrTNeuXbVjxw4tWbJEixYt0sqVK/Xkk0/eql0AAAAAgBvmZufGW7VqpVatWuXaZ4zRxIkT9eKLL6pdu3aSpA8++EAhISFauHChOnfurJ07d+qrr77Sxo0bVadOHUnS22+/rQcffFD/+te/VKpUqVu2LwAAAABwvW7ba8wOHDigpKQkNWvWzGoLCAhQvXr1tHbtWknS2rVrFRgYaIUySWrWrJlcXFy0fv36K647PT1daWlpTg8AAAAAsMttG8ySkpIkSSEhIU7tISEhVl9SUpKCg4Od+t3c3BQUFGSNyc348eMVEBBgPcLCwgq4egAAAADIu9s2mN1Mw4YNU2pqqvU4cuSI3SUBAAAAKMRu22AWGhoqSUpOTnZqT05OtvpCQ0N17Ngxp/6MjAylpKRYY3Lj6ekpf39/pwcAAAAA2OW2DWbh4eEKDQ3V0qVLrba0tDStX79e0dHRkqTo6GidPHlSmzdvtsYsW7ZMWVlZqlev3i2vGQAAAACuh613ZTx9+rT27dtnPT9w4IC2bdumoKAglS1bVgMGDNBLL72kypUrKzw8XMOHD1epUqX08MMPS5KqVaumli1b6oknntA777yjCxcuqH///urcuTN3ZAQAAADwl2FrMNu0aZPuv/9+6/mgQYMkSXFxcZoxY4aef/55nTlzRk8++aROnjypRo0a6auvvpKXl5e1zKxZs9S/f381bdpULi4u6tChg956661bvi8AAAAAcL1sDWZNmjSRMeaK/Q6HQ2PGjNGYMWOuOCYoKEizZ8++GeUBAAAAwC1x215jBgAAAACFBcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm90xwWzSpEkqX768vLy8VK9ePW3YsMHukgAAAAAgT+6IYPbxxx9r0KBBGjlypLZs2aKaNWsqNjZWx44ds7s0AAAAALimOyKYvfHGG3riiSfUs2dPRURE6J133lGRIkX0/vvv210aAAAAAFyTm90F3Kjz589r8+bNGjZsmNXm4uKiZs2aae3atbkuk56ervT0dOt5amqqJCktLe3mFptX5+wuwF63zfuAOwdzyu4ScCdiXtldAu40zCm7S7Bk12KMuaXb/csHsz/++EOZmZkKCQlxag8JCdGuXbtyXWb8+PEaPXp0jvawsLCbUiPyJ+CVALtLAO4ozCmg4DGvgIJ1O86pU6dOKSDg1tX1lw9m12PYsGEaNGiQ9TwrK0spKSkqVqyYHA6HjZXZLy0tTWFhYTpy5Ij8/f3tLgf4y2NOAQWPeQUULOaUM2OMTp06pVKlSt3S7f7lg1nx4sXl6uqq5ORkp/bk5GSFhobmuoynp6c8PT2d2gIDA29WiX9J/v7+TEygADGngILHvAIKFnPqf27lkbJsf/mbf3h4eKh27dpaunSp1ZaVlaWlS5cqOjraxsoAAAAAIG/+8kfMJGnQoEGKi4tTnTp1VLduXU2cOFFnzpxRz5497S4NAAAAAK7pjghmjz32mH7//XeNGDFCSUlJioqK0ldffZXjhiC4Nk9PT40cOTLHqZ4Arg9zCih4zCugYDGnbg8Oc6vvAwkAAAAAcPKXv8YMAAAAAP7qCGYAAAAAYDOCGQAAAADYjGB2ncqXL6+JEyfaXcZfzsGDB+VwOLRt27abvi3eo78e3rPrw7zClfB+XR/mFK6G9+z6MK/ywPyFxcXFGUmmb9++OfqefvppI8nExcXlaV0HDhwwkszWrVvzNP7YsWPmzJkzeRrbpk0bExsbm2vfypUrjSTzww8/5GldV7J8+XIjyZw4ceKG1nO5s2fPmqJFi5pixYqZc+fO5WvZuLg4065dO6e2jIwMc/ToUXPhwoUCqzEhIcEEBATkaM/Pe1RQ/v3vf5ty5coZT09PU7duXbN+/fpbuv2CwLz6H+ZVQI72Wz2vVqxYYdq0aWNKlixpJJlPP/30lm27oDCn/oc5FZCj/VbPqXHjxpk6deoYX19fU6JECdOuXTuza9euW7b9gsK8+h/mVUCO9ls9ryZPnmwiIyONn5+f8fPzM/Xr1zdffvllvtfzlz9iFhYWp
jlz5ujPP/+02s6dO6fZs2erbNmyBb698+fPS5JKlCihIkWK5GmZ3r17a8mSJfrll19y9CUkJKhOnTqqUaNGgdZ5vYwxysjIsJ7Pnz9f1atXV9WqVbVw4cIbXr+rq6tCQ0Pl5nbzv6khP+9RQfj44481aNAgjRw5Ulu2bFHNmjUVGxurY8eO3bIaCgrzqmAxr67fmTNnVLNmTU2aNOmWbfNmYE4VLObU9VuxYoXi4+O1bt06LVmyRBcuXFCLFi105syZW1ZDQWFeFSzm1fUrU6aMXnnlFW3evFmbNm3SAw88oHbt2mnHjh35W1EBB8ZbKjuN33333ebDDz+02mfNmmVq1Khh2rVrZ/21ZPHixaZhw4YmICDABAUFmdatW5t9+/ZZy0hyesTExDht46WXXjIlS5Y05cuXN8YYU65cOTNhwgRjzMW/VLi7u5uVK1da63v11VdNiRIlTFJSkrlw4YIJCQkxY8eOdar/1KlTxtfX10yZMsUYY8yqVatMo0aNjJeXlylTpox55plnzOnTp63x586dM88//7wpU6aM8fDwMBUrVjTvvfee9ZeeSx/Z+33u3DnzzDPPmBIlShhPT0/TsGFDs2HDBmud2X9l+fLLL80999xj3N3dzfLly63+Jk2amHfeecdMmTLFNG/ePMd78NNPP5nWrVsbPz8/4+vraxo1amT27dtnRo4cmaOm5cuXO/1VKjMz05QuXdpMnjzZaZ1btmwxDofDHDx40BhjzOuvv27uvvtuU6RIEVOmTBnTr18/c+rUKaf6L32MHDkyx3tkjDGHDh0yDz30kPHx8TF+fn6mY8eOJikpyeofOXKkqVmzpvnggw9MuXLljL+/v3nsscdMWlpajv3OTd26dU18fLz1PDMz05QqVcqMHz8+T8vfLphXzKvbaV5dSn/hI2bMKebU7TinjLl4ZEGSWbFixXUtbxfmFfPqdp5XxhhTtGhR89577+VrmTsimL3xxhumadOmVnvTpk3NhAkTnCblvHnzzPz5883evXvN1q1bTdu2bU1kZKTJzMw0xhizYcMGI8l8++235ujRo+b48ePWNnx9fU23bt3MTz/9ZH766SdjTM43fMiQIaZcuXLm5MmTZsuWLcbDw8N89tlnTv0VK1Y0WVlZVtv7779vvL29zcmTJ82+ffuMj4+PmTBhgtmzZ49ZvXq1qVWrlunRo4c1vlOnTiYsLMwsWLDA7N+/33z77bdmzpw5JiMjw8yfP99IMrt37zZHjx41J0+eNMYY8+yzz5pSpUqZL7/80uzYscPExcWZokWLWvuX/aGuUaOG+eabb8y+ffusvn379hlPT0+TkpJijh8/bry8vKyJYowxv/zyiwkKCjLt27c3GzduNLt37zbvv/++2bVrlzl16pTp1KmTadmypTl69Kg5evSoSU9Pz3G6wODBg02jRo2c3te///3vTm0TJkwwy5YtMwcOHDBLly41d911l+nXr58xxpj09HQzceJE4+/vb20ne8Je+h5lZmaaqKgo06hRI7Np0yazbt06U7t2beuHrzEXJ6Wvr69p37692b59u1m5cqUJDQ01//d//3fFz2C29PR04+rqmuOXxu7du5uHHnromsvfTphXzKvbZV5d7q8ezJhTzKnbbU4ZY8zevXuNJLN9+/brWt4uzCvm1e06rzIyMsxHH31kPDw8zI4dO/K17B0RzI4dO2Y8PT3NwYMHzcGDB42Xl5f5/fffnSbl5X7//XenH0RXOr84Li7OhISEmPT0dKf2yydlenq6iYqKMp06dTIRERHmiSeecBq/c+dO6y8G2Ro3bmwef/xxY4wxvXv3Nk8++aTTMqtWrTIuLi7mzz//NLt37zaSzJIlS3Ldn9zOLz59+rRxd3c3s2bNstrOnz9vSpUqZV577TWn5RYuXJhjnf/3f/9nHn74Yet5u3btrL9EGGPMsGHDTHh4uDl//nyuNeV2fvHlr/PWrVuNw+Ewhw4dMsYY6y8o2X9Bys3cuXNNsWLFrOdXOr/40vfom2++Ma6urubw4cNW/44dO4wk669HI0eONEWKFHH668iQIUNMvXr1rlhLtl9//dVIMmvWrHFqHzJkiKlbt+41l7+dMK/+h3kVkGPcrZxXl/urBzPmFHPqdptTmZmZpnXr1qZhw4b5XtZuzKv/YV4F5Bhnx7z68ccfjY+Pj3F1dTUBAQHmv//9b56XzfaXv8ZMungeaevWrTVjxgwlJCSodevWKl68uNOYvXv3qkuXLqpQoYL8/f1Vvnx5SdLhw4evuf7IyEh5eHhcdYyHh4dmzZql+fPn69y5c5owYYJTf9WqVdWgQQO9//77kqR9+/Zp1apV6t27tyTphx9+0IwZM+Tr62s9YmNjlZWVpQMHDmjbtm1ydXVVTExMXl8W7d+/XxcuXFDDhg2tNnd3d9WtW1c7d+50GlunTh2n55mZmZo5c6Yef/xxq+3xxx/XjBkzlJWVJUnatm2bGjduLHd39zzXdLmoqChVq1ZNs2fPlnTx3Pdjx46pY8eO1phvv/1WTZs2VenSpeXn56du3brp+PHjOnv2bJ63s3PnToWFhSksLMxqi4iIUGBgoNNrUb58efn5+VnPS5Ys+Ze8RqwgMK9yx7z6H+ZV/jCncsec+p9bPafi4+P1008/ac6cOfle9nbBvMod8+p/btW8uuuuu7Rt2zatX79e/fr1U1xcnBITE/O8vHQH3S6/V69emjFjhmbOnKlevXrl6G/btq1SUlI0bdo0rV+/XuvXr5f0vws5r8bHxydPNaxZs0aSlJKSopSUlBz9vXv31vz583Xq1CklJCSoYsWK1iQ7ffq0+vbtq23btlmPH374QXv37lXFihXl7e2dpxqu1+X7+PXXX+vXX3/VY489Jjc3N7m5ualz5846dOiQli5dKkkFVlPXrl2tSTl79my1bNlSxYoVk3Tx1qpt2rRRjRo1NH/+fG3evNm6CUBe3rv8uvwHjMPhsH4IXU3x4sXl6uqq5ORkp/bk5GSFhoYWaI23EvPqxjCvLrreeXUnYk7dGObURQUxp/r3769FixZp+fLlKlOmTEGWd8sxr24M8+qiG51XHh4eqlSpkmrXrq3x48erZs2aevPNN/NVwx0TzFq2bKnz58/rwoULio2Ndeo7fvy4du/erRdffFFNmzZVtWrVdOLECacx2X8NyczMvK7t79+/XwMHDtS0adNUr149xcXF5XgzO3XqJBcXF82ePVsffPCBevXqJYfDIUm65557lJiYqEqVKuV4eHh4KDIyUllZWVqxYkWu28+t/ooVK8rDw0OrV6+22i5cuKCNGzcqIiLiqvszffp0de7c2emHxLZt29S5c2dNnz5dklSjRg2tWrVKFy5cuGJNeXk9//a3v+mnn37S5s2bNW/ePHXt2tXq27x5s7KysvT666+rfv36qlKlin777bd8b6datWo6cuSIjhw5YrUlJibq5MmT13wt8sLDw0O1a9e2fmBJUlZWlpYuXaro6OgbXr9dmFfMq6u52fPqTsScYk5dza2YU8YY9e/fX59++qmWLVum8PDwAlmvnZhX
zKursev/VVlZWUpPT8/fQvk++fE2cvn5q6mpqSY1NdV6nn1+cWZmpilWrJh5/PHHzd69e83SpUvNvffe63S9woULF4y3t7d56aWXTFJSknXhZG7nyBrjfO5qRkaGqV+/vunQoYMxxpjffvvNFCtWzDqH91K9e/c2RYsWNa6urubXX3+12n/44Qfj7e1t4uPjzdatW82ePXvMwoULne7y16NHDxMWFmY+/fRT8/PPP5vly5ebjz/+2Bhz8SJMh8NhZsyYYY4dO2Zd/Pjcc8+ZUqVKmcWLFztd+JmSkmKMyf285GPHjhl3d3ezePHiHPV/+eWXxtPT0xw/ftz88ccfplixYtaFn3v27DEffPCB9X0oL7/8silbtqzZtWuX+f3338358+eveB53w4YNTc2aNY2fn585e/as1b5t2zYjyUycONHs37/ffPDBB6Z06dJONa9evdq6aPf333+3vrfi0vcoKyvLREVFmcaNG5vNmzeb9evX53rhZ82aNZ3qmjBhgilXrlyO1yE3c+bMMZ6enmbGjBkmMTHRPPnkkyYwMNDprj9/Bcwr5pUxt8+8OnXqlNm6davZunWrkWTeeOMNs3XrVuuahL8C5hRzypjbZ07169fPBAQEmO+++866YcLRo0ed9uevgHnFvDLm9plXQ4cONStWrDAHDhwwP/74oxk6dKhxOBzmm2++ydPy2e6oYHa5Sy/8XLJkialWrZrx9PQ0NWrUMN99912OC8mnTZtmwsLCjIuLS45bpV7u0jd89OjRpmTJkuaPP/6w+ufPn288PDzMtm3bnJZbs2aNkWQefPDBHOvcsGGDad68ufH19TU+Pj6mRo0a5uWXX7b6//zzTzNw4EBTsmRJ4+HhYSpVqmTef/99q3/MmDEmNDTUOBwOa7///PNP88wzz5jixYtf9Vapl07Kf/3rXyYwMDDXCzrT09NNYGCgefPNN40xF3+YtGjRwhQpUsT4+fmZxo0bm/379xtjLk7u7P1RLrdKvdTkyZONJNO9e/cc23zjjTdMyZIljbe3t4mNjTUffPBBjpqfeuopU6xYsQK5Veql8jMpjTHm7bffNmXLljUeHh6mbt26Zt26dXle9nbBvGJeZbsd5lVut0OW8v7FsbcD5hRzKtvtMKdym0+STEJCQp6Wv10wr5hX2W6HedWrVy9Trlw54+HhYUqUKGGaNm2a71BmjDEOY4zJ3zE2AAAAAEBBumOuMQMAAACAvyqCGZAHhw8fdrqN7eWPvNxyF4Az5hVQsJhTQMG7lfOKUxmBPMjIyNDBgwev2F++fHm5ubnduoKAOwDzCihYzCmg4N3KeUUwAwAAAACbcSojAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQCgUGvSpIkGDBhgdxkAgEKOYAYAuC49evSQw+HQK6+84tS+cOFCORyOfK2rfPnymjhxYgFWd/McPHhQDodD27Zts7sUAMAdhGAGALhuXl5eevXVV3XixAm7S8m38+fP211Cgbpw4YLdJQAAbgDBDABw3Zo1a6bQ0FCNHz/+quO+//57NW7cWN7e3goLC9Ozzz6rM2fOSLp4KuGhQ4c0cOBAORwOORwOGWNUokQJzZs3z1pHVFSUSpYs6bROT09PnT17VpJ0+PBhtWvXTr6+vvL391enTp2UnJxsjR81apSioqL03nvvKTw8XF5eXrnW+t///lcBAQGaNWvWdb0m+/fvV7t27RQSEiJfX1/de++9+vbbb63+MWPG6O67786xXFRUlIYPH249f++991StWjV5eXmpatWqmjx5stWXfdTu448/VkxMjLy8vDRr1iwdOnRIbdu2VdGiReXj46Pq1avryy+/vK79AADcWgQzAMB1c3V11bhx4/T222/rl19+yXXM/v371bJlS3Xo0EE//vijPv74Y33//ffq37+/JGnBggUqU6aMxowZo6NHj+ro0aNyOBy677779N1330mSTpw4oZ07d+rPP//Url27JEkrVqzQvffeqyJFiigrK0vt2rVTSkqKVqxYoSVLlujnn3/WY4895lTLvn37NH/+fC1YsCDXUxFnz56tLl26aNasWeratet1vSanT5/Wgw8+qKVLl2rr1q1q2bKl2rZtq8OHD0uSevXqpZ07d2rjxo3WMlu3btWPP/6onj17SpJmzZqlESNG6OWXX9bOnTs1btw4DR8+XDNnznTa1tChQ/Xcc89p586dio2NVXx8vNLT07Vy5Upt375dr776qnx9fa9rPwAAt5ab3QUAAP7aHnnkEUVFRWnkyJGaPn16jv7x48era9eu1g02KleurLfeeksxMTGaMmWKgoKC5OrqKj8/P4WGhlrLNWnSRO+++64kaeXKlapVq5ZCQ0P13XffqWrVqvruu+8UExMjSVq6dKm2b9+uAwcOKCwsTJL0wQcfqHr16tq4caPuvfdeSRdPX/zggw9UokSJHHVOmjRJ//jHP/TFF19Y670eNWvWVM2aNa3nY8eO1aeffqrPP/9c/fv3V5kyZRQbG6uEhASrroSEBMXExKhChQqSpJEjR+r1119X+/btJUnh4eFKTEzUu+++q7i4OGvdAwYMsMZIF48adujQQZGRkZJkrQ8AcPvjiBkA4Ia9+uqrmjlzpnbu3Jmj74cfftCMGTPk6+trPWJjY5WVlaUDBw5ccZ0xMTFKTEzU77//rhUrVqhJkyZq0qSJvvvuO124cEFr1qxRkyZNJEk7d+5UWFiYFcokKSIiQoGBgU41lStXLtdQNm/ePA0cOFBLliy5oVAmXTxiNnjwYFWrVk2BgYHy9fXVzp07rSNmkvTEE0/oo48+0rlz53T+/HnNnj1bvXr1kiSdOXNG+/fvV+/evZ1es5deekn79+932ladOnWcnj/77LN66aWX1LBhQ40cOVI//vjjDe0LAODWIZgBAG7Yfffdp9jYWA0bNixH3+nTp9W3b19t27bNevzwww/au3evKlaseMV1RkZGKigoSCtWrHAKZitWrNDGjRt14cIFNWjQIF91+vj45Npeq1YtlShRQu+//76MMfla5+UGDx6sTz/9VOPGjdOqVau0bds2RUZGOt1spG3btvL09NSnn36qL774QhcuXNCjjz4q6eLrJUnTpk1zes1++uknrVu37qr706dPH/3888/q1q2btm/frjp16ujtt9++of0BANwanMoIACgQr7zyiqKionTXXXc5td9zzz1KTExUpUqVrrish4eHMjMzndocDocaN26szz77TDt27FCjRo1UpEgRpaen691331WdOnWsYFKtWjUdOXJER44csY6aJSYm6uTJk4qIiLhm7RUrVtTrr7+uJk2ayNXVVf/+97/zu/uW1atXq0ePHnrkkUckXQxaBw8edBrj5uamuLg4JSQkyMPDQ507d5a3t7ckKSQkRKVKldLPP/98Xde5hYWF6amnntJTTz2lYcOGadq0aXrmmWeue38AALcGwQwAUCAiIyPVtWtXvfXWW07tL7zwgurXr6/+/furT58+8vHxUWJiopYsWWIFoPLly2vlypXq3LmzPD09Vbx4cUk
XrzP7+9//rjp16lg3sbjvvvs0a9YsDRkyxNpGs2bNrO1PnDhRGRkZevrppxUTE5PjdL8rqVKlipYvX64mTZrIzc3tmt+rtnv37hxt1atXV+XKlbVgwQK1bdtWDodDw4cPV1ZWVo6xffr0UbVq1SRdDHOXGj16tJ599lkFBASoZcuWSk9P16ZNm3TixAkNGjToijUNGDBArVq1UpUqVXTixAktX77c2gYA4PbGqYwAgAIzZsyYHCGkRo0aWrFihfbs2aPGjRurVq1aGjFihEqVKuW03MGDB1WxYkWna8BiYmKUmZlpXUsmXQxrl7c5HA599tlnKlq0qO677z41a9ZMFSpU0Mcff5yv+u+66y4tW7ZMH330kf7+979fdWznzp1Vq1Ytp0dycrLeeOMNFS1aVA0aNFDbtm0VGxure+65J8fylStXVoMGDVS1alXVq1fPqa9Pnz567733lJCQoMjISMXExGjGjBkKDw+/ak2ZmZmKj49XtWrV1LJlS1WpUsXpNvsAgNuXw9zoyfQAACDfjDGqXLmynn766aseBQMAFA6cyggAwC32+++/a86cOUpKSrK+uwwAULgRzAAAuMWCg4NVvHhxTZ06VUWLFrW7HADAbYBgBgDALcZVBACAy3HzDwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZv8Ps17pkj9surgAAAAASUVORK5CYII=", - "text/plain": [ - "
    " - ] - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "In the original model (pe=simd=1): \n", + "Layer: MatrixVectorActivation_0\n", + "Input stream width: 1\n", + "Output stream width: 2\n", + "Layer: MatrixVectorActivation_1\n", + "Input stream width: 2\n", + "Output stream width: 2\n", + "Layer: MatrixVectorActivation_2\n", + "Input stream width: 2\n", + "Output stream width: 2\n", + "Layer: MatrixVectorActivation_3\n", + "Input stream width: 2\n", + "Output stream width: 1\n" + ] } ], "source": [ - "# Extracting LUTs from res_dict\n", - "LUTs_updated = [res_dict[key][\"LUT\"] for key in res_dict_updated.keys()] \n", - "\n", - "#Plotting the bar graph of each network layer with their corresponding LUT resource utilization\n", - "fig = plt.figure(figsize = (10, 5))\n", - "plt.bar(res_dict_updated.keys(), LUTs_updated, color ='green', width = 0.3)\n", - "plt.xlabel(\"Network Layers\")\n", - "plt.ylabel(\"LUT Utilisation\")\n", - "plt.title(\"Estimated LUT values used for each network layer\")\n", - "plt.show()" + "# Original model\n", + "list_of_mvaus = model_orig.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "print(\"In the original model (pe=simd=1): \")\n", + "for mvau in list_of_mvaus:\n", + " mvau_inst = getCustomOp(mvau)\n", + " print(\"Layer: \" + mvau.name)\n", + " print(\"Input stream width: \" + str(mvau_inst.get_instream_width()))\n", + " print(\"Output stream width: \" + str(mvau_inst.get_outstream_width()))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "From these numbers, we see that the first layer has been removed as the bottleneck and that the entire network can now perform one inference in ~4096 clock cycles (when the pipeline is full) as compared to the earlier configuration where it took ~38400 execution cycles.\n", - "\n", - "This decrease in execution latency of the network though comes at a cost of a 45% increase in LUT resource utilization for layer 1 of the network.\n", - "\n", - "We now observe the `instream_width` and `outstream_width` of our network with the updated folding parameters and then apply the `InsertDWC()` transform to it in case there is a mismatch in these widths due to the updates. " + "In the original model the output stream width of one layer matches the input stream width of the following layer. So there would be no DWC required when generating the final design." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the updated model, the situation is different. Let's have a look how the stream widths have changed." 
] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Instream Width = 5 Outstream Width = 4\n", - "Instream Width = 2 Outstream Width = 2\n", - "Instream Width = 2 Outstream Width = 2\n", - "Instream Width = 2 Outstream Width = 1\n" + "In the original model (pe=simd=1): \n", + "Layer: MatrixVectorActivation_0\n", + "Input stream width: 5\n", + "Output stream width: 4\n", + "Layer: MatrixVectorActivation_1\n", + "Input stream width: 2\n", + "Output stream width: 2\n", + "Layer: MatrixVectorActivation_2\n", + "Input stream width: 2\n", + "Output stream width: 2\n", + "Layer: MatrixVectorActivation_3\n", + "Input stream width: 2\n", + "Output stream width: 1\n" ] } ], "source": [ - "fc_layers = model.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", - "for fcl in fc_layers:\n", - " fcl_inst = getCustomOp(fcl)\n", - " print('Instream Width =',(fcl_inst.get_instream_width()),'Outstream Width =',int(fcl_inst.get_outstream_width()))" + "# Updated model\n", + "list_of_mvaus = model_updated.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", + "print(\"In the original model (pe=simd=1): \")\n", + "for mvau in list_of_mvaus:\n", + " mvau_inst = getCustomOp(mvau)\n", + " print(\"Layer: \" + mvau.name)\n", + " print(\"Input stream width: \" + str(mvau_inst.get_instream_width()))\n", + " print(\"Output stream width: \" + str(mvau_inst.get_outstream_width()))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, the output stream width of MatrixVectorActivation_0 has now changed to `4`, while the input stream width of MatrixVectorActivation_1 stayed `2`. So, the FINN compiler would insert a DWC between these nodes, we can manually invoke this behavior by calling the transformation `InsertDWC` on our model." ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ - "model = model.transform(InsertDWC())" + "from finn.transformation.fpgadataflow.insert_dwc import InsertDWC\n", + "from qonnx.transformation.general import GiveUniqueNodeNames\n", + "\n", + "model_updated = model_updated.transform(InsertDWC())\n", + "model_updated = model_updated.transform(GiveUniqueNodeNames())" ] }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 40, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Stopping http://0.0.0.0:5901\n", - "Serving './cybsec_DWC_inserted.onnx' at http://0.0.0.0:5901\n" + "Stopping http://0.0.0.0:5920\n", + "Serving 'cybsec_DWC.onnx' at http://0.0.0.0:5920\n" ] }, { @@ -2156,7 +924,7 @@ " " + "" ] }, - "execution_count": 24, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "model.save(\"./cybsec_DWC_inserted.onnx\")\n", - "showInNetron(\"./cybsec_DWC_inserted.onnx\",localhost_url='xirxlabs53')" + "model_updated.save(\"cybsec_DWC.onnx\")\n", + "showInNetron(\"cybsec_DWC.onnx\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Because there is a mismatch in the `outstream_width` (4) of layer 1 and the `inputstream_width` (2) of layer 2 the FINN compiler inserts the `StreamingDataWidthConverter` layer to remedy this when we call that transformation for our network above.\n", - "\n", - "On expanding this layer in the netron we see that the `inWidth` of this layer is 4 and the `outWidth` is 2.\n", - "\n", - "Note, we do not see this insertion where these widths match. 
They are only mismatched for the first two layers and hence we see that the data width converter is being inserted there." + "We can observe in the model that a DWC was inserted between the first two layers.\n", + "Since the DWC will also be a hardware block in our final FINN design, it has a latency and resources associated with it. Let's have a final look in our resource estimates." ] }, { "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [], - "source": [ - "res_dict_DWC = []\n", - "res_dict_DWC = res_estimation(model)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, + "execution_count": 42, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['MatrixVectorActivation_0', '', 'MatrixVectorActivation_1', 'MatrixVectorActivation_2', 'MatrixVectorActivation_3']\n" - ] - }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA/wAAAHWCAYAAADKCYKCAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABc+UlEQVR4nO3deZxPdf//8efH7GY1mBnLYGwxGkaEsY2yDCFFxCXGVhIKUXyvkBaq6yraVFToipStxZVKCNnXkp2sMUPGzFgyZnn//vCbw8cMZhgz07ke99vtc8vnfbbXOZ/Pe07Pz9kcxhgjAAAAAABgK0UKugAAAAAAAJD3CPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAkI+aNWumZs2aFXQZeergwYNyOByaPn16QZdSoNgOOTd9+nQ5HA4dPHjwhuN+9913ioyMlKenpxwOhxITE297ffnN4XBo0KBBBV1GoZb5ndm4cWOup/3pp5/kcDj0008/5X1hAFDIEfgBQJf/Z/Jar7Vr1+Z4Xjt27NDzzz+fozCTnyZPnlygYTTzf7rnzp17zXGuF3zmzp1r/U975rxy8sLf16lTp9SlSxd5eXnp3Xff1X/+8x95e3sXdFm2t3r1aj3//PO2/HEFAP7XuBZ0AQBQmLzwwgsKCwvL0l65cuUcz2PHjh0aN26cmjVrpgoVKjgN++GHH261xJs2efJklShRQr169SqwGvJK9erV9Z///MepbdSoUfLx8dE///nPAqoKeW3Dhg06c+aMXnzxRbVo0aKgy/mfsXr1ao0bN069evVSQEBAQZcDALgFBH4AuEKbNm1Ut27d2zZ/d3f32zbv/yXBwcF65JFHnNpeeeUVlShRIks7/r5OnDghSXkaOs+dO8dZAn8jFy5csP3fTb6TAG4nTukHgFyaPXu26tSpI19fX/n5+SkiIkJvvvmmpEuXBnTu3FmSdM8991inlWdeO3r1NfyZp6Z/8cUXGjdunMqUKSNfX1899NBDSkpKUkpKioYMGaKgoCD5+Piod+/eSklJcapn2rRpuvfeexUUFCQPDw+Fh4frvffecxqnQoUK2r59u5YvX27VdGUdiYmJGjJkiEJDQ+Xh4aHKlSvr1VdfVUZGhtN8EhMT1atXL/n7+ysgIECxsbF/y9N+4+Pj5erqqnHjxmUZtnv3bjkcDr3zzjuSpISEBA0fPlwRERHy8fGRn5+f2rRpo19++eWGy7nWPRt69eqV5eyPjIwMTZo0STVq1JCnp6eCg4PVv39/nT592mm8jRs3KiYmRiVKlJCXl5fCwsLUp0+fG9bicDj0/PPPZ2mvUKGC01kfqampGjdunKpUqSJPT08VL15cjRs31uLFi52m27Vrlx566CEFBgbK09NTdevW1ddff51l/tu3b9e9994rLy8vlS1bVi+99FKW71V2mjVrptjYWEnS3XffLYfD4VTnnDlzVKdOHXl5eVk/9Pzxxx9O8+jVq5d8fHy0f/9+3XffffL19VX37t2vu9w//vhDffr0UXBwsDw8PFSjRg19/PHHTuNcvHhRY8aMUZ06deTv7y9vb281adJEy5YtyzK/jIwMvfnmm4qIiJCnp6dKliyp1q1bZ3st+pdffqk777zTWu533313w+105d+Ql19+WWXLlpWnp6eaN2+uffv2ZRl/3bp1at26tfz9/VW0aFFFR0dr1apV1vDnn39eI0aMkCSFhYVZfy8OHjyojh076q677nKaX/v27eVwOJw++3Xr1snhcGjRokVW2++//67OnTsrMDBQRYsWVYMGDfTf//4323WZPXu2nnvuOZUpU0ZFixZVcnJytut++vRp1atXT2XLltXu3btvuK2utHLlSnXu3FnlypWTh4eHQkNDNXToUP3111/WONOmTZPD4dCWLVuyTD9+/Hi5uLg4fedutG2lS9vX4XBox44d+sc//qFixYqpcePGuaodAHKDI/wAcIWkpCT9+eefTm0Oh0PFixeXJC1evFjdunVT8+bN9eqrr0qSdu7cqVWrVumpp55S06ZN9eSTT+qtt97S//3f/6l69eqSZP33WiZMmCAvLy+NHDlS+/bt09tvvy03NzcVKVJEp0+f1vPPP6+1a9dq+vTpCgsL05gxY6xp33vvPdWoUUP333+/XF1d9c033+iJJ55QRkaGBg4cKEmaNGmSBg8e7HTKe3BwsCTp/Pnzio6O1h9//KH+/furXLlyWr16tUaNGqXjx49r0qRJkiRjjDp06KCff/5Zjz/+uKpXr64FCxZYoezvJDg4WNHR0friiy80duxYp2Gff/65XFxcrB9ufv/9d3355Zfq3LmzwsLCFB8frw8++EDR0dHasWOHSpcunSc19e/fX9OnT1fv3r315JNP6sCBA3rnnXe0ZcsWrVq1Sm5ubjpx4oRatWqlkiVLauTIkQoICNDBgwc1f/78PKlBuhRIJkyYoH79+qlevXpKTk7Wxo0btXnzZrVs2VLSpRDfqFEjlSlTRiNHjpS3t7e++OILPfDAA5o3b54efPBBSVJcXJzuuecepaWlWeNNmTJFXl5eN6zjn//8p+644w5NmTLFutSmUqVKkmRtp7vvvlsTJkxQf
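+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch of that final check, assuming `res_estimation` has already been imported earlier in this notebook and that `model_updated` already contains the inserted DWC: summing the per-node LUT estimates makes the cost of the extra StreamingDataWidthConverter node visible next to the MVAU layers."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: per-node and total LUT estimates, including the inserted DWC\n",
+    "res_after_dwc = model_updated.analysis(res_estimation)\n",
+    "for node_name, res in res_after_dwc.items():\n",
+    "    print(node_name, res[\"LUT\"])\n",
+    "print(\"Total LUT estimate:\", sum(res[\"LUT\"] for res in res_after_dwc.values()))"
+   ]
+  },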
Hy83nzzTa1atUpbtmxxOiMgLS1NMTExaty4sf7973+raNGi11xmfHy8GjRoYN1LomTJklq0aJH69u2r5ORkDRkyRJKUnJysDz/8UN26ddOjjz6qM2fO6KOPPlJMTIzWr1+vyMhIa559+/bV9OnT1aZNG/Xr109paWlauXKl1q5d63Q20c8//6z58+friSeekK+vr9566y116tRJhw8ftv7+XM8rr7yiIkWKaPjw4UpKStJrr72m7t27a926ddY4S5cuVZs2bVSnTh2NHTtWRYoUsX4wXLlyperVq6eOHTtqz549+uyzzzRx4kSVKFFCklSyZEk1adJEX331lZKTk+Xn5ydjjFatWqUiRYpo5cqVuv/++yVdCtNFihRRo0aNrO3asGFDnT9/Xk8++aSKFy+uGTNm6P7779fcuXOt70umF198Ue7u7ho+fLhSUlKyPcL/559/qmXLlkpISNDy5cut70ZOzZkzR+fPn9eAAQNUvHhxrV+/Xm+//baOHj2qOXPmSJIeeughDRw4UDNnzlTt2rWdpp85c6aaNWumMmXK5HjbXqlz586qUqWKxo8fL2NMrmoHgFwxAAAzbdo0Iynbl4eHhzXeU089Zfz8/ExaWto15zVnzhwjySxbtizLsOjoaBMdHW29X7ZsmZFk7rzzTnPx4kWrvVu3bsbhcJg2bdo4TR8VFWXKly/v1Hb+/Pksy4mJiTEVK1Z0aqtRo4bTsjO9+OKLxtvb2+zZs8epfeTIkcbFxcUcPnzYGGPMl19+aSSZ1157zRonLS3NNGnSxEgy06ZNyzLvK2Wu65w5c645jiQzcODAbIddb7teb/2u5YMPPjCSzLZt25zaw8PDzb333mu9v3DhgklPT3ca58CBA8bDw8O88MILTm1Xb4erP+9MsbGxTp/jypUrjSQzc+ZMp/G+++47p/YFCxYYSWbDhg05Xs9MkszYsWOztJcvX97ExsZa72vVqmXatm173Xk1b97cREREmAsXLlhtGRkZpmHDhqZKlSpW25AhQ4wks27dOqvtxIkTxt/f30gyBw4cuO5yMvvllet78eJFExQUZO68807z119/We0LFy40ksyYMWOsttjYWCPJjBw58rrLydS3b19TqlQp8+effzq1d+3a1fj7+1t9LS0tzaSkpDiNc/r0aRMcHGz69OljtS1dutRIMk8++WSWZWVkZFj/lmTc3d3Nvn37rLZffvnFSDJvv/32dWvO7FfVq1d3qunNN990+n5nZGSYKlWqmJiYGKdlnz9/3oSFhZmWLVtabf/617+y/Xw2bNhgJJlvv/3WGGPMr7/+aiSZzp07m/r161vj3X///aZ27drW+8zvwcqVK622M2fOmLCwMFOhQgWrf2WuS8WKFbP8Xbvyu3D8+HFTo0YNU7FiRXPw4MHrbp8r53vl347s/m5OmDDBOBwOc+jQIautW7dupnTp0k5/AzZv3uzU13OzbceOHWskmW7dut2wbgDIC5zSDwBXePfdd7V48WKn15WnpQYEBOjcuXNZTm++VT179pSbm5v1vn79+jLGZDlVu379+jpy5IjS0tKstiuPlmaeoRAdHa3ff/9dSUlJN1z2nDlz1KRJExUrVkx//vmn9WrRooXS09O1YsUKSdK3334rV1dXDRgwwJrWxcVFgwcPvun1LkgdO3aUq6urPv/8c6vtt99+044dO/Twww9bbR4eHipS5NLuMj09XadOnZKPj4/uuOMObd68OU9qmTNnjvz9/dWyZUunz6BOnTry8fGxThXPPHK9cOFCpaam5smyrxYQEKDt27dr79692Q5PSEjQ0qVL1aVLF505c8aq9dSpU4qJidHevXut05y//fZbNWjQwOnoZsmSJW94Wv31bNy4USdOnNATTzwhT09Pq71t27aqVq1altPEJTl9Z6/FGKN58+apffv2MsY4fQ4xMTFKSkqyPm8XFxfrqHNGRoYSEhKUlpamunXrOn0n5s2bJ4fDkeUsEklZniDRokULp6PUNWvWlJ+fn37//fcb1i5JvXv3djoS3qRJE0mypt+6dav27t2rf/zjHzp16pS1bufOnVPz5s21YsWKG15qUbt2bfn4+Fh/E1auXKmyZcuqZ8+e2rx5s86fPy9jjH7++Wdr+dKl70G9evWcTl338fHRY489poMHD2rHjh1Oy4mNjb3mWSBHjx5VdHS0UlNTtWLFCpUvXz5H2+dqV87/3Llz+vPPP9WwYUMZY5xO4e/Zs6eOHTvmdLnGzJkz5eXlpU6dOkm6uW37+OOP31TdAJBbnNIPAFeoV6/edW/a98QTT+iLL75QmzZtVKZMGbVq1UpdunRR69atb2m55cqVc3rv7+8vSQoNDc3SnpGRoaSkJOs031WrVmns2LFas2aNzp8/7zR+UlKSNa9r2bt3r3799VeVLFky2+GZN047dOiQSpUqJR8fH6fhd9xxxw3WLm/l1aP2SpQooebNm+uLL77Qiy++KOnS6fyurq7q2LGjNV7mNdiTJ0/WgQMHlJ6ebg3LyanWObF3714lJSUpKCgo2+GZn0F0dLQ6deqkcePGaeLEiWrWrJkeeOAB/eMf/5CHh0ee1PLCCy+oQ4cOqlq1qu688061bt1aPXr0UM2aNSVJ+/btkzFGo0eP1ujRo69Zb5kyZXTo0CHVr18/y/Bb+c4cOnTomvOoVq2afv75Z6c2V1dXlS1b9obzPXnypBITEzVlyhRNmTIl23EyPwdJmjFjhl5//XXt2rXL6ceXK5/ysX//fpUuXVqBgYE3XP7VfwMkqVixYlnu4ZDT6YsVKyZJ1vSZP+Bc7xKcpKQka7rsuLi4KCoqSitXrpR0KfA3adJEjRs3Vnp6utauXavg4GAlJCQ4Bf5rfQ8yL3U6dOiQ7rzzTqs9uyelZOrRo4dcXV21c+dOhYSEXHO8Gzl8+LDGjBmjr7/+Oss2vvKH0pYtW6pUqVKaOXOmmjdvroyMDH322Wfq0KGDfH19Jd3ctr3eOgJAXiLwA0AuBAUFaevWrfr++++1aNEiLVq0SNOmTVPPnj01Y8aMm56vi4tLrtrN/7/mc//+/WrevLmqVaumN954Q6GhoXJ3d9e3336riRMn5ujmaBkZGWrZsqWeeeaZbIdXrVo1h2tx6zw8PJxumnWlzB8zrjyqe6u6du2q3r17a+vWrYqMjNQXX3yh5s2bW9ctS5duzjV69Gj16dNHL774ogIDA1WkSBENGTLkhtvX4XBke33ulT8aSJc+g6CgIM2cOTPb+WT+GONwODR37lytXbtW33zzjb7//nv16dNHr7/+utauXZvlx5icuLqWpk2bav/+/frqq6/0ww8/6MMPP9TEiRP1/vvvq1+/ftY6Dx8+XDExMdnOMzePsbzdrjxD43oy1+uRRx65ZnDL/NHj008/Va9evfTAAw9oxIgRCgoKkouLiyZMmKD9+/ffVJ036uu3On3m+v3rX/9yusfAlXLy/WncuLFefvllXbhwQStXrtQ///lPBQQE6M4779TKlSute4NcGfhz63r3eOjYsaM++eQTvfnmm5owYcJNzT89Pd26/v/Z
Z59VtWrV5O3trT/++EO9evVy6tcuLi76xz/+oalTp2ry5MlatWqVjh075vQ0kJvZtjm5jwUA5AUCPwDkkru7u9q3b6/27dsrIyNDTzzxhD744AONHj1alStXzrMj0DnxzTffKCUlRV9//bXTEb7s7hZ+rboqVaqks2fP3vA55+XLl9eSJUt09uxZp/95ze3dsW+0jGvNL7P9Zk/hzc4DDzyg/v37W6f179mzR6NGjXIaZ+7cubrnnnv00UcfObUnJiY6/TCQnWLFimV7SnbmUepMlSpV0o8//qhGjRrlKAg0aNBADRo00Msvv6xZs2ape/fumj17tvr163fdWq5+osLFixd1/PjxLOMGBgaqd+/e6t27t86ePaumTZvq+eefV79+/VSxYkVJkpubW46+M9ldGnAr35nMz3/37t269957s8z3Zr8fJUuWlK+vr9LT02+4XnPnzlXFihU1f/58p3519an7lSpV0vfff6+EhIQcHeW/nTIvF/Dz87vh+l3vb1iTJk108eJFffbZZ/rjjz+sYN+0aVMr8FetWtUK/tK1+/WuXbus4Tk1ePBgVa5cWWPGjJG/v79GjhyZ42kzbdu2TXv27NGMGTPUs2dPq/1al2r17NlTr7/+ur755hstWrRIJUuWdPqxKzfbFgDyG9fwA0AunDp1yul9kSJFrKN+mY/Ly3yecn48ri7zqN6VRwGTkpI0bdq0LON6e3tnW1OXLl20Zs0aff/991mGJSYmWvcLuO+++5SWlub0yL/09HS9/fbbt7oalvvuu09r167Vpk2bstQxc+ZMRUZG3tJpvFcLCAhQTEyMvvjiC82ePVvu7u564IEHnMZxcXHJcpR1zpw5WR4Bl51KlSpp165dOnnypNX2yy+/ZHlUV5cuXZSenm5dWnCltLQ063M7ffp0lloyjyhe/bjG7GrJvPY605QpU7Ic4b/6O+7j46PKlStb8w8KClKzZs30wQcfZPtjwZXrmvl5rl+/3mn4tc5kyIm6desqKChI77//vtM6L1q0SDt37lTbtm1var4uLi7q1KmT5s2bp99++y3L8CvXK7t+t27dOq1Zs8Zpmk6dOskYk+3jH3N65D6v1KlTR5UqVdK///1vnT17NsvwK9fven/D6tevLzc3N7366qsKDAxUjRo1JF36IWDt2rVavnx5lqP79913n9avX++0fc6dO6cpU6aoQoUKCg8Pz9W6jB49WsOHD9eoUaOyPII0J7L7/Iwx1uNVr1azZk3VrFlTH374oebNm6euXbvK1fXyMbPcbFsAyG8c4QeAKyxatMg66nSlhg0bqmLFiurXr58SEhJ07733qmzZsjp06JDefvttRUZGWtejRkZGysXFRa+++qqSkpLk4eGhe++995rXZ9+KVq1aWWcc9O/fX2fPntXUqVMVFBSUJYzVqVNH7733nl566SVVrlxZQUFBuvfeezVixAh9/fXXateunXr16qU6dero3Llz2rZtm+bOnauDBw+qRIkSat++vRo1aqSRI0fq4MGDCg8P1/z583N0Y8ArzZs3L9ttHBsbq5EjR2rOnDlq2rSp+vfvr2rVqunYsWOaPn26jh8/nu0PGbfq4Ycf1iOPPKLJkycrJibG6ZFuktSuXTu98MIL6t27txo2bKht27Zp5syZ1pHu6+nTp4/eeOMNxcTEqG/fvjpx4oTef/991ahRw+nZ4tHR0erfv78mTJigrVu3qlWrVnJzc9PevXs1Z84cvfnmm3rooYc0Y8YMTZ48WQ8++KAqVaqkM2fOaOrUqfLz89N999133Vr69eunxx9/XJ06dVLLli31yy+/6Pvvv89ylkJ4eLiaNWumOnXqKDAwUBs3btTcuXM1aNAga5x3331XjRs3VkREhB599FFVrFhR8fHxWrNmjY4ePapffvlFkvTMM8/oP//5j1q3bq2nnnrKeixf+fLl9euvv95w+2UnM2z27t1b0dHR6tatm/VYvgoVKmjo0KE3NV/p0qPtli1bpvr16+vRRx9VeHi4EhIStHnzZv34449KSEiQdOk7MX/+fD344INq27atDhw4oPfff1/h4eFOge+ee+5Rjx499NZbb2nv3r1q3bq1MjIytHLlSt1zzz1O2/R2K1KkiD788EO1adNGNWrUUO/evVWmTBn98ccfWrZsmfz8/PTNN99IuvS3Qrr0aMSuXbvKzc1N7du3l7e3t4oWLao6depo7dq1at++vXU2QNOmTXXu3DmdO3cuS+AfOXKkPvvsM7Vp00ZPPvmkAgMDNWPGDB04cEDz5s3L0SUXV/vXv/6lpKQkDRw4UL6+vk6n2N9ItWrVVKlSJQ0fPlx//PGH/Pz8NG/evOveL6Fnz54aPny4JGVZVm62LQDku/x+LAAAFEbXeyyfrnj80ty5c02rVq1MUFCQcXd3N+XKlTP9+/c3x48fd5rf1KlTTcWKFY2Li4vT46Cu9Vi+qx9Vl93jyIy5/EinkydPWm1ff/21qVmzpvH09DQVKlQwr776qvn444+zPFYrLi7OtG3b1vj6+hpJTnWcOXPGjBo1ylSuXNm4u7ubEiVKmIYNG5p///vfTo8LPHXqlOnRo4fx8/Mz/v7+pkePHmbLli25eizftV6Zj+w6evSo6devnylTpoxxdXU1gYGBpl27dmbt2rXXnX9uH8uXKTk52Xh5eRlJ5tNPP80y/MKFC+bpp582pUqVMl5eXqZRo0ZmzZo1WT7L7B7LZ4wxn376qalYsaJxd3c3kZGR5vvvv8/yWL5MU6ZMMXXq1DFeXl7G19fXREREmGeeecYcO3bMGHPpcWDdunUz5cqVMx4eHiYoKMi0a9fObNy48YbrmZ6ebp599llTokQJU7RoURMTE2P27duX5bF8L730kqlXr54JCAgwXl5eplq1aubll192+h4YY8z+/ftNz549TUhIiHFzczNlypQx7dq1M3PnznUa79dffzXR0dHG09PTlClTxrz44ovmo48+uunH8mX6/PPPTe3atY2Hh4cJDAw03bt3N0ePHnUaJzY21nh7e99w21wpPj7eDBw40ISGhho3NzcTEhJimjdvbqZMmWKNk5GRYcaPH2/Kly9vPDw8TO3atc3ChQuz/VzT0tLMv/71L1OtWjXj7u5uSpYsadq0aWM2bdpkjaNrPI7y6s8mO9f6G3Kt7+OWLVtMx44dTfHixY2Hh4cpX7686dKli1myZInTeC+++KIpU6aMKVKkSJbPasSIEUaSefXVV52mqVy5spFk9u/fn6XO/fv3m4ceesgEBAQYT09PU69ePbNw4cIcrYsx2X8X0tPTTbdu3Yyrq6v58ssvb7iNrnws344dO0yLFi2Mj4+PKVGihHn00UetRyFm97fs+PHjxsXFxVStWvWay8nJts3ubzgA3E4OY/L5nDIAAADgb+TPP/9UqVKlNGbMmGs+nQIACiOu4QcAAACuY/r06UpPT1ePHj0KuhQAyBWu4QcAAACysXTpUu3YsUMvv/yyHnjgAVWoUKGgSwKAXOGUfgAAACAbzZo10+rVq9WoUSN9+umnKlOmTEGXBAC5QuAHAAA
AAMCGuIYfAAAAAAAbIvADAAAAAGBD3LRPUkZGho4dOyZfX185HI6CLgcAAAAAYHPGGJ05c0alS5dWkSK351g8gV/SsWPHFBoaWtBlAAAAAAD+xxw5ckRly5a9LfMm8Evy9fWVdGlD+/n5FXA1AAAAAAC7S05OVmhoqJVHbwcCv2Sdxu/n50fgBwAAAADkm9t5WTk37QMAAAAAwIYI/AAAAAAA2BCBHwAAAAAAGyLwAwAAAABgQwR+AAAAAABsiMAPAAAAAIANFXjg/+OPP/TII4+oePHi8vLyUkREhDZu3GgNN8ZozJgxKlWqlLy8vNSiRQvt3bvXaR4JCQnq3r27/Pz8FBAQoL59++rs2bP5vSoAAAAAABQaBRr4T58+rUaNGsnNzU2LFi3Sjh079Prrr6tYsWLWOK+99preeustvf/++1q3bp28vb0VExOjCxcuWON0795d27dv1+LFi7Vw4UKtWLFCjz32WEGsEgAAAAAAhYLDGGMKauEjR47UqlWrtHLlymyHG2NUunRpPf300xo+fLgkKSkpScHBwZo+fbq6du2qnTt3Kjw8XBs2bFDdunUlSd99953uu+8+HT16VKVLl75hHcnJyfL391dSUpL8/PzybgUBAAAAAMhGfuTQAj3C//XXX6tu3brq3LmzgoKCVLt2bU2dOtUafuDAAcXFxalFixZWm7+/v+rXr681a9ZIktasWaOAgAAr7EtSixYtVKRIEa1bty7b5aakpCg5OdnpBQAAAACAnRRo4P/999/13nvvqUqVKvr+++81YMAAPfnkk5oxY4YkKS4uTpIUHBzsNF1wcLA1LC4uTkFBQU7DXV1dFRgYaI1ztQkTJsjf3996hYaG5vWqAQAAAABQoAo08GdkZOiuu+7S+PHjVbt2bT322GN69NFH9f7779/W5Y4aNUpJSUnW68iRI7d1eQAAAAAA5LcCDfylSpVSeHi4U1v16tV1+PBhSVJISIgkKT4+3mmc+Ph4a1hISIhOnDjhNDwtLU0JCQnWOFfz8PCQn5+f0wsAAAAAADsp0MDfqFEj7d6926ltz549Kl++vCQpLCxMISEhWrJkiTU8OTlZ69atU1RUlCQpKipKiYmJ2rRpkzXO0qVLlZGRofr16+fDWgAAAAAAUPi4FuTChw4dqoYNG2r8+PHq0qWL1q9frylTpmjKlCmSJIfDoSFDhuill15SlSpVFBYWptGjR6t06dJ64IEHJF06I6B169bWpQCpqakaNGiQunbtmqM79AMAAAAAYEcF+lg+SVq4cKFGjRqlvXv3KiwsTMOGDdOjjz5qDTfGaOzYsZoyZYoSExPVuHFjTZ48WVWrVrXGSUhI0KBBg/TNN9+oSJEi6tSpk9566y35+PjkqIa/1WP5HI6CriD/FOxXEwAAAABum/zIoQUe+AsDAn8hxVcTAAAAgE3lRw4t0Gv4AQAAAADA7UHgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhgj8AAAAAADYUIEG/ueff14Oh8PpVa1aNWv4hQsXNHDgQBUvXlw+Pj7q1KmT4uPjneZx+PBhtW3bVkWLFlVQUJBGjBihtLS0/F4VAAAAAAAKFdeCLqBGjRr68ccfrfeurpdLGjp0qP773/9qzpw58vf316BBg9SxY0etWrVKkpSenq62bdsqJCREq1ev1vHjx9WzZ0+5ublp/Pjx+b4uAAAAAAAUFgUe+F1dXRUSEpKlPSkpSR999JFmzZqle++9V5I0bdo0Va9eXWvXrlWDBg30ww8/aMeOHfrxxx8VHBysyMhIvfjii3r22Wf1/PPPy93dPb9XBwAAAACAQqHAr+Hfu3evSpcurYoVK6p79+46fPiwJGnTpk1KTU1VixYtrHGrVaumcuXKac2aNZKkNWvWKCIiQsHBwdY4MTExSk5O1vbt26+5zJSUFCUnJzu9AAAAAACwkwIN/PXr19f06dP13Xff6b333tOBAwfUpEkTnTlzRnFxcXJ3d1dAQIDTNMHBwYqLi5MkxcXFOYX9zOGZw65lwoQJ8vf3t16hoaF5u2IAAAAAABSwAj2lv02bNta/a9asqfr166t8+fL64osv5OXldduWO2rUKA0bNsx6n5ycTOgHAAAAANhKgZ/Sf6WAgABVrVpV+/btU0hIiC5evKjExESnceLj461r/kNCQrLctT/zfXb3Bcjk4eEhPz8/pxcAAAAAAHZSqAL/2bNntX//fpUqVUp16tSRm5ublixZYg3fvXu3Dh8+rKioKElSVFSUtm3bphMnTljjLF68WH5+fgoPD8/3+gEAAAAAKCwK9JT+4cOHq3379ipfvryOHTumsWPHysXFRd26dZO/v7/69u2rYcOGKTAwUH5+fho8eLCioqLUoEEDSVKrVq0UHh6uHj166LXXXlNcXJyee+45DRw4UB4eHgW5agAAAAAAFKgCDfxHjx5Vt27ddOrUKZUsWVKNGzfW2rVrVbJkSUnSxIkTVaRIEXXq1EkpKSmKiYnR5MmTreldXFy0cOFCDRgwQFFRUfL29lZsbKxeeOGFglolAAAAAAAKBYcxxhR0EQUtOTlZ/v7+SkpKKvzX8zscBV1B/uGrCQAAAMCm8iOHFqpr+AEAAAAAQN4g8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQ
AAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbIjADwAAAACADRH4AQAAAACwIQI/AAAAAAA2ROAHAAAAAMCGCPwAAAAAANgQgR8AAAAAABsi8AMAAAAAYEMEfgAAAAAAbKjQBP5XXnlFDodDQ4YMsdouXLiggQMHqnjx4vLx8VGnTp0UHx/vNN3hw4fVtm1bFS1aVEFBQRoxYoTS0tLyuXoAAAAAAAqXQhH4N2zYoA8++EA1a9Z0ah86dKi++eYbzZkzR8uXL9exY8fUsWNHa3h6erratm2rixcvavXq1ZoxY4amT5+uMWPG5PcqAAAAAABQqBR44D979qy6d++uqVOnqlixYlZ7UlKSPvroI73xxhu69957VadOHU2bNk2rV6/W2rVrJUk//PCDduzYoU8//VSRkZFq06aNXnzxRb377ru6ePFiQa0SAAAAAAAFrsAD/8CBA9W2bVu1aNHCqX3Tpk1KTU11aq9WrZrKlSunNWvWSJLWrFmjiIgIBQcHW+PExMQoOTlZ27dvv+YyU1JSlJyc7PQCAAAAAMBOXAty4bNnz9bmzZu1YcOGLMPi4uLk7u6ugIAAp/bg4GDFxcVZ41wZ9jOHZw67lgkTJmjcuHG3WD0AAAAAAIVXgR3hP3LkiJ566inNnDlTnp6e+brsUaNGKSkpyXodOXIkX5cPAAAAAMDtVmCBf9OmTTpx4oTuuusuubq6ytXVVcuXL9dbb70lV1dXBQcH6+LFi0pMTHSaLj4+XiEhIZKkkJCQLHftz3yfOU52PDw85Ofn5/QCAAAAAMBOCizwN2/eXNu2bdPWrVutV926ddW9e3fr325ublqyZIk1ze7du3X48GFFRUVJkqKiorRt2zadOHHCGmfx4sXy8/NTeHh4vq8TAAAAAACFRYFdw+/r66s777zTqc3b21vFixe32vv27athw4YpMDBQfn5+Gjx4sKKiotSgQQNJUqtWrRQeHq4ePXrotddeU1xcnJ577jkNHDhQHh4e+b5OAAAAAAAUFgV6074bmThxoooUKaJOnTopJSVFMTExmjx5sjXcxcVFCxcu1IABAxQVFSVvb2/FxsbqhRdeKMCqAQAAAAAoeA5jjCnoIgpacnKy/P39lZSUVPiv53c4CrqC/MNXEwAAAIBN5UcOLbBr+AEAAAAAwO1zU6f07927V8uWLdOJEyeUkZHhNGzMmDF5UhgAAAAAALh5uQ78U6dO1YABA1SiRAmFhITIccUp5g6Hg8APAEBB4bIvwBl9AsD/uFwH/pdeekkvv/yynn322dtRDwAAAAAAyAO5vob/9OnT6ty58+2oBQAAAAAA5JFcB/7OnTvrhx9+uB21AAAAAACAPJLrU/orV66s0aNHa+3atYqIiJCbm5vT8CeffDLPigMAAAAAADfHYUzu7vARFhZ27Zk5HPr9999vuaj8lh/PP8wz3HwGAHAt7CMAZ/QJAIVYfuTQXB/hP3DgwO2oAwAAAAAA5KFcX8N/JWOMcnmCAAAAAAAAyAc3Ffg/+eQTRUREyMvLS15eXqpZs6b+85//5HVtAAAAAADgJuX6lP433nhDo0eP1qBBg9SoUSNJ0s8//6zHH39cf/75p4YOHZrnRQIAAAAAgNy5qZv2jRs3Tj179nRqnzFjhp5//vm/5TX+3LSvkOJyEQDIHfYRgDP6BIBCLD9yaK5P6T9+/LgaNmyYpb1hw4Y6fvx4nhQFAAAAAABuTa4Df+XKlfXFF19kaf/8889VpUqVPCkKAAAAAADcmlxfwz9u3Dg9/PDDWrFihXUN/6pVq7RkyZJsfwgAAAAAAAD5L9dH+Dt16qR169apRIkS+vLLL/Xll1+qRIkSWr9+vR588MHbUSMAAAAAAMilXN+0z464aV8hxVcTAHKHfQTgjD4BoBDLjxyao1P6k5OTrQKSk5OvO26hD8wAAAAAAPwPyFHgL1asmI4fP66goCAFBATIkc2vpcYYORwOpaen53mRAAAAAAAgd3IU+JcuXarAwEBJ0rJly25rQQAAAAAA4NblKPBHR0db/w4LC1NoaGiWo/zGGB05ciRvqwMAAAAAADcl13fpDwsL08mTJ7O0JyQkKCwsLE+KAgAAAAAAtybXgT/zWv2rnT17Vp6ennlSFAAAAAAAuDU5OqVfkoYNGyZJcjgcGj16tIoWLWoNS09P17p16xQZGZnnBQIAAAAAgNzLceDfsmWLpEtH+Ldt2yZ3d3drmLu7u2rVqqXhw4fnfYUAAAAAACDXchz4M+/O37t3b7355pvy8/O7bUUBAAAAAIBbk+PAn2natGm3ow4AAAAAAJCHch34JWnjxo364osvdPjwYV28eNFp2Pz58/OkMAAAAABAHsnmxuu2ZUxBV1Bo5Pou/bNnz1bDhg21c+dOLViwQKmpqdq+fbuWLl0qf3//21EjAAAAAADIpVwH/vHjx2vixIn65ptv5O7urjfffFO7du1Sly5dVK5cudtRIwAAAAAAyKVcB/79+/erbdu2ki7dnf/cuXNyOBwaOnSopkyZkucFAgAAAACA3Mt14C9WrJjOnDkjSSpTpox+++03SVJiYqLOnz+ft9UBAAAAAICbkuub9jVt2lSLFy9WRESEOnfurKeeekpLly7V4sWL1bx589tRIwAAAAAAyKVcB/533nlHFy5ckCT985//lJubm1avXq1OnTrpueeey/MCAQAAAABA7jmM4ZkFycnJ8vf3V1JSkvz8/Aq6nOvjcRoAgGthHwE4o08Al9EfCp38yKG5voZ/8+bN2rZtm/X+q6++0gMPPKD/+7//08WLF/O0OAAAAAAAcHNyHfj79++vPXv2SJJ+//13PfzwwypatKjmzJmjZ555Js8LBAAAAAAAuZfrwL9nzx5FRkZKkubMmaPo6GjNmjVL06dP17x58/K6PgAAAAAAcBNyHfiNMcrIyJAk/fjjj7rvvvskSaGhofrzzz/ztjoAAAAAAHBTch3469atq5deekn/+c9/tHz5crVt21aSdODAAQUHB+d5gQAAAAAAIPdyHfgnTZqkzZs3a9CgQfrnP/+pypUrS5Lmzp2rhg0b5nmBAAAAAAAg9/LssXwXLlyQi4uL3Nzc8mJ2+YrH8hVSf5PHaQBAocE+AnBGnwAuoz8UOvmRQ13zakaenp55NSsAAAAAAHCLchT4AwMDtWfPHpUoUULFihWT4zq/DiUkJORZcQAAAAAA4ObkKPBPnDhRvr6+ki5dww8AAAAAAAq3PLuG/++Ma/gLKb6aAJA77CMAZ/QJ4DL6Q6FTaK7hT05OzvEMC31gBgAAAADgf0COAn9AQMB1r9uXJGOMHA6H0tPT86QwAAAAAABw83IU+JctW3a76wAAAAAAAHkoR4E/Ojr6dtcBAAAAAADyUJGcjPTrr78qIyPD+vf1Xrnx3nvvqWbNmvLz85Ofn5+ioqK0aNEia/iFCxc0cOBAFS9eXD4+PurUqZPi4+Od5nH48GG1bdtWRYsWVVBQkEaMGKG0tLRc1QEAAAAAgN3k6Ah/ZGSk4
EGDBqlfv37y9vbWjh07tHjxYitYV6hQQStWrFDXrl3l4eGhEiVKSLp0Hf/TTz+tunXrWjefa9q0qWbOnKkRI0ZYy2jRooW1/EmTJiktLU1PPPGEoqOjs5z2fi1Vq1bVsmXL1KxZM7m6umrSpEnXHX/37t1Z2mrUqKEqVapo/vz5at++vRwOh0aPHq2MjIws4/br10/Vq1eXdOlHgiuNGzdOTz75pPz9/dW6dWulpKRo48aNOn36tIYNG3bNmoYMGaI2bdqoatWqOn36tJYtW2YtAwCAvztO6QcAoAC98MILWcJtzZo1tXz5cu3Zs0dNmjRR7dq1NWbMGJUuXdppuoMHD6pSpUpO19hHR0crPT3dulZfuvQjwNVtDodDX331lYoVK6amTZuqRYsWqlixoj7//PNc1X/HHXdo6dKl+uyzz/T0009fd9yuXbuqdu3aTq/4+Hi98cYbKlasmBo2bKj27dsrJiZGd911V5bpq1SpooYNG6patWqqX7++07B+/frpww8/1LRp0xQREaHo6GhNnz5dYWFh160pPT1dAwcOVPXq1dW6dWtVrVrV6XF+AAD8nTnMrV54BwAAkA+MMapSpYqeeOKJ6x61BwAAl3BKPwAAKPROnjyp2bNnKy4uTr179y7ocgAA+Fsg8AMAgEIvKChIJUqU0JQpU1SsWLGCLgcAgL8FAj8AACj0uAIRAIDc46Z9AAAAAADYEIEfAAAAAAAbIvADAAAAAGBDBH4AAAAAAGyIwA8AAAAAgA0R+AEAAAAAsCECPwAAAAAANkTgBwAAAADAhv4fvqS0ZW20Rz0AAAAASUVORK5CYII=", "text/plain": [ - "
    " + "{'MatrixVectorActivation_0': {'BRAM_18K': 8,\n", + " 'BRAM_efficiency': 0.5208333333333334,\n", + " 'LUT': 418,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'StreamingDataWidthConverter_Batch_0': {'BRAM_18K': 0,\n", + " 'BRAM_efficiency': 1,\n", + " 'LUT': 3,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.4444444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0},\n", + " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", + " 'BRAM_efficiency': 0.006944444444444444,\n", + " 'LUT': 320,\n", + " 'URAM': 0,\n", + " 'URAM_efficiency': 1,\n", + " 'DSP': 0}}" ] }, + "execution_count": 42, "metadata": {}, - "output_type": "display_data" + "output_type": "execute_result" } ], "source": [ - "layers_DWC = list(res_dict_DWC.keys())\n", - "print(layers_DWC)\n", - "utilisation_DWC = list(res_dict_DWC.values())\n", - "lut_values_DWC = [] #Initializing a list to store LUT values.\n", - "for i in range(len(layers_DWC)):\n", - " x = list(utilisation_DWC[i].values()) #Extracting the resource utilisation for each layer.\n", - " lut_values_DWC.append(x[2]) #Extracting the LUT values of resource utilisation from each layer and appending to the list\n", - "\n", - "#Plotting the bar graph of each network layer with their corresponding LUT resource utilisation\n", - "fig = plt.figure(figsize = (12, 5))\n", - "plt.bar(layers_DWC, lut_values_DWC, color ='red', width = 0.3)\n", - "plt.xlabel(\"Network Layers\")\n", - "plt.ylabel(\"LUT Utilisation\")\n", - "plt.title(\"Estimated LUT values used for each network layer\")\n", - "plt.show()" + "model_dwc = ModelWrapper(\"cybsec_DWC.onnx\")\n", + "res_dict_dwc = model_dwc.analysis(res_estimation)\n", + "res_dict_dwc" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `StreamingDataWidthConverter` layer does not consume a large number of LUT resources as shown in the above graph." + "Since we have now one additional layer, we manipulate the data to shorten the layer names in the plot." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 48, "metadata": {}, + "outputs": [], "source": [ - "
    \n", - "Question: The name of the 'StreamingDataWidthConverter' layer is not coming in the graph.\n", - "
    " + "layers = res_dict_dwc.keys()\n", + "# replace names of layers with abbreviations\n", + "layers = [n.replace(\"MatrixVectorActivation_\", \"MVU\") for n in layers]\n", + "layers = [n.replace(\"StreamingDataWidthConverter_Batch\", \"DWC\") for n in layers]" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 50, "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1IAAAHWCAYAAAB9mLjgAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABZ/0lEQVR4nO3deVhU5f//8dcAgigOiAq4oOKWG0ZpKW7gimuZaGmmuKamlZqlVu4lLZ/S8pOZLWClmWuln7TMPUVzTXPLfUlBkwS1RIHz+8Mf83UClaPgDPh8XNdcOffZ3mfm5sSLc859LIZhGAIAAAAAZJuLowsAAAAAgLyGIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYBJBCgAAAABMIkgBAAAAgEkEKQAAAAAwiSAF4LaEh4crPDzc0WXkqKNHj8pisSg2NtbRpTgUn0P2xcbGymKx6OjRo7ecd9myZQoJCVHBggVlsVh0/vz5XK/vbrNYLBo8eLCjy3BqGX1my5YtppddvXq1LBaLVq9enfOFATCNIAXkMxn/k77Ra+PGjdle1549ezRu3Lhs/ZJ4N02bNs2hv+Rn/DIzf/78G85zs18o58+fb/tlKGNd2Xkh7zp37pwef/xxeXp66oMPPtAXX3yhwoULO7qsfG/Dhg0aN25cvgytABzPzdEFAMgdEyZMUFBQUKb2SpUqZXsde/bs0fjx4xUeHq7y5cvbTfvxxx/vtMTbNm3aNBUvXlw9e/Z0WA05pVq1avriiy/s2kaNGiUvLy+98sorDqoKOW3z5s26cOGCJk6cqObNmzu6nHvGhg0bNH78ePXs2VM+Pj6OLgdAPkOQAvKp1q1bq06dOrm2fnd391xb973E399fTz31lF3bG2+8oeLFi2dqR9515swZScrRX+YvXbrEWa085PLly/n+uEmfxL2GS/uAe9icOXNUu3ZtFSlSRFarVcHBwXrvvfckXbtEsHPnzpKkJk2a2C4vy7g2/9/3SGVcojZ37lyNHz9epUuXVpEiRdSpUyclJSUpJSVFQ4YMkZ+fn7y8vNSrVy+lpKTY1RMTE6OmTZvKz89PHh4eql69uj788EO7ecqXL6/du3drzZo1tpqur+P8+fMaMmSIAgMD5eHhoUqVKunNN99Uenq63XrOnz+vnj17ytvbWz4+PoqKisqTl/8kJCTIzc1N48ePzzRt//79slgs+u9//ytJSkxM1PDhwxUcHCwvLy9ZrVa1bt1av/766y23c6N74nr27JnpbGV6erqmTJmiGjVqqGDBgvL391f//v31119/2c23ZcsWRUREqHjx4vL09FRQUJB69+59y1osFovGjRuXqb18+fJ2ZymvXr2q8ePHq3LlyipYsKCKFSumhg0bavny5XbL7du3T506dZKvr68KFiyoOnXq6Lvvvsu0/t27d6tp06by9PRUmTJl9Nprr2XqV1kJDw9XVFSUJOmhhx6SxWKxq3PevHmqXbu2PD09bQH6jz/+sFtHz5495eXlpUOHDqlNmzYqUqSIunXrdtPt/vHHH+rdu7f8/f3l4eGhGjVq6LPPPrOb58qVKxozZoxq164tb29vFS5cWI0aNdKqVasyrS89PV3vvfeegoODVbBgQZUoUUKtWrXK8l6fb775RjVr1rRtd9myZbf8nK4/hrz++usqU6aMChYsqGbNmungwYOZ5t+0aZNatWolb29vFSpUSGFhYVq/fr1t+rhx4/Tiiy9KkoKCgmzHi6NHj6pjx4568MEH7dbXvn17WSwWu+9+06ZNslgsWrp0qa3t8OHD6ty5s3x9fVWoUCHVq1dP//vf/7Lclzlz5ujVV19V6dKlVahQISUnJ2e573/99ZcefvhhlSlTRvv377/lZ3W9devWqXPnzipbtqw8PDwUGBiooUOH6p9//rHNExMTI4vFou3bt2daftKkSXJ1dbXrc7f6bKVrn6/FYtGePXv05JNPqmjRomrYsKGp2oG8jjNSQD6VlJSkP//8067NYrGoWLFikqTly5era9euatasmd58801J0t69e7V+/Xo9//zzaty4sZ577jm9//77evnll1WtWjVJsv33RqKjo+Xp6amRI0fq4MGDmjp1qgoUKCAXFxf99ddfGjdunDZu3KjY2FgFBQVpzJgxtmU//PBD1ahRQ4888ojc3Ny0ePFiPfPMM0pPT9egQYMkSVOmTNGzzz5rd+mbv7+/JOnvv/9WWFiY/vjjD/Xv319ly5bVhg0bNGrUKJ0+fVpTpkyRJBmGoUcffVQ///yzBgwYoGrVqmnRokW2X3bzEn9/f4WFhWnu3LkaO3as3bSvv/5arq6utkB8+PBhffPNN+rcubOCgoKUkJCgjz76SGFhYdqzZ49KlSqVIzX1799fsbGx6tWrl5577jkdOXJE//3vf7V9+3atX79eBQoU0JkzZ9SyZUuVKFFCI0eOlI+Pj44ePaqFCxfmSA3StV/0oqOj1bdvXz388MNKTk7Wli1btG3bNrVo0ULStXDUoEEDlS5dWiNHjlThwoU1d+5cdejQQQsWLNBjjz0mSYqPj1eTJk2Umppqm2/GjBny9PS8ZR2vvPKK7rvvPs2YMcN2yW3FihUlyfY5PfTQQ4qOjlZCQoLee+89rV+/Xtu3b7c7g5WamqqIiAg1bNhQ//nPf1SoUKEbbjMhIUH16tWz3atXokQJLV26VH369FFycrKGDBkiSUpOTtYnn3yirl27ql+/frpw4YI+/fRTRURE6JdfflFISIhtnX369FFsbKxat26tvn37KjU1VevWrdPGjRvtzn7//PPPWrhwoZ555hkVKVJE77//viIjI3X8+HHb8edm3njjDbm4uGj48OFKSkrSW2+9pW7dumnTpk22eVauXKnWrVurdu3aGjt2rFxcXGx/iFm3bp0efvhhdezYUb///ru++uorTZ48WcWLF5cklShRQo0aNdK3336r5ORkWa1WGYah9evXy8XFRevWrdMjjzwi6VpIcXFxUYMGDWyfa/369fX333/rueeeU7FixTRz5kw98sgjmj9/vq2/ZJg4caLc3d01fPhwpaSkZHlG6s8//1SLFi2UmJioNWvW2PpGds2bN09///23Bg4cqGLFiumXX37R1KlTdfLkSc2bN
    " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "# Additional Information : Constraints table\n", + "# Extracting LUTs from res_dict\n", + "LUTs_dwc = [res_dict_dwc[key][\"LUT\"] for key in res_dict_dwc.keys()] \n", "\n", - "The below table exposes the constraints associated with each layer. A developer working with these layers has to be mindful of not violating them when setting the PE & SIMD values manually." + "#Plotting the bar graph of each network layer with their corresponding LUT resource utilization\n", + "fig = plt.figure(figsize = (10, 5))\n", + "plt.bar(layers, LUTs_dwc, color ='red', width = 0.3)\n", + "plt.xlabel(\"Network Layers\")\n", + "plt.ylabel(\"LUT Utilisation\")\n", + "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.show()" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "+------------------------------------+------------+----------------------------------------------------------------+\n", - "| Layers | Attributes | Assertions |\n", - "+====================================+============+================================================================+\n", - "| addstreams_batch | PE | inp_channels % PE == 0 |\n", - "| channelwise_op_batch | PE | channels % PE == 0 |\n", - "| checksum | ~ | ~ |\n", - "| concat | ~ | ~ |\n", - "| convolutioninputgenerator | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| convolutioninputgenerator1d | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| convolutioninputgenerator_rtl | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| downsampler | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| duplicatestreams_batch | PE | channels % PE == 0 |\n", - "| eltwise | PE | inp_channels % PE == 0 |\n", - "| fmpadding_batch | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| fmpadding_rtl | SIMD | inp_feature_map_channels % SIMD == 0 |\n", - "| globalaccpool_batch | PE | channels % PE == 0 |\n", - "| hlscustomop | ~ | ~ |\n", - "| iodma | ~ | ~ |\n", - "| labelselect_batch | PE | num_labels % PE == 0 |\n", - "| lookup | ~ | ~ |\n", - "| matrixvectoractivation | PE & SIMD | matrix_height % PE == 0 & matrix_width % SIMD == 0 |\n", - "| pool_batch | PE | input_feature_map_channels % PE == 0 |\n", - "| streamingdataflowpartition | ~ | ~ |\n", - "| streamingdatawidthconverter_batch | ~ | ~ |\n", - "| streamingfifo | ~ | ~ |\n", - "| streamingmaxpool_batch | ~ | ~ |\n", - "| templates | ~ | ~ |\n", - "| thresholding_batch | PE | matrix_height % PE == 0 |\n", - "| tlastmarker | ~ | ~ |\n", - "| upsampler | ~ | ~ |\n", - "| vectorvectoractivation | PE & SIMD | kernel_height * kernel_width % SIMD == 0 & channels % PE == 0 |\n", - "+------------------------------------+------------+----------------------------------------------------------------+" + "In the case of our example network, the `StreamingDataWidthConverter_Batch` layer does not consume a large number of LUT resources as shown in the graph. This might be different for larger models and if there are a higher number of DWCs inserted. Please be aware of this when setting the folding factors for your network." 
] } ], From 9bfc9b4fb1aca8ed3ea600fac4f354673011e3cc Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 27 Jun 2023 11:22:06 +0100 Subject: [PATCH 521/628] [docs] Update table for folding factor constraints --- docs/finn/internals.rst | 82 +++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 31 deletions(-) diff --git a/docs/finn/internals.rst b/docs/finn/internals.rst index 9c1ff626b2..652c94ac24 100644 --- a/docs/finn/internals.rst +++ b/docs/finn/internals.rst @@ -211,37 +211,57 @@ When the nodes in the network are converted to HLS layers, the *mem_mode* can be Constraints to folding factors per layer ========================================= -+------------------------------------+------------+----------------------------------------------------------------+ -| Layers | Attributes | Assertions | -+====================================+============+================================================================+ -| addstreams_batch | PE | inp_channels % PE == 0 | -| channelwise_op_batch | PE | channels % PE == 0 | -| checksum | - | - | -| concat | - | - | -| convolutioninputgenerator | SIMD | inp_channels % SIMD == 0 | -| convolutioninputgenerator1d | SIMD | inp_channels % SIMD == 0 | -| convolutioninputgenerator_rtl | SIMD | inp_channels % SIMD == 0 | -| downsampler | SIMD | inp_channels % SIMD == 0 | -| duplicatestreams_batch | PE | channels % PE == 0 | -| eltwise | PE | inp_channels % PE == 0 | -| fmpadding_batch | SIMD | inp_channels % SIMD == 0 | -| fmpadding_rtl | SIMD | inp_channels % SIMD == 0 | -| globalaccpool_batch | PE | channels % PE == 0 | -| iodma | - | - | -| labelselect_batch | PE | num_labels % PE == 0 | -| lookup | - | - | -| matrixvectoractivation | PE & SIMD | matrix_height % PE == 0 & matrix_width % SIMD == 0 | -| pool_batch | PE | inp_channels % PE == 0 | -| streamingdataflowpartition | - | - | -| streamingdatawidthconverter_batch | - | - | -| streamingfifo | - | - | -| streamingmaxpool_batch | - | - | -| templates | - | - | -| thresholding_batch | PE | matrix_height % PE == 0 | -| tlastmarker | - | - | -| upsampler | - | - | -| vectorvectoractivation | PE & SIMD | kernel_height * kernel_width % SIMD == 0 & channels % PE == 0 | -+------------------------------------+------------+----------------------------------------------------------------+ + +.. 
list-table:: Folding factor constraints + + * - **Layers** + - **Parameters** + - **Constraints** + * - Addstreams_Batch + - PE + - inp_channels % PE == 0 + * - ChannelwiseOp_Batch + - PE + - channels % PE == 0 + * - ConvolutionInputGenerator + - SIMD + - inp_channels % SIMD == 0 + * - ConvolutionInputGenerator1d + - SIMD + - inp_channels % SIMD == 0 + * - Downsampler + - SIMD + - inp_channels % SIMD == 0 + * - DuplicateStreams_Batch + - PE + - channels % PE == 0 + * - Eltwise + - PE + - inp_channels % PE == 0 + * - FMPadding_batch + - SIMD + - inp_channels % SIMD == 0 + * - FMPadding_rtl + - SIMD + - inp_channels % SIMD == 0 + * - Globalaccpool_Batch + - PE + - channels % PE == 0 + * - Labelselect_Batch + - PE + - num_labels % PE == 0 + * - MatrixVectorActivation + - PE & SIMD + - MH % PE == 0 & MW % SIMD == 0 + * - Pool_Batch + - PE + - inp_channels % PE == 0 + * - Thresholding_Batch + - PE + - MH % PE == 0 + * - VectorVectorActivation + - PE & SIMD + - k_h * k_w % SIMD == 0 & channels % PE == 0 RTL ConvolutionInputGenerator From 75eda8537b52bf452f9fc800754f5ac32ac4973e Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 27 Jun 2023 11:50:50 +0100 Subject: [PATCH 522/628] [notebooks] Clean-up folding nb and add onnx file --- notebooks/advanced/3_folding.ipynb | 571 ++++--------------------- notebooks/advanced/cybsec_PE_SIMD.onnx | Bin 0 -> 192234 bytes 2 files changed, 74 insertions(+), 497 deletions(-) create mode 100644 notebooks/advanced/cybsec_PE_SIMD.onnx diff --git a/notebooks/advanced/3_folding.ipynb b/notebooks/advanced/3_folding.ipynb index a411d3bc88..1eb99206e2 100644 --- a/notebooks/advanced/3_folding.ipynb +++ b/notebooks/advanced/3_folding.ipynb @@ -6,18 +6,20 @@ "source": [ "# FINN - Folding\n", "--------------------------------------\n", - "**Note: To run this notebook, you first need to run the build flow in the 3rd cybersecurity notebook as we utilize one of the intermediate models generated in that process in this notebook.** \n", + "**Note: We will utilize one of the intermediate models generated in the process of the cybersecurity end2end example**\n", "\n", - "This notebook describes the use of FINN parallelization parameters (PE & SIMD) to efficiently streamline models so as to extract the maximum performance out of them. \n", + "There is a local copy of `step_convert_to_hls.onnx` in this directory, which was renamed to `cybsec_PE_SIMD.onnx` to be able to go through this tutorial without requisites. But you can also generate it yourself with the [third cybersecurity Jupyter notebook](../end2end_example/cybersecurity/3-build-accelerator-with-finn.ipynb). After the execution of the estimates only build flow, it can be found in `../end2end_example/cybersecurity/output_estimates_only/intermediate_models/step_convert_to_hls.onnx`. \n", "\n", - "Please be aware that the folding factors can not be selected arbitrarily, each layer has constraints on which values the parallelization parameters can be set to, for more information see here: https://finn-dev.readthedocs.io/en/latest/internals.html#folding-factors\n", + "This notebook describes the use of FINN parallelization parameters (PE & SIMD), also called folding factors, to efficiently optimize models so as to extract the maximum performance out of them. 
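For a first intuition (a rough back-of-the-envelope figure that ignores pipeline overheads, not an exact performance model): a `MatrixVectorActivation` layer with MW input channels and MH output channels needs on the order of

$$\text{cycles} \approx \frac{MW}{SIMD} \cdot \frac{MH}{PE}$$

clock cycles per inference. For the first layer of this network (MW=600, MH=64) that is 600 * 64 = 38400 cycles at SIMD = PE = 1, which matches the value reported by `exp_cycles_per_layer` further below, and it would shrink to (600/10) * (64/8) = 480 cycles with SIMD = 10 and PE = 8.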
\n", + "\n", + "Please be aware that the folding factors can not be selected arbitrarily, each layer has constraints on which values the parallelization parameters can be set to, for more information see here: https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer\n", "\n", "We'll use the utility function `showInNetron()` to visualize and interact with our network in the Jupyter Notebook and `showSrc()` to show source code of FINN library calls." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -39,7 +41,7 @@ "source": [ "This notebook shows the manual version of this step and explains how these attributes can improve performance and what are their effects on resource utilization for developers who need to maximize the performance of their network. \n", "\n", - "For that we will use the `step_convert_to_hls.onnx` file as starting point. This intermediate model from the cybersecurity example is the model representation after the high-level ONNX layers are converted to HLS layers. Each node in the graph now corresponds to an HLS C++ function call and the parallelization parameters can be set using the node attributes.\n", + "For that we will use the `cybsec_PE_SIMD.onnx` file as starting point. This intermediate model from the cybersecurity example is the model representation after the high-level ONNX layers are converted to HLS layers. Each node in the graph now corresponds to an HLS C++ function call and the parallelization parameters can be set using the node attributes.\n", "\n", "We will take this model to show how to set the folding factors manually and analyze the estimated execution clock cycles and the resource utilization of each layer in the network." ] @@ -56,7 +58,7 @@ "\n", "In practice, the layers are instantiated by function calls to optimized Vitis HLS building blocks from the [finn-hlslib](https://github.com/Xilinx/finn-hlslib) library.\n", "\n", - "Since each layer will be instantiated, we can flexibly set the parallelization of each layer and thus control resources and throughput of our network, as visualized in the imaged below:\n", + "Since each layer will be instantiated, we can flexibly set the parallelization of each layer and thus control resources and throughput of our network, as visualized in the image below:\n", "\n", "![](finn-folding.png)" ] @@ -70,52 +72,21 @@ "As discussed above, the network needs to go through a few preparation steps before it can be fed into our estimation functions.\n", "\n", "The `.onnx` file loaded here is taken from the cybersecurity end2end example notebook. \n", - "We pick the onnx file `step_convert_to_hls.onnx` to which the necessary transformations have been applied for this notebook (Network layers mapped to necessary FINN-HLS blocks. In this case, the `MatrixVectorActivation` Units). \n", + "We pick the onnx file `cybsec_PE_SIMD.onnx` to which the necessary transformations have been applied for this notebook. This means, network layers mapped to necessary FINN-HLS blocks. In this case, the `MatrixVectorActivation` units. \n", "\n", - "To interact with the `.onnx` file we use the `ModelWrapper()`. This wrapper simplifies the access to different model attributes and allows us to apply custom transformations on the model.\n", + "To interact with the `.onnx` file we use `ModelWrapper()`. 
This wrapper simplifies the access to different model attributes and allows us to apply custom transformations on the model.\n", "\n", "In the below cell, we load our onnx file and view the cybersecurity MLP network in Netron." ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Serving 'cybsec_PE_SIMD.onnx' at http://0.0.0.0:5920\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(\"../end2end_example/cybersecurity/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")\n", - "model.save(\"cybsec_PE_SIMD.onnx\")\n", + "model = ModelWrapper(\"cybsec_PE_SIMD.onnx\")\n", "\n", "showInNetron(\"cybsec_PE_SIMD.onnx\")" ] @@ -162,40 +133,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:5920\n", - "Serving 'cybsec_PE_SIMD.onnx' at http://0.0.0.0:5920\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "showInNetron(\"cybsec_PE_SIMD.onnx\")" ] @@ -204,12 +144,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We import the analysis passes (`exp_cycles_per_layer()`) and (`res_estimation()`) to estimate the number of clock cycles and resource utilization of each network layer." + "We import the analysis passes `exp_cycles_per_layer()` and `res_estimation()` to estimate the number of clock cycles and resource utilization of each network layer." 
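As a rough way to interpret the cycle estimates computed in the cells below: in a dataflow pipeline the slowest layer bounds the steady-state throughput. The sketch below assumes the `model` loaded above, an example 100 MHz clock, and ignores FIFO/DMA overheads; the import path follows current FINN releases.

```python
# Back-of-the-envelope throughput estimate from the per-layer cycle counts.
# 100 MHz is an assumed clock; FIFO/DMA overheads are not modeled.
from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer

cycles_dict = model.analysis(exp_cycles_per_layer)
bottleneck = max(cycles_dict, key=cycles_dict.get)
fclk_hz = 100e6  # assumed target clock frequency
print("bottleneck layer:", bottleneck, "with", cycles_dict[bottleneck], "cycles")
print("estimated throughput: %.0f inferences/s" % (fclk_hz / cycles_dict[bottleneck]))
```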
] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -228,23 +168,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'MatrixVectorActivation_0': 38400,\n", - " 'MatrixVectorActivation_1': 4096,\n", - " 'MatrixVectorActivation_2': 4096,\n", - " 'MatrixVectorActivation_3': 64}" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cycles_dict = model.analysis(exp_cycles_per_layer)\n", "cycles_dict" @@ -252,20 +178,9 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA3cAAAHWCAYAAADU7HB0AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABwo0lEQVR4nO3de3zO9f/H8ee1sTnM5jRGZkTFMMt5yaEsK1OEosQckoQwOax8naqv6ISETozvlxRKRWjmVCwKy1kOcyiGsM1xY3v//vDb5+uyYRfjWleP++32ueV6f96f9+f1+VzX+2qv6/P5vN82Y4wRAAAAAOBvzc3ZAQAAAAAAbh3JHQAAAAC4AJI7AAAAAHABJHcAAAAA4AJI7gAAAADABZDcAQAAAIALILkDAAAAABdAcgcAAAAALoDkDgAAAABcAMkdgNuqadOmatq0qbPDyFX79++XzWZTdHS0s0PJFbt371bz5s3l4+Mjm82mBQsW3FJ7NptNI0eOzJXYrrZy5UrZbDbNmzfvtrSf227nubiWo0ePql27dipRooRsNpvGjx9/R/d/J3Tp0kVeXl7ODiPPs9ls6tOnz01tW6FCBXXp0iV3AwJw25HcAf9Q0dHRstls11x+/vnnHLe1fft2jRw5Uvv37799Ad+EyZMnu0wCdjtFRERoy5YtevPNN/Wf//xHderUcXZIuAUDBgzQ0qVLFRUVpf/85z969NFHnR2Syzt37pxGjhyplStXOjsUAP9w+ZwdAADnGj16tCpWrJilvHLlyjluY/v27Ro1apSaNm2qChUq2K374YcfbjXEmzZ58mSVLFmSX5+v4/z584qLi9Nrr71207/wI29Zvny5WrVqpVdeecXZofxjnDt3TqNGjZIkl7tTAcDfC8kd8A/32GOP3dYrNR4eHretbdy648ePS5KKFi3q3ECQa44dO5ar7+eFCxfk4eEhNzdu9vk7MMbowoULKliwoLNDuW0uXbqkjIwM/v8CZINvagA3NGfOHNWuXVtFihSRt7e3atSooQkTJki6fHvnU089JUl66KGHrNs6M29PuvqZu8xnpr788kuNGjVKd911l4oUKaJ27dopOTlZqamp6t+/v0qVKiUvLy917dpVqampdvFMnz5dDz/8sEqVKiVPT08FBgZqypQpdnUqVKigbdu2adWqVVZMV8aRlJSk/v37y9/fX56enqpcubLGjh2rjIwMu3aSkpLUpUsX+fj4qGjRooqIiFBSUlKOzlvmra9r1qxRZGSkfH19VbhwYT355JNWUnWlyZMnq1q1avL09FTZsmXVu3fvHO8rO5s2bdJjjz0mb29veXl5qVmzZna3244cOVIBAQGSpEGDBslms2W58nq1CxcuaOTIkbr33ntVoEABlSlTRm3atNHevXtvKZZMSUlJGjBggCpUqCBPT0+VK1dOnTt31l9//XXNtlNTU9WyZUv5+Pho7dq1Nx2/MUYVKlRQq1atst3Ox8dHPXv2vOVz8eeff6pbt24qXbq0PD09Va1aNU2bNi1LvQ8++EDVqlVToUKFVKxYMdWpU0ezZ8++ZruZnzdjjD788EPrc59p3759euqpp1S8eHEVKlRIDRo00KJFi+zayOyfc+bM0bBhw3TXXXepUKFCSklJueZ+MzIyNH78eFWrVk0FChRQ6dKl1bNnT506dcqu3jfffKPw8HCVLVtWnp6eqlSpkl5//XWlp6dnaXPdunVq0aKFihUrpsKFCysoKMj6zrn6XLZu3VpeXl7y9fXVK6+8km17V6tQoYJatmypn376SfXq1VOBAgV09913a+bMmVnq3ui7Yv/+/fL19ZUkjRo1yjrvI0eO1LfffiubzabNmzdb7c2fP182m01t2rSx20/VqlXVvn176/WlS5f0+uuvq1KlSvL09FSFChX06quvZvk+zDyWpUuXqk6dOipYsKA++uijax77G2+8ITc3N33wwQc3PE9XOnnypF555RXVqFFDXl5e8vb21mOPPabffvvNqnPmzBkVLlxY/fr1y7L9H3/8IXd3d40ZM8Yqy8n3cOYzzu+8847Gjx9vnY/t27c7FD/wT8GVO+AfLjk5OcsfzjabTSVKlJAkxcTE6JlnnlGzZs00duxYSdKOHTu0Zs0a9evXT40bN9bLL7+siRMn6tVXX1XVqlUlyfrvtYwZM0YFCxbU0KFDtWfPHn3wwQfKnz+/3NzcdOrUKY0cOVI///yzoqOjVbFiRQ0fPtzadsqUKapWrZqeeOIJ5cuXT999951eeuklZWRkqHfv3pKk8ePHq2/fvvLy8tJrr70mSSpdurSky7dQNWnSRH/++ad69uyp8uXLa+3atYqKitKRI0esASiMMWrVqpV++uknvfjii6pataq+/vprRUREOHSO+/btq2LFimnEiBHav3+/xo8frz59+uiLL76w6owcOVKjRo1SaGioevXqpV27dmnKlCn65ZdftGbNGuXPn9+hfW7btk2NGjWSt7e3Bg8erPz58+ujjz5S06ZNtWrVKtWvX19t2rRR0aJFNWDAAD3zzDNq0aLFdQepSE9PV8uWLRUbG6sOHTqoX79+On36tGJiYrR161ZVqlTppmORLv9h2KhRI+3YsUPdunVTrVq19Ndff+nbb7/VH3/8oZIlS2Zp+/z582rVqpV+/fVXLVu2THXr1r2l+J977jmNGzdOJ0+eVPHixa1tv/vuO6WkpOi55567pXNx9OhRNWjQwBrowtfXV4sXL1b37t2VkpKi/v37
[... base64-encoded PNG data omitted: removed notebook output, a bar plot of the estimated clock cycles per network layer ...]
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", @@ -273,7 +188,7 @@ "plt.bar(cycles_dict.keys(), cycles_dict.values(), color ='blue', width = 0.3)\n", "plt.xlabel(\"Network layers\")\n", "plt.ylabel(\"Number of clock cycles\")\n", - "plt.title(\"Estimated no. of clock cycles for each network layer\")\n", + "plt.title(\"Clock cycles per layer PE=SIMD=1\")\n", "plt.show()" ] }, @@ -291,43 +206,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'MatrixVectorActivation_0': {'BRAM_18K': 5,\n", - " 'BRAM_efficiency': 0.8333333333333334,\n", - " 'LUT': 319,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.006944444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0}}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "res_dict = model.analysis(res_estimation)\n", "res_dict" @@ -349,20 +230,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABXyklEQVR4nO3deXwN9/7H8fdJSJDVHiqCaBFbipaUoI0KQrncKlViufRqaC3VVm9bSxelC6Vof621l2prbbX2vSiK1BZrKUpQJPaI5Pv7wyNzHQlyCJPK6/l4nEed73xn5jPnnG+ad2bmexzGGCMAAAAAgG3c7C4AAAAAAHI6ghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGYDbVr9+fdWvX9/uMrLUgQMH5HA4NHHiRLtLyRJ79uxRw4YN5efnJ4fDodmzZ9td0n2lY8eOKlWqVKb6fvDBBypTpozc3d0VGhp6V+uyw/Lly+VwODR9+nS7S8nWOnbsKG9v79tad+DAgXI4HFlcEYDsgmAG3IcmTpwoh8Nxw8cvv/yS6W3t2LFDAwcO1IEDB+5ewbdhzJgx9014upuio6O1detWvfvuu/rqq69Uo0aNDPulBdIPP/zwhtsqVaqUmjZtmuGyX3/91Qq0advKzCO7fa7uloULF+qVV15R7dq1NWHCBL333nt2l5QjTJ06VSNGjLC7DADIlFx2FwDg7hk8eLBKly6drr1s2bKZ3saOHTs0aNAg1a9fP92ZgYULF95pibdtzJgxKlSokDp27GhbDdndxYsXtXbtWv3nP/9Rjx497tl+CxcurK+++sqp7aOPPtLhw4c1fPjwdH1zgqVLl8rNzU3jxo2Th4eH3eXkGFOnTtW2bdvUq1cvu0sBgFsimAH3scaNG9/wDElW4BfM7O3EiROSJH9//3u6Xy8vLz333HNObdOmTdPp06fTtecUx48fV968ebNszBhjdOnSJeXNmzdLtoe77/z58/Ly8rK7jLvqwoULypcvn91lAH9bXMoI5HDTpk1T9erV5ePjI19fX1WuXFmffPKJpKuXRD799NOSpMcff9y6/Gz58uWS0t9jlnaPybfffqtBgwbpgQcekI+Pj/75z38qMTFRSUlJ6tWrl4oUKSJvb2916tRJSUlJTvVMmDBBTzzxhIoUKSJPT0+FhIRo7NixTn1KlSql7du3a8WKFVZN19aRkJCgXr16KTAwUJ6enipbtqyGDh2q1NRUp+0kJCSoY8eO8vPzk7+/v6Kjo5WQkJCp1y3tctHVq1erT58+Kly4sLy8vPSPf/zDCkTXGjNmjCpWrChPT08VL15cMTExmd5XRjZv3qzGjRvL19dX3t7eioiIcLpEdeDAgQoKCpIk9evXTw6HI9P3Qt0rN/vs3UjaZyztM5gmo3sD4+Pj1alTJ5UoUUKenp4qVqyYmjdvnu7yyXnz5ik8PFxeXl7y8fFRVFSUtm/fnm7fs2fPVqVKlZQnTx5VqlRJs2bNytRxOhwOTZgwQefPn7c+r2l1XrlyRW+//baCg4Pl6empUqVK6fXXX083LtIuI12wYIFq1KihvHnz6vPPP7/pftetW6dGjRrJz89P+fLlU7169bR69WqnPn/88YdeeOEFlStXTnnz5lXBggX19NNPZ3iJaUJCgnr37q1SpUrJ09NTJUqUUIcOHfTXX3859UtNTdW7776rEiVKKE+ePIqIiNDevXtv+Tql3T+1d+9edezYUf7+/vLz81OnTp104cKFdP3/+9//qnr16sqbN68KFCigNm
3a6NChQ9by+vXr68cff9Qff/xhve6lSpWSMUaFChVSnz59nGr29/eXu7u707gcOnSocuXKpXPnzlltS5cutT4v/v7+at68ueLi4jI8lh07dujZZ59V/vz5VadOnRsee2xsrAoXLqz69es77SszMvMzMzo6WoUKFVJycnK69Rs2bKhy5co5td3qtZWuvr6VKlXSxo0bVbduXeXLl0+vv/66S7UDcMYZM+A+lpiYmO6XJofDoYIFC0qSFi1apLZt2yoiIkJDhw6VJMXFxWn16tV66aWXVLduXb344osaOXKkXn/9dVWoUEGSrP/eyJAhQ5Q3b1699tpr2rt3r0aNGqXcuXPLzc1Np0+f1sCBA/XLL79o4sSJKl26tN566y1r3bFjx6pixYp66qmnlCtXLv3www964YUXlJqaqpiYGEnSiBEj1LNnT3l7e+s///mPJKlo0aKSrv7Ftl69evrzzz/1/PPPq2TJklqzZo369++vo0ePWvebGGPUvHlz/fzzz/r3v/+tChUqaNasWYqOjnbpNe7Zs6fy58+vAQMG6MCBAxoxYoR69Oihb775xuozcOBADRo0SA0aNFD37t21a9cujR07Vhs2bNDq1auVO3dul/a5fft2hYeHy9fXV6+88opy586tzz//XPXr19eKFStUs2ZNtWzZUv7+/urdu7fatm2rJk2a3PaEA3fDrT57WaFVq1bavn27evbsqVKlSun48eNatGiRDh48aIXUr776StHR0YqMjNTQoUN14cIFjR07VnXq1NHmzZutfgsXLlSrVq0UEhKiIUOG6OTJk1bou5WvvvpK//d//6f169fryy+/lCQ99thjkqR//etfmjRpkv75z3+qb9++WrdunYYMGaK4uLh0wW/Xrl1q27atnn/+eXXt2jXdL9PXWrp0qRo3bqzq1atrwIABcnNzs36BX7VqlR599FFJ0oYNG7RmzRq1adNGJUqU0IEDBzR27FjVr19fO3bssM5+nDt3TuHh4YqLi1Pnzp1VrVo1/fXXX/r+++91+PBhFSpUyNr3+++/Lzc3N7388stKTEzUsGHD1K5dO61bty5T71vr1q1VunRpDRkyRJs2bdKXX36pIkWKWJ8TSXr33Xf15ptvqnXr1vrXv/6lEydOaNSoUapbt642b94sf39//ec//1FiYqLTJbTe3t5yOByqXbu2Vq5caW1vy5YtSkxMlJubm1avXq2oqChJ0qpVq/Twww9bY2fx4sVq3LixypQpo4EDB+rixYsaNWqUateurU2bNqX748fTTz+tBx98UO+9956MMRke74YNGxQZGakaNWpozpw5Lp8FzczPzPbt22vy5MlasGCB032i8fHxWrp0qQYMGODSa5vm5MmTaty4sdq0aaPnnnvO+jkM4DYZAPedCRMmGEkZPjw9Pa1+L730kvH19TVXrly54ba+++47I8ksW7Ys3bJ69eqZevXqWc+XLVtmJJlKlSqZy5cvW+1t27Y1DofDNG7c2Gn9sLAwExQU5NR24cKFdPuJjIw0ZcqUcWqrWLGi077TvP3228bLy8vs3r3bqf21114z7u7u5uDBg8YYY2bPnm0kmWHDhll9rly5YsLDw40kM2HChHTbvlbaa9ygQQOTmppqtffu3du4u7ubhIQEY4wxx48fNx4eHqZhw4YmJSXF6vfpp58aSWb8+PE33U9GWrRoYTw8PMy+ffustiNHjhgfHx9Tt25dq23//v1Gkvnggw9uuc3M9A0KCjJRUVEZLtuwYcNNX7eoqCin9zozn72MpH3Grv88ptWftv/Tp0/f8njOnj1r/P39TdeuXZ3a4+PjjZ+fn1N7aGioKVasmPW+GmPMwoULjaR0n+GMREdHGy8vL6e22NhYI8n861//cmp/+eWXjSSzdOlSqy0oKMhIMvPnz7/lvlJTU82DDz5oIiMjnT6bFy5cMKVLlzZPPvmkU9v11q5daySZyZMnW21vvfWWkWRmzpyZ4f6M+d97U6FCBZOUlGQt/+STT4wks3Xr1pvWPWDAACPJdO7c2an9H//4hylYsKD1/MCBA8bd3d28++67Tv22bt1qcuXK5dR+/ecuzQcffGDc3d3NmTNnjDHGjBw50gQFBZlHH33UvPrqq8YYY1JSUoy/v7/p3bu3tV5oaKgpUqSIOXnypNX222+/GTc3N9OhQ4d0x9K2bdt0+772s/Dzzz8bX19fExUVZS5dunTT1+fa7V4rMz8zU1JSTIkSJcwzzzzj1O/jjz82DofD/P7778YY117bevXqGUnms88+u2XdADKHSxmB+9jo0aO1aNEip8e8efOs5f7+/jp//rwWLVqUpfvt0KGD01mgmjVryhijzp07O/WrWbOmDh06pCtXrlht1/61OO2MX7169fT7778rMTHxlvv+7rvvFB4ervz58+uvv/6yHg0aNFBKSor1V/KffvpJuXLlUvfu3a113d3d1bNnT5eOtVu3bk7TV4eHhyslJUV//PGHpKt/Yb98+bJ69eolN7f//cjt2rWrfH199eOPP7q0v5SUFC1cuFAtWrRQmTJlrPZixYrp2Wef1c8//6wzZ864tE073K3PXpq0+7mWL1+u06dPZ9hn0aJFSkhIUNu2bZ0+K+7u7qpZs6aWLVsmSTp69KhiY2MVHR0tPz8/a/0nn3xSISEht13jTz/9JElOl9RJUt++fSUp3WejdOnSioyMvOV2Y2NjtWfPHj377LM6efKkdVznz59XRESEVq5caV3We+14S05O1smTJ1W2bFn5+/tr06ZN1rIZM2aoatWq+sc//pFuf9dP396pUyene+nCw8MlSb///vsta5ekf//7307Pw8PDdfLkSetzPXPmTKWmpqp169ZO71tAQIAefPBB6327mbRxumbNGklXz4yFh4crPDxcq1atkiRt27ZNCQkJVv1pn4OOHTuqQIEC1raqVKmiJ5980no/b3Ys11q2bJkiIyMVERGhmTNnytPT85Z1ZyQzPzPd3NzUrl07ff/99zp79qzVf8qUKXrsscesSaJcfW09PT3VqVOn26obQHpcygjcxx599NGbTv7xwgsv6Ntvv1Xjxo31wAMPqGHDhmrdurUaNWp0R/stWbKk0/O0X2YDAwPTtaempioxMdG6vHL16tUaMGCA1q5dm+6+ksTERKdfjDOyZ88ebdmy5Yaz/R0/flzS1XtrihUrlu7yvptdHpaR6481f/78kmSFgbSAdv12PTw8VKZMGWt5Zp04cUIXLlzIsM4KFSooNTVVhw4dUsWKFV3ablbJ7Hcs3a3PXhpPT08NHTpUffv2VdGiRVWrVi01bdpUHTp0UEBAgKSrnxVJeuKJJzLchq+vr6T/vYcPPvhguj7lypVzCjCu+OOPP+Tm5pZultSAgAD5+/un+2xkNMNqRtKO62aX5SYmJip//vy6ePGihgwZogkTJujPP/90utzu2j+E7Nu3T61atcrU/m81Ju5kfV9fX+3Zs0fGmAzfD0mZujS4WrVqypcvn1atWqXIyEitWrVKgwYNUkBAgEaNGqVLly5ZAS3t3rAbjWXp6thbsGBBugk+b
vSeXbp0SVFRUapevbq+/fZb5cp1+7+OZfZnZocOHTR06FDNmjVLHTp00K5du7Rx40Z99tlnVn9XX9sHHniASaCALEQwA3KwIkWKKDY2VgsWLNC8efM0b948TZgwQR06dNCkSZNue7vu7u4utaf9Mrhv3z5FRESofPny+vjjjxUYGCgPDw/99NNPGj58eLrJOzKSmpqqJ598Uq+88kqGyx966KFMHkXm3OqY7id58uTRxYsXM1yW9gthnjx5MrWt2/3s3Sj4paSkpGvr1auXmjVrptmzZ2vBggV68803NWTIEC1dulQPP/yw9Xn66quvrLB2rTv5ZdkVmQ2zmb33KO24Pvjggxt+kXXaHyR69uypCRMmqFevXgoLC7O+iLxNmzaZGm8ZudMxcav1U1NT5XA4NG/evAz7ZuZeyty5c6tmzZpauXKl9u7dq/j4eIWHh6to0aJKTk7WunXrtGrVKpUvX/6OvtLhRu+Zp6enmjRpojlz5mj+/Pk3/H7AW3HlZ2ZISIiqV6+u//73v+rQoYP++9//ysPDQ61bt7b6uPraMisokLUIZkAO5+HhoWbNmqlZs2ZKTU3VCy+8oM8//1xvvvmmypYtm+lfGrPCDz/8oKSkJH3//fdOfzXP6NKkG9UVHBysc+fOqUGDBjfdV1BQkJYsWaJz5845/bKxa9eu26z+xvtJ2+61lx5evnxZ+/fvv2Wd1ytcuLDy5cuXYZ07d+6Um5tbujOTWSUoKEg7duzIcFlaPWnHmxm3+uxlJO3syfUzWt7ozGNwcLD69u2rvn37as+ePQoNDdVHH32k//73vwoODpZ0NSTe7H1IO6a0M1HXupPPS1BQkFJTU7Vnzx6nCXWOHTumhIQEl17La6Udl6+v7y0/X9OnT1d0dLQ++ugjq+3SpUvpXt/g4GBt27btturJasHBwTLGqHTp0rf8Q8vNfn6Fh4dr6NChWrx4sQoVKqTy5cvL4XCoYsWKWrVqlVatWuUUmK4dy9fbuXOnChUqlOnp8B0Oh6ZMmaLmzZvr6aef1rx585xmls0sV35mSlfPmvXp00dHjx7V1KlTFRUVZY0pybXXFkDW4x4zIAc7efKk03M3NzdVqVJFkqzputN+0biTqd0zK+0vtNdfTjVhwoR0fb28vDKsqXXr1lq7dq0WLFiQbllCQoJ1P1uTJk105coVp2mlU1JSNGrUqDs9DCcNGjSQh4eHRo4c6XRc48aNU2JiojX7myQdPHhQO3fuvOn23N3d1bBhQ82ZM8dpSvNjx45p6tSpqlOnjnUJXlZr0qSJDh8+rNmzZzu1JyUlWTPnVatWLVPbysxnLyNBQUFyd3d3mlFPuvp1BNe6cOGCLl265NQWHBwsHx8fa/uRkZHy9fXVe++9l+E04mlfe1CsWDGFhoZq0qRJTpf3LVq06IZBNTOaNGkiSdZMoWk+/vhjSXL6bLiievXqCg4O1ocffpjh1OvXfp2Du7t7ujNZo0aNSncGslWrVvrtt98y/IqAe312uGXLlnJ3d9egQYPS7dsY4/TZ8vLyuuG9qeHh4UpKStKIESNUp04dK8SFh4frq6++0pEjR6z7yyTnz8G1P3u2bdumhQsXWu9nZnl4eGjmzJl65JFH1KxZM61fv96l9SXXfmZKUtu2beVwOPTSSy/p999/T/e9gq68tgCyHmfMgPvYvHnzMvxF/7HHHlOZMmX0r3/9S6dOndITTzyhEiVK6I8//tCoUaMUGhpq/QU/NDRU7u7uGjp0qBITE+Xp6Wl9Z05Wa9iwoXUW5fnnn9e5c+f0xRdfqEiRIjp69KhT3+rVq2vs2LF65513VLZsWRUpUkRPPPGE+vXrp++//15NmzZVx44dVb16dZ0/f15bt27V9OnTdeDAARUqVEjNmjVT7dq19dprr+nAgQMKCQnRzJkzMzXBiCsKFy6s/v37a9CgQWrUqJGeeuop7dq1S2PGjNEjjzzi9ItRhw4dtGLFilv+ovvOO+9o0aJFqlOnjl544QXlypVLn3/+uZKSkjRs2LA7qnfJkiXpAo0ktWjRQt26ddP48eP19NNPq3Pnznr44Yd18uRJffPNN9q2bZsmT56c6ftNMvPZy4ifn5+efvppjRo1Sg6HQ8HBwZo7d65172Ca3bt3KyIiQq1bt1ZISIhy5cqlWbNm6dixY2rTpo2kq2eUxo4dq/bt26tatWpq06aNChcurIMHD+rHH39U7dq19emnn0q6+hUQUVFRqlOnjjp37qxTp05p1KhRqlixosvfO5WmatWqio6O1v/93/8pISFB9erV0/r16zVp0iS1aNFCjz/++G1t183NTV9++aUaN26sihUrqlOnTnrggQf0559/atmyZfL19dUPP/wgSWratKm++uor+fn5KSQkRGvXrtXixYutez7T9OvXT9OnT7fe++rVq+vUqVP6/vvv9dlnn6lq1aq3VevtCA4O1jvvvKP+/fvrwIEDatGihXx8fLR//37NmjVL3bp108svvyzp6s+Jb775Rn369NEjjzwib29vNWvWTJIUFhamXLlyadeuXerWrZu1/bp161p/sLk2mElXLw9t3LixwsLC1KVLF2u6fD8/Pw0cONDlY8mbN6/mzp2rJ554Qo0bN9aKFStUqVKlTK/vys9M6erPo0aNGum7776Tv79/uvDvymsL4C64t5NAArgXbjZdvq6ZUnz69OmmYcOGpkiRIsbDw8OULFnSPP/88+bo0aNO2/viiy9MmTJljLu7u9NU5TeaLv+7777LsJ4NGzY4tadN/XzixAmr7fvvvzdVqlQxefLkMaVKlTJDhw4148ePN5LM/v37rX7x8fEmKirK+Pj4GElOdZw9e9b079/flC1b1nh4eJhChQqZxx57zHz44YdO0/ifPHnStG/f3vj6+ho/Pz/Tvn17s3nzZpemy7/+mG40nfunn35qypcvb3Lnzm2KFi1qunfvbk6fPu3UJ2366czYtGmTiYyMNN7e3iZfvnzm8ccfN2vWrHHqczvT5d/o8dVXXxljrk5D37t3b1O6dGmTO3du4+vrax5//HEzb968m27/+mnLM/vZy8iJEydMq1atTL58+Uz+/PnN888/b7Zt2+b0vv31118mJibGlC9f3nh5eRk/Pz9Ts2ZN8+2336bb3rJly0xkZKTx8/MzefLkMcHBwaZjx47m119/deo3Y8YMU6FCBePp6WlCQkLMzJkzTXR09G1Pl2+MMcnJyWbQoEHW6xkYGGj69++fbur0m31VwY1s3rzZtGzZ0hQsWNB4enqaoKAg07p1a7NkyRKrz+nTp02nTp1MoUKFjLe3t4mMjDQ7d+40QUFBJjo62ml7J0+eND169DAPPPCA8fDwMCVKlDDR0dHmr7/+MsbcePxf/1UGN5LRzwNj/jfWrh3/xlx9P+rUqWO8vLyMl5eXKV++vImJiTG7du2y+pw7d848++yzxt/fP8OvNnjkkUeMJLNu3Tqr7fDhw0aSCQwMzLDOxYsXm9q1a5u8efMaX19f06xZM7Njx45MHYsxGX8W/vrrLxMSEmICAgLMnj17bvkaXSuzPzPT
fPvtt0aS6dat2w33k5nXtl69eqZixYo33AYA1zmMuQ/vUAcAAEA6c+bMUYsWLbRy5cp0ZwQB2ItgBgAAkEM0bdpUcXFx2rt37z2d3AnArXGPGQAAwH1u2rRp2rJli3788Ud98sknhDIgG+KMGQAAwH3O4XDI29tbzzzzjD777LN79j19ADKPUQkAAHCf4+/wQPbH95gBAAAAgM0IZgAAAABgMy5llJSamqojR47Ix8eHm2EBAACAHMwYo7Nnz6p48eJyc7t357EIZpKOHDmiwMBAu8sAAAAAkE0cOnRIJUqUuGf7I5hJ8vHxkXT1xff19bW5GgAAAAB2OXPmjAIDA62McK8QzCTr8kVfX1+CGQAAAIB7fosTk38AAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANgsl90FAMDd5hjksLsEW5kBxu4ScB9iXDGukLUYU4wpglk2xMBkYAIAACBn4VJGAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwma3BbOzYsapSpYp8fX3l6+ursLAwzZs3z1p+6dIlxcTEqGDBgvL29larVq107Ngxp20cPHhQUVFRypcvn4oUKaJ+/frpypUr9/pQAAAAAOC22RrMSpQooffff18bN27Ur7/+qieeeELNmzfX9u3bJUm9e/fWDz/8oO+++04rVqzQkSNH1LJlS2v9lJQURUVF6fLly1qzZo0mTZqkiRMn6q233rLrkAAAAADAZQ5jjLG7iGsVKFBAH3zwgf75z3+qcOHCmjp1qv75z39Kknbu3KkKFSpo7dq1qlWrlubNm6emTZvqyJEjKlq0qCTps88+06uvvqoTJ07Iw8MjU/s8c+aM/Pz8lJiYKF9f37t2bJnlGOSwuwRbmQHZ6iOJ+wBjijGFrMe4YlwhazGmss+YsisbZJt7zFJSUjRt2jSdP39eYWFh2rhxo5KTk9WgQQOrT/ny5VWyZEmtXbtWkrR27VpVrlzZCmWSFBkZqTNnzlhn3TKSlJSkM2fOOD0AAAAAwC62B7OtW7fK29tbnp6e+ve//61Zs2YpJCRE8fHx8vDwkL+/v1P/okWLKj4+XpIUHx/vFMrSlqctu5EhQ4bIz8/PegQGBmbtQQEAAACAC2wPZuXKlVNsbKzWrVun7t27Kzo6Wjt27Lir++zfv78SExOtx6FDh+7q/gAAAADgZnLZXYCHh4fKli0rSapevbo2bNigTz75RM8884wuX76shIQEp7Nmx44dU0BAgCQpICBA69evd9pe2qyNaX0y4unpKU9Pzyw+EgAAAAC4PbafMbteamqqkpKSVL16deXOnVtLliyxlu3atUsHDx5UWFiYJCksLExbt27V8ePHrT6LFi2Sr6+vQkJC7nntAAAAAHA7bD1j1r9/fzVu3FglS5bU2bNnNXXqVC1fvlwLFiyQn5+funTpoj59+qhAgQLy9fVVz549FRYWplq1akmSGjZsqJCQELVv317Dhg1TfHy83njjDcXExHBGDAAAAMDfhq3B7Pjx4+rQoYOOHj0qPz8/ValSRQsWLNCTTz4pSRo+fLjc3NzUqlUrJSUlKTIyUmPGjLHWd3d319y5c9W9e3eFhYXJy8tL0dHRGjx4sF2HBAAAAAAuszWYjRs37qbL8+TJo9GjR2v06NE37BMUFKSffvopq0sDAAAAgHsm291jBgAAAAA5DcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbGZrMBsyZIgeeeQR+fj4qEiRImrRooV27drl1Kd+/fpyOBxOj3//+99OfQ4ePKioqCjly5dPRYoUUb9+/XTlypV7eSgAAAAAcNty2bnzFStWKCYmRo888oiuXLmi119/XQ0bNtSOHTvk5eVl9evatasGDx5sPc+XL5/175SUFEVFRSkgIEBr1qzR0aNH1aFDB+XOnVvvvffePT0eAAAAALgdtgaz+fPnOz2fOHGiihQpoo0bN6pu3bpWe758+RQQEJDhNhYuXKgdO3Zo8eLFKlq0qEJDQ/X222/r1Vdf1cCBA+Xh4XFXjwEAAAAA7lS2uscsMTFRklSgQAGn9ilTpqhQoUKqVKmS+vfvrwsXLljL1q5dq8qVK6to0aJWW2RkpM6cOaPt27dnuJ+kpCSdOXPG6QEAAAAAdrH1jNm1UlNT1atXL9WuXVuVKlWy2p999lkFBQWpePHi2rJli1599VXt2rVLM2fOlCTFx8c7hTJJ1vP4+PgM9zVkyBANGjToLh0JAAAAALgm2wSzmJgYbdu2TT///LNTe7du3ax/V65cWcWKFVNERIT27dun4ODg29pX//791adPH+v5mTNnFBgYeHuFAwAAAMAdyhaXMvbo0UNz587VsmXLVKJEiZv2rVmzpiRp7969kqSAgAAdO3bMqU/a8xvdl+bp6SlfX1+nBwAAAADYxdZgZoxRjx49NGvWLC1dulSlS5e+5TqxsbGSpGLFikmSwsLCtHXrVh0/ftzqs2jRIvn6+iokJOSu1A0AAAAAWcnWSxljYmI0depUzZkzRz4+PtY9YX5+fsqbN6/27dunqVOnqkmTJipYsKC2bNmi3r17q27duqpSpYokqWHDhgoJCVH79u01bNgwxcfH64033lBMTIw8PT3tPDwAAAAAyBRbz5iNHTtWiYmJql+/vooVK2Y9vvnmG0mSh4eHFi9erIYNG6p8+fLq27evWrVqpR9++MHahru7u+bOnSt3d3eFhYXpueeeU4cOHZy+9wwAAAAAsjNbz5gZY266PDAwUCtWrLjldoKCgvTTTz9lVVkAAAAAcE9li8k/AAAAACAnI5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGA
[... base64-encoded PNG data elided: removed notebook output, bar chart "Estimated no. of LUTs used for each network layer" (x-axis "Network layers", y-axis "Number of LUTs") ...]
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Extracting LUTs from res_dict\n", "LUTs = [res_dict[key][\"LUT\"] for key in res_dict.keys()] \n", @@ -372,7 +242,7 @@ "plt.bar(res_dict.keys(), LUTs, color ='green', width = 0.3)\n", "plt.xlabel(\"Network layers\")\n", "plt.ylabel(\"Number of LUTs\")\n", - "plt.title(\"Estimated no. of LUTs used for each network layer\")\n", + "plt.title(\"No. of LUTs per layer PE=SIMD=1\")\n", "plt.show()" ] }, @@ -389,8 +259,8 @@ "source": [ "## Modify Parameters\n", "\n", - "We now modify the parallelization parameters of the first network layer to reduce its overall latency.\n", - "We individually extract the `MatrixVectorActivation` blocks from the `.onnx` file and set the config values manually (although this can be done automatically by the FINN compiler as mentioned in the introduction).\n", + "We now modify the parallelization parameters of the first network layer to reduce its latency.\n", + "We only extract the first `MatrixVectorActivation` block from the model and set the parallelization parameters manually.\n", "\n", "In the first step, we left the `PE` & `SIMD` values for all the layers on default (=1) to establish a baseline and measure the estimated clock cycles and resource utilization for each of the individual layers.\n", "\n", @@ -399,22 +269,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The parallelization parameters of MatrixVectorActivation_0 were: \n", - "PE: 1\n", - "SIMD: 1\n", - "The parallelization parameters of MatrixVectorActivation_0 are updated to: \n", - "PE: 2\n", - "SIMD: 5\n" - ] - } - ], + "outputs": [], "source": [ "from qonnx.custom_op.registry import getCustomOp\n", "\n", @@ -442,45 +299,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We save the model and view it. On expanding the first `MatrixVectorActivation` we can view the updated `PE` & `SIMD` parameters for that layer." + "We save the model and view it. On expanding the first `MatrixVectorActivation` we can see the updated `PE` & `SIMD` parameters for that layer." 
] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:5920\n", - "Serving 'cybsec_PE_SIMD_modified.onnx' at http://0.0.0.0:5920\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model.save(\"cybsec_PE_SIMD_modified.onnx\")\n", "showInNetron(\"cybsec_PE_SIMD_modified.onnx\")" @@ -496,23 +322,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'MatrixVectorActivation_0': 3840,\n", - " 'MatrixVectorActivation_1': 4096,\n", - " 'MatrixVectorActivation_2': 4096,\n", - " 'MatrixVectorActivation_3': 64}" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "cycles_dict_updated = model.analysis(exp_cycles_per_layer)\n", "cycles_dict_updated" @@ -520,68 +332,30 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA28AAAHWCAYAAADglbFoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABr3UlEQVR4nO3de3zO9f/H8ee1sRmzOc1mmTkVhhEVS0ORpZFCpcRESOQYWvV16uDQwSGhE+P7JYWiyGFOU4iS5SzkVMxktjnObO/fH267fi4bdnHNtYvH/Xa7bnW9P+/P+/P6XNf1uux1fT6f98dijDECAAAAAORrbs4OAAAAAABwfRRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8Abljjxo3VuHFjZ4fhUAcOHJDFYlFMTIyzQ3GIPXv2qFmzZvL19ZXFYtH8+fNvajyLxaJhw4Y5JLYrrV69WhaLRXPnzs2T8R0tL1+Lqzl27Jjatm2rkiVLymKxaNy4cbd0+7dCp06d5O3t7eww8j2LxaJevXrd0Lrly5dXp06dHBsQgFuC4g24DcXExMhisVz18csvv+R6rB07dmjYsGE6cOBA3gV8AyZNmnTbFFh5KSoqSlu3btW7776r//73v7rvvvucHRJuQr9+/bR06VJFR0frv//9rx577DFnh3TbO3v2rIYNG6bVq1c7OxQAUAFnBwAg74wYMUIVKlTI1l65cuVcj7Fjxw4NHz5cjRs3Vvny5W2WLVu27GZDvGGTJk1SqVKl+PX4Gs6dO6f169frzTffvOFf6JG/rFy5Uq1atdJrr73m7FDuGGfPntXw4cMl6bY70wCA66F4A25jzZs3z9MjLR4eHnk2Nm7e8ePHJUnFihVzbiBwmMTERIe+n+fPn5eHh4fc3DgRxxUYY3T+/Hl5eXk5O5Q8c/HiRWVmZvLvC3AVfFsDd7jZs2erbt26Klq0qHx8fFSzZk2NHz9e0qXTL59++mlJ0sMPP2w97TLr9KErr3nLumbpm2++0fDhw3XXXXepaNGiatu2rVJSUpSWlqa+ffuqdOnS8vb21osvvqi0tDSbeKZNm6ZHHnlEpUuXlqenp0JCQjR58mSbPuXLl9f27dsVFxdnjenyOJKTk9W3b18FBQXJ09NTlStX1ujRo5WZmWkzTnJysjp16iRfX18VK1ZMUVFRSk5OztXrlnVq6tq1a9W/f3/5+fmpSJEieuqpp6xF0+UmTZqk6tWry9PTU4GBgerZs2eut5WTzZs3q3nz5vLx8ZG3t7eaNGliczrssGHDFBwcLEkaOHCgLBZLtiOnVzp//ryGDRume+65R4UKFVKZMmXUunVr7du376ZiyZKcnKx+/fqpfPny8vT0VNmyZdWxY0f9+++/Vx07LS1NLVq0kK+vr9atW3fD8RtjVL58ebVq1SrH9Xx9fdW9e/ebfi3++ecfde7cWf7+/vL09FT16tU1derUbP0+/vhjVa9eXYULF1bx4sV13333adasWVcdN+vzZozRJ598Yv3cZ/nrr7/09NNPq0SJEipcuLDq16+vRYsW2YyRlZ+zZ8/WW2+9pbvuukuFCxdWamrqVbebmZmpcePGqXr16ipUqJD8/f3VvXt3nTx50qbfggULFBkZqcDAQHl6eqpSpUp6++23lZGRkW3MDRs26PHHH1fx4sVVpEgRhYaGWr9zrnwtn3zySXl7e8vPz0+vvfZajuNdqXz58mrRooV+/vlnPfDAAypUqJAqVqyoGTNmZOt7ve+KAwcOyM/PT5I0fPhw6+s+bNgwff/997JYLNqyZYt1vHnz5slisah169Y226lWrZqeffZZ6/OLFy/q7bffVqVKleTp6any5cvrjTfeyPZ9mLUvS5cu1X333ScvLy99+umnV933d955R25ubvr444+v+zpdLikpSa+99ppq1qwpb29v+fj4qHnz5vrjjz+sfU6fPq0iRYqoT58+2db/+++/5e7urpEjR1rbcvM9nHWN8QcffKBx48ZZX48dO3bYFT9wJ+HIG3AbS0lJyfaHscViUcmSJSVJsbGxeu6559SkSRONHj1akrRz506tXbtWffr0UcOGDdW7d29NmDBBb7zxhqpVqyZJ1v9ezciRI+Xl5aXXX39de/fu1ccff6yCBQvKzc1NJ0+e1LBhw/TLL78oJiZGFSpU0JAhQ6zrTp48WdWrV9cTTzyhAgUK6IcfftArr
7yizMxM9ezZU5I0btw4vfrqq/L29tabb74pSfL395d06RSnRo0a6Z9//lH37t1Vrlw5rVu3TtHR0Tp69Kh1ggdjjFq1aqWff/5ZL7/8sqpVq6bvvvtOUVFRdr3Gr776qooXL66hQ4fqwIEDGjdunHr16qWvv/7a2mfYsGEaPny4mjZtqh49emj37t2aPHmyfv31V61du1YFCxa0a5vbt29XeHi4fHx8NGjQIBUsWFCffvqpGjdurLi4ONWrV0+tW7dWsWLF1K9fPz333HN6/PHHrzkJREZGhlq0aKEVK1aoXbt26tOnj06dOqXY2Fht27ZNlSpVuuFYpEt/+IWHh2vnzp3q3Lmz6tSpo3///Vfff/+9/v77b5UqVSrb2OfOnVOrVq3022+/afny5br//vtvKv4XXnhBY8aMUVJSkkqUKGFd94cfflBqaqpeeOGFm3otjh07pvr161snkvDz89PixYvVpUsXpaamqm/fvpKkzz//XL1791bbtm3Vp08fnT9/Xlu2bNGGDRv0/PPP5zh2w4YN9d///lcdOnTQo48+qo4dO9ps98EHH9TZs2fVu3dvlSxZUtOnT9cTTzyhuXPn6qmnnrIZ6+2335aHh4dee+01paWlXfMIR/fu3RUTE6MXX3xRvXv31v79+zVx4kRt3rzZ5rMbExMjb29v9e/fX97e3lq5cqWGDBmi1NRUvf/++9bxYmNj1aJFC5UpU0Z9+vRRQECAdu7cqYULF9oUBRkZGYqIiFC9evX0wQcfaPny5frwww9VqVIl9ejR46rxZtm7d6/atm2rLl26KCoqSlOnTlWnTp1Ut25dVa9eXVLuviv8/Pw0efJk9ejRQ0899ZS1KAsNDVXZsmVlsVi0Zs0ahYaGSpJ++uknubm56eeff7bGcvz4ce3atcvm1OWXXnpJ06dPV9u2bTVgwABt2LBBI0eO1M6dO/Xdd9/Z7Mvu3bv13HPPqXv37uratauqVKmS4z6/9dZbeu+99/Tpp5+qa9eu132NLvfXX39p/vz5evrpp1WhQgUdO3ZMn376qRo1aqQdO3YoMDBQ3t7eeuqpp/T111/ro48+kru7u3X9r776SsYYtW/fPtev7eWmTZum8+fPq1u3bvL09LTJTwBXMABuO9OmTTOScnx4enpa+/Xp08f4+PiYixcvXnWsOXPmGElm1apV2ZY1atTINGrUyPp81apVRpKpUaOGuXDhgrX9ueeeMxaLxTRv3txm/bCwMBMcHGzTdvbs2WzbiYiIMBUrVrRpq169us22s7z99tumSJEi5s8//7Rpf/311427u7s5dOiQMcaY+fPnG0lmzJgx1j4XL1404eHhRpKZNm1atrEvl/UaN23a1GRmZlrb+/XrZ9zd3U1ycrIxxpjExETj4eFhmjVrZjIyMqz9Jk6caCSZqVOnXnM7OXnyySeNh4eH2bdvn7XtyJEjpmjRoqZhw4bWtv379xtJ5v3337/umFOnTjWSzEcffZRt2eX7J8kMHTrU7liGDBliJJlvv/32quNnfX7mzJljTp06ZRo1amRKlSplNm/e7JD4d+/ebSSZyZMn2yx/4oknTPny5a39bvS16NKliylTpoz5999/bdZp166d8fX1tX62W7VqZapXr37dfcqJJNOzZ0+btr59+xpJ5qeffrK2nTp1ylSoUMGUL1/e+rnLen0rVqyYY55d6aeffjKSzMyZM23alyxZkq09p/G6d+9uChcubM6fP2+MuZRfFSpUMMHBwebkyZM2fS9/XaOioowkM2LECJs+9957r6lbt+514w4ODjaSzJo1a6xtiYmJxtPT0wwYMMDaltvviuPHj2d7r7NUr17dPPPMM9bnderUMU8//bSRZHbu3GmMMebbb781kswff/xhjDEmPj7eSDIvvfSSzVivvfaakWRWrlyZbV+WLFmSbduXfxYGDBhg3NzcTExMzHVfn6xxo6KirM/Pnz9v8/1kzKXvD09PT5v3YenSpUaSWbx4sU3f0NBQm+/j3L62Wd9RPj4+JjExMVexA3c6TpsEbmOffPKJYmNjbR6LFy+2Li9WrJjOnDmj2NhYh263Y8eONkeT6tWrJ2OMOnfubNOvXr16Onz4sC5evGhtu/xajqwjh40aNdJff/2llJSU6257zpw5Cg8PV/HixfXvv/9aH02bNlVGRobWrFkjSfrxxx9VoEABm1/x3d3d9eqrr9q1r926dbM5fS08PFwZGRk6ePCgJGn58uW6cOGC+vbta3NdUdeuXeXj45Pt1LbrycjI0LJly/Tkk0+qYsWK1vYyZcro+eef188//3zN0+CuZt68eSpVqlSO+3/5/t1oLPPmzVOtWrWyHQXKafyUlBQ1a9ZMu3bt0urVq1W7dm2HxH/PPfeoXr16mjlzpnVZUlKSFi9erPbt21v73chrYYzRvHnz1LJlSxljbD57ERERSklJ0e+//y7pUt79/fff+vXXX6+7X7nx448/6oEHHtBDDz1kbfP29la3bt104MCBbKegRUVF5eqaqTlz5sjX11ePPvqozf7UrVtX3t7eWrVqlbXv5eOdOnVK//77r8LDw3X27Fnt2rVL0qXTa/fv36++fftmu24vp9f15ZdftnkeHh6uv/7667pxS1JISIjCw8Otz/38/FSlShWb9XP7XXEt4eHh+umnn6z7/ccff6hbt24qVaqUtf2nn35SsWLFVKNGDUmX3i9J6t+/v81YAwYMkKRs3wkVKlRQREREjts3xqhXr14aP368/ve//9l95kAWT09P6/dTRkaGTpw4IW9vb1WpUsX6uZWkpk2bKjAw0CaHtm3bpi1btliPXEv2v7Zt2rSxnp4K4No4bRK4jT3wwAPXnLDklVde0TfffKPmzZvrrrvuUrNmzfTMM8/c9PTj5cqVs3nu6+srSQoKCsrWnpmZqZSUFOupnGvXrtXQoUO1fv16nT171qZ/SkqKdayr2bNnj7Zs2XLVPwQSExMlSQcPHlSZMmWynUp4tVOSrubKfS1evLgkWa8JyirirhzXw8NDFStWtC7PrePHj+vs2bM5xlmtWjVlZmbq8OHD1lPDcmvfvn2qUqWKChTI/T8L9sSyb98+tWnTJlfj9u3bV+fPn9fmzZtzvR+5jb9jx47q1auXDh48qODgYM2ZM0fp6enq0KGD3WNd7vjx40pOTtZnn32mzz77LMc+WZ+9wYMHa/ny5XrggQdUuXJlNWvWTM8//7waNGiQ6+1d7uDBg9bTUy+XdXrzwYMHrYWDpBxnoM3Jnj17lJKSotKlS+e4PGt/pEunz7711ltauXJlth8Psn50ybpe8PJYrqZQoULZcrh48eLZrrW7mivzMqf1c/tdcS3h4eGaMmWK9u7dq3379slisSgsLMxa1HXt2lU//fSTGjRoYC2ODh48KDc3t2yz/gYEBKhYsWLZvhOu9X7NmDFDp0+f1uTJk/Xcc89dN96ryczM1Pjx4zVp0iTt37/f5trCrO9mSXJzc1P79u01efJknT17VoULF9bMmTNVqFAh6/XRkv2vbW4/kwAo3oA7WunSpRUfH6+lS5dq8eLF
Wrx4saZNm6aOHTtq+vTpNzzu5ddC5KbdGCPp0h93TZo0UdWqVfXRRx8pKChIHh4e+vHHHzV27NhsE47kJDMzU48++qgGDRqU4/J77rknl3uRO9fbJ9ivVatWmj17tkaNGqUZM2Y4dCbEdu3aqV+/fpo5c6beeOMN/e9//9N9991nd9F+pazP5gsvvHDVox9Z10VVq1ZNu3fv1sKFC7VkyRLNmzdPkyZN0pAhQ6xT0uel3M5UmJmZqdKlS9scZblc1h/mycnJatSokXx8fDRixAhVqlRJhQoV0u+//67BgwfnKm+vdLW8utn1L89LR3xXZB3tXLNmjf766y/VqVNHRYoUUXh4uCZMmKDTp09r8+bNevfdd7Ote7WjuFe61vvVoEEDxcfHa+LEiXrmmWdu+Fqx9957T//5z3/UuXNnvf322ypRooTc3NzUt2/fbO9fx44d9f7772v+/Pl67rnnNGvWLOukQlnsfW1v59kzAUejeAPucB4eHmrZsqVatmypzMxMvfLKK/r000/1n//8R5UrV871HxiO8MMPPygtLU3ff/+9zS/nl5+eleVqcVWqVEmnT59W06ZNr7mt4OBgrVixQqdPn7Y5+rZ79+4bjP7q28ka9/JTCy9cuKD9+/dfN84r+fn5qXDhwjnGuWvXLrm5uWU7wpkblSpV0oYNG5Senp7rCVTsiaVSpUratm1brsZ98skn1axZM3Xq1ElFixbNNtvozcRfokQJRUZGaubMmWrfvr3Wrl2bbfKEG30tihYtqoyMjFy9p0WKFNGzzz6rZ599VhcuXFDr1q317rvvKjo6WoUKFcrVNrMEBwdf9T3IWn4jKlWqpOXLl6tBgwbX/ON69erVOnHihL799ls1bNjQ2r5///5s40mXTrOz93OfF3L7XXGt78By5cqpXLly+umnn/TXX39ZT9Vs2LCh+vfvrzlz5igjI8PmdQkODlZmZqb27NljM/nTsWPHlJycbNf7VblyZY0ZM0aNGzfWY489phUrVqho0aK5Xj/L3Llz9fDDD+vLL7+0aU9OTs42mVCNGjV07733aubMmSpbtqwOHTqUbXbL3L62AOzHNW/AHezEiRM2z93c3KxHB7KmrC5SpIgk3dS09rmV9Wv55b+Op6SkaNq0adn6FilSJMeYnnnmGa1fv15Lly7Ntiw5Odl6fd3jjz+uixcv2hQGGRkZdk+xfT1NmzaVh4eHJkyYYLNfX375pVJSUhQZGWltO3TokPUP7qtxd3dXs2bNtGDBAh04cMDafuzYMc2aNUsPPfSQfHx87I6zTZs2+vfffzVx4sRsy652FNGeWNq0aaM//vgj20x6Vxu/Y8eOmjBhgqZMmaLBgwc7NP4OHTpox44dGjhwoNzd3dWuXbsbHiuLu7u72rRpo3nz5uVYpF5++4gr887Dw0MhISEyxig9Pf3qO3kVjz/+uDZu3Kj169db286cOaPPPvtM5cuXV0hIiN1jSpdyKSMjQ2+//Xa2ZRcvXrTmX055e+HCBU2aNMlmnTp16qhChQoaN25cttx1xpHq3H5XFC5c2NqWk/DwcK1cuVIbN260Fm+1a9dW0aJFNWrUKHl5ealu3brW/o8//rgkZfvR4KOPPpIkm++E3AgNDdWPP/6onTt3qmXLljp37pxd60uX3sMr34M5c+bon3/+ybF/hw4dtGzZMo0bN04lS5ZU8+bNbZbn9rUFYD+OvAG3scWLF+dYDDz44IOqWLGiXnrpJSUlJemRRx5R2bJldfDgQX388ceqXbu29Rfh2rVry93dXaNHj1ZKSoo8PT2t92FztGbNmlmPBHbv3l2nT5/W559/rtKlS+vo0aM2fevWravJkyfrnXfeUeXKlVW6dGk98sgjGjhwoL7//nu1aNHCOjX4mTNntHXrVs2dO1cHDhxQqVKl1LJlSzVo0ECvv/66Dhw4oJCQEH377be5mhTFHn5+foqOjtbw4cP12GOP6YknntDu3bs1adIk3X///TYX+Xfs2FFxcXHX/UP2nXfeUWxsrB566CG98sorKlCggD799FOlpaVpzJgxNxRnx44dNWPGDPXv39/6R+iZM2e0fPlyvfLKKzneH82eWAYOHKi5c+fq6aefVufOnVW3bl0lJSXp+++/15QpU1SrVq1sY/fq1Uupqal688035evrqzfeeMMh8UdGRqpkyZKaM2eOmjdvnu2zfKOvxahRo7Rq1SrVq1dPXbt2VUhIiJKSkvT7779r+fLlSkpKknTpcx4QEKAGDRrI399fO3fu1MSJExUZGXlDR01ef/11ffXVV2revLl69+6tEiVKaPr06dq/f7/mzZt3w6edNmrUSN27d9fIkSMVHx+vZs2aqWDBgtqzZ4/mzJmj8ePHq23btnrwwQdVvHhxRUVFqXfv3rJYLPrvf/+b7XPs5uamyZMnq2XLlqpdu7ZefPFFlSlTRrt27dL27dtz/EM/L+X2u8LLy0shISH6+uuvdc8996hEiRKqUaOG9dq98PBwzZw5UxaLxXoapbu7ux588EEtXbpUjRs3trkdQ61atRQVFaXPPvvMesrpxo0bNX36dD355JN6+OGH7d6X+vXra8GCBXr88cfVtm1bzZ8/365bkLRo0UIjRozQiy++qAcffFBbt27VzJkzbc4WuNzzzz+vQYMG6bvvvlOPHj2ybSu3ry2AG3CLZ7cEcAtc61YBumwa/Llz55pmzZqZ0qVLGw8PD1OuXDnTvXt3c/ToUZvxPv/8c1OxYkXj7u5uc9uAq90qYM6cOTnG8+uvv9q0Dx061Egyx48ft7Z9//33JjQ01BQqVMiUL1/ejB492jp1+/79+639EhISTGRkpClatKiRZBPHqVOnTHR0tKlcubLx8PAwpUqVMg8++KD54IMPbG5hcOLECdOhQwfj4+NjfH19TYcOHczmzZvtulXAlfuU9RpceWuFiRMnmqpVq5qCBQsaf39/06NHj2zTpTdq1Mjk9mv5999/NxEREcbb29sULlzYPPzww2bdunU2fey5VYAxl6Z7f/PNN02FChVMwYIFTUBAgGnbtq3NbQCUw5TpuYnFmEuvd69evcxdd91lPDw8TNmyZU1UVJR1av2rfX4GDRpkJJmJEyfedPxZXnnlFSPJzJo1y6GvxbFjx0zPnj1NUFCQdb0mTZqYzz77zNrn008/NQ0bNjQlS5Y0np6eplKlSmbgwIEmJSXlmvuXtc0rbxVgjDH79u0zbdu2NcWKFTOFChUyDzzwgFm4cKFNn6u9vtfz2Wefmbp16xovLy9TtGhRU7NmTTNo0CBz5MgRa5+1a9ea+vXrGy8vLxMYGGgGDRpknVb+ylz4+eefzaOPPmqKFi1qihQpYkJDQ83HH39sXR4VFWWKFCmSLY6s74vrCQ4ONpGRkdnar/y+Mib33xXr1q0zdevWNR4eHtne9+3btxtJplq1ajZjv/POO0aS+c9//pMtlvT0dDN8+HDr5ysoKMhER0dbb6twvX0xJufPwoIFC0yBAgXMs88+m23q/yvHvfJWAQMGDDBlypQxXl5epkGDBmb9+vU5vmZZHn/8cSM
px1w3Jnevrb3fUQCMsRjDVfUAgDtLv3799OWXXyohIcF6WhyA3Hvqqae0detW7d2719mhAHcUrnkDANxRzp8/r//9739q06YNhRtwA44ePapFixbZ3GIDwK3BNW8AgDtCYmKili9frrlz5+rEiRPq06ePs0MCXMr+/fu1du1affHFFypYsKC6d+/u7JCAOw7FGwDgjrBjxw61b99epUuX1oQJE1S7dm1nhwS4lLi4OL344osqV66cpk+froCAAGeHBNxxuOYNAAAAAFwA17wBAAAAgAugeAMAAAAAF8A1b7mQmZmpI0eOqGjRorJYLM4OBwAAAICTGGN06tQpBQYGys3t1h4Lo3jLhSNHjigoKMjZYQAAAADIJw4fPqyyZcve0m1SvOVC0aJFJV16g3x8fJwcDQAAAABnSU1NVVBQkLVGuJUo3nIh61RJHx8fijcAAAAATrmciglLAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABBZwdAADkBxaLsyNwLmOcHQFuR+SVsyMAcLvhyBsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAXkm+Jt1KhRslgs6tu3r7Xt/Pnz6tmzp0qWLClvb2+1adNGx44ds1nv0KFDioyMVOHChVW6dGkNHDhQFy9etOmzevVq1alTR56enqpcubJiYmJuwR4BAAAAgOPki+Lt119/1aeffqrQ0FCb9n79+umHH37QnDlzFBcXpyNHjqh169bW5RkZGYqMjNSFCxe0bt06TZ8+XTExMRoyZIi1z/79+xUZGamHH35Y8fHx6tu3r1566SUtXbr0lu0fAAAAANwsizHOncj29OnTqlOnjiZNmqR33nlHtWvX1rhx45SSkiI/Pz/NmjVLbdu2lSTt2rVL1apV0/r161W/fn0tXrxYLVq00JEjR+Tv7y9JmjJligYPHqzjx4/Lw8NDgwcP1qJFi7Rt2zbrNtu1a6fk5GQtWbIkVzGmpqbK19dXKSkp8vHxcfyLAMDpmNLc2RHgdkReOTsCAHnBmbWB04+89ezZU5GRkWratKlN+6ZNm5Senm7TXrVqVZUrV07r16+XJK1fv141a9a0Fm6SFBERodTUVG3fvt3a58qxIyIirGPkJC0tTampqTYPAAAAAHAmp96ke/bs2fr999/166+/ZluWkJAgDw8PFStWzKbd399fCQkJ1j6XF25Zy7OWXatPamqqzp07Jy8vr2zbHjlypIYPH37D+wUAAAAAjua0I2+HDx9Wnz59NHPmTBUqVMhZYeQoOjpaKSkp1sfhw4edHRIAAACAO5zTirdNmzYpMTFRderUUYECBVSgQAHFxcVpwoQJKlCggPz9/XXhwgUlJyfbrHfs2DEFBARIkgICArLNPpn1/Hp9fHx8cjzqJkmenp7y8fGxeQAAAACAMzmteGvSpIm2bt2q+Ph46+O+++5T+/btrf9fsGBBrVixwrrO7t27dejQIYWFhUmSwsLCtHXrViUmJlr7xMbGysfHRyEhIdY+l4+R1SdrDAAAAABwBU675q1o0aKqUaOGTVuRIkVUsmRJa3uXLl3Uv39/lShRQj4+Pnr11VcVFham+vXrS5KaNWumkJAQdejQQWPGjFFCQoLeeust9ezZU56enpKkl19+WRMnTtSgQYPUuXNnrVy5Ut98840WLVp0a3cYAAAAAG6CUycsuZ6xY8fKzc1Nbdq0UVpamiIiIjRp0iTrcnd3dy1cuFA9evRQWFiYihQpoqioKI0YMcLap0KFClq0aJH69eun8ePHq2zZsvriiy8UERHhjF0CAAAAgBvi9Pu8uQLu8wbc/rgflbMjwO2IvHJ2BADywh19nzcAAAAAwPVRvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHAB+fom3bg67p3j7AgAAACAW4sjbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFOLV4mzx5skJDQ+Xj4yMfHx+FhYVp8eLF1uWNGzeWxWKxebz88ss2Yxw6dEiRkZEqXLiwSpcurYEDB+rixYs2fVavXq06derI09NTlStXVkxMzK3YPQAAAABwmALO3HjZsmU1atQo3X333TLGaPr06WrVqpU2b96s6tWrS5K6du2qESNGWNcpXLiw9f8zMjIUGRmpgIAArVu3TkePHlXHjh1VsGBBvffee5Kk/fv3KzIyUi+//LJmzpypFStW6KWXXlKZMmUUERFxa3cYAAAAAG6QxRhjnB3E5UqUKKH3339fXbp0UePGjVW7dm2NGzcux76LFy9WixYtdOTIEfn7+0uSpkyZosGDB+v48ePy8PDQ4MGDtWjRIm3bts26Xrt27ZScnKwlS5bkOG5aWprS0tKsz1NTUxUUFKSUlBT5+Pg4bmdvgsXi7AicK399anE7IKecHQFuR+SVsyMAkBdSU1Pl6+vrlNog31zzlpGRodmzZ+vMmTMKCwuzts+cOVOlSpVSjRo1FB0drbNnz1qXrV+/XjVr1rQWbpIUERGh1NRUbd++3dqnadOmNtuKiIjQ+vXrrxrLyJEj5evra30EBQU5ajcBAAAA4IY49bRJSdq6davCwsJ0/vx5eXt767vvvlNISIgk6fnnn1dwcLACAwO1ZcsWDR48WLt379a3334rSUpISLAp3CRZnyckJFyzT2pqqs6dOycvL69sMUVHR6t///7W51lH3gAAAADAWZxevFWpUkXx8fFKSUnR3LlzFRUVpbi4OIWEhKhbt27WfjVr1lSZMmXUpEkT7du3T5UqVcqzmDw9PeXp6Zln4wMAAACAvZx+2qSHh4cqV66sunXrauTIkapVq5bGjx+fY9969epJkvbu3StJCggI0LFjx2z6ZD0PCAi4Zh8fH58cj7oBAAAAQH7k9OLtSpmZmTaThVwuPj5eklSmTBlJUlhYmLZu3arExERrn9jYWPn4+FhPvQwLC9OKFStsxomNjbW5rg4AAAAA8junnjYZHR2t5s2bq1y5cjp16pRmzZql1atXa+nSpdq3b59mzZqlxx9/XCVLltSWLVvUr18/NWzYUKGhoZKkZs2aKSQkRB06dNCYMWOUkJCgt956Sz179rSe9vjyyy9r4sSJGjRokDp37qyVK1fqm2++0aJFi5y56wAAAABgF6cWb4mJierYsaOOHj0qX19fhY
aGaunSpXr00Ud1+PBhLV++XOPGjdOZM2cUFBSkNm3a6K233rKu7+7uroULF6pHjx4KCwtTkSJFFBUVZXNfuAoVKmjRokXq16+fxo8fr7Jly+qLL77gHm8AAAAAXEq+u89bfuTMezlcDffOcXYEuN2QU86OALcj8srZEQDIC9znDQAAAABwTRRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABTi3eJk+erNDQUPn4+MjHx0dhYWFavHixdfn58+fVs2dPlSxZUt7e3mrTpo2OHTtmM8ahQ4cUGRmpwoULq3Tp0ho4cKAuXrxo02f16tWqU6eOPD09VblyZcXExNyK3QMAAAAAh7G7eDt37pzOnj1rfX7w4EGNGzdOy5Yts3vjZcuW1ahRo7Rp0yb99ttveuSRR9SqVStt375dktSvXz/98MMPmjNnjuLi4nTkyBG1bt3aun5GRoYiIyN14cIFrVu3TtOnT1dMTIyGDBli7bN//35FRkbq4YcfVnx8vPr27auXXnpJS5cutTteAAAAAHAWizHG2LNCs2bN1Lp1a7388stKTk5W1apVVbBgQf3777/66KOP1KNHj5sKqESJEnr//ffVtm1b+fn5adasWWrbtq0kadeuXapWrZrWr1+v+vXra/HixWrRooWOHDkif39/SdKUKVM0ePBgHT9+XB4eHho8eLAWLVqkbdu2WbfRrl07JScna8mSJbmKKTU1Vb6+vkpJSZGPj89N7Z+jWCzOjsC57PvUAtdHTjk7AtyOyCtnRwAgLzizNrD7yNvvv/+u8PBwSdLcuXPl7++vgwcPasaMGZowYcINB5KRkaHZs2frzJkzCgsL06ZNm5Senq6mTZta+1StWlXlypXT+vXrJUnr169XzZo1rYWbJEVERCg1NdV69G79+vU2Y2T1yRojJ2lpaUpNTbV5AAAAAIAz2V28nT17VkWLFpUkLVu2TK1bt5abm5vq16+vgwcP2h3A1q1b5e3tLU9PT7388sv67rvvFBISooSEBHl4eKhYsWI2/f39/ZWQkCBJSkhIsCncspZnLbtWn9TUVJ07dy7HmEaOHClfX1/rIygoyO79AgAAAABHsrt4q1y5subPn6/Dhw9r6dKlatasmSQpMTHxhg4bVqlSRfHx8dqwYYN69OihqKgo7dixw+5xHCk6OlopKSnWx+HDh50aDwAAAADYXbwNGTJEr732msqXL68HHnhAYWFhki4dhbv33nvtDsDDw0OVK1dW3bp1NXLkSNWqVUvjx49XQECALly4oOTkZJv+x44dU0BAgCQpICAg2+yTWc+v18fHx0deXl45xuTp6WmdATPrAQAAAADOZHfx1rZtWx06dEi//fabzYyNTZo00dixY286oMzMTKWlpalu3boqWLCgVqxYYV22e/duHTp0yFowhoWFaevWrUpMTLT2iY2NlY+Pj0JCQqx9Lh8jq0/WGAAAAADgCgrcyEoBAQE6ffq0YmNj1bBhQ3l5een++++Xxc5ppaKjo9W8eXOVK1dOp06d0qxZs7R69WotXbpUvr6+6tKli/r3768SJUrIx8dHr776qsLCwlS/fn1Jl2a+DAkJUYcOHTRmzBglJCTorbfeUs+ePeXp6SlJevnllzVx4kQNGjRInTt31sqVK/XNN99o0aJFN7LrAAAAAOAUdhdvJ06c0DPPPKNVq1bJYrFoz549qlixorp06aLixYvrww8/zPVYiYmJ6tixo44ePSpfX1+FhoZq6dKlevTRRyVJY8eOlZubm9q0aaO0tDRFRERo0qRJ1vXd3d21cOFC9ejRQ2FhYSpSpIiioqI0YsQIa58KFSpo0aJF6tevn8aPH6+yZcvqiy++UEREhL27DgAAAABOY/d93jp27KjExER98cUXqlatmv744w9VrFhRS5cuVf/+/a1T9N9OuM9b/sO9c+Bo5JSzI8DtiLxydgQA8oIzawO7j7wtW7ZMS5cuVdmyZW3a77777hu6VQAAAAAA4PrsnrDkzJkzKly4cLb2pKQk63VmAAAAAADHsrt4Cw8P14wZM6zPLRaLMjMzNWbMGD388MMODQ4AAAAAcIndp02OGTNGTZo00W+//aYLFy5o0KBB2r59u5KSkrR27dq8iBEAAAAA7nh2H3mrUaOG/vzzTz300ENq1aqVzpw5o9atW2vz5s2qVKlSXsQIAAAAAHc8u2ebvBMx22T+w6cWjkZOOTsC3I7IK2dHACAv5PvZJrds2ZLrAUNDQ284GAAAAABAznJVvNWuXVsWi0XXO0hnsViUkZHhkMAAAAAAAP8vV8Xb/v378zoOAAAAAMA15Kp4Cw4Ozus4AAAAAADXYPdskyNHjtTUqVOztU+dOlWjR492SFAAAAAAAFt2F2+ffvqpqlatmq29evXqmjJlikOCAgAAAADYsrt4S0hIUJkyZbK1+/n56ejRow4JCgAAAABgy+7iLSgoSGvXrs3WvnbtWgUGBjokKAAAAACArVxNWHK5rl27qm/fvkpPT9cjjzwiSVqxYoUGDRqkAQMGODxAAAAAAMANFG8DBw7UiRMn9Morr+jChQuSpEKFCmnw4MGKjo52eIAAAAAAAMlirnfn7as4ffq0du7cKS8vL919993y9PR0dGz5Rmpqqnx9fZWSkiIfHx9nhyNJslicHYFz3dinFrg6csrZEeB2RF45OwIAecGZtYHd17xNmzZN586dk7e3t+6//37VqFHjti7cAAAAACA/sLt4e/311+Xv768uXbpo3bp1eRETAAAAAOAKdhdv//zzj6ZPn65///1XjRs3VtWqVTV69GglJCTkRXwAAAAAAN1A8VagQAE99dRTWrBggQ4fPqyuXbtq5syZKleunJ544gktWLBAmZmZeRErAAAAANyx7C7eLufv76+HHnpIYWFhcnNz09atWxUVFaVKlSpp9erVDgoRAAAAAHBDxduxY8f0wQcfqHr16mrcuLFSU1O1cOFC7d+/X//884+eeeYZRUVFOTpWAAAAALhj2X2rgJYtW2rp0qW655579NJLL6ljx44qUaKETZ/ExEQFBATcNqdPcquA/Ifpl+Fo5JSzI8DtiLxydgQA8oIzawO7b9JdunRpxcXFKSws7Kp9/Pz8tH///psKDAAAAADw/274Jt13Eo685T98auFo5JSzI8DtiLxydgQA8oJL3aS7d+/emjBhQrb2iRMnqm/fvo6ICQAAAABwBbuLt3nz5qlBgwbZ2h988EHNnTvXIUEBAAAAAGzZX
bydOHFCvr6+2dp9fHz077//OiQoAAAAAIAtu4u3ypUra8mSJdnaFy9erIoVKzokKAAAAACALbtnm+zfv7969eql48eP65FHHpEkrVixQh9++KHGjRvn6PgAAAAAALqB4q1z585KS0vTu+++q7fffluSVL58eU2ePFkdO3Z0eIAAAAAAgJu8VcDx48fl5eUlb29vR8aU73CrgPyH6ZfhaOSUsyPA7Yi8cnYEAPKCS92k+3J+fn6OigMAAAAAcA12T1jiSCNHjtT999+vokWLqnTp0nryySe1e/dumz6NGzeWxWKxebz88ss2fQ4dOqTIyEgVLlxYpUuX1sCBA3Xx4kWbPqtXr1adOnXk6empypUrKyYmJq93DwAAAAAcxqnFW1xcnHr27KlffvlFsbGxSk9PV7NmzXTmzBmbfl27dtXRo0etjzFjxliXZWRkKDIyUhcuXNC6des0ffp0xcTEaMiQIdY++/fvV2RkpB5++GHFx8erb9++eumll7R06dJbtq8AAAAAcDNu6po3Rzt+/LhKly6tuLg4NWzYUNKlI2+1a9e+6kyWixcvVosWLXTkyBH5+/tLkqZMmaLBgwfr+PHj8vDw0ODBg7Vo0SJt27bNul67du2UnJyc420PrsQ1b/lP/vnU4nZBTjk7AtyOyCtnRwAgLzizNrD7yNvff/991WW//PLLTQWTkpIiSSpRooRN+8yZM1WqVCnVqFFD0dHROnv2rHXZ+vXrVbNmTWvhJkkRERFKTU3V9u3brX2aNm1qM2ZERITWr1+fYxxpaWlKTU21eQAAAACAM9ldvDVr1kxJSUnZ2teuXavHHnvshgPJzMxU37591aBBA9WoUcPa/vzzz+t///ufVq1apejoaP33v//VCy+8YF2ekJBgU7hJsj5PSEi4Zp/U1FSdO3cuWywjR46Ur6+v9REUFHTD+wUAAAAAjmD3bJP169dXs2bNtGrVKhUtWlSStGbNGrVs2VLDhg274UB69uypbdu26eeff7Zp79atm/X/a9asqTJlyqhJkybat2+fKlWqdMPbu5bo6Gj179/f+jw1NZUCDgAAAIBT2X3k7YsvvlC5cuXUsmVLpaWladWqVYqMjNSIESPUr1+/GwqiV69eWrhwoVatWqWyZctes2+9evUkSXv37pUkBQQE6NixYzZ9sp4HBARcs4+Pj4+8vLyybcPT01M+Pj42DwAAAABwJruLNzc3N82ePVsFCxbUI488oieeeEIjR45Unz597N64MUa9evXSd999p5UrV6pChQrXXSc+Pl6SVKZMGUlSWFiYtm7dqsTERGuf2NhY+fj4KCQkxNpnxYoVNuPExsYqLCzM7pgBAAAAwBlyNdvkli1bsrWdOnVKzz33nCIjI9WjRw9re2hoaK43/sorr2jWrFlasGCBqlSpYm339fWVl5eX9u3bp1mzZunxxx9XyZIltWXLFvXr109ly5ZVXFycpEu3Cqhdu7YCAwM1ZswYJSQkqEOHDnrppZf03nvvSbp0q4AaNWqoZ8+e6ty5s1auXKnevXtr0aJFioiIuG6czDaZ/zCDFxyNnHJ2BLgdkVfOjgBAXnBmbZCr4s3NzU0Wi0WXd738edb/WywWZWRk5H7jV/lWnzZtmjp16qTDhw/rhRde0LZt23TmzBkFBQXpqaee0ltvvWXzQh08eFA9evTQ6tWrVaRIEUVFRWnUqFEqUOD/L+lbvXq1+vXrpx07dqhs2bL6z3/+o06dOuUqToq3/Id/EOFo5JSzI8DtiLxydgQA8kK+L94OHjyY6wGDg4NvKqD8iOIt/+EfRDgaOeXsCHA7Iq+cHQGAvODM2iBXs03ejgUZAAAAALgSuycsGTlypKZOnZqtferUqRo9erRDggIAAAAA2LK7ePv0009VtWrVbO3Vq1fXlClTHBIUAAAAAMCW3cVbQkKCdZr+y/n5+eno0aMOCQoAAAAAYMvu4i0oKEhr167N1r527VoFBgY6JCgAAAAAgK1cTVhyua5du6pv375KT0/XI488IklasWKFBg0apAEDBjg8QAAAAADADRRvAwcO1IkTJ/TKK6/owoULkqRChQpp8ODBio6OdniAAAAAAIBc3uctJ6dPn9bOnTvl5eWlu+++W56eno6OLd/gPm/5D/fOgaORU86OALcj8srZEQDIC/n+Pm858fb2tk5ccjsXbgAAAACQH9g9YUlmZqZGjBghX19fBQcHKzg4WMWKFdPbb7+tzMzMvIgRAAAAAO54dh95e/PNN/Xll19q1KhRatCggSTp559/1rBhw3T+/Hm9++67Dg8SAAAAAO50dl/zFhgYqClTpuiJJ56waV+wYIFeeeUV/fPPPw4NMD/gmrf8h+sI4GjklLMjwO2IvHJ2BADygjNrA7tPm0xKSlLVqlWztVetWlVJSUkOCQoAAAAAYMvu4q1WrVqaOHFitvaJEyeqVq1aDgkKAAAAAGDL7mvexowZo8jISC1fvlxhYWGSpPXr1+vw4cP68ccfHR4gAAAAAOAGjrw1atRIf/75p5566iklJycrOTlZrVu31u7duxUeHp4XMQIAAADAHe+Gb9J9J2HCkvyHTy0cjZxydgS4HZFXzo4AQF7I9zfp3rJlS64HDA0NveFgAAAAAAA5y1XxVrt2bVksFl3vIJ3FYlFGRoZDAgMAAAAA/L9cFW/79+/P6zgAAAAAANeQq+ItODg4r+MAAAAAAFyD3bNNjhw5UlOnTs3WPnXqVI0ePdohQQEAAAAAbNldvH366aeqWrVqtvbq1atrypQpDgkKAAAAAGDL7uItISFBZcqUydbu5+eno0ePOiQoAAAAAIAtu4u3oKAgrV27Nlv72rVrFRgY6JCgAAAAAAC2cjVhyeW6du2qvn37Kj09XY888ogkacWKFRo0aJAGDBjg8AABAAAAADdQvA0cOFAnTpzQK6+8ogsXLkiSChUqpMGDBys6OtrhAQIAAAAAJIu53p23r+L06dPauXOnvLy8dPfdd8vT09PRseUbqamp8vX1VUpKinx8fJwdjiTJYnF2BM51Y59a4OrIKWdHgNsReeXsCADkBWfWBnYfecvi7e2t+++/35GxAAAAAACuwu4JSwAAAAAAtx7FGwAAAAC4AIo3AAAAAHABuSre6tSpo5MnT0qSRowYobNnz+ZpUAAAAAAAW7kq3nbu3KkzZ85IkoYPH67Tp0/naVAAAAAAAFu5Kt5q166tF198UcOHD5cxRh988IFGjBiR48MeI0eO1P3336+iRYuqdOnSevLJJ7V7926bPufPn1fPnj1VsmRJeXt7q02bNjp27JhNn0OHDikyMlKFCxdW6dKlNXDgQF28eNGmz+rVq1WnTh15enqqcuXKiomJsStWAAAAAHCmXN0qICYmRkOHDtXChQtlsVi0ePFiFSiQfVWLxaIhQ4bkeuNxcXHq2bOn7r//fl28eFFvvPGGmjVrph07dqhIkSKSpH79+mnRokWaM2eOfH191atXL7Vu3Vpr166VJGVkZCgyMlIBAQFat26djh49qo4dO6pgwYJ67733JEn79+9XZGSkXn75Zc2c
OVMrVqzQSy+9pDJlyigiIiLX8QIAAACAs9h9k243NzclJCSodOnSDg/m+PHjKl26tOLi4tSwYUOlpKTIz89Ps2bNUtu2bSVJu3btUrVq1bR+/XrVr19fixcvVosWLXTkyBH5+/tLkqZMmaLBgwfr+PHj8vDw0ODBg7Vo0SJt27bNuq127dopOTlZS5YsuW5c3KQ7/+HGp3A0csrZEeB2RF45OwIAecGZtYHds01mZmbmSeEmSSkpKZKkEiVKSJI2bdqk9PR0NW3a1NqnatWqKleunNavXy9JWr9+vWrWrGkt3CQpIiJCqamp2r59u7XP5WNk9cka40ppaWlKTU21eQAAAACAM93QrQL27dunV199VU2bNlXTpk3Vu3dv7du376YCyczMVN++fdWgQQPVqFFDkpSQkCAPDw8VK1bMpq+/v78SEhKsfS4v3LKWZy27Vp/U1FSdO3cuWywjR46Ur6+v9REUFHRT+wYAAAAAN8vu4m3p0qUKCQnRxo0bFRoaqtDQUG3YsEHVq1dXbGzsDQfSs2dPbdu2TbNnz77hMRwlOjpaKSkp1sfhw4edHRIAAACAO1yuJiy53Ouvv65+/fpp1KhR2doHDx6sRx991O4gevXqpYULF2rNmjUqW7astT0gIEAXLlxQcnKyzdG3Y8eOKSAgwNpn48aNNuNlzUZ5eZ8rZ6g8duyYfHx85OXllS0eT09PeXp62r0fAAAAAJBX7D7ytnPnTnXp0iVbe+fOnbVjxw67xjLGqFevXvruu++0cuVKVahQwWZ53bp1VbBgQa1YscLatnv3bh06dEhhYWGSpLCwMG3dulWJiYnWPrGxsfLx8VFISIi1z+VjZPXJGgMAAAAA8ju7izc/Pz/Fx8dna4+Pj7d7IpOePXvqf//7n2bNmqWiRYsqISFBCQkJ1uvQfH191aVLF/Xv31+rVq3Spk2b9OKLLyosLEz169eXJDVr1kwhISHq0KGD/vjjDy1dulRvvfWWevbsaT169vLLL+uvv/7SoEGDtGvXLk2aNEnffPON+vXrZ+/uAwAAAIBT2H3aZNeuXdWtWzf99ddfevDBByVJa9eu1ejRo9W/f3+7xpo8ebIkqXHjxjbt06ZNU6dOnSRJY8eOlZubm9q0aaO0tDRFRERo0qRJ1r7u7u5auHChevToobCwMBUpUkRRUVE2NwyvUKGCFi1apH79+mn8+PEqW7asvvjiC+7xBgAAAMBl2H2fN2OMxo0bpw8//FBHjhyRJAUGBmrgwIHq3bu3LLfhTV24z1v+w71z4GjklLMjwO2IvHJ2BADygjNrA7uLt8udOnVKklS0aFGHBZQfUbzlP/yDCEcjp5wdAW5H5JWzIwCQF5xZG9h92uTlbveiDQAAAADyixu6STcAAAAA4NaieAMAAAAAF0DxBgAAAAAuwK7iLT09XU2aNNGePXvyKh4AAAAAQA7sKt4KFiyoLVu25FUsAAAAAICrsPu0yRdeeEFffvllXsQCAAAAALgKu28VcPHiRU2dOlXLly9X3bp1VaRIEZvlH330kcOCAwAAAABcYnfxtm3bNtWpU0eS9Oeff9oss9zpd+MEAAAAgDxid/G2atWqvIgDAAAAAHANN3yrgL1792rp0qU6d+6cJMkY47CgAAAAAAC27C7eTpw4oSZNmuiee+7R448/rqNHj0qSunTpogEDBjg8QAAAAADADRRv/fr1U8GCBXXo0CEVLlzY2v7ss89qyZIlDg0OAAAAAHCJ3de8LVu2TEuXLlXZsmVt2u+++24dPHjQYYEBAAAAAP6f3Ufezpw5Y3PELUtSUpI8PT0dEhQAAAAAwJbdxVt4eLhmzJhhfW6xWJSZmakxY8bo4YcfdmhwAAAAAIBL7D5tcsyYMWrSpIl+++03XbhwQYMGDdL27duVlJSktWvX5kWMAAAAAHDHs/vIW40aNfTnn3/qoYceUqtWrXTmzBm1bt1amzdvVqVKlfIiRgAAAAC441kMN2i7rtTUVPn6+iolJUU+Pj7ODkeSZLE4OwLn4lMLRyOnnB0BbkfklbMjAJAXnFkb2H3apCSdPHlSX375pXbu3ClJCgkJ0YsvvqgSJUo4NDgAAAAAwCV2nza5Zs0alS9fXhMmTNDJkyd18uRJTZgwQRUqVNCaNWvyIkYAAAAAuOPZfdpkzZo1FRYWpsmTJ8vd3V2SlJGRoVdeeUXr1q3T1q1b8yRQZ+K0yfyHU1HgaOSUsyPA7Yi8cnYEAPKCM2sDu4+87d27VwMGDLAWbpLk7u6u/v37a+/evQ4NDgAAAABwid3FW506dazXul1u586dqlWrlkOCAgAAAADYytWEJVu2bLH+f+/evdWnTx/t3btX9evXlyT98ssv+uSTTzRq1Ki8iRIAAAAA7nC5uubNzc1NFotF1+tqsViUkZHhsODyC655y3+4jgCORk45OwLcjsgrZ0cAIC/k+1sF7N+/P6/jAAAAAABcQ66Kt+Dg4LyOAwAAAABwDTd0k+4jR47o559/VmJiojIzM22W9e7d2yGBAQAAAAD+n93FW0xMjLp37y4PDw+VLFlSlstOaLdYLBRvAAAAAJAH7C7e/vOf/2jIkCGKjo6Wm5vddxoAAAAAANwAu6uvs2fPql27dhRuAAAAAHAL2V2BdenSRXPmzMmLWAAAAAAAV2F38TZy5EjFxcWpcePGevXVV9W/f3+bhz3WrFmjli1bKjAwUBaLRfPnz7dZ3qlTJ1ksFpvHY489ZtMnKSlJ7du3l4+Pj4oVK6YuXbro9OnTNn22bNmi8PBwFSpUSEFBQRozZoy9uw0AAAAATmX3NW8jR47U0qVLVaVKFUnKNmGJPc6cOaNatWqpc+fOat26dY59HnvsMU2bNs363NPT02Z5+/btdfToUcXGxio9PV0vvviiunXrplmzZkm6dBO9Zs2aqWnTppoyZYq2bt2qzp07q1ixYurWrZtd8QIAAACAs9hdvH344YeaOnWqOnXqdNMbb968uZo3b37NPp6engoICMhx2c6dO7VkyRL9+uuvuu+++yRJH3/8sR5//HF98MEHCgwM1MyZM3XhwgVNnTpVHh4eql69uuLj4/XRRx9dtXhLS0tTWlqa9XlqauoN7iEAAAAAOIbdp016enqqQYMGeRFLjlavXq3SpUurSpUq6tGjh06cOGFdtn79ehUrVsxauElS06ZN5ebmpg0bNlj7NGzYUB4eHtY+ERER2r17t06ePJnjNkeOHClfX1/rIygoKI/2DgAAAAByx+7irU+fPvr444/zIpZsHnvsMc2YMUMrVqzQ6NGjFRcXp+bNmysjI0OSlJCQoNKlS9usU6BAAZUoUUIJCQnWPv7+/jZ9sp5n9blSdHS0UlJSrI/Dhw87etcAAAAAwC52nza5ceNGrVy5UgsXLlT16tVVsGBBm+Xffvutw4Jr166d9f9r1qyp0NBQVapUSatXr1aTJk0ctp0reXp6Zru2DgAAAACcye7irVixYledXCSvVaxYUaVKldLevXvVpEkTBQQEKDEx0abPxYsXlZSUZL1OLiAgQMeOHbPpk/X8atfSAQA
AAEB+Y3fxdvnMj7fa33//rRMnTqhMmTKSpLCwMCUnJ2vTpk2qW7euJGnlypXKzMxUvXr1rH3efPNNpaenW48SxsbGqkqVKipevLhzdgQAAAAA7GT3NW+OdPr0acXHxys+Pl6StH//fsXHx+vQoUM6ffq0Bg4cqF9++UUHDhzQihUr1KpVK1WuXFkRERGSpGrVqumxxx5T165dtXHjRq1du1a9evVSu3btFBgYKEl6/vnn5eHhoS5dumj79u36+uuvNX78eLvvSQcAAAAAzmQxxhh7VqhQocI17+f2119/5Xqs1atX6+GHH87WHhUVpcmTJ+vJJ5/U5s2blZycrMDAQDVr1kxvv/22zQQkSUlJ6tWrl3744Qe5ubmpTZs2mjBhgry9va19tmzZop49e+rXX39VqVKl9Oqrr2rw4MG5jjM1NVW+vr5KSUmRj49PrtfLS3beUu+2Y9+nFrg+csrZEeB2RF45OwIAecGZtYHdxdv48eNtnqenp2vz5s1asmSJBg4cqNdff92hAeYHFG/5D/8gwtHIKWdHgNsReeXsCADkBWfWBnZf89anT58c2z/55BP99ttvNx0QAAAAACA7h13z1rx5c82bN89RwwEAAAAALuOw4m3u3LkqUaKEo4YDAAAAAFzG7tMm7733XpsJS4wxSkhI0PHjxzVp0iSHBgcAAAAAuMTu4u3JJ5+0ee7m5iY/Pz81btxYVatWdVRcAAAAAIDL2D3b5J2I2SbzHz61cDRyytkR4HZEXjk7AgB5wZm1gVNv0g0AAAAAyJ1cnzbp5uZ2zZtzS5LFYtHFixdvOigAAAAAgK1cF2/ffffdVZetX79eEyZMUGZmpkOCAgAAAADYynXx1qpVq2xtu3fv1uuvv64ffvhB7du314gRIxwaHAAAAADgkhu65u3IkSPq2rWratasqYsXLyo+Pl7Tp09XcHCwo+MDAAAAAMjO4i0lJUWDBw9W5cqVtX37dq1YsUI//PCDatSokVfxAQAAAABkx2mTY8aM0ejRoxUQEKCvvvoqx9MoAQAAAAB5I9f3eXNzc5OXl5eaNm0qd3f3q/b79ttvHRZcfsF93vIf7p0DRyOnnB0BbkfklbMjAJAXnFkb5PrIW8eOHa97qwAAAAAAQN7IdfEWExOTh2EAAAAAAK7lhmabBAAAAADcWhRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAFOLd7WrFmjli1bKjAwUBaLRfPnz7dZbozRkCFDVKZMGXl5ealp06bas2ePTZ+kpCS1b99ePj4+KlasmLp06aLTp0/b9NmyZYvCw8NVqFAhBQUFacyYMXm9awAAAADgUE4t3s6cOaNatWrpk08+yXH5mDFjNGHCBE2ZMkUbNmxQkSJFFBERofPnz1v7tG/fXtu3b1dsbKwWLlyoNWvWqFu3btblqampatasmYKDg7Vp0ya9//77GjZsmD777LM83z8AAAAAcBSLMcY4OwhJslgs+u677/Tkk09KunTULTAwUAMGDNBrr70mSUpJSZG/v79iYmLUrl077dy5UyEhIfr111913333SZKWLFmixx9/XH///bcCAwM1efJkvfnmm0pISJCHh4ck6fXXX9f8+fO1a9euXMWWmpoqX19fpaSkyMfHx/E7fwMsFmdH4Fz541OL2wk55ewIcDsir5wdAYC84MzaIN9e87Z//34lJCSoadOm1jZfX1/Vq1dP69evlyStX79exYoVsxZuktS0aVO5ublpw4YN1j4NGza0Fm6SFBERod27d+vkyZM5bjstLU2pqak2DwAAAABwpnxbvCUkJEiS/P39bdr9/f2tyxISElS6dGmb5QUKFFCJEiVs+uQ0xuXbuNLIkSPl6+trfQQFBd38DgEAAADATci3xZszRUdHKyUlxfo4fPiws0MCAAAAcIfLt8VbQECAJOnYsWM27ceOHbMuCwgIUGJios3yixcvKikpyaZPTmNcvo0reXp6ysfHx+YBAAAAAM6Ub4u3ChUqKCAgQCtWrLC2paamasOGDQoLC5MkhYWFKTk5WZs2bbL2WblypTIzM1WvXj1rnzVr1ig9Pd3aJzY2VlWqVFHx4sVv0d4AAAAAwM1xavF2+vRpxcfHKz4+XtKlSUri4+N16NAhWSwW9e3bV++8846+//57bd26VR07dlRgYKB1Rspq1arpscceU9euXbVx40atXbtWvXr1Urt27RQYGChJev755+Xh4aEuXbpo+/bt+vrrrzV+/Hj179/fSXsNAAAAAPZz6q0CVq9erYcffjhbe1RUlGJiYmSM0dChQ/XZZ58pOTlZDz30kCZNmqR77rnH2jcpKUm9evXSDz/8IDc3N7Vp00YTJkyQt7e3tc+WLVvUs2dP/frrrypVqpReffVVDR48ONdxcquA/Ifpl+Fo5JSzI8DtiLxydgQA8oIza4N8c5+3/IziLf/hUwtHI6ecHQFuR+SVsyMAkBe4zxsAAAAA4Joo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAF5OvibdiwYbJYLDaPqlWrWpefP39ePXv2VMmSJeXt7a02bdro2LFjNmMcOnRIkZGRKly4sEqXLq2BAwfq4sWLt3pXAAAAAOCmFHB2ANdTvXp1LV++3Pq8QIH/D7lfv35atGiR5syZI19fX/Xq1UutW7fW2rVrJUkZGRmKjIxUQECA1q1bp6NHj6pjx44qWLCg3nvvvVu+LwAAAABwo/J98VagQAEFBARka09JSdGXX36pWbNm6ZFHHpEkTZs2TdWqVdMvv/yi+vXra9myZdqxY4eWL18uf39/1a5dW2+//bYGDx6sYcOGycPD41bvDgAAAADckHx92qQk7dmzR4GBgapYsaLat2+vQ4cOSZI2bdqk9PR0NW3a1Nq3atWqKleunNavXy9JWr9+vWrWrCl/f39rn4iICKWmpmr79u1X3WZaWppSU1NtHgAAAADgTPm6eKtXr55iYmK0ZMkSTZ48Wfv371d4eLhOnTqlhIQEeXh4qFixYjbr+Pv7KyEhQZKUkJBgU7hlLc9adjUjR46Ur6+v9REUFOTYHQMAAAAAO+Xr0yabN29u/f/Q0FDVq1dPwcHB+u
abb+Tl5ZVn242Ojlb//v2tz1NTUyngAAAAADhVvj7ydqVixYrpnnvu0d69exUQEKALFy4oOTnZps+xY8es18gFBARkm30y63lO19Fl8fT0lI+Pj80DAAAAAJzJpYq306dPa9++fSpTpozq1q2rggULasWKFdblu3fv1qFDhxQWFiZJCgsL09atW5WYmGjtExsbKx8fH4WEhNzy+AEAAADgRuXr0yZfe+01tWzZUsHBwTpy5IiGDh0qd3d3Pffcc/L19VWXLl3Uv39/lShRQj4+Pnr11VcVFham+vXrS5KaNWumkJAQdejQQWPGjFFCQoLeeust9ezZU56enk7eOwAAAADIvXxdvP3999967rnndOLECfn5+emhhx7SL7/8Ij8/P0nS2LFj5ebmpjZt2igtLU0RERGaNGmSdX13d3ctXLhQPXr0UFhYmIoUKaKoqCiNGDHCWbsEAAAAADfEYowxzg4iv0tNTZWvr69SUlLyzfVvFouzI3AuPrVwNHLK2RHgdkReOTsCAHnBmbWBS13zBgAAAAB3Koo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcAMUbAAAAALgAijcAAAAAcAEUbwAAAADgAijeAAAAAMAFULwBAAAAgAugeAMAAAAAF0DxBgAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHABFG8AAAAA4AIo3gAAAADABVC8AQAAAIALoHgDAAAAABdA8QYAAAAALoDiDQAAAABcQAFnBwAAAADg+iwWZ0fgXMY4OwLn48gbAAAAALiAO6p4++STT1S+fHkVKlRI9erV08aNG50dEgAAAADkyh1TvH399dfq37+/hg4dqt9//121atVSRESEEhMTnR0aAAAAAFzXHVO8ffTRR+ratatefPFFhYSEaMqUKSpcuLCmTp3q7NAAAAAA4LruiAlLLly4oE2bNik6Otra5ubmpqZNm2r9+vXZ+qelpSktLc36PCUlRZKUmpqa98EiV3grAMcipwDHI68Ax8ovOZVVExgnzKByRxRv//77rzIyMuTv72/T7u/vr127dmXrP3LkSA0fPjxbe1BQUJ7FCPv4+jo7AuD2Qk4BjkdeAY6V33Lq1KlT8r3FQd0RxZu9oqOj1b9/f+vzzMxMJSUlqWTJkrLc6XO06tKvDUFBQTp8+LB8fHycHQ7g8sgpwPHIK8CxyKn/Z4zRqVOnFBgYeMu3fUcUb6VKlZK7u7uOHTtm037s2DEFBARk6+/p6SlPT0+btmLFiuVliC7Jx8fnjk9ewJHIKcDxyCvAscipS271Ebcsd8SEJR4eHqpbt65WrFhhbcvMzNSKFSsUFhbmxMgAAAAAIHfuiCNvktS/f39FRUXpvvvu0wMPPKBx48bpzJkzevHFF50dGgAAAABc1x1TvD377LM6fvy4hgwZooSEBNWuXVtLlizJNokJrs/T01NDhw7NdmopgBtDTgGOR14BjkVO5Q8W44w5LgEAAAAAdrkjrnkDAAAAAFdH8QYAAAAALoDiDQAAAABcAMVbHipfvrzGjRvn7DBczoEDB2SxWBQfH5/n2+I9cj28ZzeGvMLV8H7dGHIK18J7dmPIq1wwt7moqCgjyXTv3j3bsldeecVIMlFRUbkaa//+/UaS2bx5c676JyYmmjNnzuSqb4sWLUxERESOy9asWWMkmT/++CNXY13NqlWrjCRz8uTJmxrnSmfPnjXFixc3JUuWNOfPn7dr3aioKNOqVSubtosXL5qjR4+a9PR0h8U4bdo04+vrm63dnvfIUSZOnGiCg4ONp6eneeCBB8yGDRtu6fYdgbz6f+SVb7b2W51XcXFxpkWLFqZMmTJGkvnuu+9u2bYdhZz6f+SUb7b2W51T7733nrnvvvuMt7e38fPzM61atTK7du26Zdt3FPLq/5FXvtnab3VeTZo0ydSsWdMULVrUFC1a1NSvX9/8+OOPdo9zRxx5CwoK0uzZs3Xu3Dlr2/nz5zVr1iyVK1fO4du7cOGCJMnPz0+FCxfO1TpdunRRbGys/v7772zLpk2bpvvuu0+hoaEOjfNGGWN08eJF6/N58+apevXqqlq1qubPn3/T47u7uysgIEAFCuT9nSzseY8c4euvv1b//v01dOhQ/f7776pVq5YiIiKUmJh4y2JwFPLKscirG3fmzBnVqlVLn3zyyS3bZl4gpxyLnLpxcXFx6tmzp3755RfFxsYqPT1dzZo105kzZ25ZDI5CXjkWeXXjypYtq1GjRmnTpk367bff9Mgjj6hVq1bavn27fQM5uKjMd7Kq+ho1apj//e9/1vaZM2ea0NBQ06pVK+uvLosXLzYNGjQwvr6+pkSJEiYyMtLs3bvXuo4km0ejRo1stvHOO++YMmXKmPLlyxtjjAkODjZjx441xlz6xaNgwYJmzZo11vFGjx5t/Pz8TEJCgklPTzf+/v7m7bffton/1KlTxtvb20yePNkYY8xPP/1kHnroIVOoUCFTtmxZ8+qrr5rTp09b+58/f94MGjTIlC1b1nh4eJhKlSqZL774wvqL0eWPrP0+f/68efXVV42fn5/x9PQ0DRo0MBs3brSOmfVrzY8//mjq1KljChYsaFatWmVd3rhxYzNlyhQzefJk8+ijj2Z7D7Zt22YiIyNN0aJFjbe3t3nooYfM3r17zdChQ7PFtGrVKptftzIyMsxdd91lJk2aZDPm77//biwWizlw4IAxxpgPP/zQ1KhRwxQuXNiULVvW9OjRw5w6dcom/ssfQ4cOzfYeGWPMwYMHzRNPPGGKFCliihYtap5++mmTkJBgXT506FBTq1YtM2PGDBMcHGx8fHzMs88+a1JTU7Ptd04eeOAB07NnT+vzjIwMExgYaEaOHJmr9fML8oq8yk95dTm58JE3coqcyo85ZcylIxSSTFxc3A2t7yzkFXmVn/PKGGOKFy9uvvjiC7vWuWOKt48++sg0adLE2t6kSRMzduxYm8SdO3eumTdvntmzZ4/ZvHmzadmypalZs6bJyMgwxhizceNGI8ksX77cHD161Jw4ccK6DW9vb9OhQwezbds2s23bNmNM9g/FwIEDTXBwsElOTja///678fDwMAsWLLBZXqlSJZOZmWltmzp1qvHy8jLJyclm7969pkiRImbs2LHmzz//NGvXrjX33nuv6dSpk7X/M888Y4KCgsy3335r9u3bZ5YvX25mz55tLl68aObNm2ckmd27d5ujR4+a5ORkY4wxvXv3NoGBgebHH38027dvN1FRUaZ48eLW/cv64IeGhpply5aZvXv3Wpft3bvXeHp6mqSkJHPixAlTqFAhazIZY8zff/9tSpQoYVq3bm1+/fVXs3v3bjN16lSza9cuc+rUKfPMM8+Yxx57zBw9etQcPXrUpKWlZTs14bXXXjMPPfSQzfs6YMAAm7axY8ealStXmv3795sVK
1aYKlWqmB49ehhjjElLSzPjxo0zPj4+1u1kJfXl71FGRoapXbu2eeihh8xvv/1mfvnlF1O3bl3rF7QxlxLX29vbtG7d2mzdutWsWbPGBAQEmDfeeOOqn8EsaWlpxt3dPdsflh07djRPPPHEddfPT8gr8iq/5NWVXL14I6fIqfyWU8YYs2fPHiPJbN269YbWdxbyirzKr3l18eJF89VXXxkPDw+zfft2u9a9Y4q3xMRE4+npaQ4cOGAOHDhgChUqZI4fP26TuFc6fvy4zZfV1c53joqKMv7+/iYtLc2m/crETUtLM7Vr1zbPPPOMCQkJMV27drXpv3PnTusvD1nCw8PNCy+8YIwxpkuXLqZbt2426/z000/Gzc3NnDt3zuzevdtIMrGxsTnuT07nO58+fdoULFjQzJw509p24cIFExgYaMaMGWOz3vz587ON+cYbb5gnn3zS+rxVq1bWXzSMMSY6OtpUqFDBXLhwIceYcjrf+crXefPmzcZisZiDBw8aY4z1l5isX6JyMmfOHFOyZEnr86ud73z5e7Rs2TLj7u5uDh06ZF2+fft2I8n6K9TQoUNN4cKFbX5lGThwoKlXr95VY8nyzz//GElm3bp1Nu0DBw40DzzwwHXXz0/Iq/9HXvlm63cr8+pKrl68kVPkVH7LqYyMDBMZGWkaNGhg97rORl79P/LKN1s/Z+TVli1bTJEiRYy7u7vx9fU1ixYtyvW6We6Ia96kS+e1RkZGKiYmRtOmTVNkZKRKlSpl02fPnj167rnnVLFiRfn4+Kh8+fKSpEOHDl13/Jo1a8rDw+OafTw8PDRz5kzNmzdP58+f19ixY22WV61aVQ8++KCmTp0qSdq7d69++ukndenSRZL0xx9/KCYmRt7e3tZHRESEMjMztX//fsXHx8vd3V2NGjXK7cuiffv2KT09XQ0aNLC2FSxYUA888IB27txp0/e+++6zeZ6RkaHp06frhRdesLa98MILiomJUWZmpiQpPj5e4eHhKliwYK5julLt2rVVrVo1zZo1S9Klc/ETExP19NNPW/ssX75cTZo00V133aWiRYuqQ4cOOnHihM6ePZvr7ezcuVNBQUEKCgqytoWEhKhYsWI2r0X58uVVtGhR6/MyZcq45DVrjkBe5Yy8+n/klX3IqZyRU//vVudUz549tW3bNs2ePdvudfML8ipn5NX/u1V5VaVKFcXHx2vDhg3q0aOHoqKitGPHjlyvL91htwro3LmzYmJiNH36dHXu3Dnb8pYtWyopKUmff/65NmzYoA0bNkj6/4tPr6VIkSK5imHdunWSpKSkJCUlJWVb3qVLF82bN0+nTp3StGnTVKlSJWsinj59Wt27d1d8fLz18ccff2jPnj2qVKmSvLy8chXDjbpyH5cuXap//vlHzz77rAoUKKACBQqoXbt2OnjwoFasWCFJDoupffv21sSdNWuWHnvsMZUsWVLSpWllW7RoodDQUM2bN0+bNm2yTlyQm/fOXld+CVksFusX1bWUKlVK7u7uOnbsmE37sWPHFBAQ4NAYbyXy6uaQV5fcaF7djsipm0NOXeKInOrVq5cWLlyoVatWqWzZso4M75Yjr24OeXXJzeaVh4eHKleurLp162rkyJGqVauWxo8fb1cMd1Tx9thjj+nChQtKT09XRESEzbITJ05o9+7deuutt9SkSRNVq1ZNJ0+etOmT9atKRkbGDW1/37596tevnz7//HPVq1dPUVFR2d7wZ555Rm5ubpo1a5ZmzJihzp07y2KxSJLq1KmjHTt2qHLlytkeHh4eqlmzpjIzMxUXF5fj9nOKv1KlSvLw8NDatWutbenp6fr1118VEhJyzf358ssv1a5dO5svkvj4eLVr105ffvmlJCk0NFQ//fST0tPTrxpTbl7P559/Xtu2bdOmTZs0d+5ctW/f3rps06ZNyszM1Icffqj69evrnnvu0ZEjR+zeTrVq1XT48GEdPnzY2rZjxw4lJydf97XIDQ8PD9WtW9f6pSZJmZmZWrFihcLCwm56fGchr8ira8nrvLodkVPk1LXcipwyxqhXr1767rvvtHLlSlWoUMEh4zoTeUVeXYuz/q3KzMxUWlqafSvZfaKli7nyfNqUlBSTkpJifZ51vnNGRoYpWbKkeeGFF8yePXvMihUrzP33329z/UR6errx8vIy77zzjklISLBe7JnTObvG2J5Le/HiRVO/fn3Tpk0bY4wxR44cMSVLlrSeU3y5Ll26mOLFixt3d3fzzz//WNv/+OMP4+XlZXr27Gk2b95s/vzzTzN//nyb2Qs7depkgoKCzHfffWf++usvs2rVKvP1118bYy5dOGqxWExMTIxJTEy0XrDZp08fExgYaBYvXmxzsWpSUpIxJufzpBMTE03BggXN4sWLs8X/448/Gk9PT3PixAnz77//mpIlS1ovVv3zzz/NjBkzrPeLeffdd025cuXMrl27zPHjx82FCxeuel55gwYNTK1atUzRokXN2bNnre3x8fFGkhk3bpzZt2+fmTFjhrnrrrtsYl67dq31QuPjx49b7+tx+XuUmZlpateubcLDw82mTZvMhg0bcrxYtVatWjZxjR071gQHB2d7HXIye/Zs4+npaWJiYsyOHTtMt27dTLFixWxmM3IF5BV5ZUz+yatTp06ZzZs3m82bNxtJ5qOPPjKbN2+2XiPhCsgpcsqY/JNTPXr0ML6+vmb16tXWSR6OHj1qsz+ugLwir4zJP3n1+uuvm7i4OLN//36zZcsW8/rrrxuLxWKWLVuWq/Wz3HHF25Uuv1g1NjbWVKtWzXh6eprQ0FCzevXqbBe/f/755yYoKMi4ubllmyb2Spd/KIYPH27KlClj/v33X+vyefPmGQ8PDxMfH2+z3rp164wk8/jjj2cbc+PGjebRRx813t7epkiRIiY0NNS8++671uXnzp0z/fr1M2XKlDEeHh6mcuXKZurUqdblI0aMMAEBAcZisVj3+9y5c+bVV181pUqVuuY0sZcn7gcffGCKFSuW40WoaWlpplixYmb8+PHGmEtfOM2aNTOFCxc2RYsWNeHh4Wbfvn3GmEtfAFn7oxymib3cpEmTjCTTsWPHbNv86KOPTJkyZYyXl5eJiIgwM2bMyBbzyy+/bEqWLOmQaWIvZ0/iGmPMxx9/bMqVK2c8PDzMAw88YH755Zdcr5tfkFfkVZb8kFc5TQUt5f7mu/kBOUVOZckPOZVTPkky06ZNy9X6+QV5RV5lyQ951blzZxMcHGw8PDyMn5+fadKkid2FmzHGWIwxxr5jdQAAAACAW+2OuuYNAAAAAFwVxRvgIIcOHbKZwvfKR26mGwZgi7wCHIucAhzvVuYVp00CDnLx4kUdOHDgqsvLly+vAgUK3LqAgNsAeQU4FjkFON6tzCuKNwAAAABwAZw2CQAAAAAugOINAAAAAFwAxRsAAAAAuACKNwAAAABwARRvAADkUuPGjdW3b99c91+9erUsFouSk5PzLCYAwJ2D4g0AkOc6deoki8WiUaNG2bTPnz9fFovFrrHKly+vcePGOTA6AABcA8UbAOCWKFSo
kEaPHq2TJ086OxS7Xbhwwdkh3JT09HRnhwAAcACKNwDALdG0aVMFBARo5MiR1+z3888/Kzw8XF5eXgoKClLv3r115swZSZdOWzx48KD69esni8Uii8UiY4z8/Pw0d+5c6xi1a9dWmTJlbMb09PTU2bNnJUmHDh1Sq1at5O3tLR8fHz3zzDM6duyYtf+wYcNUu3ZtffHFF6pQoYIKFSqUY6yLFi2Sr6+vZs6cmavX4MSJE3ruued01113qXDhwqpZs6a++uor6/IZM2aoZMmSSktLs1nvySefVIcOHazPFyxYoDp16qhQoUKqWLGihg8frosXL1qXWywWTZ48WU888YSKFCmid999VydPnlT79u3l5+cnLy8v3X333Zo2bVqu4gYA5A8UbwCAW8Ld3V3vvfeePv74Y/3999859tm3b58ee+wxtWnTRlu2bNHXX3+tn3/+Wb169ZIkffvttypbtqxGjBiho0eP6ujRo7JYLGrYsKFWr14tSTp58qR27typc+fOadeuXZKkuLg43X///SpcuLAyMzPVqlUrJSUlKS4uTrGxsfrrr7/07LPP2sSyd+9ezZs3T99++63i4+OzxTpr1iw999xzmjlzptq3b5+r1+D8+fOqW7euFi1apG3btqlbt27q0KGDNm7cKEl6+umnlZGRoe+//966TmJiohYtWqTOnTtLkn766Sd17NhRffr00Y4dO/Tpp58qJiZG7777rs22hg0bpqeeekpbt25V586d9Z///Ec7duzQ4sWLtXPnTk2ePFmlSpXKVdwAgHzCAACQx6KiokyrVq2MMcbUr1/fdO7c2RhjzHfffWcu/6eoS5cuplu3bjbr/vTTT8bNzc2cO3fOGGNMcHCwGTt2rE2fCRMmmOrVqxtjjJk/f76pV6+eadWqlZk8ebIxxpimTZuaN954wxhjzLJly4y7u7s5dOiQdf3t27cbSWbjxo3GGGOGDh1qChYsaBITE22206hRI9OnTx8zceJE4+vra1avXn3N/V61apWRZE6ePHnVPpGRkWbAgAHW5z169DDNmze3Pv/www9NxYoVTWZmpjHGmCZNmpj33nvPZoz//ve/pkyZMtbnkkzfvn1t+rRs2dK8+OKL14wXAJC/ceQNAHBLjR49WtOnT9fOnTuzLfvjjz8UExMjb29v6yMiIkKZmZnav3//Vcds1KiRduzYoePHjysuLk6NGzdW48aNtXr1aqWnp2vdunVq3LixJGnnzp0KCgpSUFCQdf2QkBAVK1bMJqbg4GD5+fll29bcuXPVr18/xcbGqlGjRnbte0ZGht5++23VrFlTJUqUkLe3t5YuXapDhw5Z+3Tt2lXLli3TP//8I0mKiYmxTviS9RqNGDHC5jXq2rWrjh49aj0tVJLuu+8+m2336NFDs2fPVu3atTVo0CCtW7fOrtgBAM5H8QYAuKUaNmyoiIgIRUdHZ1t2+vRpde/eXfHx8dbHH3/8oT179qhSpUpXHTOrGIqLi7Mp3uLi4vTrr78qPT1dDz74oF1xFilSJMf2e++9V35+fpo6daqMMXaN+f7772v8+PEaPHiwVq1apfj4eEVERNhMiHLvvfeqVq1amjFjhjZt2qTt27erU6dO1uWnT5/W8OHDbV6jrVu3as+ePTbX5l0Zf/Pmza3XCx45ckRNmjTRa6+9Zlf8AADnKuDsAAAAd55Ro0apdu3aqlKlik17nTp1tGPHDlWuXPmq63p4eCgjI8OmzWKxKDw8XAsWLND27dv10EMPqXDhwkpLS9Onn36q++67z1rMVKtWTYcPH9bhw4etR9927Nih5ORkhYSEXDf2SpUq6cMPP1Tjxo3l7u6uiRMn5nq/165dq1atWumFF16QJGVmZurPP//Mtt2XXnpJ48aN0z///KOmTZvaHCWsU6eOdu/efc3X6Gr8/PwUFRWlqKgohYeHa+DAgfrggw/sHgcA4BwceQMA3HI1a9ZU+/btNWHCBJv2wYMHa926derVq5fi4+O1Z88eLViwwDphiXTpPm9r1qzRP//8o3///dfa3rhxY3311VeqXbu2vL295ebmpoYNG2rmzJk2pzc2bdrUuv3ff/9dGzduVMeOHdWoUaNspxpezT333KNVq1Zp3rx5dt20++6771ZsbKzWrVunnTt3qnv37jazXGZ5/vnn9ffff+vzzz+3TlSSZciQIZoxY4aGDx+u7du3a+fOnZo9e7beeuuta257yJAhWrBggfbu3avt27dr4cKFqlatWq5jBwA4H8UbAMApRowYoczMTJu20NBQxcXF6c8//1R4eLjuvfdeDRkyRIGBgTbrHThwQJUqVbK5Jq1Ro0bKyMiwXtsmXSrormyzWCxasGCBihcvroYNG6pp06aqWLGivv76a7vir1KlilauXKmvvvpKAwYMyNU6b731lurUqaOIiAg1btxYAQEBevLJJ7P18/X1VZs2beTt7Z1teUREhBYuXKhly5bp/vvvV/369TV27FgFBwdfc9seHh6Kjo5WaGioGjZsKHd3d82ePTu3uwsAyAcsxt4T9gEAQJ5r0qSJqlevnu3oJADgzkXxBgBAPnLy5EmtXr1abdu21Y4dO7JdFwgAuHMxYQkAAPnIvffeq5MnT2r06NEUbgAAGxx5AwAAAAAXwIQlAAAAAOACKN4AAAAAwAVQvAEAAACAC6B4AwAAAAAXQPEGAAAAAC6A4g0AAAAAXADFGwAAAAC4AIo3AAAAAHAB/wdjk8kS8Ch0wAAAAABJRU5ErkJggg==", - "text/plain": [ - "
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "fig = plt.figure(figsize = (10, 5))\n", "plt.bar(cycles_dict_updated.keys(), cycles_dict_updated.values(), color ='blue', width = 0.3)\n", "plt.xlabel(\"Network layers\")\n", "plt.ylabel(\"Number of clock cycles\")\n", - "plt.title(\"Estimated no. of clock cycles for each network layer\")\n", + "plt.title(\"Clock cycles per layer with updated folding factors\")\n", "plt.show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This has of course consequences for the resource usage of the network." + ] + }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'MatrixVectorActivation_0': {'BRAM_18K': 8,\n", - " 'BRAM_efficiency': 0.5208333333333334,\n", - " 'LUT': 418,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.006944444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0}}" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "res_dict_updated = model.analysis(res_estimation)\n", "res_dict_updated" @@ -589,20 +363,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2YAAAHWCAYAAAAcgJqiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABerUlEQVR4nO3deZxO9f//8edldrMazIxl7IlhxogwtlGWsaYoKTGWJA0VX4o+IaRpp/qgtKBPpKyVT5IsQ7askd1EFIMsM5YMM/P+/eE35+Myg5kxHMvjfrudW3O9z/uc8zrXdb2vPK+zXA5jjBEAAAAAwDYF7C4AAAAAAO50BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwB50qhRIzVq1MjuMvLV3r175XA4NGnSJLtLsRXPQ85NmjRJDodDe/fuvWrfH374QZGRkfL09JTD4dCJEyeue303msPhUJ8+fewu46aW+Z5Zu3ZtrpddsmSJHA6HlixZkv+FAbAdwQy4zWT+T/9y06pVq3K8rq1bt+qVV17J0T86b6Rx48bZGhoy/3E0Y8aMy/a50j9QZ8yYYf3jKnNdOZlw6zp69Kg6dOggLy8vjR07Vv/5z3/k7e1td1m3vRUrVuiVV165LUMwgNuPq90FALg+RowYobJly2Zpr1ChQo7XsXXrVg0fPlyNGjVSmTJlnOb9+OOP11pino0bN05FihRR165dbashv1SuXFn/+c9/nNoGDx4sHx8f/etf/7KpKuS3NWvW6OTJkxo5cqSaNGlidzl3jBUrVmj48OHq2rWrAgIC7C4HAK6IYAbcplq0aKGaNWtet/W7u7tft3XfSYKDg/XEE084tb3++usqUqRIlnbcug4fPixJ+RoOTp8+zVG3W8jZs2dv+89N3pPAteFURuAONm3aNNWoUUO+vr7y8/NTeHi43nvvPUkXTol85JFHJEn33XefdTpd5rUNl15jlnlK3tdff63hw4erRIkS8vX11cMPP6zk5GSlpqbq+eefV1BQkHx8fNStWzelpqY61TNx4kTdf//9CgoKkoeHh8LCwjR+/HinPmXKlNGWLVuUkJBg1XRxHSdOnNDzzz+v0NBQeXh4qEKFCnrjjTeUkZHhtJ4TJ06oa9eu8vf3V0BAgGJjY2/J050OHTokV1dXDR8+PMu8HTt2yOFw6N///rck6dixYxowYIDCw8Pl4+MjPz8/tWjRQr/++utVt3O5awq7du2a5WhqRkaGxowZoypVqsjT01PBwcHq1auXjh8/7tRv7dq1iomJUZEiReTl5aWyZcuqe/fuV63F4XDolVdeydJepkwZp6Oo58+f1/Dhw3XXXXfJ09NThQsXVv369bVgwQKn5bZv366HH35YgYGB8vT0VM2aNfXtt99mWf+WLVt0//33y8vLSyVLltSrr76a5X2VnUaNGik2NlaSdO+998rhcDjVOX36dNWoUUNeXl5WIP/rr7+c1tG1a1f5+PgoMTFRLVu2lK+vrzp16nTF7f7111/q3r27goOD5eHhoSpVquizzz5z6nPu3DkNHTpUNWrUkL+/v7y9vdWgQQMtXrw4y/oyMjL03nvvKTw8XJ6enipatKiaN2+e7bVSc+bMUdWqVa3t/vDDD1d9ni7+DBk1apRKliwpT09PNW7cWLt3787Sf/Xq1WrevLn8/f1VsGBBRUdHa/ny5db8V155RQMHDpQklS1b1vq82Lt3r9q1a6d77rnHaX1t2rSRw+Fweu1Xr14th8OhefPmWW2///67HnnkEQUGBqpgwYKqU6eO/vvf/2a7L9OmTdPLL7+sEiVKqGDBgkpJScl2348fP65atWqpZMmS2rFjx1Wfq4stW7ZMjzzyiEqVKiUPDw+FhoaqX79++ueff6w+EydOlMPh0IYNG7Is/9prr8nFxcXpPXe151a68Pw6HA5t3bpVjz/+uAoVKqT69evnqnYAzjhiBtymkpOT9ffffzu1ORwOFS5cWJK0YMECPfbYY2rcuLHeeOMNSdK2bdu0fPlyPffcc2rYsKGeffZZvf/++3rppZdUuXJlSbL+eznx8fHy8vLSoEGDtHv3bn3wwQdyc3NTgQIFdPz4cb3yyitatWqVJk2apLJly2ro0KHWsuPHj1eVKlX0wAMPyNXVVd99952eeeYZZWRkKC4uTpI0ZswY9e3b1+lUv+DgYEnSmTNnFB0drb/++ku9evVSqVKltGLFCg0ePFgHDx7UmDFjJEnGGLVt21Y///yznn76aVWuXFmzZ8+2/vF8KwkODlZ0dLS+/vprDRs2zGneV199JRcXFytg//7775ozZ44eeeQRlS1bVocOHdJHH32k6Ohobd26VcWLF8+Xmnr16qVJkyapW7duevbZZ7Vnzx79+9//1oYNG7R8+XK5ubnp8OHDatasmYoWLapBgwYpICBAe/fu1axZs/KlBunCPxzj4+P15JNPqlatWkpJSdHatWu1fv16NW3aVNKFsFWvXj2VKFFCgwYNkre3t77++ms9+OCDmjlzph566CFJUlJSku677z6lpaVZ/SZMmCAvL6+r1vGvf/1Ld999tyZMmGCdYly+fHlJsp6ne++9V/Hx8Tp06JDee+89LV++XBs2bHA6wpaWlqaYmBjVr19fb7/9tgoWLHjZbR46dEh16tSxrnUsWrSo5s2bpx49eiglJUXPP/+8JCklJUWffPKJHnvsMfXs2VMnT57Up59+qpiYGP3yyy+KjIy01tmjRw9NmjRJLVq00JNPPqm0tDQtW7ZMq1atcjo6//PPP2vWrFl65pln5Ovrq/fff1/t27fXvn37rM+fK3n99ddVoEABDRgwQMnJyXrzzTfVqVMnrV692uqzaNEitWjRQjVq1NCwYcNUoEAB64udZcuWqVatWmrXrp127typL7/8UqNHj1aRIkUkSUWLFlWDBg30zTffKCUlRX5+fjLGaPny5SpQoICWLVumBx54QNKF0FOgQAHVq1fPel7r1q2rM2fO6Nlnn1XhwoU1efJkPfDAA5oxY4b1fsk0cuRIubu7a8CAAUpNTc32iNnff/+tpk2b6tixY0pISLDeGzk1ffp0nTlzRr1791bhwoX1yy+/6IMPPtCff/6p6dOnS5IefvhhxcXFacqUKapevbrT8lOmTFGjRo1UokSJHD+3F3vkkUd011136bXXXpMxJle1A7iEAXBbmThxopGU7eTh4WH1e+6554yfn59JS0u77LqmT59uJJnFixdnmRcdHW2io6Otx4sXLzaSTNWqVc25c+es9scee8w4HA7TokULp+WjoqJM6dKlndrOnDmTZTsxMTGmXLlyTm1VqlRx2namkSNHGm9vb7Nz506n9kGDBhkXFxezb98+Y4wxc+bMMZLMm2++afVJS0szDRo0MJLMxIkTs6z7Ypn7On369Mv2kWTi4uKynXel5/VK+3c5H330kZFkNm/e7NQeFhZm7r//fuvx2bNnTXp6ulOfPXv2GA8PDzNixAintkufh0tf70yxsbFOr+
OyZcuMJDNlyhSnfj/88INT++zZs40ks2bNmhzvZyZJZtiwYVnaS5cubWJjY63H1apVM61atbriuho3bmzCw8PN2bNnrbaMjAxTt25dc9ddd1ltzz//vJFkVq9ebbUdPnzY+Pv7G0lmz549V9xO5ri8eH/PnTtngoKCTNWqVc0///xjtc+dO9dIMkOHDrXaYmNjjSQzaNCgK24nU48ePUyxYsXM33//7dTesWNH4+/vb421tLQ0k5qa6tTn+PHjJjg42HTv3t1qW7RokZFknn322SzbysjIsP6WZNzd3c3u3buttl9//dVIMh988MEVa84cV5UrV3aq6b333nN6f2dkZJi77rrLxMTEOG37zJkzpmzZsqZp06ZW21tvvZXt67NmzRojyXz//ffGGGM2bdpkJJlHHnnE1K5d2+r3wAMPmOrVq1uPM98Hy5Yts9pOnjxpypYta8qUKWONr8x9KVeuXJbPtYvfCwcPHjRVqlQx5cqVM3v37r3i83Pxei/+7MjuczM+Pt44HA7zxx9/WG2PPfaYKV68uNNnwPr1653Gem6e22HDhhlJ5rHHHrtq3QByhlMZgdvU2LFjtWDBAqfp4tNxAgICdPr06SyndV2rLl26yM3NzXpcu3ZtGWOynKJWu3Zt7d+/X2lpaVbbxUcfMo/4RUdH6/fff1dycvJVtz19+nQ1aNBAhQoV0t9//21NTZo0UXp6upYuXSpJ+v777+Xq6qrevXtby7q4uKhv37553m87tWvXTq6urvrqq6+stt9++01bt27Vo48+arV5eHioQIELH/vp6ek6evSofHx8dPfdd2v9+vX5Usv06dPl7++vpk2bOr0GNWrUkI+Pj3WKXOaRoLlz5+r8+fP5su1LBQQEaMuWLdq1a1e2848dO6ZFixapQ4cOOnnypFXr0aNHFRMTo127dlmnd33//feqU6eO09GCokWLXvV0witZu3atDh8+rGeeeUaenp5We6tWrVSpUqUsp8dJcnrPXo4xRjNnzlSbNm1kjHF6HWJiYpScnGy93i4uLtZRnIyMDB07dkxpaWmqWbOm03ti5syZcjgcWY7KSspyx9AmTZo4HfWJiIiQn5+ffv/996vWLkndunVzOrLUoEEDSbKW37hxo3bt2qXHH39cR48etfbt9OnTaty4sZYuXXrVU0yrV68uHx8f6zNh2bJlKlmypLp06aL169frzJkzMsbo559/trYvXXgf1KpVy+mUPR8fHz311FPau3evtm7d6rSd2NjYyx5V/fPPPxUdHa3z589r6dKlKl26dI6en0tdvP7Tp0/r77//Vt26dWWMcTp1sUuXLjpw4IDTaapTpkyRl5eX2rdvLylvz+3TTz+dp7oBZMWpjMBtqlatWle8+cczzzyjr7/+Wi1atFCJEiXUrFkzdejQQc2bN7+m7ZYqVcrpsb+/vyQpNDQ0S3tGRoaSk5Ot05uWL1+uYcOGaeXKlTpz5oxT/+TkZGtdl7Nr1y5t2rRJRYsWzXZ+5g0Y/vjjDxUrVkw+Pj5O8+++++6r7F3+yq9b4BcpUkSNGzfW119/rZEjR0q6cBqjq6ur2rVrZ/XLvEZo3Lhx2rNnj9LT0615OTnFLCd27dql5ORkBQUFZTs/8zWIjo5W+/btNXz4cI0ePVqNGjXSgw8+qMcff1weHh75UsuIESPUtm1bVaxYUVWrVlXz5s3VuXNnRURESJJ2794tY4yGDBmiIUOGXLbeEiVK6I8//lDt2rWzzL+W98wff/xx2XVUqlRJP//8s1Obq6urSpYsedX1HjlyRCdOnNCECRM0YcKEbPtkvg6SNHnyZL3zzjvavn27U0i++K6uiYmJKl68uAIDA6+6/Us/AySpUKFCWa4xzOnyhQoVkiRr+cygfaVTj5OTk63lsuPi4qKoqCgtW7ZM0oVg1qBBA9WvX1/p6elatWqVgoODdezYMadgdrn3QeYp3n/88YeqVq1qtWd3Z9xMnTt3lqurq7Zt26aQkJDL9ruaffv2aejQofr222+zPMcXf6HVtGlTFStWTFOmTFHjxo2VkZGhL7/8Um3btpWvr6+kvD23V9pHALlDMAPuUEFBQdq4caPmz5+vefPmad68eZo4caK6dOmiyZMn53m9Li4uuWo3//+ahMTERDVu3FiVKlXSu+++q9DQULm7u+v777/X6NGjc3SThYyMDDVt2lQvvPBCtvMrVqyYw724dh4eHk4X318sM3RefJTkWnXs2FHdunXTxo0bFRkZqa+//lqNGze2rquRLlzkP2TIEHXv3l0jR45UYGCgChQooOeff/6qz6/D4cj2+pGLw5104TUICgrSlClTsl1PZmjO/B24VatW6bvvvtP8+fPVvXt3vfPOO1q1alWW0JwTl9bSsGFDJSYm6ptvvtGPP/6oTz75RKNHj9aHH36oJ5980trnAQMGKCYmJtt15ubnJa63i494Xknmfj3xxBOX/Qd2Zjj94osv1LVrVz344IMaOHCggoKC5OLiovj4eCUmJuapzquN9WtdPnP/3nrrLadr4C6Wk/dP/fr1NWrUKJ09e1bLli3Tv/71LwUEBKhq1apatmyZde3qxcEst650DWK7du30+eef67333lN8fHye1p+enm5dn/biiy+qUqVK8vb21l9//aWuXbs6jWsXFxc9/vjj+vjjjzVu3DgtX75cBw4ccLr7a16e25xcZwkgZwhmwB3M3d1dbdq0UZs2bZSRkaFnnnlGH330kYYMGaIKFSrc0B81/u6775Samqpvv/3W6Rvz7O4Od7m6ypcvr1OnTl31d6JKly6thQsX6tSpU07/yMjt3dCuto3LrS+zPa+nLmXnwQcfVK9evazTGXfu3KnBgwc79ZkxY4buu+8+ffrpp07tJ06ccApw2SlUqFC2p6JlHvXJVL58ef3000+qV69ejv7BVqdOHdWpU0ejRo3S1KlT1alTJ02bNk1PPvnkFWu59A6a586d08GDB7P0DQwMVLdu3dStWzedOnVKDRs21CuvvKInn3xS5cqVkyS5ubnl6D2T3SmR1/KeyXz9d+zYofvvvz/LevP6/ihatKh8fX2Vnp5+1f2aMWOGypUrp1mzZjmNq0tPWSxfvrzmz5+vY8eO5eio2fWUeZqkn5/fVffvSp9hDRo00Llz5/Tll1/qr7/+sgJYw4YNrWBWsWJFK6BJlx/X27dvt+bnVN++fVWhQgUNHTpU/v7+GjRoUI6XzbR582bt3LlTkydPVpcuXaz2y52i3qVLF73zzjv67rvvNG/ePBUtWtTpS4ncPLcA8h/XmAF3qKNHjzo9LlCggPUteuZt7DN/j+ZG3EY+81vyi79VT05O1sSJE7P09fb2zramDh06aOXKlZo/f36WeSdOnLCuZ2vZsqXS0tKcbsWfnp6uDz744Fp3w9KyZUutWrVK69aty1LHlClTFBkZeU2nL10qICBAMTEx+vrrrzVt2jS5u7vrwQcfdOrj4uKS5ajF9OnTs9yaPTvly5fX9u3bdeTIEavt119/zXIL7Q4dOig9Pd06pfJiaWlp1ut2/PjxLLVkfkN/6c8oZ
FdL5rVBmSZMmJDliNml73EfHx9VqFDBWn9QUJAaNWqkjz76KNtQd/G+Zr6ev/zyi9P8yx0ZzImaNWsqKChIH374odM+z5s3T9u2bVOrVq3ytF4XFxe1b99eM2fO1G+//ZZl/sX7ld24W716tVauXOm0TPv27WWMyfZnGXJ6JCy/1KhRQ+XLl9fbb7+tU6dOZZl/8f5d6TOsdu3acnNz0xtvvKHAwEBVqVJF0oXAtmrVKiUkJGQ5WtayZUv98ssvTs/P6dOnNWHCBJUpU0ZhYWG52pchQ4ZowIABGjx4cJafBsmJ7F4/Y4z1syeXioiIUEREhD755BPNnDlTHTt2lKvr/76jz81zCyD/ccQMuE3NmzfP+hb3YnXr1lW5cuX05JNP6tixY7r//vtVsmRJ/fHHH/rggw8UGRlpXS8RGRkpFxcXvfHGG0pOTpaHh4f1O2P5rVmzZtYRvF69eunUqVP6+OOPFRQUlOUfzTVq1ND48eP16quvqkKFCgoKCtL999+vgQMH6ttvv1Xr1q3VtWtX1ahRQ6dPn9bmzZs1Y8YM7d27V0WKFFGbNm1Ur149DRo0SHv37lVYWJhmzZqVoxuMXGzmzJnZPsexsbEaNGiQpk+froYNG6pXr16qVKmSDhw4oEmTJungwYPZBs5r9eijj+qJJ57QuHHjFBMTk+XHjFu3bq0RI0aoW7duqlu3rjZv3qwpU6ZYR46upHv37nr33XcVExOjHj166PDhw/rwww9VpUoVp99mio6OVq9evRQfH6+NGzeqWbNmcnNz065duzR9+nS99957evjhhzV58mSNGzdODz30kMqXL6+TJ0/q448/lp+fn1q2bHnFWp588kk9/fTTat++vZo2bapff/1V8+fPz3LULywsTI0aNVKNGjUUGBiotWvXasaMGerTp4/VZ+zYsapfv77Cw8PVs2dPlStXTocOHdLKlSv1559/Wr/x9sILL+g///mPmjdvrueee866XX7p0qW1adOmqz5/2ckMBd26dVN0dLQee+wx63b5ZcqUUb9+/fK0XunCLecXL16s2rVrq2fPngoLC9OxY8e0fv16/fTTTzp27JikC++JWbNm6aGHHlKrVq20Z88effjhhwoLC3P6h/l9992nzp076/3339euXbvUvHlzZWRkaNmyZbrvvvucntPrrUCBAvrkk0/UokULValSRd26dVOJEiX0119/afHixfLz89N3330n6cJnhXThJws6duwoNzc3tWnTRt7e3ipYsKBq1KihVatWWb9hJl04Ynb69GmdPn06SzAbNGiQvvzyS7Vo0ULPPvusAgMDNXnyZO3Zs0czZ87M0amml3rrrbeUnJysuLg4+fr65uqH5StVqqTy5ctrwIAB+uuvv+Tn56eZM2de8Xq+Ll26aMCAAZKUZVu5eW4BXAc3+jaQAK6vK90uXxfdFnnGjBmmWbNmJigoyLi7u5tSpUqZXr16mYMHDzqt7+OPPzblypUzLi4uTrdpvtzt8i+9hXx2twk35n+3Wj5y5IjV9u2335qIiAjj6elpypQpY9544w3z2WefZbnddVJSkmnVqpXx9fU1kpzqOHnypBk8eLCpUKGCcXd3N0WKFDF169Y1b7/9ttNt/I8ePWo6d+5s/Pz8jL+/v+ncubPZsGFDrm6Xf7kp81baf/75p3nyySdNiRIljKurqwkMDDStW7c2q1atuuL6c3u7/EwpKSnGy8vLSDJffPFFlvlnz541//d//2eKFStmvLy8TL169czKlSuzvJbZ3S7fGGO++OILU65cOePu7m4iIyPN/Pnzs9wuP9OECRNMjRo1jJeXl/H19TXh4eHmhRdeMAcOHDDGXLhN92OPPWZKlSplPDw8TFBQkGndurVZu3btVfczPT3dvPjii6ZIkSKmYMGCJiYmxuzevTvL7fJfffVVU6tWLRMQEGC8vLxMpUqVzKhRo5zeB8YYk5iYaLp06WJCQkKMm5ubKVGihGndurWZMWOGU79NmzaZ6Oho4+npaUqUKGFGjhxpPv300zzfLj/TV199ZapXr248PDxMYGCg6dSpk/nzzz+d+sTGxhpvb++rPjcXO3TokImLizOhoaHGzc3NhISEmMaNG5sJEyZYfTIyMsxrr71mSpcubTw8PEz16tXN3Llzs31d09LSzFtvvWUqVapk3N3dTdGiRU2LFi3MunXrrD66zM9EXPraZOdynyGXez9u2LDBtGvXzhQuXNh4eHiY0qVLmw4dOpiFCxc69Rs5cqQpUaKEKVCgQJbXauDAgUaSeeONN5yWqVChgpFkEhMTs9SZmJhoHn74YRMQEGA8PT1NrVq1zNy5c3O0L8Zk/15IT083jz32mHF1dTVz5sy56nN08e3yt27dapo0aWJ8fHxMkSJFTM+ePa2fKMjus+zgwYPGxcXFVKxY8bLbyclzm91nOIBr4zCGXwMEAAC4E/z9998qVqyYhg4detm7kQKwB9eYAQAA3CEmTZqk9PR0de7c2e5SAFyCa8wAAABuc4sWLdLWrVs1atQoPfjggypTpozdJQG4BKcyAgAA3OYaNWqkFStWqF69evriiy9UokQJu0sCcAmCGQAAAADYjGvMAAAAAMBmBDMAAAAAsBk3/5CUkZGhAwcOyNfX1/qBSQAAAAB3HmOMTp48qeLFi+fph+PzimAm6cCBAwoNDbW7DAAAAAA3if3796tkyZI3bHsEM0m+vr6SLjz5fn5+NlcDAAAAwC4pKSkKDQ21MsKNQjCTrNMX/fz8CGYAAAAAbvglTtz8AwAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZq52F4CsHMMddpdgKzPM2F0CAAAAcENxxAwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZjdNMHv99dflcDj0/PPPW21nz55VXFycChcuLB8fH7Vv316HDh1yWm7fvn1q1aqVChYsqKCgIA0cOFBpaWk3uHoAAAAAyLubIpitWbNGH330kSIiIpza+/Xrp++++07Tp09XQkKCDhw4oHbt2lnz09PT1apVK507d04rVqzQ5MmTNWnSJA0dOvRG7wIAAAAA5JntwezUqVPq1KmTPv74YxUqVMhqT05O1qeffqp3331X999/v2rUqKGJEydqxYoVWrVqlSTpxx9/1NatW/XFF18oMjJSLVq00MiRIzV27FidO3fOrl0CAAAAgFyxPZjFxcWpVatWatKkiVP7unXrdP78eaf2SpUqqVSpUlq5cqUkaeXKlQoPD1dwcLDVJyYmRikpKdqyZctlt5mamqqUlBSnCQAAAADs4mrnxqdNm6b169drzZo1WeYlJSXJ3d1dAQEBTu3BwcFKSkqy+lwcyjLnZ867nPj4eA0f
PvwaqwcAAACA/GHbEbP9+/frueee05QpU+Tp6XlDtz148GAlJydb0/79+2/o9gEAAADgYrYFs3Xr1unw4cO655575OrqKldXVyUkJOj999+Xq6urgoODde7cOZ04ccJpuUOHDikkJESSFBISkuUujZmPM/tkx8PDQ35+fk4TAAAAANjFtmDWuHFjbd68WRs3brSmmjVrqlOnTtbfbm5uWrhwobXMjh07tG/fPkVFRUmSoqKitHnzZh0+fNjqs2DBAvn5+SksLOyG7xMAAAAA5IVt15j5+vqqatWqTm3e3t4qXLiw1d6jRw/1799fgYGB8vPzU9++fRUVFaU6depIkpo1a6awsDB17txZb775ppKSkvTyyy8rLi5OHh4eN3yfAAAAACAvbL35x9WMHj1aBQoUUPv27ZWamqqYmBiNGzfOmu/i4qK5c+eqd+/eioqKkre3t2JjYzVixAgbqwYAAACA3HEYY4zdRdgtJSVF/v7+Sk5OvimuN3MMd9hdgq3MsDv+LQkAAACb2JUNbP8dMwAAAAC40xHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACb2RrMxo8fr4iICPn5+cnPz09RUVGaN2+eNb9Ro0ZyOBxO09NPP+20jn379qlVq1YqWLCggoKCNHDgQKWlpd3oXQEAAACAPHO1c+MlS5bU66+/rrvuukvGGE2ePFlt27bVhg0bVKVKFUlSz549NWLECGuZggULWn+np6erVatWCgkJ0YoVK3Tw4EF16dJFbm5ueu211274/gAAAABAXtgazNq0aeP0eNSoURo/frxWrVplBbOCBQsqJCQk2+V//PFHbd26VT/99JOCg4MVGRmpkSNH6sUXX9Qrr7wid3f3674PAAAAAHCtbpprzNLT0zVt2jSdPn1aUVFRVvuUKVNUpEgRVa1aVYMHD9aZM2eseStXrlR4eLiCg4OttpiYGKWkpGjLli2X3VZqaqpSUlKcJgAAAACwi61HzCRp8+bNioqK0tmzZ+Xj46PZs2crLCxMkvT444+rdOnSKl68uDZt2qQXX3xRO3bs0KxZsyRJSUlJTqFMkvU4KSnpstuMj4/X8OHDr9MeAQAAAEDu2B7M7r77bm3cuFHJycmaMWOGYmNjlZCQoLCwMD311FNWv/DwcBUrVkyNGzdWYmKiypcvn+dtDh48WP3797cep6SkKDQ09Jr2AwAAAADyyvZTGd3d3VWhQgXVqFFD8fHxqlatmt57771s+9auXVuStHv3bklSSEiIDh065NQn8/HlrkuTJA8PD+tOkJkTAAAAANjF9mB2qYyMDKWmpmY7b+PGjZKkYsWKSZKioqK0efNmHT582OqzYMEC+fn5WadDAgAAAMDNztZTGQcPHqwWLVqoVKlSOnnypKZOnaolS5Zo/vz5SkxM1NSpU9WyZUsVLlxYmzZtUr9+/dSwYUNFRERIkpo1a6awsDB17txZb775ppKSkvTyyy8rLi5OHh4edu4aAAAAAOSYrcHs8OHD6tKliw4ePCh/f39FRERo/vz5atq0qfbv36+ffvpJY8aM0enTpxUaGqr27dvr5ZdftpZ3cXHR3Llz1bt3b0VFRcnb21uxsbFOv3sGAAAAADc7hzHG2F2E3VJSUuTv76/k5OSb4nozx3CH3SXYygy749+SAAAAsIld2eCmu8YMAAAAAO40BDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbOZqdwEAcL05hjvsLsFWZpixuwTchhhXjCvkL8YUY4ojZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM1uD2fjx4xURESE/Pz/5+fkpKipK8+bNs+afPXtWcXFxKly4sHx8fNS+fXsdOnTIaR379u1Tq1atVLBgQQUFBWngwIFKS0u70bsCAAAAAHlmazArWbKkXn/9da1bt05r167V/fffr7Zt22rLli2SpH79+um7777T9OnTlZCQoAMHDqhdu3bW8unp6WrVqpXOnTunFStWaPLkyZo0aZKGDh1q1y4BAAAAQK45jDHG7iIuFhgYqLfeeksPP/ywihYtqqlTp+rhhx+WJG3fvl2VK1fWypUrVadOHc2bN0+tW7fWgQMHFBwcLEn68MMP9eKLL+rIkSNyd3fPdhupqalKTU21HqekpCg0NFTJycny8/O7/jt5FY7hDrtLsJUZdlO9JXEbYEwxppD/GFeMK+QvxtTNM6ZSUlLk7+9/w7PBTXONWXp6uqZNm6bTp08rKipK69at0/nz59WkSROrT6VKlVSqVCmtXLlSkrRy5UqFh4dboUySYmJilJKSYh11y058fLz8/f2tKTQ09PrtGAAAAABche3BbPPmzfLx8ZGHh4eefvppzZ49W2FhYUpKSpK7u7sCAgKc+gcHByspKUmSlJSU5BTKMudnzrucwYMHKzk52Zr279+fvzsFAAAAALngancBd999tzZu3Kjk5GTNmDFDsbGxSkhIuK7b9PDwkIeHx3XdBgAAAADklO3BzN3dXRUqVJAk1ahRQ2vWrNF7772nRx99VOfOndOJEyecjpodOnRIISEhkqSQkBD98ssvTuvLvGtjZh8AAAAAuNnZfirjpTIyMpSamqoaNWrIzc1NCxcutObt2LFD+/btU1RUlCQpKipKmzdv1uHDh60+CxYskJ+fn8LCwm547QAAAACQF7YeMRs8eLBatGihUqVK6eTJk5o6daqWLFmi+fPny9/fXz169FD//v0VGBgoPz8/9e3bV1FRUapTp44kqVmzZgoLC1Pnzp315ptvKikpSS+//LLi4uI4VREAAADALcPWYHb48GF16dJFBw8elL+/vyIiIjR//nw1bdpUkjR69GgVKFBA7du3V2pqqmJiYjRu3DhreRcXF82dO1e9e/dWVFSUvL29FRsbqxEjRti1SwAAAACQa7YGs08//fSK8z09PTV27FiNHTv2sn1Kly6t77//Pr9LAwAAAIAb5qa7xgwAAAAA7jQEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxH
MAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAAADAZgQzAAAAALAZwQwAAAAAbEYwAwAAAACbEcwAAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAm7nmZaFdu3Zp8eLFOnz4sDIyMpzmDR06NF8KAwAAAIA7Ra6D2ccff6zevXurSJEiCgkJkcPhsOY5HA6CGQAAAADkUq6D2auvvqpRo0bpxRdfvB71AAAAAMAdJ9fXmB0/flyPPPLI9agFAAAAAO5IuQ5mjzzyiH788cfrUQsAAAAA3JFyfSpjhQoVNGTIEK1atUrh4eFyc3Nzmv/ss8/mW3EAAAAAcCfIdTCbMGGCfHx8lJCQoISEBKd5DoeDYAYAAAAAuZTrYLZnz57rUQcAAAAA3LGu6QemjTEyxuRXLQAAAABwR8pTMPv8888VHh4uLy8veXl5KSIiQv/5z3/yuzYAAAAAuCPk+lTGd999V0OGDFGfPn1Ur149SdLPP/+sp59+Wn///bf69euX70UCAAAAwO0s18Hsgw8+0Pjx49WlSxer7YEHHlCVKlX0yiuvEMwAAAAAIJdyfSrjwYMHVbdu3SztdevW1cGDB/OlKAAAAAC4k+Q6mFWoUEFff/11lvavvvpKd911V74UBQAAAAB3klyfyjh8+HA9+uijWrp0qXWN2fLly7Vw4cJsAxsAAAAA4MpyfcSsffv2Wr16tYoUKaI5c+Zozpw5KlKkiH755Rc99NBD16NGAAAAALit5fqImSTVqFFDX3zxRX7XAgAAAAB3pBwdMUtJSXH6+0pTbsTHx+vee++Vr6+vgoKC9OCDD2rHjh1OfRo1aiSHw+E0Pf3000599u3bp1atWqlgwYIKCgrSwIEDlZaWlqtaAAAAAMAuOTpiVqhQIR08eFBBQUEKCAiQw+HI0scYI4fDofT09BxvPCEhQXFxcbr33nuVlpaml156Sc2aNdPWrVvl7e1t9evZs6dGjBhhPS5YsKD1d3p6ulq1aqWQkBCtWLFCBw8eVJcuXeTm5qbXXnstx7UAAAAAgF1yFMwWLVqkwMBASdLixYvzbeM//PCD0+NJkyYpKChI69atU8OGDa32ggULKiQkJNt1/Pjjj9q6dat++uknBQcHKzIyUiNHjtSLL76oV155Re7u7lmWSU1NVWpqqvU4t0f6AAAAACA/5SiYRUdHW3+XLVtWoaGhWY6aGWO0f//+ayomOTlZkqwQmGnKlCn64osvFBISojZt2mjIkCHWUbOVK1cqPDxcwcHBVv+YmBj17t1bW7ZsUfXq1bNsJz4+XsOHD7+mWgEAAAAgv+T65h9ly5a1Tmu82LFjx1S2bNlcncp4sYyMDD3//POqV6+eqlatarU//vjjKl26tIoXL65NmzbpxRdf1I4dOzRr1ixJUlJSklMok2Q9TkpKynZbgwcPVv/+/a3HKSkpCg0NzVPdAAAAAHCtch3MMq8lu9SpU6fk6emZ50Li4uL022+/6eeff3Zqf+qpp6y/w8PDVaxYMTVu3FiJiYkqX758nrbl4eEhDw+PPNcKAAAAAPkpx8Es8wiTw+FwOpVQunADjtWrVysyMjJPRfTp00dz587V0qVLVbJkySv2rV27tiRp9+7dKl++vEJCQvTLL7849Tl06JAkXfa6NAAAAAC4meQ4mG3YsEHShSNmmzdvdrqphru7u6pVq6YBAwbkauPGGPXt21ezZ8/WkiVLVLZs2asus3HjRklSsWLFJElRUVEaNWqUDh8+bJ1euWDBAvn5+SksLCxX9QAAAACAHXIczDLvxtitWze999578vPzu+aNx8XFaerUqfrmm2/k6+trXRPm7+8vLy8vJSYmaurUqWrZsqUKFy6sTZs2qV+/fmrYsKEiIiIkSc2aNVNYWJg6d+6sN998U0lJSXr55ZcVFxfH6YoAAAAAbgm5vsZs4sSJ+bbx8ePHS7rwI9KXbqNr165yd3fXTz/9pDFjxuj06dMKDQ1V+/bt9fLLL1t9XVxcNHfuXPXu3VtRUVHy9vZWbGys0++eAQAAAMDNLNfBTJLWrl2rr7/+Wvv27dO5c+ec5mXeLTEnjDFXnB8aGqqEhISrrqd06dL6/vvvc7xdAAAAALiZFMjtAtOmTVPdunW1bds2zZ49W+fPn9eWLVu0aNEi+fv7X48aAQAAAOC2lutg9tprr2n06NH67rvv5O7urvfee0/bt29Xhw4dVKpUqetRIwAAAADc1nIdzBITE9WqVStJF+7GePr0aTkcDvXr108TJkzI9wIBAAAA4HaX62BWqFAhnTx5UpJUokQJ/fbbb5KkEydO6MyZM/lbHQAAAADcAXJ984+GDRtqwYIFCg8P1yOPPKLnnntOixYt0oIFC9S4cePrUSMAAAAA3NZyHcz+/e9/6+zZs5Kkf/3rX3Jzc9OKFSuy3MYeAAAAAJAzuQ5mgYGB1t8FChTQoEGD8rUgAAAAALjT5Poas/Xr12vz5s3W42+++UYPPvigXnrppSy/aQYAAAAAuLpcB7NevXpp586dkqTff/9djz76qAoWLKjp06frhRdeyPcCAQAAAOB2l+tgtnPnTkVGRkqSpk+frujoaE2dOlWTJk3SzJkz87s+AAAAALjt5TqYGWOUkZEhSfrpp5/UsmVLSVJoaKj+/vvv/K0OAAAAAO4AuQ5mNWvW1Kuvvqr//Oc/SkhIsH5ses+ePQoODs73AgEAAADgdpfrYDZmzBitX79effr00b/+9S9VqFBBkjRjxgzVrVs33wsEAAAAgNtdrm+XHxER4XRXxkxvvfWWXFxc8qUoAAAAALiT5DqYXY6np2d+rQoAAAAA7ig5CmaBgYHauXOnihQpokKFCsnhcFy277Fjx/KtOAAAAAC4E+QomI0ePVq+vr6SLlxjBgAAAADIPzkKZrGxsdn+DQAAAAC4djkKZikpKTleoZ+fX56LAQAAAIA7UY6CWUBAwBWvK5Mu/PC0w+FQenp6vhQGAAAAAHeKHAWzxYsXX+86AAAAAOCOlaNgFh0dfb3rAAAAAIA7Vo6C2aZNm1S1alUVKFBAmzZtumLfiIiIfCkMAAAAAO4UOQpmkZGRSkpKUlBQkCIjI+VwOGSMydKPa8wAAAAAIPdyFMz27NmjokWLWn8DAAAAAPJPjoJZ6dKlrb//+OMP1a1bV66uzoumpaVpxYoVTn0BAAAAAFdXILcL3HfffTp27FiW9uTkZN133335UhQAAAAA3ElyHcwyf6/sUkePHpW3t3e+FAUAAAAAd5IcncooSe3atZN04QYfXbt2lYeHhzUvPT1dmzZtUt26dfO/QgAAAAC4zeU4mPn7+0u6cMTM19dXXl5e1jx3d3fVqVNHPXv2zP8KAQAAAOA2l+NgNnHiRElSmTJlNGDAAE5bBAAAAIB8kuNglmnYsGHXow4AAAAAuGPl+OYfhQoVUmBgYJapbNmyiomJ0YIFC3K98fj4eN17773y9fVVUFCQHnzwQe3YscOpz9mzZxUXF6fChQvLx8dH7du316FDh5z67Nu3T61atVLBggUVFBSkgQMHKi0tLdf1AA
AAAIAdcnzEbMyYMdm2nzhxQuvWrVPr1q01Y8YMtWnTJscbT0hIUFxcnO69916lpaXppZdeUrNmzbR161brVMl+/frpv//9r6ZPny5/f3/16dNH7dq10/LlyyVduPFIq1atFBISohUrVujgwYPq0qWL3Nzc9Nprr+W4FgAAAACwi8MYY/JjRe+++65mzJihFStW5HkdR44cUVBQkBISEtSwYUMlJyeraNGimjp1qh5++GFJ0vbt21W5cmWtXLlSderU0bx589S6dWsdOHBAwcHBkqQPP/xQL774oo4cOSJ3d/erbjclJUX+/v5KTk6Wn59fnuvPL47hWX+O4E5ihuXLWxKwMKYYU8h/jCvGFfIXY+rmGVN2ZYNc/47Z5bRu3Vrbt2+/pnUkJydLkgIDAyVJ69at0/nz59WkSROrT6VKlVSqVCmtXLlSkrRy5UqFh4dboUySYmJilJKSoi1btmS7ndTUVKWkpDhNAAAAAGCXfAtmqampOTo6dTkZGRl6/vnnVa9ePVWtWlWSlJSUJHd3dwUEBDj1DQ4OVlJSktXn4lCWOT9zXnbi4+Pl7+9vTaGhoXmuGwAAAACuVb4Fs08//VSRkZF5Xj4uLk6//fabpk2bll8lXdbgwYOVnJxsTfv377/u2wQAAACAy8nxzT/69++fbXtycrLWr1+vnTt3aunSpXkqok+fPpo7d66WLl2qkiVLWu0hISE6d+6cTpw44XTU7NChQwoJCbH6/PLLL07ry7xrY2afS3l4eMjDwyNPtQIAAABAfstxMNuwYUO27X5+fmratKlmzZqlsmXL5mrjxhj17dtXs2fP1pIlS7IsX6NGDbm5uWnhwoVq3769JGnHjh3at2+foqKiJElRUVEaNWqUDh8+rKCgIEnSggUL5Ofnp7CwsFzVAwAAAAB2yHEwW7x4cb5vPC4uTlOnTtU333wjX19f65owf39/eXl5yd/fXz169FD//v0VGBgoPz8/9e3bV1FRUapTp44kqVmzZgoLC1Pnzp315ptvKikpSS+//LLi4uI4KgYAAADglpDjYHY9jB8/XpLUqFEjp/aJEyeqa9eukqTRo0erQIECat++vVJTUxUTE6Nx48ZZfV1cXDR37lz17t1bUVFR8vb2VmxsrEaMGHGjdgMAAAAAromtwSwnP6Hm6empsWPHauzYsZftU7p0aX3//ff5WRoAAAAA3DD5dldGAAAAAEDeEMwAAAAAwGY5Dmbdu3fXyZMnr2ctAAAAAHBHynEwmzx5sv7555/rWQsAAAAA3JFyHMxycqMOAAAAAEDu5equjCdPnpSnp+cV+/j5+V1TQQAAAABwp8lVMKtYseJl5xlj5HA4lJ6efs1FAQAAAMCdJFfBbMaMGQoMDLxetQAAAADAHSlXwaxevXoKCgq6XrUAAAAAwB2J3zEDAAAAAJvlOJiVLl1aLi4u17MWAAAAALgj5fhUxj179lzPOgAAAADgjpXjYFaoUCE5HI4s7f7+/qpYsaIGDBigpk2b5mtxAAAAAHAnyHEwGz16dLbB7MSJE1q3bp1at26tGTNmqE2bNvlaIAAAAADc7nIczLp27XrF+ZGRkYqPjyeYAQAAAEAu5dtdGVu3bq3t27fn1+oAAAAA4I6Rb8EsNTVV7u7u+bU6AAAAALhj5Fsw+/TTTxUZGZlfqwMAAACAO0aOrzHr379/tu3Jyclav369du7cqaVLl+ZbYQAAAABwp8hxMNuwYUO27X5+fmratKlmzZqlsmXL5lthAAAAAHCnyHEwW7x48RXn//nnn3rqqac0YcKEay4KAAAAAO4k+XaN2dGjR/Xpp5/m1+oAAAAA4I6Rb8EMAAAAAJA3BDMAAAAAsBnBDAAAAABsluObf7Rr1+6K80+cOHGttQAAAADAHSnHwczf3/+q87t06XLNBQEAAADAnSbHwWzixInXsw4AAAAAuGNxjRkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANjM1mC2dOlStWnTRsWLF5fD4dCcOXOc5nft2lUOh8Npat68uVOfY8eOqVOnTvLz81NAQIB69OihU6dO3cC9AAAAAIBrY2swO336tKpVq6axY8detk/z5s118OBBa/ryyy+d5nfq1ElbtmzRggULNHfuXC1dulRPPfXU9S4dAAAAAPJNjn/H7Hpo0aKFWrRoccU+Hh4eCgkJyXbetm3b9MMPP2jNmjWqWbOmJOmDDz5Qy5Yt9fbbb6t48eL5XjMAAAAA5Leb/hqzJUuWKCgoSHfffbd69+6to0ePWvNWrlypgIAAK5RJUpMmTVSgQAGtXr36sutMTU1VSkqK0wQAAAAAdrmpg1nz5s31+eefa+HChXrjjTeUkJCgFi1aKD09XZKUlJSkoKAgp2VcXV0VGBiopKSky643Pj5e/v7+1hQaGnpd9wMAAAAArsTWUxmvpmPHjtbf4eHhioiIUPny5bVkyRI1btw4z+sdPHiw+vfvbz1OSUkhnAEAAACwzU19xOxS5cqVU5EiRbR7925JUkhIiA4fPuzUJy0tTceOHbvsdWnShevW/Pz8nCYAAAAAsMstFcz+/PNPHT16VMWKFZMkRUVF6cSJE1q3bp3VZ9GiRcrIyFDt2rXtKhMAAAAAcsXWUxlPnTplHf2SpD179mjjxo0KDAxUYGCghg8frvbt2yskJESJiYl64YUXVKFCBcXExEiSKleurObNm6tnz5768MMPdf78efXp00cdO3bkjowAAAAAbhm2HjFbu3atqlevrurVq0uS+vfvr+rVq2vo0KFycXHRpk2b9MADD6hixYrq0aOHatSooWXLlsnDw8Nax5QpU1SpUiU1btxYLVu2VP369TVhwgS7dgkAAAAAcs3WI2aNGjWSMeay8+fPn3/VdQQGBmrq1Kn5WRYAAAAA3FC31DVmAAAAAHA7IpgBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADazNZgtXbpUbdq0UfHixeVwODRnzhyn+cYYDR06VMWKFZOXl5eaNGmiXbt2OfU5duyYOnXqJD8/PwUEBKhHjx46derUDdwLAAAAALg2tgaz06dPq1q1aho7dmy289988029//77+vDDD7V69Wp5e3srJiZGZ8+etfp06tRJW7Zs0YIFCzR37lwtXbpUTz311I3aBQAAAAC4Zq52brxFixZq0aJFtvOMMRozZoxefvlltW3bVpL0+eefKzg4WHPmzFHHjh21bds2/fDDD1qzZo1q1qwpSfrggw/UsmVLv
f322ypevPgN2xcAAAAAyKub9hqzPXv2KCkpSU2aNLHa/P39Vbt2ba1cuVKStHLlSgUEBFihTJKaNGmiAgUKaPXq1Zddd2pqqlJSUpwmAAAAALDLTRvMkpKSJEnBwcFO7cHBwda8pKQkBQUFOc13dXVVYGCg1Sc78fHx8vf3t6bQ0NB8rh4AAAAAcu6mDWbX0+DBg5WcnGxN+/fvt7skAAAAAHewmzaYhYSESJIOHTrk1H7o0CFrXkhIiA4fPuw0Py0tTceOHbP6ZMfDw0N+fn5OEwAAAADY5aYNZmXLllVISIgWLlxotaWkpGj16tWKioqSJEVFRenEiRNat26d1WfRokXKyMhQ7dq1b3jNAAAAAJAXtt6V8dSpU9q9e7f1eM+ePdq4caMCAwNVqlQpPf/883r11Vd11113qWzZshoyZIiKFy+uBx98UJJUuXJlNW/eXD179tSHH36o8+fPq0+fPurYsSN3ZAQAAABwy7A1mK1du1b33Xef9bh///6SpNjYWE2aNEkvvPCCTp8+raeeekonTpxQ/fr19cMPP8jT09NaZsqUKerTp48aN26sAgUKqH379nr//fdv+L4AAAAAQF7ZGswaNWokY8xl5zscDo0YMUIjRoy4bJ/AwEBNnTr1epQHAAAAADfETXuNGQAAAADcKQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNbupg9sorr8jhcDhNlSpVsuafPXtWcXFxKly4sHx8fNS+fXsdOnTIxooBAAAAIPdu6mAmSVWqVNHBgwet6eeff7bm9evXT999952mT5+uhIQEHThwQO3atbOxWgAAAADIPVe7C7gaV1dXhYSEZGlPTk7Wp59+qqlTp+r++++XJE2cOFGVK1fWqlWrVKdOnRtdKgAAAADkyU1/xGzXrl0qXry4ypUrp06dOmnfvn2SpHXr1un8+fNq0qSJ1bdSpUoqVaqUVq5cecV1pqamKiUlxWkCAAAAALvc1MGsdu3amjRpkn744QeNHz9ee/bsUYMGDXTy5EklJSXJ3d1dAQEBTssEBwcrKSnpiuuNj4+Xv7+/NYWGhl7HvQAAAACAK7upT2Vs0aKF9XdERIRq166t0qVL6+uvv5aXl1ee1zt48GD179/fepySkkI4AwAAAGCbm/qI2aUCAgJUsWJF7d69WyEhITp37pxOnDjh1OfQoUPZXpN2MQ8PD/n5+TlNAAAAAGCXWyqYnTp1SomJiSpWrJhq1KghNzc3LVy40Jq/Y8cO7du3T1FRUTZWCQAAAAC5c1OfyjhgwAC1adNGpUuX1oEDBzRs2DC5uLjosccek7+/v3r06KH+/fsrMDBQfn5+6tu3r6KiorgjIwAAAIBbyk0dzP7880899thjOnr0qIoWLar69etr1apVKlq0qCRp9OjRKlCggNq3b6/U1FTFxMRo3LhxNlcNAAAAALlzUwezadOmXXG+p6enxo4dq7Fjx96gigAAAAAg/91S15gBAAAAwO2IYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2IxgBgAAAAA2I5gBAAAAgM0IZgAAAABgM4IZAAAAANiMYAYAAAAANiOYAQAAAIDNCGYAAAAAYDOCGQAAAADYjGAGAAAAADYjmAEAAACAzQhmAAAAAGAzghkAAAAA2Oy2CWZjx45VmTJl5Onpqdq1a+uXX36xuyQAAAAAyJHbIph99dVX6t+/v4YNG6b169erWrVqiomJ0eHDh+0uDQAAAACu6rYIZu+++6569uypbt26KSwsTB9++KEKFiyozz77zO7SAAAAAOCqXO0u4FqdO3dO69at0+DBg622AgUKqEmTJlq5cmW2y6Smpio1NdV6nJycLElKSUm5vsXm1Fm7C7DXTfM64PbBmLK7BNyOGFd2l4DbDWPK7hIsmbUYY27odm/5YPb3338rPT1dwcHBTu3BwcHavn17tsvEx8dr+PDhWdpDQ0OvS43IHf/X/e0uAbitMKaA/Me4AvLXzTimTp48KX//G1fXLR/M8mLw4MHq37+/9TgjI0PHjh1T4cKF5XA4bKzMfikpKQoNDdX+/fvl5+dndznALY8xBeQ/xhWQvxhTzowxOnnypIoXL35Dt3vLB7MiRYrIxcVFhw4dcmo/dOiQQkJCsl3Gw8NDHh4eTm0BAQHXq8Rbkp+fHwMTyEeMKSD/Ma6A/MWY+p8beaQs0y1/8w93d3fVqFFDCxcutNoyMjK0cOFCRUVF2VgZAAAAAOTMLX/ETJL69++v2NhY1axZU7Vq1dKYMWN0+vRpdevWze7SAAAAAOCqbotg9uijj+rIkSMaOnSokpKSFBkZqR9++CHLDUFwdR4eHho2bFiWUz0B5A1jCsh/jCsgfzGmbg4Oc6PvAwkAAAAAcHLLX2MGAAAAALc6ghkAAAAA2IxgBgAAAAA2I5jlUZkyZTRmzBi7y7jl7N27Vw6HQxs3brzu2+I1uvXwmuUN4wqXw+uVN4wpXAmvWd4wrnLA3MJiY2ONJNOrV68s85555hkjycTGxuZoXXv27DGSzIYNG3LU//Dhw+b06dM56tu6dWsTExOT7bylS5caSebXX3/N0bouZ/HixUaSOX78+DWt51JnzpwxhQoVMoULFzZnz57N1bKxsbGmbdu2Tm1paWnm4MGD5vz58/lW48SJE42/v3+W9ty8Rvnl3//+tyldurTx8PAwtWrVMqtXr76h288PjKv/YVz5Z2m/0eMqISHBtG7d2hQrVsxIMrNnz75h284vjKn/YUz5Z2m/0WPqtddeMzVr1jQ+Pj6maNGipm3btmb79u03bPv5hXH1P4wr/yztN3pcjRs3zoSHhxtfX1/j6+tr6tSpY77//vtcr+eWP2IWGhqqadOm6Z9//rHazp49q6lTp6pUqVL5vr1z585JkooWLaqCBQvmaJkePXpowYIF+vPPP7PMmzhxomrWrKmIiIh8rTOvjDFKS0uzHs+cOVNVqlRRpUqVNGfOnGtev4uLi0JCQuTqev1/qSE3r1F++Oqrr9S/f38NGzZM69evV7Vq1RQTE6PDhw/fsBryC+MqfzGu8u706dOqVq2axo4de8O2eT0wpvIXYyrv
EhISFBcXp1WrVmnBggU6f/68mjVrptOnT9+wGvIL4yp/Ma7yrmTJknr99de1bt06rV27Vvfff7/atm2rLVu25G5F+RwYb6jMNF61alXzxRdfWO1TpkwxERERpm3btta3JfPmzTP16tUz/v7+JjAw0LRq1crs3r3bWkaS0xQdHe20jVdffdUUK1bMlClTxhhjTOnSpc3o0aONMRe+qXBzczNLly611vfGG2+YokWLmqSkJHP+/HkTHBxsRo4c6VT/yZMnjY+Pjxk/frwxxphly5aZ+vXrG09PT1OyZEnTt29fc+rUKav/2bNnzQsvvGBKlixp3N3dTfny5c0nn3xifdNz8ZS532fPnjV9+/Y1RYsWNR4eHqZevXrml19+sdaZ+S3L999/b+655x7j5uZmFi9ebM1v1KiR+fDDD8348eNN06ZNs7wGv/32m2nVqpXx9fU1Pj4+pn79+mb37t1m2LBhWWpavHix07dS6enppkSJEmbcuHFO61y/fr1xOBxm7969xhhj3nnnHVO1alVTsGBBU7JkSdO7d29z8uRJp/ovnoYNG5blNTLGmD/++MM88MADxtvb2/j6+ppHHnnEJCUlWfOHDRtmqlWrZj7//HNTunRp4+fnZx599FGTkpKSZb+zU6tWLRMXF2c9Tk9PN8WLFzfx8fE5Wv5mwbhiXN1M4+piuoWPmDGmGFM345gy5sKRBUkmISEhT8vbhXHFuLqZx5UxxhQqVMh88sknuVrmtghm7777rmncuLHV3rhxYzN69GinQTljxgwzc+ZMs2vXLrNhwwbTpk0bEx4ebtLT040xxvzyyy9Gkvnpp5/MwYMHzdGjR61t+Pj4mM6dO5vffvvN/Pbbb8aYrC/4wIEDTenSpc2JEyfM+vXrjbu7u/nmm2+c5pcvX95kZGRYbZ999pnx8vIyJ06cMLt37zbe3t5m9OjRZufOnWb58uWmevXqpmvXrlb/Dh06mNDQUDNr1iyTmJhofvrpJzNt2jSTlpZmZs6caSSZHTt2mIMHD5oTJ04YY4x59tlnTfHixc33339vtmzZYmJjY02hQoWs/ct8U0dERJgff/zR7N6925q3e/du4+HhYY4dO2aOHj1qPD09rYFijDF//vmnCQwMNO3atTNr1qwxO3bsMJ999pnZvn27OXnypOnQoYNp3ry5OXjwoDl48KBJTU3NcrrAgAEDTP369Z1e1//7v/9zahs9erRZtGiR2bNnj1m4cKG5++67Te/evY0xxqSmppoxY8YYPz8/azuZA/bi1yg9Pd1ERkaa+vXrm7Vr15pVq1aZGjVqWB++xlwYlD4+PqZdu3Zm8+bNZunSpSYkJMS89NJLl30PZkpNTTUuLi5Z/tHYpUsX88ADD1x1+ZsJ44pxdbOMq0vd6sGMMcWYutnGlDHG7Nq1y0gymzdvztPydmFcMa5u1nGVlpZmvvzyS+Pu7m62bNmSq2Vvi2B2+PBh4+HhYfbu3Wv27t1rPD09zZEjR5wG5aWOHDni9EF0ufOLY2NjTXBwsElNTXVqv3RQpqammsjISNOhQwcTFhZmevbs6dR/27Zt1jcGmRo0aGCeeOIJY4wxPXr0ME899ZTTMsuWLTMFChQw//zzj9mxY4eRZBYsWJDt/mR3fvGpU6eMm5ubmTJlitV27tw5U7x4cfPmm286LTdnzpws63zppZfMgw8+aD1u27at9U2EMcYMHjzYlC1b1pw7dy7bmrI7v/jS53nDhg3G4XCYP/74wxhjrG9QMr9Bys706dNN4cKFrceXO7/44tfoxx9/NC4uLmbfvn3W/C1bthhJ1rdHw4YNMwULFnT6dmTgwIGmdu3al60l019//WUkmRUrVji1Dxw40NSqVeuqy99MGFf/w7jyz9LvRo6rS93qwYwxxZi62cZUenq6adWqlalXr16ul7Ub4+p/GFf+WfrZMa42bdpkvL29jYuLi/H39zf//e9/c7xsplv+GjPpwnmkrVq10qRJkzRx4kS1atVKRYoUceqza9cuPfbYYypXrpz8/PxUpkwZSdK+ffuuuv7w8HC5u7tfsY+7u7umTJmimTNn6uzZsxo9erTT/EqVKqlu3br67LPPJEm7d+/WsmXL1KNHD0nSr7/+qkmTJsnHx8eaYmJilJGRoT179mjjxo1ycXFRdHR0Tp8WJSYm6vz586pXr57V5ubmplq1amnbtm1OfWvWrOn0OD09XZMnT9YTTzxhtT3xxBOaNGmSMjIyJEkbN25UgwYN5ObmluOaLhUZGanKlStr6tSpki6c+3748GE98sgjVp+ffvpJjRs3VokSJeTr66vOnTvr6NGjOnPmTI63s23bNoWGhio0NNRqCwsLU0BAgNNzUaZMGfn6+lqPixUrdkteI5YfGFfZY1z9D+MqdxhT2WNM/c+NHlNxcXH67bffNG3atFwve7NgXGWPcfU/N2pc3X333dq4caNWr16t3r17KzY2Vlu3bs3x8tJtdLv87t27a9KkSZo8ebK6d++eZX6bNm107Ngxffzxx1q9erVWr14t6X8Xcl6Jt7d3jmpYsWKFJOnYsWM6duxYlvk9evTQzJkzdfLkSU2cOFHly5e3BtmpU6fUq1cvbdy40Zp+/fVX7dq1S+XLl5eXl1eOasirS/dx/vz5+uuvv/Too4/K1dVVrq6u6tixo/744w8tXLhQkvKtpk6dOlmDcurUqWrevLkKFy4s6cKtVVu3bq2IiAjNnDlT69ats24CkJPXLrcu/YBxOBzWh9CVFClSRC4uLjp06JBT+6FDhxQSEpKvNd5IjKtrw7i6IK/j6nbEmLo2jKkL8mNM9enTR3PnztXixYtVsmTJ/CzvhmNcXRvG1QXXOq7c3d1VoUIF1ahRQ/Hx8apWrZree++9XNVw2wSz5s2b69y5czp//rxiYmKc5h09elQ7duzQyy+/rMaNG6ty5co6fvy4U5/Mb0PS09PztP3ExET169dPH3/8sWrXrq3Y2NgsL2aHDh1UoEABTZ06VZ9//rm6d+8uh8MhSbrnnnu0detWVahQIcvk7u6u8PBwZWRkKCEhIdvtZ1d/+fLl5e7uruXLl1tt58+f15o1axQWFnbF/fn000/VsWNHpw+JjRs3qmPHjvr0008lSREREVq2bJnOnz9/2Zpy8nw+/vjj+u2337Ru3TrNmDFDnTp1suatW7dOGRkZeuedd1SnTh1VrFhRBw4cyPV2KleurP3792v//v1W29atW3XixImrPhc54e7urho1algfWJKUkZGhhQsXKioq6prXbxfGFePqSq73uLodMaYYU1dyI8aUMUZ9+vTR7NmztWjRIpUtWzZf1msnxhXj6krs+n9VRkaGUlNTc7dQrk9+vIlcev5qcnKySU5Oth5nnl+cnp5uChcubJ544gmza9cus3DhQnPvvfc6Xa9w/vx54+XlZV599VWTlJRkXTiZ3Tmyxjifu5qWlmbq1Klj2rdvb4wx5sCBA6Zw4cLWObwX69GjhylUqJBxcXExf/31l9X+66+/Gi8vLxMXF2c2bNhgdu7caebMmeN0l7+uXbua0NBQM3v2bPP
777+bxYsXm6+++soYc+EiTIfDYSZNmmQOHz5sXfz43HPPmeLFi5t58+Y5Xfh57NgxY0z25yUfPnzYuLm5mXnz5mWp//vvvzceHh7m6NGj5u+//zaFCxe2LvzcuXOn+fzzz63fQxk1apQpVaqU2b59uzly5Ig5d+7cZc/jrlevnqlWrZrx9fU1Z86csdo3btxoJJkxY8aYxMRE8/nnn5sSJUo41bx8+XLrot0jR45Yv1tx8WuUkZFhIiMjTYMGDcy6devM6tWrs73ws1q1ak51jR492pQuXTrL85CdadOmGQ8PDzNp0iSzdetW89RTT5mAgACnu/7cChhXjCtjbp5xdfLkSbNhwwazYcMGI8m8++67ZsOGDdY1CbcCxhRjypibZ0z17t3b+Pv7myVLllg3TDh48KDT/twKGFeMK2NunnE1aNAgk5CQYPbs2WM2bdpkBg0aZBwOh/nxxx9ztHym2yqYXeriCz8XLFhgKleubDw8PExERIRZsmRJlgvJP/74YxMaGmoKFCiQ5Vapl7r4BR8+fLgpVqyY+fvvv635M2fONO7u7mbjxo1Oy61YscJIMi1btsyyzl9++cU0bdrU+Pj4GG9vbxMREWFGjRplzf/nn39Mv379TLFixYy7u7upUKGC+eyzz6z5I0aMMCEhIcbhcFj7/c8//5i+ffuaIkWKXPFWqRcPyrffftsEBARke0FnamqqCQgIMO+9954x5sKHSbNmzUzBggWNr6+vadCggUlMTDTGXBjcmfujbG6VerFx48YZSaZLly5Ztvnuu++aYsWKGS8vLxMTE2M+//zzLDU//fTTpnDhwvlyq9SL5WZQGmPMBx98YEqVKmXc3d1NrVq1zKpVq3K87M2CccW4ynQzjKvsbocs5fyHY28GjCnGVKabYUxlN54kmYkTJ+Zo+ZsF44pxlelmGFfdu3c3pUuXNu7u7qZo0aKmcePGuQ5lxhjjMMaY3B1jAwAAAADkp9vmGjMAAAAAuFURzIAc2Ldvn9NtbC+dcnLLXQDOGFdA/mJMAfnvRo4rTmUEciAtLU179+697PwyZcrI1dX1xhUE3AYYV0D+YkwB+e9GjiuCGQAAAADYjFMZAQAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJsRzAAAd7RGjRrp+eeft7sMAMAdjmAGAMiTrl27yuFw6PXXX3dqnzNnjhwOR67WVaZMGY0ZMyYfq7t+9u7dK4fDoY0bN9pdCgDgNkIwAwDkmaenp9544w0dP37c7lJy7dy5c3aXkK/Onz9vdwkAgGtAMAMA5FmTJk0UEhKi+Pj4K/b7+eef1aBBA3l5eSk0NFTPPvusTp8+LenCqYR//PGH+vXrJ4fDIYfDIWOMihYtqhkzZljriIyMVLFixZzW6eHhoTNnzkiS9u3bp7Zt28rHx0d+fn7q0KGDDh06ZPV/5ZVXFBkZqU8++URly5aVp6dntrX+97//lb+/v6ZMmZKn5yQxMVFt27ZVcHCwfHx8dO+99+qnn36y5o8YMUJVq1bNslxkZKSGDBliPf7kk09UuXJleXp6qlKlSho3bpw1L/Oo3VdffaXo6Gh5enpqypQp+uOPP9SmTRsVKlRI3t7eqlKlir7//vs87QcA4MYimAEA8szFxUWvvfaaPvjgA/3555/Z9klMTFTz5s3Vvn17bdq0SV999ZV+/vln9enTR5I0a9YslSxZUiNGjNDBgwd18OBBORwONWzYUEuWLJEkHT9+XNu2bdM///yj7du3S5ISEhJ07733qmDBgsrIyFDbtm117NgxJSQkaMGCBfr999/16KOPOtWye/duzZw5U7Nmzcr2VMSpU6fqscce05QpU9SpU6c8PSenTp1Sy5YttXDhQm3YsEHNmzdXmzZttG/fPklS9+7dtW3bNq1Zs8ZaZsOGDdq0aZO6desmSZoyZYqGDh2qUaNGadu2bXrttdc0ZMgQTZ482WlbgwYN0nPPPadt27YpJiZGcXFxSk1N1dKlS7V582a98cYb8vHxydN+AABuLFe7CwAA3NoeeughRUZGatiwYfr000+zzI+Pj1enTp2sG2zcddddev/99xUdHa3x48crMDBQLi4u8vX1VUhIiLVco0aN9NFHH0mSli5dqurVqyskJERLlixRpUqVtGTJEkVHR0uSFi5cqM2bN2vPnj0KDQ2VJH3++eeqUqWK1qxZo3vvvVfShdMXP//8cxUtWjRLnWPHjtW//vUvfffdd9Z686JatWqqVq2a9XjkyJGaPXu2vv32W/Xp00clS5ZUTEyMJk6caNU1ceJERUdHq1y5cpKkYcOG6Z133lG7du0kSWXLltXWrVv10UcfKTY21lr3888/b/WRLhw1bN++vcLDwyXJWh8A4ObHETMAwDV74403NHnyZG3bti3LvF9//VWTJk2Sj4+PNcXExCgjI0N79uy57Dqjo6O1detWHTlyRAkJCWrUqJEaNWqkJUuW6Pz581qxYoUaNWokSdq2bZtCQ0OtUCZJYWFhCggIcKqpdOnS2YayGTNmqF+/flqwYME1hTLpwhGzAQMGqHLlygoICJCPj4+2bdtmHTGTpJ49e+rLL7/U2bNnde7cOU2dOlXdu3eXJJ0+fVqJiYnq0aOH03P26quvKjEx0WlbNWvWdHr87LPP6tVXX1W9evU0bNgwbdq06Zr2BQBw4xDMAADXrGHDhoqJidHgwYOzzDt16pR69eqljRs3WtOvv/6qXbt2qXz58pddZ3h4uAIDA5WQkOAUzBISErRmzRqdP39edevWzVWd3t7e2bZXr15dRYsW1WeffSZjTK7WeakBAwZo9uzZeu2117Rs2TJt3LhR4eHhTjcbadOmjTw8PDR79mx99913On/+vB5++GFJF54vSfr444+dnrPffvtNq1atuuL+PPnkk/r999/VuXNnbd68WTVr1tQHH3xwTfsDALgxOJURAJAvXn/9dUVGRuruu+92ar/nnnu0detWVahQ4bLLuru7Kz093anN4XCoQYMG+uabb7RlyxbVr19fBQsWVGpqqj766CPVrFnTCiaVK1fW/v37tX//fuuo2datW3XixAmFhYVdtfby5cvrnXfeUaNGjeTi4qJ///vfud19y/Lly9W1a1c99NBDki4Erb179zr1cXV1VWxsrCZOnCh3d3d17NhRXl5ekqTg4GAVL15cv//+e56ucwsNDdXTTz+tp59+WoMHD9bHH3+svn375nl/AAA3BsEMAJAvwsPD1alTJ73//vtO7S+++KLq1KmjPn366Mknn5S3t7e2bt2qBQsWWAGoTJkyWrp0qTp27CgPDw8VKVJE0oXrzP7v//5PNWvWtG5i0bBhQ02ZMkUDBw60ttGkSRNr+2PGjFFaWpqeeeYZRUdHZznd73IqVqyoxYsXq1GjRnJ1db3q76rt2LEjS1uVKlV01113adasWWrTpo0cDoeGDBmijIyMLH2ffPJJVa5cWdKFMHex4cOH69lnn5W/v7+aN2+u1NRUrV27VsePH1f//v0vW9Pzzz+vFi1aqGLFijp+/LgWL15sbQMAcHPjVEYAQL
4ZMWJElhASERGhhIQE7dy5Uw0aNFD16tU1dOhQFS9e3Gm5vXv3qnz58k7XgEVHRys9Pd26lky6ENYubXM4HPrmm29UqFAhNWzYUE2aNFG5cuX01Vdf5ar+u+++W4sWLdKXX36p//u//7ti344dO6p69epO06FDh/Tuu++qUKFCqlu3rtq0aaOYmBjdc889WZa/6667VLduXVWqVEm1a9d2mvfkk0/qk08+0cSJExUeHq7o6GhNmjRJZcuWvWJN6enpiouLU+XKldW8eXNVrFjR6Tb7AICbl8Nc68n0AAAg14wxuuuuu/TMM89c8SgYAODOwKmMAADcYEeOHNG0adOUlJRk/XYZAODORjADAOAGCwoKUpEiRTRhwgQVKlTI7nIAADcBghkAADcYVxEAAC7FzT8AAAAAwGYEMwAAAACwGcEMAAAAAGxGMAMAAAAAmxHMAAAAAMBmBDMAAAAAsBnBDAAAAABsRjADAAAAAJv9P5KKOBMzm6wXAAAAAElFTkSuQmCC", - "text/plain": [ - "
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Extracting LUTs from res_dict\n", "LUTs_updated = [res_dict_updated[key][\"LUT\"] for key in res_dict_updated.keys()] \n", @@ -612,7 +375,7 @@ "plt.bar(res_dict_updated.keys(), LUTs_updated, color ='green', width = 0.3)\n", "plt.xlabel(\"Network Layers\")\n", "plt.ylabel(\"LUT Utilisation\")\n", - "plt.title(\"Estimated LUT values used for each network layer\")\n", + "plt.title(\"No. of LUTs per layer with updated folding factors\")\n", "plt.show()" ] }, @@ -622,7 +385,7 @@ "source": [ "From these numbers, we see that the first layer has been removed as the bottleneck and that the entire network can now perform one inference in ~4096 clock cycles (when the pipeline is full) as compared to the earlier configuration where it took ~38400 execution cycles.\n", "\n", - "This decrease in execution latency of the network though comes at a cost of a 45% increase in LUT resource utilization for layer 1 of the network." + "This decrease in execution latency of the network though comes at a cost of a 45% increase in LUT resource utilization for the first layer of the network." ] }, { @@ -636,7 +399,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next to resources and performance, folding factors (or parallelization parameters) are influencing also other properties of the generated design. Since we are able to generate results in parallel, the data that gets feed into the layer needs to be packed in a specific format to provide the correct data at the correct time for the internal parallelism. Also, the data that comes out of a layer will be in a specific format depending on the internal parallelism." + "Next to resources and performance, folding factors (or parallelization parameters) are influencing also other properties of the generated design. Since we are able to generate results in parallel, the data that gets fed into the layer needs to be packed in a specific format to provide the correct data at the correct time for the internal parallelism. Also, the data that comes out of a layer will be in a specific format depending on the internal parallelism." 
] }, { @@ -648,7 +411,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -665,29 +428,9 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "In the original model (pe=simd=1): \n", - "Layer: MatrixVectorActivation_0\n", - "Input shape: (1, 600, 1)\n", - "Output shape: (1, 64, 1)\n", - "Layer: MatrixVectorActivation_1\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 64, 1)\n", - "Layer: MatrixVectorActivation_2\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 64, 1)\n", - "Layer: MatrixVectorActivation_3\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 1, 1)\n" - ] - } - ], + "outputs": [], "source": [ "# Original model\n", "list_of_mvaus = model_orig.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", @@ -701,29 +444,9 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "In the original model (pe=simd=1): \n", - "Layer: MatrixVectorActivation_0\n", - "Input shape: (1, 120, 5)\n", - "Output shape: (1, 32, 2)\n", - "Layer: MatrixVectorActivation_1\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 64, 1)\n", - "Layer: MatrixVectorActivation_2\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 64, 1)\n", - "Layer: MatrixVectorActivation_3\n", - "Input shape: (1, 64, 1)\n", - "Output shape: (1, 1, 1)\n" - ] - } - ], + "outputs": [], "source": [ "# Updated model\n", "list_of_mvaus = model_updated.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", @@ -744,42 +467,18 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " def get_instream_width(self, ind=0):\n", - " i_bits = self.get_input_datatype().bitwidth()\n", - " in_width = i_bits * self.get_nodeattr(\"SIMD\")\n", - " return in_width\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "showSrc(mvau_inst.get_instream_width)" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " def get_outstream_width(self, ind=0):\n", - " o_bits = self.get_output_datatype().bitwidth()\n", - " out_width = o_bits * self.get_nodeattr(\"PE\")\n", - " return out_width\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "showSrc(mvau_inst.get_outstream_width)" ] @@ -800,29 +499,9 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "In the original model (pe=simd=1): \n", - "Layer: MatrixVectorActivation_0\n", - "Input stream width: 1\n", - "Output stream width: 2\n", - "Layer: MatrixVectorActivation_1\n", - "Input stream width: 2\n", - "Output stream width: 2\n", - "Layer: MatrixVectorActivation_2\n", - "Input stream width: 2\n", - "Output stream width: 2\n", - "Layer: MatrixVectorActivation_3\n", - "Input stream width: 2\n", - "Output stream width: 1\n" - ] - } - ], + "outputs": [], "source": [ "# Original model\n", "list_of_mvaus = model_orig.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", @@ -850,29 +529,9 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "In the original model (pe=simd=1): \n", - "Layer: MatrixVectorActivation_0\n", - "Input stream width: 5\n", - "Output stream width: 4\n", - "Layer: MatrixVectorActivation_1\n", - "Input stream width: 2\n", - "Output stream width: 2\n", - "Layer: MatrixVectorActivation_2\n", - "Input stream width: 2\n", - "Output stream width: 2\n", - "Layer: MatrixVectorActivation_3\n", - "Input stream width: 2\n", - "Output stream width: 1\n" - ] - } - ], + "outputs": [], "source": [ "# Updated model\n", "list_of_mvaus = model_updated.get_nodes_by_op_type(\"MatrixVectorActivation\")\n", @@ -893,7 +552,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -906,40 +565,9 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Stopping http://0.0.0.0:5920\n", - "Serving 'cybsec_DWC.onnx' at http://0.0.0.0:5920\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 40, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model_updated.save(\"cybsec_DWC.onnx\")\n", "showInNetron(\"cybsec_DWC.onnx\")" @@ -955,49 +583,9 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'MatrixVectorActivation_0': {'BRAM_18K': 8,\n", - " 'BRAM_efficiency': 0.5208333333333334,\n", - " 'LUT': 418,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'StreamingDataWidthConverter_Batch_0': {'BRAM_18K': 0,\n", - " 'BRAM_efficiency': 1,\n", - " 'LUT': 3,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_1': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_2': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.4444444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0},\n", - " 'MatrixVectorActivation_3': {'BRAM_18K': 1,\n", - " 'BRAM_efficiency': 0.006944444444444444,\n", - " 'LUT': 320,\n", - " 'URAM': 0,\n", - " 'URAM_efficiency': 1,\n", - " 'DSP': 0}}" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "model_dwc = ModelWrapper(\"cybsec_DWC.onnx\")\n", "res_dict_dwc = model_dwc.analysis(res_estimation)\n", @@ -1013,7 +601,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1025,20 +613,9 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA1IAAAHWCAYAAAB9mLjgAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAA9hAAAPYQGoP6dpAABZ/0lEQVR4nO3deVhU5f//8dcAgigOiAq4oOKWG0ZpKW7gimuZaGmmuKamlZqlVu4lLZ/S8pOZLWClmWuln7TMPUVzTXPLfUlBkwS1RIHz+8Mf83UClaPgDPh8XNdcOffZ3mfm5sSLc859LIZhGAIAAAAAZJuLowsAAAAAgLyGIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYBJBCgAAAABMIkgBAAAAgEkEKQAAAAAwiSAF4LaEh4crPDzc0WXkqKNHj8pisSg2NtbRpTgUn0P2xcbGymKx6OjRo7ecd9myZQoJCVHBggVlsVh0/vz5XK/vbrNYLBo8eLCjy3BqGX1my5YtppddvXq1LBaLVq9enfOFATCNIAXkMxn/k77Ra+PGjdle1549ezRu3Lhs/ZJ4N02bNs2hv+Rn/DIzf/78G85zs18o58+fb/tlKGNd2Xkh7zp37pwef/xxeXp66oMPPtAXX3yhwoULO7qsfG/Dhg0aN25cvgytABzPzdEFAMgdEyZMUFBQUKb2SpUqZXsde/bs0fjx4xUeHq7y5cvbTfvxxx/vtMTbNm3aNBUvXlw9e/Z0WA05pVq1avriiy/s2kaNGiUvLy+98sorDqoKOW3z5s26cOGCJk6cqObNmzu6nHvGhg0bNH78ePXs2VM+Pj6OLgdAPkOQAvKp1q1bq06dOrm2fnd391xb973E399fTz31lF3bG2+8oeLFi2dqR9515swZScrRX+YvXbrEWa085PLly/n+uEmfxL2GS/uAe9icOXNUu3ZtFSlSRFarVcHBwXrvvfckXbtEsHPnzpKkJk2a2C4vy7g2/9/3SGVcojZ37lyNHz9epUuXVpEiRdSpUyclJSUpJSVFQ4YMkZ+fn7y8vNSrVy+lpKTY1RMTE6OmTZvKz89PHh4eql69uj788EO7ecqXL6/du3drzZo1tpqur+P8+fMaMmSIAgMD5eHhoUqVKunNN99Uenq63XrOnz+vnj17ytvbWz4+PoqKisqTl/8kJCTIzc1N48ePzzRt//79slgs+u9//ytJSkxM1PDhwxUcHCwvLy9ZrVa1bt1av/766y23c6N74nr27JnpbGV6erqmTJmiGjVqqGDBgvL391f//v31119/2c23ZcsWRUREqHjx4vL09FRQUJB69+59y1osFovGjRuXqb18+fJ2ZymvXr2q8ePHq3LlyipYsKCKFSumhg0bavny5XbL7du3T506dZKvr68KFiyoOnXq6Lvvvsu0/t27d6tp06by9PRUmTJl9Nprr2XqV1kJDw9XVFSUJOmhhx6SxWKxq3PevHmqXbu2PD09bQH6jz/+sFtHz5495eXlpUOHDqlNmzYqUqSIunXrdtPt/vHHH+rdu7f8/f3l4eGhGjVq6LPPPrOb58qVKxozZoxq164tb29vFS5cWI0aNdKqVasyrS89PV3vvfeegoODVbBgQZUoUUKtWrXK8l6fb775RjVr1rRtd9myZbf8nK4/hrz++usqU6aMChYsqGbNmungwYOZ5t+0aZNatWolb29vFSpUSGFhYVq/fr1t+rhx4/Tiiy9KkoKCgmzHi6NHj6pjx4568MEH7dbXvn17WSwWu+9+06ZNslgsWrp0qa3t8OHD6ty5s3x9fVWoUCHVq1dP//vf/7Lclzlz5ujVV19V6dKlVahQISUnJ2e573/99ZcefvhhlSlTRvv377/lZ3W9devWqXPnzipbtqw8PDwUGBiooUOH6p9//rHNExMTI4vFou3bt2daftKkSXJ1dbXrc7f6bKVrn6/FYtGePXv05JNPqmjRomrYsKGp2oG8jjNSQD6VlJSkP//8067NYrGoWLFikqTly5era9euatasmd58801J0t69e7V+/Xo9//zzaty4sZ577jm9//77evnll1WtWjVJsv33RqKjo+Xp6amRI0fq4MGDmjp1qgoUKCAXFxf99ddfGjdunDZu3KjY2FgFBQVpzJgxtmU//PBD1ahRQ4888ojc3Ny0ePFiPfPMM0pPT9egQYMkSVOmTNGzzz5rd+mbv7+/JOnvv/9WWFiY/vjjD/Xv319ly5bVhg0bNGrUKJ0+fVpTpkyRJBmGoUcffVQ///yzBgwYoGrVqmnRokW2X3bzEn9/f4WFhWnu3LkaO3as3bSvv/5arq6utkB8+PBhffPNN+rcubOCgoKUkJCgjz76SGFhYdqzZ49KlSqVIzX1799fsbGx6tWrl5577jkdOXJE//3vf7V9+3atX79eBQoU0JkzZ9SyZUuVKFFCI0eOlI+Pj44ePaqFCxfmSA3StV/0oqOj1bdvXz388MNKTk7Wli1btG3bNrVo0ULStXDUoEEDlS5dWiNHjlThwoU1d+5cdejQQQsWLNBjjz0mSYqPj1eTJk2Umppqm2/GjBny9PS8ZR2vvPKK7rvvPs2YMcN2yW3FihUlyfY5PfTQQ4qOjlZCQoLee+89rV+/Xtu3b7c7g5WamqqIiAg1bNhQ//nPf1SoUKEbbjMhIUH16tWz3atXokQJLV26VH369FFycrKGDBkiSUpOTtYnn3yirl27ql+/frpw4YI+/fRTRURE6JdfflFISIhtnX369FFsbKxat26tvn37KjU1VevWrdPGjRvtzn7//PPPWrhwoZ555hkVKVJE77//viIjI3X8+HHb8edm3njjDbm4uGj48OFKSkrSW2+9pW7dumnTpk22eVauXKnWrVurdu3aGjt2rFxcXGx/iFm3bp0efvhhdezYUb///ru++uorTZ48WcWLF5cklShRQo0aNdK3336r5ORkWa1WGYah9evXy8XFRevWrdMjjzwi6VpIcXFxUYMGDWyfa/369fX333/rueeeU7FixTRz5kw98sgjmj9/vq2/ZJg4caLc3d01fPhwpaSkZHlG6s8//1SLFi2UmJioNWvW2PpGds2bN09///23Bg4cqGLFiumXX37R1KlTdfLkSc2bN0+S1KlTJw0aNEizZs3SAw88YLf8rFmzFB4ertKlS2f7s71e586dVblyZU2aNEmGYZiqHcjzDAD5SkxMjCEpy5eHh4dtvueff96wWq1GamrqDdc1b948Q5KxatWqTNPCwsKMsLAw2/tVq1YZkoyaNWsaV65csbV37drVsFgsRuvWre2WDw0NNcqVK2fX9vfff2faTkREhFGhQgW7tho1athtO8PEiRONwoULG7///rtd+8iRIw1XV1fj+PHjhmEYxjfffGNIMt566y3bPKmpqUajRo0MSUZMTEymdV8vY1/nzZt3w3kkGYMGDcpy2s0+15vt34189NFHhiRj165ddu3Vq1c3mjZtant/+fJlIy0tzW6eI0eOGB4eHsaECRPs2v79Ofz7+84QFRVl9z2uW7fOkG
TMmjXLbr5ly5bZtS9atMiQZGzevDnb+5lBkjF27NhM7eXKlTOioqJs7++//36jbdu2N11Xs2bNjODgYOPy5cu2tvT0dKN+/fpG5cqVbW1DhgwxJBmbNm2ytZ05c8bw9vY2JBlHjhy56XYyfi6v398rV64Yfn5+Rs2aNY1//vnH1r5kyRJDkjFmzBhbW1RUlCHJGDly5E23k6FPnz5GyZIljT///NOuvUuXLoa3t7ftZy01NdVISUmxm+evv/4y/P39jd69e9vaVq5caUgynnvuuUzbSk9Pt/1bkuHu7m4cPHjQ1vbrr78akoypU6fetOaMn6tq1arZ1fTee+/Z9e/09HSjcuXKRkREhN22//77byMoKMho0aKFre3tt9/O8vvZvHmzIcn4/vvvDcMwjJ07dxqSjM6dOxt169a1zffII48YDzzwgO19Rj9Yt26dre3ChQtGUFCQUb58edvPV8a+VKhQIdNx7fq+cPr0aaNGjRpGhQoVjKNHj97087l+vdcfO7I6bkZHRxsWi8U4duyYra1r165GqVKl7I4B27Zts/tZN/PZjh071pBkdO3a9ZZ1A/kVl/YB+dQHH3yg5cuX272uvzzFx8dHly5dynSZ053q0aOHChQoYHtft25dGYaR6ZKtunXr6sSJE0pNTbW1Xf/X/YwzamFhYTp8+LCSkpJuue158+apUaNGKlq0qP7880/bq3nz5kpLS9PatWslSd9//73c3Nw0cOBA27Kurq569tlnb3u/Haljx45yc3PT119/bWv77bfftGfPHj3xxBO2Ng8PD7m4XDvsp6Wl6dy5c/Ly8tJ9992nbdu25Ugt8+bNk7e3t1q0aGH3HdSuXVteXl62S8YyzrQsWbJEV69ezZFt/5uPj492796tAwcOZDk9MTFRK1eu1OOPP64LFy7Yaj137pwiIiJ04MAB2+VO33//verVq2f31/gSJUrc8vK6m9myZYvOnDmjZ555RgULFrS1t23bVlWrVs10uZgkuz57I4ZhaMGCBWrfvr0Mw7D7HiIiIpSUlGT7vl1dXW1nSdLT05WYmKjU1FTVqVPHrk8sWLBAFosl01lPSZlGlGzevLndWZVatWrJarXq8OHDt6xdknr16mV35qZRo0aSZFt+x44dOnDggJ588kmdO3fOtm+XLl1Ss2bNtHbt2ltecvnAAw/Iy8vLdkxYt26dypQpox49emjbtm36+++/ZRiGfv75Z9v2pWv94OGHH7a7hM3Ly0tPP/20jh49qj179thtJyoq6oZnLU+ePKmwsDBdvXpVa9euVbly5bL1+fzb9eu/dOmS/vzzT9WvX1+GYdhdytejRw+dOnXK7rLNWbNmydPTU5GRkZJu77MdMGDAbdUN5Adc2gfkUw8//PBNB5t45plnNHfuXLVu3VqlS5dWy5Yt9fjjj6tVq1Z3tN2yZcvavff29pYkBQYGZmpPT09XUlKS7XKf9evXa+zYsYqLi9Pff/9tN39SUpJtXTdy4MAB7dy5UyVKlMhyesYN/8eOHVPJkiXl5eVlN/2+++67xd7lrJwa0rx48eJq1qyZ5s6dq4kTJ0q6dlmfm5ubOnbsaJsv4x6XadOm6ciRI0pLS7NNy84lV9lx4MABJSUlyc/PL8vpGd9BWFiYIiMjNX78eE2ePFnh4eHq0KGDnnzySXl4eORILRMmTNCjjz6qKlWqqGbNmmrVqpW6d++uWrVqSZIOHjwowzA0evRojR49+ob1li5dWseOHVPdunUzTb+TPnPs2LEbrqNq1ar6+eef7drc3NxUpkyZW6737NmzOn/+vGbMmKEZM2ZkOU/G9yBJM2fO1DvvvKN9+/bZhdrrR/08dOiQSpUqJV9f31tu/9/HAEkqWrRopnvksrt80aJFJcm2fEYwvtmluElJSbblsuLq6qrQ0FCtW7dO0rUg1ahRIzVs2FBpaWnauHGj/P39lZiYaBekbtQPMi55PnbsmGrWrGlrz2rk1Azdu3eXm5ub9u7dq4CAgBvOdyvHjx/XmDFj9N1332X6jK//A1SLFi1UsmRJzZo1S82aNVN6erq++uorPfrooypSpIik2/tsb7aPQH5HkALuUX5+ftqxY4d++OEHLV26VEuXLlVMTIx69OihmTNn3vZ6XV1dTbUb//+a+kOHDqlZs2aqWrWq3n33XQUGBsrd3V3ff/+9Jk+enK2b+tPT09WiRQu99NJLWU6vUqVKNvfiznl4eNjd7H29jJB4/VmIO9WlSxf16tVLO3bsUEhIiObOnatmzZrZ7guRrt1UPnr0aPXu3VsTJ06Ur6+vXFxcNGTIkFt+vhaLJcv7H64PY9K178DPz0+zZs3Kcj0ZITfjOVwbN27U4sWL9cMPP6h379565513tHHjxkwhNzv+XUvjxo116NAhffvtt/rxxx/1ySefaPLkyZo+fbr69u1r2+fhw4crIiIiy3WaeVxAbrv+jOLNZOzXU089dcNfiDPC5JdffqmePXuqQ4cOevHFF+Xn5ydXV1dFR0fr0KFDt1XnrX7W73T5jP17++237e7hul52+k/Dhg31+uuv6/Lly1q3bp1eeeUV+fj4qGbNmlq3bp3t3svrg5RZN7uHrmPHjvr888/13nvvKTo6+rbWn5aWZru/asSIEapataoKFy6sP/74Qz179rT7uXZ1ddWTTz6pjz/+WNOmTdP69et16tQpu9FBb+ezzc59gkB+RZAC7mHu7u5q37692rdvr/T0dD3zzDP66KOPNHr0aFWqVOmuPgR28eLFSklJ0XfffWf3F+msRg+7UV0VK1bUxYsXb/mcnnLlymnFihW6ePGi3S8FZkfLutU2brS+jPbbvZQnKx06dFD//v1tl/f9/vvvGjVqlN088+fPV5MmTfTpp5/atZ8/f94ucGWlaNGiWV6alXFWJUPFihX1008/qUGDBtn6BatevXqqV6+eXn/9dc2ePVvdunXTnDlz1Ldv35vW8u8RFq9cuaLTp09nmtfX11e9evVSr169dPHiRTVu3Fjjxo1T3759VaFCBUlSgQIFstVnsrpE8E76TMb3v3//fjVt2jTTem+3f5QoUUJFihRRWlraLfdr/vz5qlChghYuXGj3c/XvS/gqVqyoH374QYmJidk6K5WbMi4btFqtt9y/mx3DGjVqpCtXruirr77SH3/8YQtMjRs3tgWpKlWq2AKVdOOf63379tmmZ9ezzz6rSpUqacyYMfL29tbIkSOzvWyGXbt26ffff9fMmTPVo0cPW/uNLtnu0aOH3nnnHS1evFhLly5ViRIl7P6IYOazBcDw58A969y5c3bvXVxcbH+lzhiWPON5IHdjWPCMv0Jf/1frpKQkxcTEZJq3cOHCWdb0+OOPKy4uTj/88EOmaefPn7fdj9WmTRulpqbaDa2elpamqVOn3ulu2LRp00YbN27U1q1bM9Uxa9YshYSE3NHlPP/m4+OjiIgIzZ07V3PmzJG7u7s6dOhgN4+rq2umswLz5s3LNNR2VipWrKh9+/bp7NmztrZff/0105DIjz/+uNLS0myXGF4vNTXV9r399ddfmWrJ+Av4v
4fFz6qWjHtbMsyYMSPTGal/93EvLy9VqlTJtn4/Pz+Fh4fro48+yjKEXb+vGd/nL7/8Yjf9RmfesqNOnTry8/PT9OnT7fZ56dKl2rt3r9q2bXtb63V1dVVkZKQWLFig3377LdP06/crq5+7TZs2KS4uzm6ZyMhIGYaR5TD72T3TlFNq166tihUr6j//+Y8uXryYafr1+3ezY1jdunVVoEABvfnmm/L19VWNGjUkXQtYGzdu1Jo1azKdjWrTpo1++eUXu8/n0qVLmjFjhsqXL6/q1aub2pfRo0dr+PDhGjVqVKZHPWRHVt+fYRi2x1j8W61atVSrVi198sknWrBggbp06SI3t//7m7qZzxYAZ6SAfGvp0qW2v5Jer379+qpQoYL69u2rxMRENW3aVGXKlNGxY8c0depUhYSE2K73DwkJkaurq958800lJSXJw8PD9pynnNayZUvbGbL+/fvr4sWL+vjjj+Xn55fpl9zatWvrww8/1GuvvaZKlSrJz89PTZs21YsvvqjvvvtO7dq1U8+ePVW7dm1dunRJu3bt0vz583X06FEVL15c7du3V4MGDTRy5EgdPXpU1atX18KFC7M1oMX1FixYkOVnHBUVpZEjR2revHlq3Lix+vfvr6pVq+rUqVOKjY3V6dOnswyId+qJJ57QU089pWnTpikiIiLTw1/btWunCRMmqFevXqpfv7527dqlWbNm2c7M3Ezv3r317rvvKiIiQn369NGZM2c0ffp01ahRw+7ZOGFhYerfv7+io6O1Y8cOtWzZUgUKFNCBAwc0b948vffee+rUqZNmzpypadOm6bHHHlPFihV14cIFffzxx7JarWrTps1Na+nbt68GDBigyMhItWjRQr/++qt++OGHTGfVqlevrvDwcNWuXVu+vr7asmWL5s+fr8GDB9vm+eCDD9SwYUMFBwerX79+qlChghISEhQXF6eTJ0/anrH10ksv6YsvvlCrVq30/PPP24Y/L1eunHbu3HnLzy8rGb/E9+rVS2FhYeratatt+PPy5ctr6NCht7Ve6doQ4qtWrVLdunXVr18/Va9eXYmJidq2bZt++uknJSYmSrrWJxYuXKjHHntMbdu21ZEjRzR9+nRVr17d7hfpJk2aqHv37nr//fd14MABtWrVSunp6Vq3bp2aNGli95nmNhcXF33yySdq3bq1atSooV69eql06dL6448/tGrVKlmtVi1evFjStWOFdG0I+i5duqhAgQJq3769ChcurEKFCql27drauHGj7RlS0rUzUpcuXdKlS5cyBamRI0fqq6++UuvWrfXcc8/J19dXM2fO1JEjR7RgwYJsXXr5b2+//baSkpI0aNAgFSlSxNSDuKtWraqKFStq+PDh+uOPP2S1WrVgwYKb3o/Wo0cPDR8+XJIybcvMZwtADH8O5Dc3G/5c1w1zO3/+fKNly5aGn5+f4e7ubpQtW9bo37+/cfr0abv1ffzxx0aFChUMV1dXu2F3bzT8+b+HBM9q2GfD+L+hc8+ePWtr++6774xatWoZBQsWNMqXL2+8+eabxmeffZZp+OL4+Hijbdu2RpEiRQxJdnVcuHDBGDVqlFGpUiXD3d3dKF68uFG/fn3jP//5j92w7OfOnTO6d+9uWK1Ww9vb2+jevbuxfft2U8Of3+iVMTTyyZMnjb59+xqlS5c23NzcDF9fX6Ndu3bGxo0bb7p+s8OfZ0hOTjY8PT0NScaXX36Zafrly5eNF154wShZsqTh6elpNGjQwIiLi8v0XWY1/LlhGMaXX35pVKhQwXB3dzdCQkKMH374IdPw5xlmzJhh1K5d2/D09DSKFCliBAcHGy+99JJx6tQpwzCuDbvctWtXo2zZsoaHh4fh5+dntGvXztiyZcst9zMtLc0YMWKEUbx4caNQoUJGRESEcfDgwUzDn7/22mvGww8/bPj4+Bienp5G1apVjddff92uHxiGYRw6dMjo0aOHERAQYBQoUMAoXbq00a5dO2P+/Pl28+3cudMICwszChYsaJQuXdqYOHGi8emnn9728OcZvv76a+OBBx4wPDw8DF9fX6Nbt27GyZMn7eaJiooyChcufMvP5noJCQnGoEGDjMDAQKNAgQJGQECA0axZM2PGjBm2edLT041JkyYZ5cqVMzw8PIwHHnjAWLJkSZbfa2pqqvH2228bVatWNdzd3Y0SJUoYrVu3NrZu3WqbRzcY9v/f301WbnQMuVF/3L59u9GxY0ejWLFihoeHh1GuXDnj8ccfN1asWGE338SJE43SpUsbLi4umb6rF1980ZBkvPnmm3bLVKpUyZBkHDp0KFOdhw4dMjp16mT4+PgYBQsWNB5++GFjyZIl2doXw8i6L6SlpRldu3Y13NzcjG+++eaWn9H1w5/v2bPHaN68ueHl5WUUL17c6Nevn23I+ayOZadPnzZcXV2NKlWq3HA72flsszqGA/cai2Hw9DQAAIB7wZ9//qmSJUtqzJgxNxytEkD2cI8UAADAPSI2NlZpaWnq3r27o0sB8jzukQIAAMjnVq5cqT179uj1119Xhw4dVL58eUeXBOR5XNoHAACQz4WHh2vDhg1q0KCBvvzyS5UuXdrRJQF5HkEKAAAAAEziHikAAAAAMIkgBQAAAAAmMdiEpPT0dJ06dUpFihSxPZAPAAAAwL3HMAxduHBBpUqVuumDtglSkk6dOqXAwEBHlwEAAADASZw4cUJlypS54XSClKQiRYpIuvZhWa1WB1cDAAAAwFGSk5MVGBhoywg3QpCSbJfzWa1WghQAAACAW97yw2ATAAAAAGASQQoAAAAATCJIAQAAAIBJBCkAAAAAMIkgBQAAAAAmEaQAAAAAwCSCFAAAAACYRJACAAAAAJMIUgAAAABgEkEKAAAAAEwiSAEAAACASQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmOTm6AKQBYvF0RXcOcNwdAUAAABAruGMFAAAAACYRJACAAAAAJMIUgAAAABgEkEKAAAAAEwiSAEAAACASQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYJLTBKk33nhDFotFQ4YMsbVdvnxZgwYNUrFixeTl5aXIyEglJCTYLXf8+HG1bdtWhQoVkp+fn1588UWlpqbe5eoBAAAA3EucIkht3rxZH330kWrVqmXXPnToUC1evFjz5s3TmjVrdOrUKXXs2NE2PS0tTW3bttWVK1e0YcMGzZw5U7GxsRozZszd3gUAAAAA9xCHB6mLFy+qW7du+vjjj1W0aFFbe1JSkj799FO9++67atq0qWrXrq2YmBht2LBBGzdulCT9+OOP2rNnj7788kuFhISodevWmjhxoj744ANduXLFUbsEAAAAIJ9zeJAaNGiQ2rZtq+bNm9u1b926VVevXrVrr1q1qsqWLau4uDhJUlxcnIKDg+Xv72+bJyIiQsnJydq9e/cNt5mSkqLk5GS7FwAAAABkl5sjNz5nzhxt27ZNmzdvzjQtPj5e7u7u8vHxsWv39/dXfHy8bZ7rQ1TG9IxpNxId
Ha3x48ffYfUAAAAA7lUOOyN14sQJPf/885o1a5YKFix4V7c9atQoJSUl2V4nTpy4q9sHAAAAkLc5LEht3bpVZ86c0YMPPig3Nze5ublpzZo1ev/99+Xm5iZ/f39duXJF58+ft1suISFBAQEBkqSAgIBMo/hlvM+YJyseHh6yWq12LwAAAADILocFqWbNmmnXrl3asWOH7VWnTh1169bN9u8CBQpoxYoVtmX279+v48ePKzQ0VJIUGhqqXbt26cyZM7Z5li9fLqvVqurVq9/1fQIAAABwb3DYPVJFihRRzZo17doKFy6sYsWK2dr79OmjYcOGydfXV1arVc8++6xCQ0NVr149SVLLli1VvXp1de/eXW+99Zbi4+P16quvatCgQfLw8Ljr+wQAAADg3uDQwSZuZfLkyXJxcVFkZKRSUlIUERGhadOm2aa7urpqyZIlGjhwoEJDQ1W4cGFFRUVpwoQJDqwaAAAAQH5nMQzDcHQRjpacnCxvb28lJSU5x/1SFoujK7hzdCsAAADkQdnNBg5/jhQAAAAA5DUEKQAAAAAwiSAFAAAAACYRpAAAAADAJIIUAAAAAJhEkAIAAAAAkwhSAAAAAGASQQoAAAAATCJIAQAAAIBJBCkAAAAAMIkgBQAAAAAmEaQAAAAAwCSCFAAAAACYRJACAAAAAJMIUgAAAABgEkEKAAAAAEwiSAEAAACASQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYBJBCgAAAABMIkgBAAAAgEkEKQAAAAAwiSAFAAAAACYRpAAAAADAJIIUAAAAAJhEkAIAAAAAkwhSAAAAAGASQQoAAAAATCJIAQAAAIBJDg1SH374oWrVqiWr1Sqr1arQ0FAtXbrUNj08PFwWi8XuNWDAALt1HD9+XG3btlWhQoXk5+enF198UampqXd7VwAAAADcQ9wcufEyZcrojTfeUOXKlWUYhmbOnKlHH31U27dvV40aNSRJ/fr104QJE2zLFCpUyPbvtLQ0tW3bVgEBAdqwYYNOnz6tHj16qECBApo0adJd3x8AAAAA9waLYRiGo4u4nq+vr95++2316dNH4eHhCgkJ0ZQpU7Kcd+nSpWrXrp1OnTolf39/SdL06dM1YsQInT17Vu7u7tnaZnJysry9vZWUlCSr1ZpTu3L7LBZHV3DnnKtbAQAAANmS3WzgNPdIpaWlac6cObp06ZJCQ0Nt7bNmzVLx4sVVs2ZNjRo1Sn///bdtWlxcnIKDg20hSpIiIiKUnJys3bt333BbKSkpSk5OtnsBAAAAQHY59NI+Sdq1a5dCQ0N1+fJleXl5adGiRapevbok6cknn1S5cuVUqlQp7dy5UyNGjND+/fu1cOFCSVJ8fLxdiJJkex8fH3/DbUZHR2v8+PG5tEcAAAAA8juHB6n77rtPO3bsUFJSkubPn6+oqCitWbNG1atX19NPP22bLzg4WCVLllSzZs106NAhVaxY8ba3OWrUKA0bNsz2Pjk5WYGBgXe0HwAAAADuHQ6/tM/d3V2VKlVS7dq1FR0drfvvv1/vvfdelvPWrVtXknTw4EFJUkBAgBISEuzmyXgfEBBww216eHjYRgrMeAEAAABAdjk8SP1benq6UlJSspy2Y8cOSVLJkiUlSaGhodq1a5fOnDljm2f58uWyWq22ywMBAAAAIKc59NK+UaNGqXXr1ipbtqwuXLig2bNna/Xq1frhhx906NAhzZ49W23atFGxYsW0c+dODR06VI0bN1atWrUkSS1btlT16tXVvXt3vfXWW4qPj9err76qQYMGycPDw5G7BgAAACAfc2iQOnPmjHr06KHTp0/L29tbtWrV0g8//KAWLVroxIkT+umnnzRlyhRdunRJgYGBioyM1Kuvvmpb3tXVVUuWLNHAgQMVGhqqwoULKyoqyu65UwAAAACQ05zuOVKOwHOkcgHdCgAAAHlQnnuOFAAAAADkFQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYBJBCgAAAABMIkgBAAAAgEkEKQAAAAAwiSAFAAAAACYRpAAAAADAJIIUAAAAAJhEkAIAAAAAkwhSAAAAAGASQQoAAAAATCJIAQAAAIBJBCkAAAAAMIkgBQAAAAAmuTm6AAAAss1icXQFd84wHF0BckJ+6IsS/TG/yA/9MQ/2Rc5IAQAAAIBJBCkAAAAAMIkgBQAAAAAmEaQAAAAAwCSCFAAAAACYRJACAAAAAJMIUgAAAABgEkEKAAAAAEwiSAEAAACASQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmOTQIPXhhx+qVq1aslqtslqtCg0N1dKlS23TL1++rEGDBqlYsWLy8vJSZGSkEhIS7NZx/PhxtW3bVoUKFZKfn59efPFFpaam3u1dAQAAAHAPcWiQKlOmjN544w1t3bpVW7ZsUdOmTfXoo49q9+7dkqShQ4dq8eLFmjdvntasWaNTp06pY8eOtuXT0tLUtm1bXblyRRs2bNDMmTMVGxurMWPGOGqXAAAAANwDLIZhGI4u4nq+vr56++231alTJ5UoUUKzZ89Wp06dJEn79u1TtWrVFBcXp3r16mnp0qVq166dTp06JX9/f0nS9OnTNWLECJ09e1bu7u7Z2mZycrK8vb2VlJQkq9Waa/uWbRaLoyu4c87VrQDkFxwf4SzyQ1+U6I/5RX7oj07UF7ObDZzmHqm0tDTNmTNHly5dUmhoqLZu3aqrV6+qefPmtnmqVq2qsmXLKi4uTpIUFxen4OBgW4iSpIiICCUnJ9vOamUlJSVFycnJdi8AAAAAyC6HB6ldu3bJy8tLHh4eGjBggBYtWqTq1asrPj5e7u7u8vHxsZvf399f8fHxkqT4+Hi7EJUxPWPajURHR8vb29v2CgwMzNmdAgAAAJCvOTxI3XfffdqxY4c2bdqkgQMHKioqSnv27MnVbY4aNUpJSUm214kTJ3J1ewAAAADyFzdHF+Du7q5KlSpJkmrXrq3Nmzfrvffe0xNPPKErV67o/PnzdmelEhISFBAQIEkKCAjQL7/8Yre+jFH9MubJioeHhzw8PHJ4TwAAAADcKxx+Rurf0tPTlZKSotq1a6tAgQJasWKFbdr+/ft1/PhxhYaGSpJCQ0O1a9cunTlzxjbP8uXLZbVaVb169bteOwAAAIB7g0PPSI0aNUqtW7dW2bJldeHCBc2ePVurV6/WDz/8IG9vb/Xp00fDhg2Tr6+vrFarnn32WYWGhqpevXqSpJYtW6p69erq3r273nrrLcXHx+vVV1/VoEGDOOMEAAAAINc4NEidOXNGPXr00OnTp+Xt7a1atWrphx9+UIsWLSRJkydPlouLiyIjI5WSkqKIiAhNmzbNtryrq6uWLFmigQMHKjQ0VIULF1ZUVJQmTJjgqF0CAAAAcA9wuudIOQLPkcoFdCsAuYHjI5xFfuiLEv0xv8gP/dGJ+mKee44UAAAAAOQVBCkAAAAAMIkgBQAAAAAmEaQAAAAAwCSCFAAAAAC
YRJACAAAAAJMIUgAAAABgEkEKAAAAAEwiSAEAAACASQQpAAAAADCJIAUAAAAAJhGkAAAAAMAkghQAAAAAmESQAgAAAACTCFIAAAAAYBJBCgAAAABMcrudhQ4cOKBVq1bpzJkzSk9Pt5s2ZsyYHCkMAAAAAJyV6SD18ccfa+DAgSpevLgCAgJksVhs0ywWC0EKAAAAQL5nOki99tprev311zVixIjcqAcAAAAAnJ7pe6T++usvde7cOTdqAQAAAIA8wXSQ6ty5s3788cfcqAUAAAAA8gTTl/ZVqlRJo0eP1saNGxUcHKwCBQrYTX/uuedyrDgAAAAAcEYWwzAMMwsEBQXdeGUWiw4fPnzHRd1tycnJ8vb2VlJSkqxWq6PLka4bwCPPMtetACB7OD7CWeSHvijRH/OL/NAfnagvZjcbmD4jdeTIkTsqDAAAAADyujt6IK9hGDJ5QgsAAAAA8rzbClKff/65goOD5enpKU9PT9WqVUtffPFFTtcGAAAAAE7J9KV97777rkaPHq3BgwerQYMGkqSff/5ZAwYM0J9//qmhQ4fmeJEAAAAA4Exua7CJ8ePHq0ePHnbtM2fO1Lhx4/LkPVQMNpELuOQTQG7g+AhnkR/6okR/zC/yQ390or6Y3Wxg+tK+06dPq379+pna69evr9OnT5tdHQAAAADkOaaDVKVKlTR37txM7V9//bUqV66cI0UBAAAAgDMzfY/U+PHj9cQTT2jt2rW2e6TWr1+vFStWZBmwAAAAACC/MX1GKjIyUps2bVLx4sX1zTff6JtvvlHx4sX1yy+/6LHHHsuNGgEAAADAqZgebCI/YrCJXEC3ApAbOD7CWeSHvijRH/OL/NAfnagv5uhgE8nJyXb/vtnLjOjoaD300EMqUqSI/Pz81KFDB+3fv99unvDwcFksFrvXgAED7OY5fvy42rZtq0KFCsnPz08vvviiUlNTTdUCAAAAANmVrXukihYtqtOnT8vPz08+Pj6yZJF6DcOQxWJRWlpatje+Zs0aDRo0SA899JBSU1P18ssvq2XLltqzZ48KFy5sm69fv36aMGGC7X2hQoVs/05LS1Pbtm0VEBCgDRs26PTp0+rRo4cKFCigSZMmZbsWAAAAAMiubAWplStXytfXV5K0atWqHNv4smXL7N7HxsbKz89PW7duVePGjW3thQoVUkBAQJbr+PHHH7Vnzx799NNP8vf3V0hIiCZOnKgRI0Zo3Lhxcnd3z7RMSkqKUlJSbO/NnkkDAAAAcG/LVpAKCwuz/TsoKEiBgYGZzkoZhqETJ07cUTFJSUmSZAttGWbNmqUvv/xSAQEBat++vUaPHm07KxUXF6fg4GD5+/vb5o+IiNDAgQO1e/duPfDAA5m2Ex0drfHjx99RrQAAAADuXaaHPw8KCrJd5ne9xMREBQUFmbq073rp6ekaMmSIGjRooJo1a9ran3zySZUrV06lSpXSzp07NWLECO3fv18LFy6UJMXHx9uFKEm29/Hx8Vlua9SoURo2bJjtfXJysgIDA2+rbgAAAAD3HtNBKuNeqH+7ePGiChYseNuFDBo0SL/99pt+/vlnu/ann37a9u/g4GCVLFlSzZo106FDh1SxYsXb2paHh4c8PDxuu1YAAAAA97ZsB6mMMzgWi8Xu0jrp2oAPmzZtUkhIyG0VMXjwYC1ZskRr165VmTJlbjpv3bp1JUkHDx5UxYoVFRAQoF9++cVunoSEBEm64X1VAAAAAHAnsh2ktm/fLunaGaldu3bZDeLg7u6u+++/X8OHDze1ccMw9Oyzz2rRokVavXq1goKCbrnMjh07JEklS5aUJIWGhur111/XmTNnbJcbLl++XFarVdWrVzdVDwAAAABkh+kH8vbq1Uvvvfdejjy49plnntHs2bP17bff6r777rO1e3t7y9PTU4cOHdLs2bPVpk0bFStWTDt37tTQoUNVpkwZrVmzRtK1s2EhISEqVaqU3nrrLcXHx6t79+7q27dvtoc/54G8ucCJHqoGIB/h+AhnkR/6okR/zC/yQ390or6Y3WxgOkjlpKzutZKkmJgY9ezZUydOnNBTTz2l3377TZcuXVJgYKAee+wxvfrqq3Y7dezYMQ0cOFCrV69W4cKFFRUVpTfeeENubtk74UaQygVO9MMAIB/h+AhnkR/6okR/zC/yQ390or6Yq0Fqy5Ytmjt3ro4fP64rV67YTcsYTS8vIUjlAif6YQCQj3B8hLPID31Roj/mF/mhPzpRX8xuNnAxu+I5c+aofv362rt3rxYtWqSrV69q9+7dWrlypby9ve+oaAAAAADIC0wHqUmTJmny5MlavHix3N3d9d5772nfvn16/PHHVbZs2dyoEQAAAACciukgdejQIbVt21bStdH6Ll26JIvFoqFDh2rGjBk5XiAAAAAAOBvTQapo0aK6cOGCJKl06dL67bffJEnnz5/X33//nbPVAQAAAIATyvZzpDI0btxYy5cvV3BwsDp37qznn39eK1eu1PLly9WsWbPcqBEAAAAAnIrpIPXf//5Xly9fliS98sorKlCggDZs2KDIyEi9+uqrOV4gAAAAADgbhz5Hylkw/HkuoFsByA0cH+Es8kNflOiP+UV+6I9O1Bdzbfjzbdu2adeuXbb33377rTp06KCXX3450zOlAAAAACA/Mh2k+vfvr99//12SdPjwYT3xxBMqVKiQ5s2bp5deeinHCwQAAAAAZ2M6SP3+++8KCQmRJM2bN09hYWGaPXu2YmNjtWDBgpyuDwAAAACcjukgZRiG0tPTJUk//fST2rRpI0kKDAzUn3/+mbPVAQAAAIATMh2k6tSpo9dee01ffPGF1qxZY3s475EjR+Tv75/jBQIAAACAszEdpKZMmaJt27Zp8ODBeuWVV1SpUiVJ0vz581W/fv0cLxAAAAAAnE2ODX9++fJlubq6qkCBAjmxuruK4c9zgRMNYQkgH+H4CGeRH/qiRH/ML/JDf3SivpjdbGD6gbw3UrBgwZxaFQAAAAA4tWwFKV9fX/3+++8qXry4ihYtKstNUm9iYmKOFQcAAAAAzihbQWry5MkqUqSIpGv3SAEAAADAvSzH7pHKy7hHKhfQrQDkBo6PcBb5oS9K9Mf8Ij/0Ryfqizl6j1RycnK2N+wUQQQAAAAAclG2gpSPj89N74uSrj2o12KxKC0tLUcKAwAAAABnla0gtWrVqtyuAwAAAADyjGwFqbCwsNyuAwAAAADyjGwFqZ07d6pmzZpycXHRzp07bzpvrVq1cqQwAAAAAHBW2QpSISEhio+Pl5+fn0JCQmSxWJTVYH/cIwUAAADgXpCtIHXkyBGVKFHC9m8AAAAAuJdlK0iVK1fO9u9jx46pfv36cnOzXzQ1NVUbNmywmxcAAAAA8iMXsws0adJEiYmJmdqTkpLUpEmTHCkKAAAAAJyZ6SCV8byofzt37pwKFy6cI0UBAAAAgDPL1qV9ktSxY0dJ1waU6Nmzpzw8PGzT0tLStHPnTtWvXz/nKwQAAAAAJ5PtIOXt7S3p2hmpIkWKyNPT0zbN3d1d9erVU79+/XK+QgAAAA
    " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Extracting LUTs from res_dict\n", "LUTs_dwc = [res_dict_dwc[key][\"LUT\"] for key in res_dict_dwc.keys()] \n", diff --git a/notebooks/advanced/cybsec_PE_SIMD.onnx b/notebooks/advanced/cybsec_PE_SIMD.onnx new file mode 100644 index 0000000000000000000000000000000000000000..b450cc9e43361e845fda8c95d743e1b461a1a9ad GIT binary patch literal 192234 zcmeF0P0wxFRh_TF0sHbPB8Y*sXj%gC(TGH+01+uhae@*8=~;zcKqYpSWy%41<99@9 z&`Xy_`u-bq@?SEaxz<=`taaA=USk&t4h-En=a^%Rz4qScy!WH*&;7{H{OC9T>XVPZ z_0>Q7g&+R4U-?(R`M>`6kG=h$Km7LdfBN;0|M>l{f91oE-u~2=-~Z&x-~Rene)TKg zeE(bTf8$r*{)^f^`S1_^>{}oF`Ja9K^{*}CXMXm{`1sqO{OT`!p$_=ghkyBpAAGg$ z^V?tj;f8otvf98k2`1!X#^yPo^h2Mg{ zE&4}a_}kA;`S<_z7d`|1vA3W7k6-%o?~dgc-u~zxzyH<$`oTwE`=uZMlW+d%`(Jzi zllTAR>mUEcUw`I@zxcCnfAWt%{OJ8}{ndZ|=;LpF`QxvBFz)b+Z-3&$kG}csPky(e zfACk|{NR^<^mo4W`~Uvm{`P<1nlHTl+&4b>##g>k>0kQ%*FN~_$KU?u*FX3g&Od+q z;~#zec%yOF&%OPrkG}nlFLB&>=JTKX+s}UP zx8mdR-}1kwzxn)m;@rjI!J)SuE%bh#scp|3narnrMbh=FC0$5!+UB6j1m3#Jl;aSA@*d zd!Lz^dpeKH>(7}Dnp;i2*X?J(o+Ekf>7742^O}AJXJ4Cm;jf>u(0TUt{H0$ryaUfU zuUj0+dU3GrYH77~Hn}>pp*b1Jr`+dFz6Lo8sD9-$)ZSpa5a2+gwsX;d>X*+dzOI3$ zUVD1USMJQP19ktd-hX?>aoN1MhNke^#Jl;W<=OdNtmn^b>zIbBSLOG(^d_@arcpER zV{MqztIR_3C0+h;1u53aI;i z;@$kzE3#$mdEy%?LIKsU`aQgUURs>3yoTpoPwkyQM|oXx)j|PvzfZiIpL#{Mj6F|$ zLq#Z{?xQ6~@$QLc$YjpkgRkF{N4!^Z6j1m3#Jl;aS7giB^Tan)gaYb5T5=Tco@j_FXj56$|t z*R|~FB~S0v(}M$bA1!&vPm~-5)crp3Zhq<&*)sO#-2VQySC_17PM*714(OHr^p5CS z$x%Sv?-TFlr(TgQW6u-cP!S5K`)J8gynCV68QZtV-nmv|xMD4_25iFfl;ugI3MH|Kgz=g@(=kCq(8 zyC<3J`~C_B`J`~C_B`e_4obr z|8#yo$UB;!kr*7P`##a$rE^(k;90dUs2By*eYE5#-aXL_nar7c@*}pbj3Gh{Mn?#a&>|NEn%A{0>f`^3BXsaJ%|Ywy2)Uh_J* zA(J`1d!}95IuAYf;wSv`;cI#)M*(%;=9UQ`-Cj>F=W~rrb9PtJ1r@8>u0hK0ruOYT z_Jodn?F+@9&YGs*zt_q=-2)A1o>8+iOrY*d+wN1|dM9#OtbRZ1rTISnT<8hKd7=9r z>%8)kuV*}T?$PzabM!5A+_U!e9t7A6eqP^q+eP50T7h%;=A2>Pp~&_5l?9z)^8A`} z?tMnQm$PLz?Du@{afaU~r=P{MJ^7%zHm=|9QyE|5nb+oZ=<7Y=ym^<+^Rson0zSLv zWp|#loPirYC!cLyqj)Yp&)QcN&D^F>&+oK6J+686zVmvgSq!kIHQ4r+4rz zSJyRS`W-yiyAX`rrcdwb8uIg+SMNKoXDnzkPp`-O`<~nRGkcEpGl-#C=5uc6Il~01 zU%tONI?(Q|^GgTY^QonEjen-6I!0yIy}~#(c2~#hI-|2*YHqs_s(|X(d;3H_-y!qM z>)_1JYkbyIX8L)~`c8xy(Bw7yJg@UP=enMoecd;*+qHamhKf)?$F;RLm@Wi3(5UO# z<*nzKxk^8vq7+c~{rekIdp;-c({^2F%^7raf6we)^S%4$1dYAU@cnbHGb*<-WP$3} zO@GO&ywC3IX3p9=j~M69>E~PbLg$&8^Zw3TTf0yOmS!QRYN3E0=XzhWyv$v7BJ=sS zv+~-$kl-wK_{6%t_S^ff+~xiAoQ?bJy2hUcXEzr&a^Nx7y?GyeSJ5qBx}TZzy5==L zBdu0duJGY4ETj!wHJr*=`+z+Vx{<$Gfzt1%HIb1^lb>F6+FW&*4x}PbZbLBbH zRTQk8dVSol&8gK`xzJm~gjWi&sbT_Xw3BtXor4YzxIlDmV8#N zVCB?nXSV0D?02kZ^}YMr>7(^^yvryW6YUnl?3{E z1PNX%czRZ~+}X(t{l}j+Q)R==K?tqk!gFH5)nMxwHD`(t2r`x&G{s0UG5q$inm4>=E~~*p&}Ge_ip)JtWa!f z$Ir!B*S>uY)9azJtnb%5&oDU&u3yiEkWeiYQ1|=9yZNbCWXss|#4o(~xwqMoZYy`_ zE1|*GyUo4Ta{q^{pkd;To= zxjeV5m%7IHe09E*OTCWT&amf+Z>R_b)P1z%DBeBM44KS1_wDEAhyuW`hY}bfXLGkQ!XY^T`)4ONt);#Tbq8lng0d*fOIg0l= z{rp(V>*d&G@g0*K1=RgM@os+T>wB*#+4b`q8LWn?S8dl|l(xaUXPT#$d-A;IR_bRKMn*_uKb7J;uuRGV-at_BpmUuX`FQg#zk+pLjPv^@?m6 zd!G1)icmn^M@x?4-4o4_$(*?-kJ!^^LC)FN=1aNM-xSz^E`@HW2nE!AwB#tYuir=P z3FT5e_vA-xTgg#C-R~3c=BHkfEo09U-%t?>sD6E?*+ZLaW-P6##Rkbe`4QVzauiVg z;_r%@jhyhDI7CK3}e3E!6U?3y+C z_Opfu6y@EsJUL`D<)^nD>pkfqI_w8qId%nnwSswB#t>=Uks}c==f+C-P3H5)@GP`@}o^ewNVJUeV6l^F%jPgaYb5T5=Tco@j>t}>rvnJop#RH0(`^uH)v$xOPdd+ox(FGNwfQ~be^3RdT_}SO}XZGCJ+39o6 z36($rb-z!%n;-i3^Zs@EfabWo|4j7A_eID(xvxRzyfo?GTOX`$0Ju z4XA$AJ)K!Pl5=Dx_w)5$8DMWLwbyy}eRgZ;v--N?+x^n}PM`K@iYy$c`##a$rE^(j z^4adFiJ4yzpL)edg?r*lVi+*@;bZQ|Yh)QR}}DSdq>N2X%e`*Q8No=Cp8 z=6PIOSZ9xQbj|Tz&B=$dUyHVW&d5sDAOxK;0kB`<(uK 
zcn80w+5c;s&->s9>Gu>oC!f!2{Lsc1x-^ITaUPufk>V4@f06&s73go$Ct~l{Rr7=V zH9VnzY98n2-j~uJbjC}?N8+6K@vMJYXa0}v+>hn?Nd3>fegB8#@xM3o!+*g`|3x17 z&fb@L03P-a6!{|V@q2#suYCq{?w$K-^#|SMO$E>PY4_gqKj;}9_dNIX*WcDJ8U;V} z>3KQd|EqrR(x05?K4)-(aZ<4?N`fIPhcKFxU z&-yQt&+&@?%}>4f{I9xZd++@Ex4!VBFMj0p*S_%i7k}mT*IxY4>wo&W|L{{U zUd-;dzw_Z2{>F>HuJ-HQmgSp&=f#hH_1zEkueo@wi~p6k4&eZKUd9;z zCAW=9i+{v#EdSuey?;)@FTVJ(n|}lA1%D}Sit*() zZ;fAm=b6T@9E~6R^z8@oV=sQ>KmGc*zV*(py!+w1AO80H-}u7ke&LsX`77aEZ|6zp zdO1%z*Sopnd^^9FKe6BNuf6>XU;7uo_;dgA3xE5?k9_!zZ~f-CKX~W8cYmAj=!<{* z#b5g3KYH_vKmWyl|G_Wbd(A(-&-<(Yibwvm)8=RK8BLoX*!MK;=Py5}-{b50^8KHn z+e_(v{p?phxZSe8E6?AuzBJF@vc5fcw!Hnh7Y}cI{_UT9@#B26f95}a=Qlt6_HVrN V?e~A?SHJPCZ+!6XdvCn Date: Tue, 27 Jun 2023 12:25:00 +0100 Subject: [PATCH 523/628] [tests] Enable ci to automatically test new folding nb --- notebooks/advanced/3_folding.ipynb | 11 +++++++---- tests/notebooks/test_jupyter_notebooks.py | 1 + 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/notebooks/advanced/3_folding.ipynb b/notebooks/advanced/3_folding.ipynb index 1eb99206e2..07b66da52f 100644 --- a/notebooks/advanced/3_folding.ipynb +++ b/notebooks/advanced/3_folding.ipynb @@ -85,10 +85,12 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(\"cybsec_PE_SIMD.onnx\")\n", + "model_path = os.environ[\"FINN_ROOT\"] + \"/notebooks/advanced/cybsec_PE_SIMD.onnx\" \n", + "model = ModelWrapper(model_path)\n", "\n", - "showInNetron(\"cybsec_PE_SIMD.onnx\")" + "showInNetron(model_path)" ] }, { @@ -137,7 +139,7 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(\"cybsec_PE_SIMD.onnx\")" + "showInNetron(model_path)" ] }, { @@ -415,7 +417,8 @@ "metadata": {}, "outputs": [], "source": [ - "model_orig = ModelWrapper(\"cybsec_PE_SIMD.onnx\")\n", + "dir_path = os.environ[\"FINN_ROOT\"] + \"/notebooks/advanced/\" \n", + "model_orig = ModelWrapper(dir_path + \"cybsec_PE_SIMD.onnx\")\n", "model_updated = ModelWrapper(\"cybsec_PE_SIMD_modified.onnx\")" ] }, diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index 819b4ccde0..836f1e059e 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -21,6 +21,7 @@ pytest.param(notebook_advanced_dir + "0_custom_analysis_pass.ipynb"), pytest.param(notebook_advanced_dir + "1_custom_transformation_pass.ipynb"), pytest.param(notebook_advanced_dir + "2_custom_op.ipynb"), + pytest.param(notebook_advanced_dir + "3_folding.ipynb"), ] cyber_notebooks = [ From 21f191ec9f64e33fc767a861297b76d25c8597ea Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 28 Jun 2023 14:18:44 +0100 Subject: [PATCH 524/628] [gha] Update python version for pre-commit gha --- .github/workflows/pre-commit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 5f03379bbc..011ccebadc 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: '3.8' + python-version: '3.10' - name: Run Lint uses: pre-commit/action@v3.0.0 From d3465bc31684886ef1079184f486f36a75c640e2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 28 Jun 2023 14:31:31 +0100 Subject: [PATCH 525/628] [linting] Pre-commit with python 3.10 on all files --- .pre-commit-config.yaml | 3 +- .../cybersecurity/dataloader_quantized.py | 18 +-- 
.../cybersecurity/validate-unsw-nb15.py | 4 +- .../fpgadataflow/dataflow_performance.py | 4 +- .../analysis/fpgadataflow/post_synth_res.py | 2 +- .../analysis/fpgadataflow/res_estimation.py | 5 +- src/finn/builder/build_dataflow.py | 18 +-- src/finn/builder/build_dataflow_config.py | 14 +-- src/finn/builder/build_dataflow_steps.py | 60 +++------- src/finn/core/onnx_exec.py | 8 +- .../fpgadataflow/addstreams_batch.py | 16 +-- .../fpgadataflow/channelwise_op_batch.py | 39 ++----- src/finn/custom_op/fpgadataflow/checksum.py | 19 +--- src/finn/custom_op/fpgadataflow/concat.py | 21 +--- .../fpgadataflow/convolutioninputgenerator.py | 8 +- .../convolutioninputgenerator1d.py | 37 ++---- .../convolutioninputgenerator_rtl.py | 78 ++++--------- .../custom_op/fpgadataflow/downsampler.py | 4 +- .../fpgadataflow/duplicatestreams_batch.py | 16 +-- src/finn/custom_op/fpgadataflow/eltwise.py | 17 +-- .../custom_op/fpgadataflow/fmpadding_batch.py | 4 +- .../custom_op/fpgadataflow/fmpadding_rtl.py | 12 +- .../fpgadataflow/globalaccpool_batch.py | 8 +- .../custom_op/fpgadataflow/hlscustomop.py | 28 ++--- src/finn/custom_op/fpgadataflow/iodma.py | 30 ++--- .../fpgadataflow/labelselect_batch.py | 8 +- src/finn/custom_op/fpgadataflow/lookup.py | 24 +--- .../fpgadataflow/matrixvectoractivation.py | 94 ++++------------ src/finn/custom_op/fpgadataflow/pool_batch.py | 16 +-- .../streamingdatawidthconverter_batch.py | 26 ++--- .../custom_op/fpgadataflow/streamingfifo.py | 32 ++---- .../fpgadataflow/streamingmaxpool_batch.py | 12 +- .../fpgadataflow/thresholding_batch.py | 72 +++--------- .../custom_op/fpgadataflow/tlastmarker.py | 16 +-- src/finn/custom_op/fpgadataflow/upsampler.py | 8 +- .../fpgadataflow/vectorvectoractivation.py | 80 ++++--------- .../qnn-data/cybsec-mlp/validate-unsw-nb15.py | 8 +- .../qnn-data/templates/driver/driver_base.py | 32 ++---- .../qnn-data/templates/driver/validate.py | 4 +- .../fpgadataflow/annotate_resources.py | 4 +- .../transformation/fpgadataflow/cleanup.py | 4 +- .../fpgadataflow/compile_cppsim.py | 4 +- .../fpgadataflow/convert_to_hls_layers.py | 78 ++++--------- .../fpgadataflow/create_stitched_ip.py | 106 +++++------------- .../fpgadataflow/derive_characteristic.py | 30 ++--- .../fpgadataflow/externalize_params.py | 6 +- .../transformation/fpgadataflow/floorplan.py | 9 +- .../fpgadataflow/hlssynth_ip.py | 12 +- .../transformation/fpgadataflow/insert_dwc.py | 3 +- .../fpgadataflow/insert_fifo.py | 19 +--- .../fpgadataflow/insert_hook.py | 3 +- .../fpgadataflow/insert_iodma.py | 31 ++--- .../fpgadataflow/insert_tlastmarker.py | 13 +-- .../fpgadataflow/make_pynq_driver.py | 59 +++------- .../fpgadataflow/make_zynq_proj.py | 43 ++----- .../fpgadataflow/prepare_cppsim.py | 4 +- .../transformation/fpgadataflow/prepare_ip.py | 4 +- .../fpgadataflow/prepare_rtlsim.py | 4 +- .../fpgadataflow/set_exec_mode.py | 4 +- .../fpgadataflow/set_fifo_depths.py | 27 ++--- .../fpgadataflow/set_folding.py | 15 +-- .../fpgadataflow/vitis_build.py | 49 +++----- src/finn/transformation/move_reshape.py | 8 +- .../qonnx/convert_qonnx_to_finn.py | 4 +- .../qonnx/fold_quant_weights.py | 19 +--- .../qonnx/infer_quant_avg_pool_2d.py | 53 ++------- .../qonnx/qonnx_activation_handlers.py | 32 ++---- .../qonnx/quant_act_to_multithreshold.py | 4 +- src/finn/transformation/streamline/absorb.py | 47 ++------ src/finn/transformation/streamline/reorder.py | 86 +++----------- .../streamline/round_thresholds.py | 3 +- src/finn/util/create.py | 8 +- src/finn/util/data_packing.py | 20 +--- src/finn/util/imagenet.py | 
15 +-- src/finn/util/platforms.py | 12 +- src/finn/util/pyverilator.py | 20 +--- src/finn/util/test.py | 4 +- src/finn/util/vcd.py | 4 +- .../brevitas/test_brevitas_avg_pool_export.py | 2 +- tests/brevitas/test_brevitas_mobilenet.py | 8 +- ...revitas_non_scaled_quanthardtanh_export.py | 8 +- tests/brevitas/test_brevitas_qlinear.py | 8 +- .../brevitas/test_brevitas_relu_act_export.py | 2 - .../test_brevitas_scaled_qhardtanh_export.py | 8 +- .../brevitas/test_brevitas_selu_act_export.py | 4 +- .../test_brevitas_validate_mobilenet.py | 6 +- tests/end2end/test_end2end_bnn_pynq.py | 100 +++++------------ tests/end2end/test_end2end_cybsec_mlp.py | 17 ++- tests/end2end/test_end2end_mobilenet_v1.py | 20 +--- .../test_convert_to_hls_1d_conv_layer.py | 16 +-- .../test_convert_to_hls_channelwise_layer.py | 17 +-- .../test_convert_to_hls_conv_fc_transition.py | 60 +++------- .../test_convert_to_hls_conv_layer.py | 12 +- .../test_convert_to_hls_layers_cnv.py | 6 +- .../test_convert_to_hls_layers_fc.py | 6 +- .../test_convert_to_hls_layers_synthetic.py | 20 +--- .../test_convert_to_hls_pool_batch.py | 36 ++---- .../test_depthwise_convolution.py | 17 +-- tests/fpgadataflow/test_fifosizing.py | 11 +- .../test_fpgadataflow_channelwise_ops.py | 4 +- .../test_fpgadataflow_checksum.py | 8 +- .../test_fpgadataflow_convinputgenerator.py | 20 +--- .../test_fpgadataflow_convinputgenerator1d.py | 16 +-- ...est_fpgadataflow_convinputgenerator_rtl.py | 28 ++--- ...dataflow_convinputgenerator_rtl_dynamic.py | 60 +++------- .../test_fpgadataflow_downsampler.py | 4 +- .../test_fpgadataflow_duplicatestreams.py | 8 +- tests/fpgadataflow/test_fpgadataflow_dwc.py | 9 +- tests/fpgadataflow/test_fpgadataflow_fifo.py | 6 +- .../test_fpgadataflow_fmpadding.py | 16 +-- .../test_fpgadataflow_globalaccpool.py | 4 +- .../test_fpgadataflow_ipstitch.py | 8 +- .../test_fpgadataflow_labelselect.py | 4 +- .../fpgadataflow/test_fpgadataflow_lookup.py | 4 +- .../test_fpgadataflow_streamingmaxpool.py | 16 +-- .../test_fpgadataflow_thresholding.py | 28 ++--- tests/fpgadataflow/test_fpgadataflow_vvau.py | 12 +- tests/fpgadataflow/test_minimize_bit_width.py | 26 +---- tests/fpgadataflow/test_runtime_weights.py | 4 +- tests/fpgadataflow/test_set_folding.py | 10 +- tests/fpgadataflow/test_split_large_fifos.py | 8 +- tests/notebooks/test_jupyter_notebooks.py | 4 +- .../streamline/test_absorb_mul_into_topk.py | 16 +-- .../test_absorb_transp_into_flatten.py | 4 +- .../streamline/test_linear_past_eltwise.py | 20 +--- .../streamline/test_maxpool_nhwc.py | 24 +--- .../streamline/test_move_chw_add_past_conv.py | 8 +- .../test_move_identical_op_past_join_op.py | 16 +-- .../test_move_maxpool_past_multithreshold.py | 12 +- .../streamline/test_move_mul_past_dw_conv.py | 8 +- .../streamline/test_move_mul_past_maxpool.py | 8 +- .../streamline/test_move_past_fork.py | 8 +- .../test_move_scalar_past_matmul.py | 8 +- .../streamline/test_scale_resize_nhwc.py | 44 ++------ .../test_infer_data_layouts_cnv.py | 10 +- tests/transformation/test_qonnx_to_finn.py | 15 +-- tests/util/test_build_dataflow.py | 20 +--- tests/util/test_create.py | 4 +- tests/util/test_data_packing_hls.py | 8 +- tutorials/fpga_flow/gen_tb_data.py | 4 +- 140 files changed, 702 insertions(+), 2001 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 42a18b2737..72a9688505 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -60,11 +60,12 @@ repos: hooks: - id: black language_version: python3 + args: [--line-length=100] - repo: 
https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: - id: flake8 # black-compatible flake-8 config - args: ['--max-line-length=88', # black default + args: ['--max-line-length=100', # black default '--extend-ignore=E203'] # E203 is not PEP8 compliant diff --git a/notebooks/end2end_example/cybersecurity/dataloader_quantized.py b/notebooks/end2end_example/cybersecurity/dataloader_quantized.py index 738811fa72..38505fb6ef 100644 --- a/notebooks/end2end_example/cybersecurity/dataloader_quantized.py +++ b/notebooks/end2end_example/cybersecurity/dataloader_quantized.py @@ -48,7 +48,6 @@ def __init__( onehot=False, train=True, ): - self.dataframe = ( pd.concat([pd.read_csv(file_path_train), pd.read_csv(file_path_test)]) .reset_index() @@ -77,9 +76,7 @@ def __getitem__(self, index): data_val = self.data[index][:-1] return data_val, target - def dec2bin( - self, column: pd.Series, number_of_bits: int, left_msb: bool = True - ) -> pd.Series: + def dec2bin(self, column: pd.Series, number_of_bits: int, left_msb: bool = True) -> pd.Series: """Convert a decimal pd.Series to binary pd.Series with numbers in their # base-2 equivalents. The output is a numpy nd array. @@ -133,6 +130,7 @@ def integer_encoding(self, df): def quantize_df(self, df): """Quantized the input dataframe. The scaling is done by multiplying every column by the inverse of the minimum of that column""" + # gets the smallest positive number of a vector def get_min_positive_number(vector): return vector[vector > 0].min() @@ -178,24 +176,18 @@ def char_split(s): column_data = np.clip( column_data, 0, 4294967295 ) # clip due to overflow of uint32 of matlab code - column_data = self.round_like_matlab_series( - column_data - ) # round like matlab + column_data = self.round_like_matlab_series(column_data) # round like matlab column_data = column_data.astype(np.uint32) # cast like matlab if column == "rate": column_data.update(pd.Series(dict_correct_rate_values)) python_quantized_df[column] = ( - self.dec2bin(column_data, maxbits, left_msb=False) - .reshape((-1, 1)) - .flatten() + self.dec2bin(column_data, maxbits, left_msb=False).reshape((-1, 1)).flatten() ) for column in python_quantized_df.columns: - python_quantized_df[column] = ( - python_quantized_df[column].apply(char_split).values - ) + python_quantized_df[column] = python_quantized_df[column].apply(char_split).values python_quantized_df_separated = pd.DataFrame( np.column_stack(python_quantized_df.values.T.tolist()) diff --git a/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py b/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py index 0ffb525544..c4570616d2 100644 --- a/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py +++ b/notebooks/end2end_example/cybersecurity/validate-unsw-nb15.py @@ -57,9 +57,7 @@ def make_unsw_nb15_test_batches(bsize, dataset_root): help='name of bitfile (i.e. "resizer.bit")', default="../bitfile/finn-accel.bit", ) - parser.add_argument( - "--dataset_root", help="dataset root dir for download/reuse", default="." 
- ) + parser.add_argument("--dataset_root", help="dataset root dir for download/reuse", default=".") # parse arguments args = parser.parse_args() bsize = args.batchsize diff --git a/src/finn/analysis/fpgadataflow/dataflow_performance.py b/src/finn/analysis/fpgadataflow/dataflow_performance.py index 5726702666..824690f5f6 100644 --- a/src/finn/analysis/fpgadataflow/dataflow_performance.py +++ b/src/finn/analysis/fpgadataflow/dataflow_performance.py @@ -66,9 +66,7 @@ def dataflow_performance(model): max_pred_latency = 0 else: # find max of any of predecessors - pred_latencies = map( - lambda x: latency_at_node_output[x.name], predecessors - ) + pred_latencies = map(lambda x: latency_at_node_output[x.name], predecessors) max_pred_latency = max(pred_latencies) latency_at_node_output[node.name] = node_cycles + max_pred_latency critical_path_cycles = max(latency_at_node_output.values()) diff --git a/src/finn/analysis/fpgadataflow/post_synth_res.py b/src/finn/analysis/fpgadataflow/post_synth_res.py index 1202120529..3304b88d60 100644 --- a/src/finn/analysis/fpgadataflow/post_synth_res.py +++ b/src/finn/analysis/fpgadataflow/post_synth_res.py @@ -86,7 +86,7 @@ def get_instance_stats(inst_name): if row != []: node_dict = {} row = list(row[0]) - for (restype, ind) in restype_to_ind.items(): + for restype, ind in restype_to_ind.items(): node_dict[restype] = int(row[ind].attrib["contents"]) return node_dict else: diff --git a/src/finn/analysis/fpgadataflow/res_estimation.py b/src/finn/analysis/fpgadataflow/res_estimation.py index 406496bc0e..be4cf417bc 100644 --- a/src/finn/analysis/fpgadataflow/res_estimation.py +++ b/src/finn/analysis/fpgadataflow/res_estimation.py @@ -62,10 +62,7 @@ def res_estimation_complete(model): if is_fpgadataflow_node(node) is True: op_type = node.op_type inst = registry.getCustomOp(node) - if ( - op_type == "MatrixVectorActivation" - or op_type == "VectorVectorActivation" - ): + if op_type == "MatrixVectorActivation" or op_type == "VectorVectorActivation": orig_restype = inst.get_nodeattr("resType") res_dict[node.name] = [] inst.set_nodeattr("resType", "dsp") diff --git a/src/finn/builder/build_dataflow.py b/src/finn/builder/build_dataflow.py index d6864994a7..284cd2baa3 100644 --- a/src/finn/builder/build_dataflow.py +++ b/src/finn/builder/build_dataflow.py @@ -91,12 +91,8 @@ def resolve_build_steps(cfg: DataflowBuildConfig, partial: bool = True): return steps_as_fxns -def resolve_step_filename( - step_name: str, cfg: DataflowBuildConfig, step_delta: int = 0 -): - step_names = list( - map(lambda x: x.__name__, resolve_build_steps(cfg, partial=False)) - ) +def resolve_step_filename(step_name: str, cfg: DataflowBuildConfig, step_delta: int = 0): + step_names = list(map(lambda x: x.__name__, resolve_build_steps(cfg, partial=False))) assert step_name in step_names, "start_step %s not found" + step_name step_no = step_names.index(step_name) + step_delta assert step_no >= 0, "Invalid step+delta combination" @@ -150,19 +146,13 @@ def build_dataflow_cfg(model_filename, cfg: DataflowBuildConfig): for transform_step in build_dataflow_steps: try: step_name = transform_step.__name__ - print( - "Running step: %s [%d/%d]" - % (step_name, step_num, len(build_dataflow_steps)) - ) + print("Running step: %s [%d/%d]" % (step_name, step_num, len(build_dataflow_steps))) # redirect output to logfile if not cfg.verbose: sys.stdout = stdout_logger sys.stderr = stderr_logger # also log current step name to logfile - print( - "Running step: %s [%d/%d]" - % (step_name, step_num, 
len(build_dataflow_steps)) - ) + print("Running step: %s [%d/%d]" % (step_name, step_num, len(build_dataflow_steps))) # run the step step_start = time.time() model = transform_step(model, cfg) diff --git a/src/finn/builder/build_dataflow_config.py b/src/finn/builder/build_dataflow_config.py index 4c3e4ff899..e4fed05731 100644 --- a/src/finn/builder/build_dataflow_config.py +++ b/src/finn/builder/build_dataflow_config.py @@ -267,9 +267,7 @@ class DataflowBuildConfig: #: When `auto_fifo_depths = True`, select which method will be used for #: setting the FIFO sizes. - auto_fifo_strategy: Optional[ - AutoFIFOSizingMethod - ] = AutoFIFOSizingMethod.LARGEFIFO_RTLSIM + auto_fifo_strategy: Optional[AutoFIFOSizingMethod] = AutoFIFOSizingMethod.LARGEFIFO_RTLSIM #: Avoid using C++ rtlsim for auto FIFO sizing and rtlsim throughput test #: if set to True, always using Python instead @@ -366,9 +364,7 @@ def _resolve_driver_platform(self): elif self.shell_flow_type == ShellFlowType.VITIS_ALVEO: return "alveo" else: - raise Exception( - "Couldn't resolve driver platform for " + str(self.shell_flow_type) - ) + raise Exception("Couldn't resolve driver platform for " + str(self.shell_flow_type)) def _resolve_fpga_part(self): if self.fpga_part is None: @@ -410,8 +406,7 @@ def _resolve_vitis_platform(self): return alveo_default_platform[self.board] else: raise Exception( - "Could not resolve Vitis platform:" - " need either board or vitis_platform specified" + "Could not resolve Vitis platform:" " need either board or vitis_platform specified" ) def _resolve_verification_steps(self): @@ -429,8 +424,7 @@ def _resolve_verification_io_pair(self): ) verify_input_npy = np.load(self.verify_input_npy) assert os.path.isfile(self.verify_expected_output_npy), ( - "verify_expected_output_npy not found: " - + self.verify_expected_output_npy + "verify_expected_output_npy not found: " + self.verify_expected_output_npy ) verify_expected_output_npy = np.load(self.verify_expected_output_npy) return ( diff --git a/src/finn/builder/build_dataflow_steps.py b/src/finn/builder/build_dataflow_steps.py index a22b5adc98..54ba7e4ea1 100644 --- a/src/finn/builder/build_dataflow_steps.py +++ b/src/finn/builder/build_dataflow_steps.py @@ -145,9 +145,7 @@ def verify_step( in_npy = np.expand_dims(in_npy_all[b], axis=0) exp_out_npy = np.expand_dims(exp_out_npy_all[b], axis=0) if need_parent: - assert ( - cfg.save_intermediate_models - ), "Enable save_intermediate_models for verification" + assert cfg.save_intermediate_models, "Enable save_intermediate_models for verification" parent_model_fn = intermediate_models_dir + "/dataflow_parent.onnx" child_model_fn = intermediate_models_dir + "/verify_%s.onnx" % step_name model.save(child_model_fn) @@ -161,9 +159,7 @@ def verify_step( ) print("Attempting to force model shape on verification input") in_npy = in_npy.reshape(exp_ishape) - out_dict = execute_parent( - parent_model_fn, child_model_fn, in_npy, return_full_ctx=True - ) + out_dict = execute_parent(parent_model_fn, child_model_fn, in_npy, return_full_ctx=True) out_npy = out_dict[out_tensor_name] else: inp_tensor_name = model.graph.input[0].name @@ -230,9 +226,7 @@ def prepare_for_stitched_ip_rtlsim(verify_model, cfg): inst.set_nodeattr("ipgen_path", "") need_restitch = True # StreamingDataWidthConverter must have impl_style=hls - for dwc_layer in verify_model.get_nodes_by_op_type( - "StreamingDataWidthConverter_Batch" - ): + for dwc_layer in verify_model.get_nodes_by_op_type("StreamingDataWidthConverter_Batch"): inst = getCustomOp(dwc_layer) 
if inst.get_nodeattr("impl_style") != "hls": inst.set_nodeattr("impl_style", "hls") @@ -382,8 +376,7 @@ def step_create_dataflow_partition(model: ModelWrapper, cfg: DataflowBuildConfig parent_model = model.transform( CreateDataflowPartition( - partition_model_dir=cfg.output_dir - + "/intermediate_models/supported_op_partitions" + partition_model_dir=cfg.output_dir + "/intermediate_models/supported_op_partitions" ) ) sdp_nodes = parent_model.get_nodes_by_op_type("StreamingDataflowPartition") @@ -422,9 +415,7 @@ def step_target_fps_parallelization(model: ModelWrapper, cfg: DataflowBuildConfi "mem_mode", "runtime_writeable_weights", ] - extract_model_config_to_json( - model, cfg.output_dir + "/auto_folding_config.json", hw_attrs - ) + extract_model_config_to_json(model, cfg.output_dir + "/auto_folding_config.json", hw_attrs) return model @@ -459,9 +450,7 @@ def step_generate_estimate_reports(model: ModelWrapper, cfg: DataflowBuildConfig with open(report_dir + "/estimate_layer_cycles.json", "w") as f: json.dump(estimate_layer_cycles, f, indent=2) estimate_layer_resources = model.analysis(res_estimation) - estimate_layer_resources["total"] = aggregate_dict_keys( - estimate_layer_resources - ) + estimate_layer_resources["total"] = aggregate_dict_keys(estimate_layer_resources) with open(report_dir + "/estimate_layer_resources.json", "w") as f: json.dump(estimate_layer_resources, f, indent=2) estimate_layer_resources_complete = model.analysis(res_estimation_complete) @@ -475,8 +464,7 @@ def step_generate_estimate_reports(model: ModelWrapper, cfg: DataflowBuildConfig est_fps = n_clock_cycles_per_sec / estimate_network_performance["max_cycles"] estimate_network_performance["estimated_throughput_fps"] = est_fps est_latency_ns = ( - estimate_network_performance["critical_path_cycles"] - * cfg.synth_clk_period_ns + estimate_network_performance["critical_path_cycles"] * cfg.synth_clk_period_ns ) estimate_network_performance["estimated_latency_ns"] = est_latency_ns with open(report_dir + "/estimate_network_performance.json", "w") as f: @@ -497,9 +485,7 @@ def step_minimize_bit_width(model: ModelWrapper, cfg: DataflowBuildConfig): def step_hls_codegen(model: ModelWrapper, cfg: DataflowBuildConfig): "Generate Vivado HLS code to prepare HLSCustomOp nodes for IP generation." - model = model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) + model = model.transform(PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period())) return model @@ -599,9 +585,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): "inFIFODepths", "outFIFODepths", ] - extract_model_config_to_json( - model, cfg.output_dir + "/final_hw_config.json", hw_attrs - ) + extract_model_config_to_json(model, cfg.output_dir + "/final_hw_config.json", hw_attrs) # perform FIFO splitting and shallow FIFO removal only after the final config # json file has been written. otherwise, since these transforms may add/remove @@ -612,9 +596,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig): # after FIFOs are ready to go, call PrepareIP and HLSSynthIP again # this will only run for the new nodes (e.g. 
FIFOs and DWCs) - model = model.transform( - PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period()) - ) + model = model.transform(PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period())) model = model.transform(HLSSynthIP()) return model @@ -651,9 +633,7 @@ def step_create_stitched_ip(model: ModelWrapper, cfg: DataflowBuildConfig): if cfg.verify_save_rtlsim_waveforms: report_dir = cfg.output_dir + "/report" os.makedirs(report_dir, exist_ok=True) - verify_model.set_metadata_prop( - "rtlsim_trace", "%s/verify_rtlsim.vcd" % (report_dir) - ) + verify_model.set_metadata_prop("rtlsim_trace", "%s/verify_rtlsim.vcd" % (report_dir)) verify_step(verify_model, cfg, "stitched_ip_rtlsim", need_parent=True) os.environ["LIVENESS_THRESHOLD"] = str(prev_liveness) return model @@ -674,9 +654,7 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi rtlsim_model = deepcopy(model) rtlsim_model = prepare_for_stitched_ip_rtlsim(rtlsim_model, cfg) # multi-in/out streams currently not supported in our C++ verilator driver - model_multi_io = ( - len(rtlsim_model.graph.input) > 1 or len(rtlsim_model.graph.output) > 1 - ) + model_multi_io = len(rtlsim_model.graph.input) > 1 or len(rtlsim_model.graph.output) > 1 force_python_rtlsim = cfg.force_python_rtlsim or model_multi_io if model_multi_io: warnings.warn( @@ -694,9 +672,7 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi "rtlsim_trace", "%s/rtlsim_perf_batch_%d.vcd" % (report_dir, rtlsim_bs), ) - rtlsim_model.set_metadata_prop( - "extra_verilator_args", str(["-CFLAGS", "-O3"]) - ) + rtlsim_model.set_metadata_prop("extra_verilator_args", str(["-CFLAGS", "-O3"])) # run with single input to get latency rtlsim_latency_dict = throughput_test_rtlsim(rtlsim_model, 1) # run with batch to get stable-state throughput @@ -712,7 +688,7 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi rtlsim_perf_dict["runtime[ms]"] = runtime_s * 1000 rtlsim_perf_dict["throughput[images/s]"] = rtlsim_bs / runtime_s rtlsim_perf_dict["fclk[mhz]"] = fclk_mhz - for (key, val) in rtlsim_perf_dict.items(): + for key, val in rtlsim_perf_dict.items(): if "max_count" in key: del rtlsim_perf_dict[key] # estimate stable-state throughput based on latency+throughput @@ -754,13 +730,9 @@ def step_out_of_context_synthesis(model: ModelWrapper, cfg: DataflowBuildConfig) """Run out-of-context synthesis and generate reports. 
Depends on the DataflowOutputType.STITCHED_IP output product.""" if DataflowOutputType.OOC_SYNTH in cfg.generate_outputs: - assert ( - DataflowOutputType.STITCHED_IP in cfg.generate_outputs - ), "OOC needs stitched IP" + assert DataflowOutputType.STITCHED_IP in cfg.generate_outputs, "OOC needs stitched IP" model = model.transform( - SynthOutOfContext( - part=cfg._resolve_fpga_part(), clk_period_ns=cfg.synth_clk_period_ns - ) + SynthOutOfContext(part=cfg._resolve_fpga_part(), clk_period_ns=cfg.synth_clk_period_ns) ) report_dir = cfg.output_dir + "/report" os.makedirs(report_dir, exist_ok=True) diff --git a/src/finn/core/onnx_exec.py b/src/finn/core/onnx_exec.py index daecb59743..588e97e9e4 100644 --- a/src/finn/core/onnx_exec.py +++ b/src/finn/core/onnx_exec.py @@ -34,9 +34,7 @@ from finn.core.rtlsim_exec import rtlsim_exec -def execute_onnx( - model, input_dict, return_full_exec_context=False, start_node=None, end_node=None -): +def execute_onnx(model, input_dict, return_full_exec_context=False, start_node=None, end_node=None): """Executes given ONNX ModelWrapper with given named inputs. If return_full_exec_context is False, a dict of named outputs is returned as indicated by the model.graph.output. @@ -53,9 +51,7 @@ def execute_onnx( # if set to "rtlsim" execute model using pyverilator model_exec_mode = model.get_metadata_prop("exec_mode") if (model_exec_mode is None) or (model_exec_mode == ""): - return execute_onnx_base( - model, input_dict, return_full_exec_context, start_node, end_node - ) + return execute_onnx_base(model, input_dict, return_full_exec_context, start_node, end_node) if not model.check_all_tensor_shapes_specified(): raise Exception("Found unspecified tensor shapes, try infer_shapes") diff --git a/src/finn/custom_op/fpgadataflow/addstreams_batch.py b/src/finn/custom_op/fpgadataflow/addstreams_batch.py index 8fbdf9c452..51de1590ec 100644 --- a/src/finn/custom_op/fpgadataflow/addstreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/addstreams_batch.py @@ -121,9 +121,7 @@ def verify_node(self): self.get_nodeattr("inputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required LabelSelect_Batch attributes do not exist.""" - ) + info_messages.append("""The required LabelSelect_Batch attributes do not exist.""") return info_messages @@ -184,9 +182,7 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert ( - inp.shape == exp_ishape - ), """Input0 shape doesn't match expected shape .""" + assert inp.shape == exp_ishape, """Input0 shape doesn't match expected shape .""" export_idt = self.get_input_datatype() # reshape input into folded form inp = inp.reshape(folded_ishape) @@ -197,9 +193,7 @@ def execute_node(self, context, graph): # exact same thing for input1 inp = context[node.input[1]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert ( - inp.shape == exp_ishape - ), """Input1 shape doesn't match expected shape .""" + assert inp.shape == exp_ishape, """Input1 shape doesn't match expected shape .""" export_idt = self.get_input_datatype() # reshape input into folded form inp = inp.reshape(folded_ishape) @@ -377,9 +371,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS 
INTERFACE ap_ctrl_none port=return") def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() diff --git a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py index 71fc37b184..5e0063ac33 100644 --- a/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py +++ b/src/finn/custom_op/fpgadataflow/channelwise_op_batch.py @@ -178,9 +178,7 @@ def verify_node(self): self.get_nodeattr("outputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required Threshold_Batch attributes do not exist.""" - ) + info_messages.append("""The required Threshold_Batch attributes do not exist.""") return info_messages @@ -300,9 +298,7 @@ def get_hls_compatible_parameter_tensor(self, orig_param_vector): assert (orig_param_vector.astype(np.int32) == orig_param_vector).all() ret = orig_param_vector - assert ( - ret.shape[0] == chn - ), "Cardinality of parameter vector is not as expected (chn)" + assert ret.shape[0] == chn, "Cardinality of parameter vector is not as expected (chn)" # distribute rows between PEs ret = ret.reshape(tmem, pe).transpose() @@ -324,9 +320,7 @@ def generate_params(self, model, path): parameter_tensor = self.get_hls_compatible_parameter_tensor(parameters) pdt = DataType[self.get_nodeattr("paramDataType")] - parameters_hls_code = numpy_to_hls_code( - parameter_tensor, pdt, "parameters", False, True - ) + parameters_hls_code = numpy_to_hls_code(parameter_tensor, pdt, "parameters", False, True) # get input data type export_idt = self.get_input_datatype() if self.get_input_datatype() == DataType["BIPOLAR"]: @@ -430,9 +424,7 @@ def execute_node(self, context, graph): elif mode == "rtlsim": sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) output = self.rtlsim(sim, inp) @@ -441,9 +433,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) @@ -584,18 +574,13 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") # the channelwise parameter tensor is acc_type [PE][TMEM][N_PARAMS_PER_CHANNEL] # partition for parallel access along PE and N_PARAMS_PER_CHANNEL # dimensions (dims 1 and 3) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.parameters " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.parameters " "complete dim=1") ) # self.code_gen_dict["$PRAGMAS$"].append( # ( @@ -613,17 +598,11 @@ def pragmas(self): if pe < ich: if ram_style == "distributed": self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.parameters " - "core=ROM_2P_LUTRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.parameters " 
"core=ROM_2P_LUTRAM") ) elif ram_style == "block": self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.parameters " - "core=ROM_2P_BRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.parameters " "core=ROM_2P_BRAM") ) else: raise Exception( diff --git a/src/finn/custom_op/fpgadataflow/checksum.py b/src/finn/custom_op/fpgadataflow/checksum.py index c9d16c0011..6121c5d97a 100644 --- a/src/finn/custom_op/fpgadataflow/checksum.py +++ b/src/finn/custom_op/fpgadataflow/checksum.py @@ -183,9 +183,7 @@ def execute_node(self, context, graph): np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) io_dict = { @@ -199,9 +197,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) @@ -303,8 +299,7 @@ def dataoutstrm(self): ), "std::vector checksum(1);", "checksum[0] = chk;", - 'cnpy::npy_save("%s/output_checksum.npy",&checksum[0],{1},"w");' - % code_gen_dir, + 'cnpy::npy_save("%s/output_checksum.npy",&checksum[0],{1},"w");' % code_gen_dir, ] def save_as_npy(self): @@ -331,13 +326,9 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS interface s_axilite port=drain bundle=checksum" ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS interface ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS interface ap_ctrl_none port=return") self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS dataflow") - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS dataflow disable_start_propagation" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS dataflow disable_start_propagation") def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() diff --git a/src/finn/custom_op/fpgadataflow/concat.py b/src/finn/custom_op/fpgadataflow/concat.py index c43e88d59d..8c24dadbeb 100644 --- a/src/finn/custom_op/fpgadataflow/concat.py +++ b/src/finn/custom_op/fpgadataflow/concat.py @@ -134,7 +134,7 @@ def generate_params(self, model, path): idt = self.get_input_datatype() total_elems = self.get_total_elems() total_bw = idt.bitwidth() * total_elems - for (i, elems) in enumerate(elems_per_stream): + for i, elems in enumerate(elems_per_stream): bw = idt.bitwidth() * elems inp_stream = "hls::stream > &in%d" % (bw, i) inp_streams.append(inp_stream) @@ -298,8 +298,7 @@ def strm_decl(self): packed_hls_type = "ap_uint<%d>" % packed_bits stream_name = "in%d_%s" % (i, self.hls_sname()) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream<%s> %s ("%s");' - % (packed_hls_type, stream_name, stream_name) + 'hls::stream<%s> %s ("%s");' % (packed_hls_type, stream_name, stream_name) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( 'hls::stream> out_{} ("out_{}");'.format( @@ -353,9 +352,7 @@ def blackboxfunction(self): in_streams = [] for i in range(n_inputs): iwidth = self.get_instream_width(i) - in_streams.append( - "hls::stream> &in%d_%s" 
% (iwidth, i, self.hls_sname()) - ) + in_streams.append("hls::stream> &in%d_%s" % (iwidth, i, self.hls_sname())) in_streams = ",".join(in_streams) total_width = self.get_input_datatype().bitwidth() * self.get_total_elems() out_stream = "hls::stream> &out_%s" % ( @@ -369,16 +366,12 @@ def pragmas(self): n_inputs = self.get_n_inputs() pragmas = [] for i in range(n_inputs): - pragmas.append( - "#pragma HLS INTERFACE axis port=in%d_%s" % (i, self.hls_sname()) - ) + pragmas.append("#pragma HLS INTERFACE axis port=in%d_%s" % (i, self.hls_sname())) self.code_gen_dict["$PRAGMAS$"] = pragmas self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def get_instream_width_padded(self, ind=0): in_width = self.get_instream_width(ind) @@ -390,7 +383,5 @@ def get_verilog_top_module_intf_names(self): sname = self.hls_sname() intf_names["s_axis"] = [] for i in range(n_inputs): - intf_names["s_axis"].append( - ("in%d_%s" % (i, sname), self.get_instream_width_padded(i)) - ) + intf_names["s_axis"].append(("in%d_%s" % (i, sname), self.get_instream_width_padded(i))) return intf_names diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py index c80f79a8c9..33c542d79d 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py @@ -202,9 +202,7 @@ def get_exp_cycles(self): cycles_write_block = (ofm_dim_w * k_w * k_h * (ifm_ch / simd)) / mmv cycles_read_block = stride_w * ifm_dim_w * (ifm_ch / simd) max_cycles = max(cycles_write_block, cycles_read_block) - exp_cycles = ( - ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles - ) + exp_cycles = ifm_dim_w * k_h * dilation_h * (ifm_ch / simd) + ofm_dim_h * max_cycles return int(exp_cycles) @@ -505,6 +503,4 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py index 43e8df17b4..046e8e096d 100644 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator1d.py @@ -245,13 +245,7 @@ def use_parallel_window_output(self): no_dilation = dilation_h == 1 and dilation_w == 1 supported_ram_style = ram_style in ["auto", "distributed"] if self.get_nodeattr("parallel_window") == 1: - if ( - fully_unfolded - and non_dws - and no_stride - and no_dilation - and supported_ram_style - ): + if fully_unfolded and non_dws and no_stride and no_dilation and supported_ram_style: return True else: warnings.warn( @@ -289,10 +283,7 @@ def get_exp_cycles(self): "ConvolutionInputGenerator_1D_dws_stride", ]: exp_cycles = ( - 1 - + ofm_dim_w * k_w * ifm_ch / simd - + (ifm_ch / simd) * (k_w - 1) - - (k_w - 1) + 1 + ofm_dim_w * k_w * ifm_ch / simd + (ifm_ch / simd) * (k_w - 1) - (k_w - 1) ) elif swu_variant == "ConvolutionInputGenerator_1D_dws_naive": cycles_read_block = ifm_dim_w * ifm_ch / simd @@ -337,9 +328,7 @@ def 
bram_estimation(self): ram_width = 2 else: ram_width = 1 - width_mul = math.ceil( - simd * self.get_input_datatype().bitwidth() / ram_width - ) + width_mul = math.ceil(simd * self.get_input_datatype().bitwidth() / ram_width) depth_mul = math.ceil(ram_depth / 18432) return width_mul * depth_mul else: @@ -358,25 +347,17 @@ def lut_estimation(self): ram_style = self.get_nodeattr("ram_style") swu_variant = self.get_swu_variant() if swu_variant == "ConvolutionInputGenerator_1D_parallel": - ram_luts = math.ceil( - simd * self.get_input_datatype().bitwidth() * (k_w + 1) / 64 - ) + ram_luts = math.ceil(simd * self.get_input_datatype().bitwidth() * (k_w + 1) / 64) elif ram_style == "distributed": if swu_variant == "ConvolutionInputGenerator_1D": - ram_luts = math.ceil( - self.get_input_datatype().bitwidth() * (k_w - 1) * ifm_ch / 64 - ) + ram_luts = math.ceil(self.get_input_datatype().bitwidth() * (k_w - 1) * ifm_ch / 64) elif swu_variant == "ConvolutionInputGenerator_1D_dws_naive": - ram_luts = math.ceil( - self.get_input_datatype().bitwidth() * ifm_dim_w * ifm_ch / 64 - ) + ram_luts = math.ceil(self.get_input_datatype().bitwidth() * ifm_dim_w * ifm_ch / 64) elif swu_variant in [ "ConvolutionInputGenerator_1D_dws", "ConvolutionInputGenerator_1D_dws_stride", ]: - ram_luts = math.ceil( - self.get_input_datatype().bitwidth() * k_w * ifm_ch / 64 - ) + ram_luts = math.ceil(self.get_input_datatype().bitwidth() * k_w * ifm_ch / 64) else: ram_luts = 0 return 300 + ram_luts @@ -741,6 +722,4 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") diff --git a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py index c54c4ac1c9..a55cdcc0be 100755 --- a/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py +++ b/src/finn/custom_op/fpgadataflow/convolutioninputgenerator_rtl.py @@ -239,9 +239,7 @@ def get_buffer_depth(self): channel_factor = int(ifm_ch / simd) # compute minimal buffer length (assuming it holds 1 complete window) - buffer_min_size = ( - (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 - ) * channel_factor + buffer_min_size = ((k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1) * channel_factor impl_style = self.select_impl_style() if impl_style == "default": @@ -251,13 +249,11 @@ def get_buffer_depth(self): buffer_min_size + max( 0, - ((stride_w - 1) - (int(mmv_out * k_h * k_w / mmv_in))) - * channel_factor, + ((stride_w - 1) - (int(mmv_out * k_h * k_w / mmv_in))) * channel_factor, ) + max( 0, - ((stride_h - 1) * w - (int(mmv_out * k_h * k_w / mmv_in))) - * channel_factor, + ((stride_h - 1) * w - (int(mmv_out * k_h * k_w / mmv_in))) * channel_factor, ) ) elif impl_style == "parallel": @@ -377,9 +373,7 @@ def bram_estimation(self): remainder_cascade_width = math.ceil(buffer_width / remainder_width) cascade_savings = ram_cascade_width - remainder_cascade_width - return int( - (ram_cascade_depth * ram_cascade_width - cascade_savings) * buffer_count - ) + return int((ram_cascade_depth * ram_cascade_width - cascade_savings) * buffer_count) else: return 0 @@ -430,9 +424,7 @@ def execute_node(self, context, graph): folded_ishape = self.get_folded_input_shape() if mode == "cppsim": - raise Exception( - "cppsim not possible for RTL SWG, please set 
exec_mode to rtlsim" - ) + raise Exception("cppsim not possible for RTL SWG, please set exec_mode to rtlsim") elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: @@ -463,9 +455,7 @@ def execute_node(self, context, graph): sim = self.get_rtlsim() nbits = self.get_instream_width() - rtlsim_inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + rtlsim_inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) rtlsim_output = self.rtlsim(sim, rtlsim_inp) @@ -474,9 +464,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) @@ -524,9 +512,7 @@ def prepare_codegen_default(self): channel_factor = int(ifm_ch / simd) # compute minimal buffer length (assuming it holds 1 complete window) - buffer_min_size = ( - (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 - ) * channel_factor + buffer_min_size = ((k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1) * channel_factor buffer_actual_size = self.get_buffer_depth() code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] @@ -680,9 +666,7 @@ def prepare_codegen_parallel(self): the loop controller configuration and partitioning the fixed buffer into shift-registers (for parallel read access) and line buffers (for efficient LUTRAM/BRAM/URAM implementation).""" - template_path = ( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" - ) + template_path = os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_parallel.sv" code_gen_dict = {} ifm_ch = self.get_nodeattr("IFMChannels") @@ -707,9 +691,7 @@ def prepare_codegen_parallel(self): channel_factor = int(ifm_ch / simd) # compute minimal buffer length (assuming it holds 1 complete window) - buffer_min_size = ( - (k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1 - ) * channel_factor + buffer_min_size = ((k_h - 1) * dilation_h * w + (k_w - 1) * dilation_w + 1) * channel_factor buffer_actual_size = self.get_buffer_depth() code_gen_dict["$BUF_ELEM_TOTAL$"] = [str(buffer_actual_size)] @@ -902,9 +884,7 @@ def prepare_codegen_parallel(self): OUT_ELEM_WIDTH*{mmv_idx}+:OUT_ELEM_WIDTH];""".format( out_idx=out_idx, fifo_id=fifo_id, - access_idx=len(reg_fifo) - - 1 - - int((max(reg_fifo) - access_idx) / M), + access_idx=len(reg_fifo) - 1 - int((max(reg_fifo) - access_idx) / M), mmv_idx=(max(reg_fifo) - access_idx) % M, mmv=M, ) @@ -970,22 +950,16 @@ def select_impl_style(self): if self.get_nodeattr("parallel_window"): # mmv_in = M * 1 mmv_out = M * k_h * k_w - assert ( - ifm_ch == simd - ), "Constraint violated: SIMD must be equal to IFMChannels" + assert ifm_ch == simd, "Constraint violated: SIMD must be equal to IFMChannels" else: # mmv_in = 1 mmv_out = 1 - assert ( - ifm_ch % simd == 0 - ), "Constraint violated: SIMD must divide IFMChannels" + assert ifm_ch % simd == 0, "Constraint violated: SIMD must divide IFMChannels" # choose implementation style if mmv_out > 1 or (k_h == 1 and k_w == 1): impl_style = "parallel" - assert ( - ifm_ch == simd - ), "Constraint violated: SIMD must be equal to 
IFMChannels" + assert ifm_ch == simd, "Constraint violated: SIMD must be equal to IFMChannels" else: impl_style = "default" @@ -1025,9 +999,7 @@ def generate_hdl(self): template_select = "/finn-rtllib/swg/swg_template_wrapper.v" with open(os.environ["FINN_ROOT"] + template_select, "r") as f: template_wrapper = f.read() - with open( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_axilite.v", "r" - ) as f: + with open(os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_template_axilite.v", "r") as f: template_axilite = f.read() for key in code_gen_dict: # transform list into long string separated by '\n' @@ -1036,16 +1008,12 @@ def generate_hdl(self): template_wrapper = template_wrapper.replace(key, code_gen_line) template_axilite = template_axilite.replace(key, code_gen_line) with open( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv" - ), + os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_impl.sv"), "w", ) as f: f.write(template) with open( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v" - ), + os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_wrapper.v"), "w", ) as f: f.write(template_wrapper) @@ -1053,20 +1021,14 @@ def generate_hdl(self): # AXI-Lite reg. file component is only needed for dynamic mode if self.get_nodeattr("dynamic_mode"): with open( - os.path.join( - code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v" - ), + os.path.join(code_gen_dir, self.get_nodeattr("gen_top_module") + "_axilite.v"), "w", ) as f: f.write(template_axilite) # Copy static source file for common core components - shutil.copy2( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir - ) - shutil.copy2( - os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_pkg.sv", code_gen_dir - ) + shutil.copy2(os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_common.sv", code_gen_dir) + shutil.copy2(os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_pkg.sv", code_gen_dir) # set ipgen_path and ip_path so that HLS-Synth transformation # and stich_ip transformation do not complain diff --git a/src/finn/custom_op/fpgadataflow/downsampler.py b/src/finn/custom_op/fpgadataflow/downsampler.py index d42a076c30..e2cea6da6b 100644 --- a/src/finn/custom_op/fpgadataflow/downsampler.py +++ b/src/finn/custom_op/fpgadataflow/downsampler.py @@ -296,9 +296,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py index 0d5d806dc5..1f2d1b79be 100644 --- a/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py +++ b/src/finn/custom_op/fpgadataflow/duplicatestreams_batch.py @@ -132,9 +132,7 @@ def verify_node(self): self.get_nodeattr("inputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required GlobalAccPool_Batch attributes do not exist.""" - ) + info_messages.append("""The required GlobalAccPool_Batch attributes do not exist.""") return info_messages @@ -161,9 +159,7 @@ def get_outstream_width(self, ind=0): return out_width def get_number_output_values(self): - return 
self.get_num_output_streams() * np.prod( - self.get_folded_output_shape()[1:-1] - ) + return self.get_num_output_streams() * np.prod(self.get_folded_output_shape()[1:-1]) def get_exp_cycles(self): # Channels/PE * batch size * fmdim * fmdim @@ -235,9 +231,7 @@ def execute_node(self, context, graph): # execute the precompiled model super().exec_precompiled_singlenode_model() # load output npy file - super().npy_to_dynamic_outputs( - context, ["output%d.npy" % i for i in range(n_outputs)] - ) + super().npy_to_dynamic_outputs(context, ["output%d.npy" % i for i in range(n_outputs)]) for i in range(n_outputs): assert ( context[node.output[i]].shape == exp_oshape @@ -411,9 +405,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out%d_%s" % (i, self.hls_sname()) ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() diff --git a/src/finn/custom_op/fpgadataflow/eltwise.py b/src/finn/custom_op/fpgadataflow/eltwise.py index 348e314792..ab1dc00118 100644 --- a/src/finn/custom_op/fpgadataflow/eltwise.py +++ b/src/finn/custom_op/fpgadataflow/eltwise.py @@ -42,7 +42,6 @@ def __init__(self, onnx_node, **kwargs): super().__init__(onnx_node, **kwargs) def get_nodeattr_types(self): - my_attrs = super().get_nodeattr_types() my_attrs.update( { @@ -154,9 +153,7 @@ def verify_node(self): self.get_nodeattr("eltwiseOp") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required StreamingEltwise attributes do not exist.""" - ) + info_messages.append("""The required StreamingEltwise attributes do not exist.""") return info_messages @@ -235,9 +232,7 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert ( - inp.shape == exp_ishape - ), """Input0 shape doesn't match expected shape .""" + assert inp.shape == exp_ishape, """Input0 shape doesn't match expected shape .""" export_idt0 = self.get_input_datatype(0) # reshape input into folded form inp = inp.reshape(folded_ishape) @@ -248,9 +243,7 @@ def execute_node(self, context, graph): # exact same thing for input1 inp = context[node.input[1]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert ( - inp.shape == exp_ishape - ), """Input1 shape doesn't match expected shape .""" + assert inp.shape == exp_ishape, """Input1 shape doesn't match expected shape .""" export_idt1 = self.get_input_datatype(1) # reshape input into folded form inp = inp.reshape(folded_ishape) @@ -481,9 +474,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def get_verilog_top_module_intf_names(self): intf_names = super().get_verilog_top_module_intf_names() diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py index ea9028d925..5bd5e07916 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_batch.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_batch.py @@ -333,9 +333,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( 
"#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py index 9c27503224..d79c214730 100644 --- a/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py +++ b/src/finn/custom_op/fpgadataflow/fmpadding_rtl.py @@ -192,9 +192,7 @@ def execute_node(self, context, graph): folded_ishape = self.get_folded_input_shape() if mode == "cppsim": - raise Exception( - "cppsim not possible for FMPadding_rtl, please set exec_mode to rtlsim" - ) + raise Exception("cppsim not possible for FMPadding_rtl, please set exec_mode to rtlsim") elif mode == "rtlsim": code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") else: @@ -218,9 +216,7 @@ def execute_node(self, context, graph): sim = self.get_rtlsim() nbits = self.get_instream_width() - rtlsim_inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + rtlsim_inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) rtlsim_output = self.rtlsim(sim, rtlsim_inp) @@ -229,9 +225,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape) diff --git a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py index e518507034..5ed440dace 100644 --- a/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/globalaccpool_batch.py @@ -128,9 +128,7 @@ def verify_node(self): self.get_nodeattr("inputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required GlobalAccPool_Batch attributes do not exist.""" - ) + info_messages.append("""The required GlobalAccPool_Batch attributes do not exist.""") # verify that input data is 2D if len(self.get_nodeattr("numInputVectors")) != 3: @@ -351,6 +349,4 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") diff --git a/src/finn/custom_op/fpgadataflow/hlscustomop.py b/src/finn/custom_op/fpgadataflow/hlscustomop.py index d5d0c9ea6e..4fed8ed4b5 100644 --- a/src/finn/custom_op/fpgadataflow/hlscustomop.py +++ b/src/finn/custom_op/fpgadataflow/hlscustomop.py @@ -169,9 +169,7 @@ def get_all_verilog_paths(self): code_gen_dir != "" ), """Node attribute "code_gen_dir_ipgen" is not set. 
Please run HLSSynthIP first.""" - verilog_path = "{}/project_{}/sol1/impl/verilog/".format( - code_gen_dir, self.onnx_node.name - ) + verilog_path = "{}/project_{}/sol1/impl/verilog/".format(code_gen_dir, self.onnx_node.name) # default impl only returns the HLS verilog codegen dir return [verilog_path] @@ -355,9 +353,10 @@ def ipgen_singlenode_code(self): assert os.path.isdir(ipgen_path), "IPGen failed: %s not found" % (ipgen_path) self.set_nodeattr("ipgen_path", ipgen_path) ip_path = ipgen_path + "/sol1/impl/ip" - assert os.path.isdir( - ip_path - ), "IPGen failed: %s not found. Check log under %s" % (ip_path, code_gen_dir) + assert os.path.isdir(ip_path), "IPGen failed: %s not found. Check log under %s" % ( + ip_path, + code_gen_dir, + ) self.set_nodeattr("ip_path", ip_path) vlnv = "xilinx.com:hls:%s:1.0" % node.name self.set_nodeattr("ip_vlnv", vlnv) @@ -756,22 +755,15 @@ def get_ap_int_max_w(self): instream = self.get_instream_width() outstream = self.get_outstream_width() ret = max([instream, outstream]) - assert ret <= 32768, ( - "AP_INT_MAX_W=%d is larger than allowed maximum of 32768" % ret - ) + assert ret <= 32768, "AP_INT_MAX_W=%d is larger than allowed maximum of 32768" % ret return ret def derive_characteristic_fxns(self, period, override_rtlsim_dict=None): """Return the unconstrained characteristic functions for this node.""" # ensure rtlsim is ready - assert self.get_nodeattr("rtlsim_so") != "", ( - "rtlsim not ready for " + self.onnx_node.name - ) + assert self.get_nodeattr("rtlsim_so") != "", "rtlsim not ready for " + self.onnx_node.name if self.get_nodeattr("io_chrc_period") > 0: - warnings.warn( - "Skipping node %s: already has FIFO characteristic" - % self.onnx_node.name - ) + warnings.warn("Skipping node %s: already has FIFO characteristic" % self.onnx_node.name) return exp_cycles = self.get_exp_cycles() n_inps = np.prod(self.get_folded_input_shape()[:-1]) @@ -802,9 +794,7 @@ def derive_characteristic_fxns(self, period, override_rtlsim_dict=None): # extra dicts to keep track of cycle-by-cycle transaction behavior # note that we restrict key names to filter out weight streams etc txns_in = {key: [] for (key, value) in io_dict["inputs"].items() if "in" in key} - txns_out = { - key: [] for (key, value) in io_dict["outputs"].items() if "out" in key - } + txns_out = {key: [] for (key, value) in io_dict["outputs"].items() if "out" in key} def monitor_txns(sim_obj): for inp in txns_in: diff --git a/src/finn/custom_op/fpgadataflow/iodma.py b/src/finn/custom_op/fpgadataflow/iodma.py index 4b4ad28def..bb3de268a0 100644 --- a/src/finn/custom_op/fpgadataflow/iodma.py +++ b/src/finn/custom_op/fpgadataflow/iodma.py @@ -116,9 +116,7 @@ def get_folded_input_shape(self, ind=0): shape = list(self.get_normal_input_shape()) itype_bits = self.get_input_datatype().bitwidth() intfw = self.get_nodeattr("streamWidth") - assert ( - intfw % itype_bits == 0 - ), "Input stream width must be a multiple of datatype bits" + assert intfw % itype_bits == 0, "Input stream width must be a multiple of datatype bits" elems_per_word = intfw // itype_bits assert shape[-1] % elems_per_word == 0, "Fold depth must be integer" fold_depth = shape[-1] // elems_per_word @@ -133,9 +131,7 @@ def get_folded_output_shape(self, ind=0): shape = list(self.get_normal_output_shape()) itype_bits = self.get_output_datatype().bitwidth() intfw = self.get_nodeattr("streamWidth") - assert ( - intfw % itype_bits == 0 - ), "Input stream width must be a multiple of datatype bits" + assert intfw % itype_bits == 0, "Input stream width 
must be a multiple of datatype bits" elems_per_word = intfw // itype_bits assert shape[-1] % elems_per_word == 0, "Fold depth must be integer" fold_depth = shape[-1] // elems_per_word @@ -196,9 +192,7 @@ def get_number_output_values(self): stream_width = self.get_nodeattr("streamWidth") nelems = np.prod(oshape) nbits = nelems * itype_bits - assert ( - nbits % stream_width == 0 - ), "DMA: total transfer size must be word multiple" + assert nbits % stream_width == 0, "DMA: total transfer size must be word multiple" ovalues = nbits // stream_width return ovalues @@ -255,8 +249,7 @@ def docompute(self): if strmw == intfw: # case 0: AXI MM width = out width, no DWCs needed self.code_gen_dict["$DOCOMPUTE$"] = [ - dma_inst_template - % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) + dma_inst_template % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) ] elif (strmw % intfw == 0) or (intfw % strmw == 0): # case 1: AXI MM width divisible by out width or vice versa @@ -298,8 +291,7 @@ def docompute(self): if strmw == intfw: # case 0: in width = AXI MM width, no DWCs needed self.code_gen_dict["$DOCOMPUTE$"] = [ - dma_inst_template - % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) + dma_inst_template % ("in0_" + self.hls_sname(), "out_" + self.hls_sname()) ] elif (strmw % intfw == 0) or (intfw % strmw == 0): # case 1: AXI MM width divisible by in width or vice versa @@ -381,16 +373,14 @@ def pragmas(self): if direction == "in": if intfname == "": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE m_axi offset=slave port=in0_" - + self.hls_sname() + "#pragma HLS INTERFACE m_axi offset=slave port=in0_" + self.hls_sname() ) else: self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE m_axi offset=slave port=%s" % (intfname) ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE s_axilite port=in0_%s bundle=control" - % (self.hls_sname()) + "#pragma HLS INTERFACE s_axilite port=in0_%s bundle=control" % (self.hls_sname()) ) self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() @@ -401,16 +391,14 @@ def pragmas(self): ) if intfname == "": self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE m_axi offset=slave port=out_" - + self.hls_sname() + "#pragma HLS INTERFACE m_axi offset=slave port=out_" + self.hls_sname() ) else: self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE m_axi offset=slave port=%s" % (intfname) ) self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE s_axilite port=out_%s bundle=control" - % (self.hls_sname()) + "#pragma HLS INTERFACE s_axilite port=out_%s bundle=control" % (self.hls_sname()) ) else: raise ValueError("Invalid IODMA direction, please set to in or out") diff --git a/src/finn/custom_op/fpgadataflow/labelselect_batch.py b/src/finn/custom_op/fpgadataflow/labelselect_batch.py index 12a88dacd4..60d3eb9154 100644 --- a/src/finn/custom_op/fpgadataflow/labelselect_batch.py +++ b/src/finn/custom_op/fpgadataflow/labelselect_batch.py @@ -141,9 +141,7 @@ def verify_node(self): self.get_nodeattr("outputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required LabelSelect_Batch attributes do not exist.""" - ) + info_messages.append("""The required LabelSelect_Batch attributes do not exist.""") # verify that input data is 1D if len(self.get_nodeattr("numInputVectors")) > 1: @@ -362,9 +360,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + 
self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def get_exp_cycles(self): nlabels = self.get_nodeattr("Labels") diff --git a/src/finn/custom_op/fpgadataflow/lookup.py b/src/finn/custom_op/fpgadataflow/lookup.py index ecf630ef7f..2dfca90ed9 100644 --- a/src/finn/custom_op/fpgadataflow/lookup.py +++ b/src/finn/custom_op/fpgadataflow/lookup.py @@ -184,9 +184,7 @@ def defines(self, var): my_defines.append("#define T_SRC %s" % elem_hls_type) my_defines.append("#define T_DST ap_uint") elif mem_mode == "const": - my_defines.append( - "#define NumEmbeddings %d" % self.get_nodeattr("NumEmbeddings") - ) + my_defines.append("#define NumEmbeddings %d" % self.get_nodeattr("NumEmbeddings")) my_defines.append("#define EmbeddingDim %d" % emb_dim) my_defines.append("#define InputType %s" % elem_hls_type) my_defines.append("#define EmbeddingType %s" % emb_hls_type) @@ -310,18 +308,12 @@ def pragmas(self): my_pragmas.append("#pragma HLS INTERFACE axis port=out_" + self.hls_sname()) my_pragmas.append("#pragma HLS INTERFACE ap_ctrl_none port=return") if mem_mode == "const": - my_pragmas.append( - "#pragma HLS BIND_STORAGE variable=embeddings type=ROM_2P impl=BRAM" - ) + my_pragmas.append("#pragma HLS BIND_STORAGE variable=embeddings type=ROM_2P impl=BRAM") elif mem_mode == "external": my_pragmas.append("#pragma HLS INTERFACE m_axi offset=slave port=mem") my_pragmas.append("#pragma HLS INTERFACE s_axilite port=mem bundle=control") - my_pragmas.append( - "#pragma HLS INTERFACE s_axilite port=size bundle=control" - ) - my_pragmas.append( - "#pragma HLS INTERFACE s_axilite port=oob_count bundle=control" - ) + my_pragmas.append("#pragma HLS INTERFACE s_axilite port=size bundle=control") + my_pragmas.append("#pragma HLS INTERFACE s_axilite port=oob_count bundle=control") my_pragmas.append("#pragma HLS INTERFACE ap_none port=oob_irq") else: raise Exception("Unrecognized mem_mode: " + mem_mode) @@ -342,9 +334,7 @@ def generate_params(self, model, path): # reverse innertmost dim in embeddings to remain compatible with # how we normally encode the data in FINN embeddings_rev = np.flip(embeddings, -1) - embeddings_hls_code = numpy_to_hls_code( - embeddings_rev, edt, "embeddings", True, False - ) + embeddings_hls_code = numpy_to_hls_code(embeddings_rev, edt, "embeddings", True, False) f_thresh = open(weight_filename, "w") f_thresh.write(embeddings_hls_code) f_thresh.close() @@ -366,9 +356,7 @@ def generate_params(self, model, path): pad_amount = align_factor - emb_dim embeddings_padded = np.pad(embeddings, [(0, 0), (0, pad_amount)]) # reshape for packing the innermost dim - embeddings_padded = embeddings_padded.reshape( - -1, emb_elems_per_ext_mem_width - ) + embeddings_padded = embeddings_padded.reshape(-1, emb_elems_per_ext_mem_width) weight_filename = "%s/%s.dat" % (path, self.onnx_node.name) ret = pack_innermost_dim_as_hex_string( embeddings_padded, edt, ext_mem_width, True, prefix="" diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index fae2d86d88..204a41e21c 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -189,9 +189,7 @@ def verify_node(self): self.get_nodeattr("outputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The 
required MatrixVectorActivation attributes do not exist.""" - ) + info_messages.append("""The required MatrixVectorActivation attributes do not exist.""") # verify the number of inputs depending on noActivation value # check noActivation value to determine the number of inputs @@ -370,9 +368,7 @@ def lut_estimation(self): comp_luts = (2**B - 1) * acc_bits return int( - c0 - + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) - + c2 + c0 + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) + c2 ) def dsp_estimation(self): @@ -720,9 +716,7 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (mh, 1)) - assert ( - ret.shape[0] == mh - ), "Channels of threshold matrix are not as expected (mh)" + assert ret.shape[0] == mh, "Channels of threshold matrix are not as expected (mh)" # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) assert ( @@ -760,9 +754,7 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): if self.get_weight_datatype() == DataType["BIPOLAR"]: export_wdt = DataType["BINARY"] if weight_file_mode == "hls_header": - weight_hls_code = numpy_to_hls_code( - weight_tensor, export_wdt, "weights", True, True - ) + weight_hls_code = numpy_to_hls_code(weight_tensor, export_wdt, "weights", True, True) # write weights into C++ header file as dictated by finn-hlslib f_weights = open(weight_file_name, "w") if export_wdt.bitwidth() != 1: @@ -796,14 +788,10 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): pe = self.get_nodeattr("PE") simd = self.get_nodeattr("SIMD") # simd_flipped - weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape( - 1, -1, pe * simd - ) + weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape(1, -1, pe * simd) weight_tensor_simd_flipped = weight_tensor_simd_flipped.copy() # flipped - weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape( - 1, -1, pe * simd - ) + weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape(1, -1, pe * simd) weight_tensor_pe_flipped = weight_tensor_pe_flipped.copy() if weight_file_mode == "decoupled_npy": # save weight stream into npy for cppsim @@ -866,9 +854,7 @@ def generate_params(self, model, path): # also save weights as Verilog .dat file # This file will be ignored when synthesizing UltraScale memory. 
weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) - self.make_weight_file( - weights, "decoupled_verilog_dat", weight_filename_rtl - ) + self.make_weight_file(weights, "decoupled_verilog_dat", weight_filename_rtl) else: raise Exception( """Please set mem_mode to "const", "decoupled", or "external", @@ -987,9 +973,7 @@ def execute_node(self, context, graph): elif mode == "rtlsim": sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) if mem_mode == "external" or mem_mode == "decoupled": @@ -999,9 +983,7 @@ def execute_node(self, context, graph): # so use it as such for weight generation if self.get_weight_datatype() == DataType["BIPOLAR"]: export_wdt = DataType["BINARY"] - wei = npy_to_rtlsim_input( - "{}/weights.npy".format(code_gen_dir), export_wdt, wnbits - ) + wei = npy_to_rtlsim_input("{}/weights.npy".format(code_gen_dir), export_wdt, wnbits) num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) io_dict = { "inputs": {"in0": inp, "weights": wei * num_w_reps}, @@ -1016,9 +998,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) @@ -1078,9 +1058,7 @@ def defines(self, var): ] if mem_mode == "decoupled" or mem_mode == "external": wdt = self.get_weight_datatype() - self.code_gen_dict["$DEFINES$"].append( - "#define WP1 {}\n".format(wdt.bitwidth()) - ) + self.code_gen_dict["$DEFINES$"].append("#define WP1 {}\n".format(wdt.bitwidth())) def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -1283,19 +1261,14 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") if mem_mode == "const": self.code_gen_dict["$PRAGMAS$"].append('#include "params.h"') # the weight tensor is ap_uint [PE][WMEM] # partition for parallel access along the PE dimension (dim 1) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=weights.m_weights " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=weights.m_weights " "complete dim=1") ) elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$PRAGMAS$"].append( @@ -1317,39 +1290,25 @@ def pragmas(self): if self.calc_tmem() != 0: # TODO find a better way of checking for no pregenerated thresholds self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=1") ) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=3" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=3") ) # add resource pragma for thresholds if set if ram_style_thresholds == "distributed": 
self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.m_thresholds " - "core=ROM_2P_LUTRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.m_thresholds " "core=ROM_2P_LUTRAM") ) elif ram_style_thresholds == "block": self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.m_thresholds " - "core=ROM_2P_BRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.m_thresholds " "core=ROM_2P_BRAM") ) elif ram_style_thresholds == "auto": # no pragma needed pass else: - raise Exception( - "Unrecognized ram_style_thresholds value:" + ram_style_thresholds - ) + raise Exception("Unrecognized ram_style_thresholds value:" + ram_style_thresholds) def code_generation_ipi(self): cmd = [] @@ -1373,8 +1332,7 @@ def code_generation_ipi(self): cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) cmd.append( "create_bd_intf_pin -mode Master " - "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" - % (node_name, dout_name) + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, dout_name) ) cmd.append( "create_bd_intf_pin -mode Slave " @@ -1389,8 +1347,7 @@ def code_generation_ipi(self): strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( - "create_bd_cell -type ip -vlnv %s /%s/%s" - % (strm_vlnv, node_name, strm_inst) + "create_bd_cell -type ip -vlnv %s /%s/%s" % (strm_vlnv, node_name, strm_inst) ) cmd.append( "set_property -dict [list " @@ -1444,8 +1401,7 @@ def code_generation_ipi(self): axilite_name = self.get_verilog_top_module_intf_names()["axilite"][0] cmd.append( "create_bd_intf_pin -mode Slave " - "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" - % (node_name, axilite_name) + "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" % (node_name, axilite_name) ) cmd.append( "connect_bd_intf_net [get_bd_intf_pins %s/%s] " @@ -1467,9 +1423,7 @@ def get_verilog_top_module_intf_names(self): mem_mode = self.get_nodeattr("mem_mode") sname = self.hls_sname() if mem_mode == "external": - intf_names["s_axis"].append( - ("weights_" + sname, self.get_weightstream_width_padded()) - ) + intf_names["s_axis"].append(("weights_" + sname, self.get_weightstream_width_padded())) if mem_mode == "decoupled": # only expose axilite interface if attribute is set runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 1 @@ -1513,7 +1467,5 @@ def derive_characteristic_fxns(self, period): if mem_mode in ["decoupled", "external"]: n_weight_inps = self.calc_wmem() num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) - io_dict["inputs"]["weights"] = [ - 0 for i in range(num_w_reps * n_weight_inps) - ] + io_dict["inputs"]["weights"] = [0 for i in range(num_w_reps * n_weight_inps)] super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/pool_batch.py b/src/finn/custom_op/fpgadataflow/pool_batch.py index 8ccfce7820..8c7bc83141 100644 --- a/src/finn/custom_op/fpgadataflow/pool_batch.py +++ b/src/finn/custom_op/fpgadataflow/pool_batch.py @@ -191,13 +191,9 @@ def verify_node(self): # check supported function fnx = self.get_nodeattr("Function") if fnx in ["MaxPool", "QuantAvgPool"]: - info_messages.append( - "Attribute Function contains a supported pool function" - ) + info_messages.append("Attribute Function contains a supported pool function") else: - info_messages.append( - "Attribute Function contains an unsupported pool function" - ) + info_messages.append("Attribute Function contains an unsupported pool function") return info_messages def 
global_includes(self): @@ -283,9 +279,7 @@ def docompute(self): else: act_hls_dt = "ap_uint<{}>".format(accum_bits) self.code_gen_dict["$DOCOMPUTE$"] += [ - "QuantAvgPoolFunction<{},{},{}> pool_fxn;".format( - act_hls_dt, o_hls_dt, size - ) + "QuantAvgPoolFunction<{},{},{}> pool_fxn;".format(act_hls_dt, o_hls_dt, size) ] else: raise Exception("Pool_Batch doesn't currently support " + fxn) @@ -352,9 +346,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py index dc905658b1..baf4aed502 100644 --- a/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingdatawidthconverter_batch.py @@ -213,14 +213,10 @@ def defines(self, var): ] if self.needs_lcm(): lcmWidth = self.get_iowidth_lcm() - assert ( - numInWords % (lcmWidth / inWidth) == 0 - ), "Error in DWC LCM calculation" + assert numInWords % (lcmWidth / inWidth) == 0, "Error in DWC LCM calculation" numLCMToOut = numInWords // (lcmWidth / inWidth) self.code_gen_dict["$DEFINES$"].append("#define LCMWidth %d" % lcmWidth) - self.code_gen_dict["$DEFINES$"].append( - "#define NumLCMToOut %d" % (numLCMToOut) - ) + self.code_gen_dict["$DEFINES$"].append("#define NumLCMToOut %d" % (numLCMToOut)) def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -339,13 +335,9 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") if self.needs_lcm(): - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS DATAFLOW disable_start_propagation" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS DATAFLOW disable_start_propagation") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") @@ -371,9 +363,7 @@ def execute_node(self, context, graph): inp = context[node.input[0]] assert str(inp.dtype) == "float32", "Input datatype is not float32" - assert inp.shape == tuple( - exp_shape - ), "Input shape does not match expected shape." + assert inp.shape == tuple(exp_shape), "Input shape does not match expected shape." 
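# --- Illustrative sketch (editor addition; a standalone rendition, not FINN API) ---
# The LCMWidth / NumLCMToOut defines emitted for this data-width converter are
# derived from the least common multiple of the input and output stream widths,
# as shown in the defines() hunk above. A minimal self-contained version of that
# arithmetic, assuming get_iowidth_lcm() returns lcm(inWidth, outWidth) and using
# only the standard library (Python 3.9+ for math.lcm):
import math

def dwc_lcm_params(in_width, out_width, num_in_words):
    # least common multiple of the two stream widths
    lcm_width = math.lcm(in_width, out_width)
    # how many input words fit into one LCM-wide word
    words_per_lcm = lcm_width // in_width
    assert num_in_words % words_per_lcm == 0, "Error in DWC LCM calculation"
    num_lcm_to_out = num_in_words // words_per_lcm
    return lcm_width, num_lcm_to_out

# example: 24-bit in, 40-bit out, 10 input words -> LCMWidth=120, NumLCMToOut=2
# ------------------------------------------------------------------------------------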
if self.get_input_datatype() == DataType["BIPOLAR"]: # store bipolar activations as binary @@ -447,8 +437,7 @@ def code_generation_ipi(self): cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) cmd.append( "create_bd_intf_pin -mode Master " - "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" - % (node_name, dout_name) + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, dout_name) ) cmd.append( "create_bd_intf_pin -mode Slave " @@ -493,8 +482,7 @@ def code_generation_ipi(self): return cmd else: raise Exception( - "DWC implementation style %s not supported, please use hls or vivado" - % impl_style + "DWC implementation style %s not supported, please use hls or vivado" % impl_style ) def lut_estimation(self): diff --git a/src/finn/custom_op/fpgadataflow/streamingfifo.py b/src/finn/custom_op/fpgadataflow/streamingfifo.py index 34b1940fa1..1249bc1251 100644 --- a/src/finn/custom_op/fpgadataflow/streamingfifo.py +++ b/src/finn/custom_op/fpgadataflow/streamingfifo.py @@ -137,9 +137,7 @@ def get_verilog_top_module_name(self): def code_generation_ipgen(self, model, fpgapart, clk): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - verilog_dir = "{}/project_{}/sol1/impl/verilog".format( - code_gen_dir, self.onnx_node.name - ) + verilog_dir = "{}/project_{}/sol1/impl/verilog".format(code_gen_dir, self.onnx_node.name) os.makedirs(verilog_dir) # copy Q_srl.v from finn-rtllib to verilog directory memstream_dir = get_finn_root() + "/finn-rtllib/memstream/hdl/" @@ -175,9 +173,7 @@ def code_generation_ipgen(self, model, fpgapart, clk): def ipgen_singlenode_code(self): code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") - verilog_dir = "{}/project_{}/sol1/impl/verilog".format( - code_gen_dir, self.onnx_node.name - ) + verilog_dir = "{}/project_{}/sol1/impl/verilog".format(code_gen_dir, self.onnx_node.name) # prepare the IP packaging tcl template template = templates.ip_package_tcl self.code_gen_dict.clear() @@ -215,9 +211,7 @@ def get_normal_input_shape(self, ind=0): depth = self.get_adjusted_depth() assert depth >= 2, """Depth is too low""" if depth > 256 and self.get_nodeattr("impl_style") == "rtl": - warnings.warn( - "Depth is high, set between 2 and 256 for efficient SRL implementation" - ) + warnings.warn("Depth is high, set between 2 and 256 for efficient SRL implementation") # derive normal shape from folded shape # StreamingFIFOs are inserted in between fpgadataflow nodes # the folded shape could be for example (1, nf, pe) @@ -297,9 +291,7 @@ def execute_node(self, context, graph): np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input) sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) output = self.rtlsim(sim, inp) @@ -308,9 +300,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) oshape = self.get_normal_output_shape() @@ -375,8 +365,7 @@ def code_generation_ipi(self): cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) 
cmd.append( "create_bd_intf_pin -mode Master " - "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" - % (node_name, dout_name) + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, dout_name) ) cmd.append( "create_bd_intf_pin -mode Slave " @@ -397,8 +386,7 @@ def code_generation_ipi(self): ) cmd.append( "set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] " - "[get_bd_cells /%s/fifo]" - % (np.ceil(self.get_outstream_width() / 8), node_name) + "[get_bd_cells /%s/fifo]" % (np.ceil(self.get_outstream_width() / 8), node_name) ) cmd.append( "connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] " @@ -410,8 +398,7 @@ def code_generation_ipi(self): ) cmd.append( "connect_bd_net [get_bd_pins %s/%s] " - "[get_bd_pins %s/fifo/s_axis_aresetn]" - % (node_name, rst_name, node_name) + "[get_bd_pins %s/fifo/s_axis_aresetn]" % (node_name, rst_name, node_name) ) cmd.append( "connect_bd_net [get_bd_pins %s/%s] " @@ -420,8 +407,7 @@ def code_generation_ipi(self): return cmd else: raise Exception( - "FIFO implementation style %s not supported, please use rtl or vivado" - % impl_style + "FIFO implementation style %s not supported, please use rtl or vivado" % impl_style ) def bram_estimation(self): diff --git a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py index 78f4095cbe..8f294da4ac 100755 --- a/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py +++ b/src/finn/custom_op/fpgadataflow/streamingmaxpool_batch.py @@ -105,12 +105,8 @@ def get_normal_output_shape(self, ind=0): ifm_ch = self.get_nodeattr("NumChannels") ceil_mode = self.get_nodeattr("CeilMode") if not self.is_1d(): - assert ( - ifm_dim_h % k_h == 0 - ), "StreamingMaxPool needs ImgDim_h % PoolDim_h == 0" - assert ( - ifm_dim_w % k_w == 0 - ), "StreamingMaxPool needs ImgDim_w % PoolDim_w == 0" + assert ifm_dim_h % k_h == 0, "StreamingMaxPool needs ImgDim_h % PoolDim_h == 0" + assert ifm_dim_w % k_w == 0, "StreamingMaxPool needs ImgDim_w % PoolDim_w == 0" ofm_dim_h = compute_pool_output_dim(ifm_dim_h, k_h, k_h, 0, ceil_mode) ofm_dim_w = compute_pool_output_dim(ifm_dim_w, k_w, k_w, 0, ceil_mode) oshape = (1, ofm_dim_h, ofm_dim_w, ifm_ch) @@ -359,9 +355,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index fc5aa61d66..3bcc5c05cf 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -142,9 +142,7 @@ def verify_node(self): self.get_nodeattr("outputDataType") info_messages.append("All necessary attributes exist") except Exception: - info_messages.append( - """The required Threshold_Batch attributes do not exist.""" - ) + info_messages.append("""The required Threshold_Batch attributes do not exist.""") return info_messages @@ -305,23 +303,17 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): ), """Threshold matrix dimension is not as expected (2).""" n_thres_steps = orig_thres_matrix.shape[1] - assert n_thres_steps == self.get_nodeattr( - "numSteps" - ), "Mismatch in threshold steps" + assert n_thres_steps == 
self.get_nodeattr("numSteps"), "Mismatch in threshold steps" if not self.get_input_datatype().signed(): # ensure all thresholds are nonnegative assert (orig_thres_matrix >= 0).all() # ensure all thresholds are integer - assert np.equal( - np.mod(orig_thres_matrix, 1), 0 - ).all(), "Need int threshold tensor" + assert np.equal(np.mod(orig_thres_matrix, 1), 0).all(), "Need int threshold tensor" ret = orig_thres_matrix # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (mh, 1)) - assert ( - ret.shape[0] == mh - ), "Channels of threshold matrix are not as expected (mh)" + assert ret.shape[0] == mh, "Channels of threshold matrix are not as expected (mh)" # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) assert ( @@ -456,9 +448,7 @@ def generate_params(self, model, path): # also save weights as Verilog .dat file # This file will be ignored when synthesizing UltraScale memory. weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) - self.make_weight_file( - thresholds, "decoupled_verilog_dat", weight_filename_rtl - ) + self.make_weight_file(thresholds, "decoupled_verilog_dat", weight_filename_rtl) else: raise Exception("Unrecognized mem_mode") @@ -519,15 +509,11 @@ def execute_node(self, context, graph): out = 2 * out - 1 context[node.output[0]] = out oshape = self.get_normal_output_shape() - assert ( - context[node.output[0]].shape == oshape - ), """Output shape is not as expected""" + assert context[node.output[0]].shape == oshape, """Output shape is not as expected""" elif mode == "rtlsim": sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) if self.get_nodeattr("mem_mode") == "decoupled": @@ -552,9 +538,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) @@ -594,8 +578,7 @@ def defines(self, var): "#define ActVal1 %d" % self.get_nodeattr("ActVal") ) self.code_gen_dict["$DEFINES$"].append( - "#define ThresType1 %s" - % self.get_weight_datatype().get_hls_datatype_str() + "#define ThresType1 %s" % self.get_weight_datatype().get_hls_datatype_str() ) self.code_gen_dict["$DEFINES$"].append( "#define NumSteps1 %d" % self.get_nodeattr("numSteps") @@ -768,25 +751,17 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") if self.get_nodeattr("mem_mode") == "const": # the threshold tensor is acc_type [PE][TMEM][N_THRES] # partition for parallel access along PE and N_THRES # dimensions (dims 1 and 3) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=1") ) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS 
ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=3" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=3") ) # set resource type ram_style = self.get_nodeattr("ram_style") @@ -797,17 +772,11 @@ def pragmas(self): if pe < ich: if ram_style == "distributed": self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.m_thresholds " - "core=ROM_2P_LUTRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.m_thresholds " "core=ROM_2P_LUTRAM") ) elif ram_style == "block": self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS RESOURCE variable=threshs.m_thresholds " - "core=ROM_2P_BRAM" - ) + ("#pragma HLS RESOURCE variable=threshs.m_thresholds " "core=ROM_2P_BRAM") ) else: raise Exception( @@ -839,8 +808,7 @@ def code_generation_ipi(self): cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) cmd.append( "create_bd_intf_pin -mode Master " - "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" - % (node_name, dout_name) + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, dout_name) ) cmd.append( "create_bd_intf_pin -mode Slave " @@ -855,8 +823,7 @@ def code_generation_ipi(self): strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( - "create_bd_cell -type ip -vlnv %s /%s/%s" - % (strm_vlnv, node_name, strm_inst) + "create_bd_cell -type ip -vlnv %s /%s/%s" % (strm_vlnv, node_name, strm_inst) ) cmd.append( "set_property -dict [list " @@ -910,8 +877,7 @@ def code_generation_ipi(self): axilite_name = self.get_verilog_top_module_intf_names()["axilite"][0] cmd.append( "create_bd_intf_pin -mode Slave " - "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" - % (node_name, axilite_name) + "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" % (node_name, axilite_name) ) cmd.append( "connect_bd_intf_net [get_bd_intf_pins %s/%s] " @@ -966,7 +932,5 @@ def derive_characteristic_fxns(self, period): if mem_mode in ["decoupled", "external"]: n_weight_inps = self.calc_tmem() num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) - io_dict["inputs"]["weights"] = [ - 0 for i in range(num_w_reps * n_weight_inps) - ] + io_dict["inputs"]["weights"] = [0 for i in range(num_w_reps * n_weight_inps)] super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/custom_op/fpgadataflow/tlastmarker.py b/src/finn/custom_op/fpgadataflow/tlastmarker.py index 6eaf03ab16..9309841b2e 100644 --- a/src/finn/custom_op/fpgadataflow/tlastmarker.py +++ b/src/finn/custom_op/fpgadataflow/tlastmarker.py @@ -130,11 +130,9 @@ def docompute(self): self.code_gen_dict["$DOCOMPUTE$"] = [ "for(unsigned int i=0; i in0_%s ("in0_%s");' - % (self.hls_sname(), self.hls_sname()) + 'hls::stream in0_%s ("in0_%s");' % (self.hls_sname(), self.hls_sname()) ) self.code_gen_dict["$STREAMDECLARATIONS$"].append( - 'hls::stream out_%s ("out_%s");' - % (self.hls_sname(), self.hls_sname()) + 'hls::stream out_%s ("out_%s");' % (self.hls_sname(), self.hls_sname()) ) def get_verilog_top_module_intf_names(self): diff --git a/src/finn/custom_op/fpgadataflow/upsampler.py b/src/finn/custom_op/fpgadataflow/upsampler.py index ab5a734e7c..9c0db1f3df 100644 --- a/src/finn/custom_op/fpgadataflow/upsampler.py +++ b/src/finn/custom_op/fpgadataflow/upsampler.py @@ -107,9 +107,7 @@ def make_shape_compatible_op(self, model): exp_ishape = self.get_normal_input_shape() oshape = self.get_normal_output_shape() ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0])) - assert ( - ishape == exp_ishape - ), "Unexpect 
input shape for UpsampleNearestNeighbour_Batch." + assert ishape == exp_ishape, "Unexpect input shape for UpsampleNearestNeighbour_Batch." return super().make_const_shape_op(oshape) def infer_node_datatype(self, model): @@ -280,9 +278,7 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") def execute_node(self, context, graph): mode = self.get_nodeattr("exec_mode") diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 64fb5dcbe1..f817751852 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -284,9 +284,7 @@ def get_folded_input_shape(self, ind=0): simd = self.get_nodeattr("SIMD") pe = self.get_nodeattr("PE") kernel_2 = k_h * k_w - assert ( - kernel_2 % simd == 0 - ), "Requirement kernel (k_h * k_w) divisable by SIMD is violated." + assert kernel_2 % simd == 0, "Requirement kernel (k_h * k_w) divisable by SIMD is violated." sf = kernel_2 // simd assert ch % pe == 0, "Requirement Channels divisable by PE is violated." nf = ch // pe @@ -436,9 +434,7 @@ def get_hls_compatible_threshold_tensor(self, orig_thres_matrix): # ensure channels = mh , duplicating if necessary if ret.shape[0] == 1: ret = np.tile(ret, (ch, 1)) - assert ( - ret.shape[0] == ch - ), "Channels of threshold matrix are not as expected (ch)" + assert ret.shape[0] == ch, "Channels of threshold matrix are not as expected (ch)" # distribute rows between PEs ret = interleave_matrix_outer_dim_from_partitions(ret, pe) assert ( @@ -476,9 +472,7 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): if self.get_weight_datatype() == DataType["BIPOLAR"]: export_wdt = DataType["BINARY"] if weight_file_mode == "hls_header": - weight_hls_code = numpy_to_hls_code( - weight_tensor, export_wdt, "weights", True, True - ) + weight_hls_code = numpy_to_hls_code(weight_tensor, export_wdt, "weights", True, True) # write weights into C++ header file as dictated by finn-hlslib f_weights = open(weight_file_name, "w") if export_wdt.bitwidth() != 1: @@ -512,14 +506,10 @@ def make_weight_file(self, weights, weight_file_mode, weight_file_name): pe = self.get_nodeattr("PE") simd = self.get_nodeattr("SIMD") # simd_flipped - weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape( - 1, -1, pe * simd - ) + weight_tensor_simd_flipped = weight_tensor_simd_flipped.reshape(1, -1, pe * simd) weight_tensor_simd_flipped = weight_tensor_simd_flipped.copy() # flipped - weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape( - 1, -1, pe * simd - ) + weight_tensor_pe_flipped = weight_tensor_pe_flipped.reshape(1, -1, pe * simd) weight_tensor_pe_flipped = weight_tensor_pe_flipped.copy() if weight_file_mode == "decoupled_npy": # save weight stream into npy for cppsim @@ -582,9 +572,7 @@ def generate_params(self, model, path): # also save weights as Verilog .dat file # This file will be ignored when synthesizing UltraScale memory. 
weight_filename_rtl = "{}/memblock.dat".format(code_gen_dir) - self.make_weight_file( - weights, "decoupled_verilog_dat", weight_filename_rtl - ) + self.make_weight_file(weights, "decoupled_verilog_dat", weight_filename_rtl) else: raise Exception( """Please set mem_mode to "const", "decoupled", or "external", @@ -703,9 +691,7 @@ def execute_node(self, context, graph): elif mode == "rtlsim": sim = self.get_rtlsim() nbits = self.get_instream_width() - inp = npy_to_rtlsim_input( - "{}/input_0.npy".format(code_gen_dir), export_idt, nbits - ) + inp = npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir), export_idt, nbits) super().reset_rtlsim(sim) super().toggle_clk(sim) @@ -716,9 +702,7 @@ def execute_node(self, context, graph): # so use it as such for weight generation if self.get_weight_datatype() == DataType["BIPOLAR"]: export_wdt = DataType["BINARY"] - wei = npy_to_rtlsim_input( - "{}/weights.npy".format(code_gen_dir), export_wdt, wnbits - ) + wei = npy_to_rtlsim_input("{}/weights.npy".format(code_gen_dir), export_wdt, wnbits) dim_h, dim_w = self.get_nodeattr("Dim") num_w_reps = dim_h * dim_w @@ -735,9 +719,7 @@ def execute_node(self, context, graph): packed_bits = self.get_outstream_width() out_npy_path = "{}/output.npy".format(code_gen_dir) out_shape = self.get_folded_output_shape() - rtlsim_output_to_npy( - output, out_npy_path, odt, out_shape, packed_bits, target_bits - ) + rtlsim_output_to_npy(output, out_npy_path, odt, out_shape, packed_bits, target_bits) # load and reshape output output = np.load(out_npy_path) @@ -783,9 +765,7 @@ def defines(self, var): ] if mem_mode == "decoupled" or mem_mode == "external": wdt = self.get_weight_datatype() - self.code_gen_dict["$DEFINES$"].append( - "#define WP1 {}\n".format(wdt.bitwidth()) - ) + self.code_gen_dict["$DEFINES$"].append("#define WP1 {}\n".format(wdt.bitwidth())) def read_npy_data(self): code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim") @@ -986,19 +966,14 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=out_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS INTERFACE ap_ctrl_none port=return" - ) + self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return") if mem_mode == "const": self.code_gen_dict["$PRAGMAS$"].append('#include "params.h"') # the weight tensor is ap_uint [PE][WMEM] # partition for parallel access along the PE dimension (dim 1) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=weights.m_weights " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=weights.m_weights " "complete dim=1") ) elif mem_mode == "decoupled" or mem_mode == "external": self.code_gen_dict["$PRAGMAS$"].append( @@ -1016,16 +991,10 @@ def pragmas(self): if self.calc_tmem() != 0: # TODO find a better way of checking for no pregenerated thresholds self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=1" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=1") ) self.code_gen_dict["$PRAGMAS$"].append( - ( - "#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " - "complete dim=3" - ) + ("#pragma HLS ARRAY_PARTITION variable=threshs.m_thresholds " "complete dim=3") ) def get_verilog_top_module_intf_names(self): @@ -1033,9 +1002,7 @@ def get_verilog_top_module_intf_names(self): mem_mode = self.get_nodeattr("mem_mode") sname = self.hls_sname() if mem_mode == "external": - 
intf_names["s_axis"].append( - ("weights_" + sname, self.get_weightstream_width_padded()) - ) + intf_names["s_axis"].append(("weights_" + sname, self.get_weightstream_width_padded())) if mem_mode == "decoupled": # only expose axilite interface if attribute is set runtime_writable = self.get_nodeattr("runtime_writeable_weights") == 1 @@ -1065,8 +1032,7 @@ def code_generation_ipi(self): cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name)) cmd.append( "create_bd_intf_pin -mode Master " - "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" - % (node_name, dout_name) + "-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, dout_name) ) cmd.append( "create_bd_intf_pin -mode Slave " @@ -1081,8 +1047,7 @@ def code_generation_ipi(self): strm_vlnv = "amd.com:finn:memstream:1.0" strm_inst = node_name + "_wstrm" cmd.append( - "create_bd_cell -type ip -vlnv %s /%s/%s" - % (strm_vlnv, node_name, strm_inst) + "create_bd_cell -type ip -vlnv %s /%s/%s" % (strm_vlnv, node_name, strm_inst) ) cmd.append( "set_property -dict [list " @@ -1136,8 +1101,7 @@ def code_generation_ipi(self): axilite_name = self.get_verilog_top_module_intf_names()["axilite"][0] cmd.append( "create_bd_intf_pin -mode Slave " - "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" - % (node_name, axilite_name) + "-vlnv xilinx.com:interface:aximm_rtl:1.0 /%s/%s" % (node_name, axilite_name) ) cmd.append( "connect_bd_intf_net [get_bd_intf_pins %s/%s] " @@ -1281,9 +1245,7 @@ def lut_estimation(self): comp_luts = (2**B - 1) * acc_bits return int( - c0 - + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) - + c2 + c0 + c1 * (P * (mult_luts + addertree_luts + acc_luts + thr_luts + comp_luts)) + c2 ) def dsp_estimation(self): @@ -1356,7 +1318,5 @@ def derive_characteristic_fxns(self, period): if mem_mode in ["decoupled", "external"]: n_weight_inps = self.calc_wmem() num_w_reps = np.prod(self.get_nodeattr("numInputVectors")) - io_dict["inputs"]["weights"] = [ - 0 for i in range(num_w_reps * n_weight_inps) - ] + io_dict["inputs"]["weights"] = [0 for i in range(num_w_reps * n_weight_inps)] super().derive_characteristic_fxns(period, override_rtlsim_dict=io_dict) diff --git a/src/finn/qnn-data/cybsec-mlp/validate-unsw-nb15.py b/src/finn/qnn-data/cybsec-mlp/validate-unsw-nb15.py index be09abad9c..e0e2a75f19 100644 --- a/src/finn/qnn-data/cybsec-mlp/validate-unsw-nb15.py +++ b/src/finn/qnn-data/cybsec-mlp/validate-unsw-nb15.py @@ -57,9 +57,7 @@ def make_unsw_nb15_test_batches(bsize, dataset_root, limit_batches): help='name of bitfile (i.e. "resizer.bit")', default="../bitfile/finn-accel.bit", ) - parser.add_argument( - "--dataset_root", help="dataset root dir for download/reuse", default="." 
- ) + parser.add_argument("--dataset_root", help="dataset root dir for download/reuse", default=".") parser.add_argument( "--limit_batches", help="number of batches, -1 for max", type=int, default=-1 ) @@ -72,9 +70,7 @@ def make_unsw_nb15_test_batches(bsize, dataset_root, limit_batches): limit_batches = args.limit_batches print("Loading dataset...") - (test_imgs, test_labels) = make_unsw_nb15_test_batches( - bsize, dataset_root, limit_batches - ) + (test_imgs, test_labels) = make_unsw_nb15_test_batches(bsize, dataset_root, limit_batches) ok = 0 nok = 0 diff --git a/src/finn/qnn-data/templates/driver/driver_base.py b/src/finn/qnn-data/templates/driver/driver_base.py index 5f6f00da13..f701122885 100644 --- a/src/finn/qnn-data/templates/driver/driver_base.py +++ b/src/finn/qnn-data/templates/driver/driver_base.py @@ -122,7 +122,7 @@ def load_external_weights(self): w_filenames = [] if not os.path.isdir(self.runtime_weight_dir): return - for (dirpath, dirnames, filenames) in os.walk(self.runtime_weight_dir): + for dirpath, dirnames, filenames in os.walk(self.runtime_weight_dir): w_filenames.extend(filenames) tmp_weight_dict = {} @@ -173,7 +173,7 @@ def load_runtime_weights(self, flush_accel=True, verify=True): w_filenames = [] if not os.path.isdir(self.runtime_weight_dir): return - for (dirpath, dirnames, filenames) in os.walk(self.runtime_weight_dir): + for dirpath, dirnames, filenames in os.walk(self.runtime_weight_dir): w_filenames.extend(filenames) rt_weight_dict = {} for w_filename in w_filenames: @@ -182,18 +182,14 @@ def load_runtime_weights(self, flush_accel=True, verify=True): dat = f.read() else: continue - layer_w = np.fromiter( - [int(x, 16) for x in dat.strip().split()], dtype=np.uint32 - ) + layer_w = np.fromiter([int(x, 16) for x in dat.strip().split()], dtype=np.uint32) sdp_ind = int(w_filename.split("_")[0]) layer_ind = int(w_filename.split("_")[1]) rt_weight_dict[(sdp_ind, layer_ind)] = layer_w for sdp_ind, layer_ind in rt_weight_dict.keys(): cand_if_name = "StreamingDataflowPartition_%d" % sdp_ind if cand_if_name in self.ip_dict.keys(): - layer_mmio = getattr( - self, "StreamingDataflowPartition_%d" % sdp_ind - ).mmio + layer_mmio = getattr(self, "StreamingDataflowPartition_%d" % sdp_ind).mmio layer_w = rt_weight_dict[(sdp_ind, layer_ind)] layer_mmio.write_mm(0, layer_w.tobytes()) if verify: @@ -342,9 +338,7 @@ def execute_on_buffers(self, asynch=False, batch_size=None): assert batch_size <= self.batch_size, "Specified batch_size is too large." 
if self.platform == "zynq-iodma": for o in range(self.num_outputs): - assert ( - self.odma[o].read(0x00) & 0x4 != 0 - ), "Output DMA %d is not idle" % (o) + assert self.odma[o].read(0x00) & 0x4 != 0, "Output DMA %d is not idle" % (o) # manually launch IODMAs since signatures are missing for iwdma, iwbuf, iwdma_name in self.external_weights: iwdma.write(0x10, iwbuf.device_address) @@ -360,17 +354,13 @@ def execute_on_buffers(self, asynch=False, batch_size=None): self.idma[i].write(0x00, 1) elif self.platform == "alveo": for o in range(self.num_outputs): - assert self.odma_handle[o] is None, ( - "Output DMA %d is already running" % o - ) + assert self.odma_handle[o] is None, "Output DMA %d is already running" % o for i in range(self.num_inputs): self.idma[i].start(self.ibuf_packed_device[i], batch_size) for iwdma, iwbuf, iwdma_name in self.external_weights: iwdma.start(iwbuf, batch_size) for o in range(self.num_outputs): - self.odma_handle[o] = self.odma[o].start( - self.obuf_packed_device[o], batch_size - ) + self.odma_handle[o] = self.odma[o].start(self.obuf_packed_device[o], batch_size) else: raise Exception("Unrecognized platform: %s" % self.platform) # blocking behavior depends on asynch parameter @@ -386,9 +376,7 @@ def wait_until_finished(self): while status & 0x2 == 0: status = self.odma[o].read(0x00) elif self.platform == "alveo": - assert all( - [x is not None for x in self.odma_handle] - ), "No odma_handle to wait on" + assert all([x is not None for x in self.odma_handle]), "No odma_handle to wait on" for o in range(self.num_outputs): self.odma_handle[o].wait() self.odma_handle[o] = None @@ -402,9 +390,7 @@ def execute(self, input_npy): # if single input, convert to list to normalize how we process the input if not type(input_npy) is list: input_npy = [input_npy] - assert self.num_inputs == len( - input_npy - ), "Not all accelerator inputs are specified." + assert self.num_inputs == len(input_npy), "Not all accelerator inputs are specified." 
for i in range(self.num_inputs): ibuf_folded = self.fold_input(input_npy[i], ind=i) ibuf_packed = self.pack_input(ibuf_folded, ind=i) diff --git a/src/finn/qnn-data/templates/driver/validate.py b/src/finn/qnn-data/templates/driver/validate.py index 1b29d4342c..c8bc1c009d 100644 --- a/src/finn/qnn-data/templates/driver/validate.py +++ b/src/finn/qnn-data/templates/driver/validate.py @@ -38,9 +38,7 @@ parser.add_argument( "--batchsize", help="number of samples for inference", type=int, default=100 ) - parser.add_argument( - "--dataset", help="dataset to use (mnist of cifar10)", required=True - ) + parser.add_argument("--dataset", help="dataset to use (mnist of cifar10)", required=True) parser.add_argument( "--platform", help="Target platform: zynq-iodma alveo", default="zynq-iodma" ) diff --git a/src/finn/transformation/fpgadataflow/annotate_resources.py b/src/finn/transformation/fpgadataflow/annotate_resources.py index 0cc4234c8c..bb5637f7d3 100644 --- a/src/finn/transformation/fpgadataflow/annotate_resources.py +++ b/src/finn/transformation/fpgadataflow/annotate_resources.py @@ -76,9 +76,7 @@ def apply(self, model): # recurse into model to manually annotate per-layer resources sdp_model_filename = getCustomOp(node).get_nodeattr("model") sdp_model = ModelWrapper(sdp_model_filename) - sdp_model = sdp_model.transform( - AnnotateResources(self.mode, self.res_dict) - ) + sdp_model = sdp_model.transform(AnnotateResources(self.mode, self.res_dict)) sdp_dict = sdp_model.get_metadata_prop("res_total_" + self.mode) sdp_dict = eval(sdp_dict) # save transformed model diff --git a/src/finn/transformation/fpgadataflow/cleanup.py b/src/finn/transformation/fpgadataflow/cleanup.py index 1d0efaf4bb..398580c48e 100644 --- a/src/finn/transformation/fpgadataflow/cleanup.py +++ b/src/finn/transformation/fpgadataflow/cleanup.py @@ -79,7 +79,5 @@ def apply(self, model): except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (model, False) diff --git a/src/finn/transformation/fpgadataflow/compile_cppsim.py b/src/finn/transformation/fpgadataflow/compile_cppsim.py index da337caa62..e93a8ec307 100644 --- a/src/finn/transformation/fpgadataflow/compile_cppsim.py +++ b/src/finn/transformation/fpgadataflow/compile_cppsim.py @@ -70,7 +70,5 @@ def applyNodeLocal(self, node): in node attribute "executable_path".""" except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (node, False) diff --git a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py index fcfe9e7727..ef02453498 100644 --- a/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py +++ b/src/finn/transformation/fpgadataflow/convert_to_hls_layers.py @@ -61,9 +61,7 @@ def apply(self, model): i2c_out_shape = model.get_tensor_shape(i2c_output) dt = model.get_tensor_datatype(i2c_input) if not dt.is_integer(): - warnings.warn( - "%s : Input is not int. Can't infer ConvInpGen." % n.name - ) + warnings.warn("%s : Input is not int. Can't infer ConvInpGen." 
% n.name) continue i2c_inst = getCustomOp(n) stride_h, stride_w = i2c_inst.get_nodeattr("stride") @@ -92,8 +90,7 @@ def apply(self, model): # assert dt.allowed(pad_val),"""FMPadding_Batch DataType # must support pad_val""" assert pad_val == 0, ( - "%s : FMPadding_Batch doesn't currently support pad_val!= 0" - % n.name + "%s : FMPadding_Batch doesn't currently support pad_val!= 0" % n.name ) odim_padding_h = ifm_dim_h + pad_h @@ -113,9 +110,7 @@ def apply(self, model): ConvInpGen_idim_h = odim_padding_h ConvInpGen_idim_w = odim_padding_w - padding_optype = ( - "FMPadding_rtl" if self.use_rtl_variant else "FMPadding_Batch" - ) + padding_optype = "FMPadding_rtl" if self.use_rtl_variant else "FMPadding_Batch" padding_node = helper.make_node( padding_optype, @@ -167,13 +162,9 @@ def apply(self, model): if (stride_h > 1 or stride_w > 1) and is_kernel_pointwise: downsample_1D = (ifm_dim_h == 1) or (ifm_dim_w == 1) is1D_unitx = ifm_dim_w == 1 - downsample_2D = ( - (not downsample_1D) and is_square_image and is_equal_stride - ) + downsample_2D = (not downsample_1D) and is_square_image and is_equal_stride if not (downsample_1D or downsample_2D): - warnings.warn( - f"Couldn't infer Downsample from {n.name},check config." - ) + warnings.warn(f"Couldn't infer Downsample from {n.name},check config.") continue ConvInpGen_idim = max(ConvInpGen_idim_h, ConvInpGen_idim_w) stride = max(stride_h, stride_w) @@ -196,9 +187,7 @@ def apply(self, model): graph.node.insert(ConvInpGen_node_idx, ConvInpGen_node) else: # create equivalent ConvolutionInputGenerator node - if ( - is_square_image and is_square_kernel - ): # square images and square kernels + if is_square_image and is_square_kernel: # square images and square kernels assert is_equal_stride, ( """%s: Non-equal strides along different axes is not supported for (non-)square convolutions""" @@ -290,15 +279,13 @@ def apply(self, model): dt = model.get_tensor_datatype(n.input[0]) if not dt.is_integer(): warnings.warn( - "%s: Input not int. Can't infer UpsampleNearestNeighbour." - % n.name + "%s: Input not int. Can't infer UpsampleNearestNeighbour." % n.name ) continue if model.get_tensor_layout(n.input[0]) != DataLayout.NHWC: warnings.warn( - "%s: Input not NHWC. Can't infer UpsampleNearestNeighbour." - % n.name + "%s: Input not NHWC. Can't infer UpsampleNearestNeighbour." % n.name ) continue @@ -319,8 +306,7 @@ def apply(self, model): is_scale_square_2d = scales[1] == scales[2] is_scale_1d = scales[1] > 1 and scales[2] == 1 assert is_scale_square_2d or is_scale_1d, ( - "%s: Upsampling only supported for 1D H, or 2D square scaling" - % n.name + "%s: Upsampling only supported for 1D H, or 2D square scaling" % n.name ) assert scales[0] == scales[3] == 1, ( n.name + ": Upsampling is only supported for scales with " @@ -334,8 +320,7 @@ def apply(self, model): is_shape_1d = in_shape[1] > 1 and in_shape[2] == 1 assert is_shape_square_2d or is_shape_1d, ( - "%s: Upsampling is only supported for 1D H or 2D square inputs." - % n.name + "%s: Upsampling is only supported for 1D H or 2D square inputs." 
% n.name ) # Extract information for HLS node @@ -538,9 +523,7 @@ def apply(self, model): elif node.op_type == "QuantAvgPool2d": assert odt.is_integer(), """Output data type for QuantAvgPool2d needs to be integer""" - assert all( - x == 0 for x in pad - ), "Padding is not supported for QuantAvgPool2d" + assert all(x == 0 for x in pad), "Padding is not supported for QuantAvgPool2d" inst = getCustomOp(node) pool_fxn = "QuantAvgPool" pool_size_param = inst.get_shifts() @@ -548,9 +531,7 @@ def apply(self, model): else: raise Exception( - "pad_value and pool_fxn not configured for {}".format( - node.op_type - ) + "pad_value and pool_fxn not configured for {}".format(node.op_type) ) # format input tensor @@ -809,17 +790,13 @@ def apply(self, model): scale = getCustomOp(consumer).get_nodeattr("out_scale") actval = getCustomOp(consumer).get_nodeattr("out_bias") assert int(actval) == actval, ( - consumer.name - + ": out_bias must be integer for HLS conversion." + consumer.name + ": out_bias must be integer for HLS conversion." ) actval = int(actval) odt_is_bipolar = odt == DataType["BIPOLAR"] - bipolar_ok = ( - odt_is_bipolar and (scale == 2.0) and (actval == -1) - ) + bipolar_ok = odt_is_bipolar and (scale == 2.0) and (actval == -1) assert scale == 1.0 or bipolar_ok, ( - consumer.name - + ": out_scale=1 or bipolar output needed for conversion." + consumer.name + ": out_scale=1 or bipolar output needed for conversion." ) assert (not odt.signed()) or (actval < 0), ( consumer.name + ": Signed output requres actval < 0" @@ -909,10 +886,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "MatMul" - and model.get_tensor_sparsity(n.input[1]) is not None - ): + if n.op_type == "MatMul" and model.get_tensor_sparsity(n.input[1]) is not None: sparsity = model.get_tensor_sparsity(n.input[1]) try: k_h, k_w = sparsity["dw"]["kernel_shape"] @@ -971,13 +945,11 @@ def apply(self, model): odt = model.get_tensor_datatype(mt_output) scale = getCustomOp(consumer).get_nodeattr("out_scale") assert scale == 1.0, ( - consumer.name - + ": out_scale must be equal to 1.0 for HLS conversion." + consumer.name + ": out_scale must be equal to 1.0 for HLS conversion." ) actval = getCustomOp(consumer).get_nodeattr("out_bias") assert int(actval) == actval, ( - consumer.name - + ": out_bias must be integer for HLS conversion." + consumer.name + ": out_bias must be integer for HLS conversion." ) actval = int(actval) assert (not odt.signed()) or (actval < 0), ( @@ -1093,13 +1065,11 @@ def apply(self, model): odt = model.get_tensor_datatype(thl_output) scale = getCustomOp(node).get_nodeattr("out_scale") assert scale == 1.0, ( - node.name - + ": MultiThreshold out_scale must be 1 for HLS conversion." + node.name + ": MultiThreshold out_scale must be 1 for HLS conversion." ) actval = getCustomOp(node).get_nodeattr("out_bias") assert int(actval) == actval, ( - node.name - + ": MultiThreshold out_bias must be integer for HLS conversion." + node.name + ": MultiThreshold out_bias must be integer for HLS conversion." 
) actval = int(actval) assert (not odt.signed()) or (actval < 0), ( @@ -1369,9 +1339,7 @@ def apply(self, model): # check if the shape of initializer is compatible ll_cinit_shape = list(ll_cinit.shape) if np.prod(ll_cinit_shape) == 1: - warnings.warn( - "Broadcasting " + str(node.op_type) + "(" + node.name + ")" - ) + warnings.warn("Broadcasting " + str(node.op_type) + "(" + node.name + ")") ll_cinit = np.full((ch), ll_cinit.flatten()[0]) elif np.prod(ll_cinit_shape) != ch or ll_cinit_shape[ch_index] != ch: # parameter shape not compatible with Channelwise_batch @@ -1680,9 +1648,7 @@ def apply(self, model): dt0 = model.get_tensor_datatype(node.input[0]) if dt0 is None: continue - dt_coherent = all( - [model.get_tensor_datatype(x) == dt0 for x in node.input] - ) + dt_coherent = all([model.get_tensor_datatype(x) == dt0 for x in node.input]) if not dt_coherent: continue # skip conversion if any inputs are static diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index ef1afb95ca..6e40f39687 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -86,9 +86,7 @@ class CreateStitchedIP(Transformation): The packaged block design IP can be found under the ip subdirectory. """ - def __init__( - self, fpgapart, clk_ns, ip_name="finn_design", vitis=False, signature=[] - ): + def __init__(self, fpgapart, clk_ns, ip_name="finn_design", vitis=False, signature=[]): super().__init__() self.fpgapart = fpgapart self.clk_ns = clk_ns @@ -121,17 +119,13 @@ def connect_clk_rst(self, node): # make clock and reset external, if they aren't already if not self.clock_reset_are_external: self.connect_cmds.append( - "make_bd_pins_external [get_bd_pins %s/%s]" - % (inst_name, clock_intf_name) + "make_bd_pins_external [get_bd_pins %s/%s]" % (inst_name, clock_intf_name) ) self.connect_cmds.append("set_property name ap_clk [get_bd_ports ap_clk_0]") self.connect_cmds.append( - "make_bd_pins_external [get_bd_pins %s/%s]" - % (inst_name, reset_intf_name) - ) - self.connect_cmds.append( - "set_property name ap_rst_n [get_bd_ports ap_rst_n_0]" + "make_bd_pins_external [get_bd_pins %s/%s]" % (inst_name, reset_intf_name) ) + self.connect_cmds.append("set_property name ap_rst_n [get_bd_ports ap_rst_n_0]") self.clock_reset_are_external = True self.intf_names["clk"] = ["ap_clk"] self.intf_names["rst"] = ["ap_rst_n"] @@ -172,13 +166,9 @@ def connect_axi(self, node): ) self.connect_cmds.append("assign_bd_address") seg_name = "%s/Data_m_axi_gmem/SEG_%s_Reg" % (inst_name, ext_if_name) - self.connect_cmds.append( - "set_property offset 0 [get_bd_addr_segs {%s}]" % (seg_name) - ) + self.connect_cmds.append("set_property offset 0 [get_bd_addr_segs {%s}]" % (seg_name)) # TODO should propagate this information from the node instead of 4G - self.connect_cmds.append( - "set_property range 4G [get_bd_addr_segs {%s}]" % (seg_name) - ) + self.connect_cmds.append("set_property range 4G [get_bd_addr_segs {%s}]" % (seg_name)) self.intf_names["aximm"] = [(ext_if_name, aximm_intf_name[0][1])] self.has_aximm = True @@ -215,8 +205,7 @@ def connect_s_axis_external(self, node, idx=None): continue input_intf_name = input_intf_names[i][0] self.connect_cmds.append( - "make_bd_intf_pins_external [get_bd_intf_pins %s/%s]" - % (inst_name, input_intf_name) + "make_bd_intf_pins_external [get_bd_intf_pins %s/%s]" % (inst_name, input_intf_name) ) self.connect_cmds.append( "set_property name s_axis_%d 
[get_bd_intf_ports %s_0]" @@ -236,12 +225,10 @@ def connect_ap_none_external(self, node): for i in range(len(input_intf_names)): input_intf_name = input_intf_names[i] self.connect_cmds.append( - "make_bd_pins_external [get_bd_pins %s/%s]" - % (inst_name, input_intf_name) + "make_bd_pins_external [get_bd_pins %s/%s]" % (inst_name, input_intf_name) ) self.connect_cmds.append( - "set_property name %s [get_bd_ports %s_0]" - % (input_intf_name, input_intf_name) + "set_property name %s [get_bd_ports %s_0]" % (input_intf_name, input_intf_name) ) def insert_signature(self, checksum_count): @@ -267,12 +254,10 @@ def insert_signature(self, checksum_count): ) # set clk and reset self.connect_cmds.append( - "connect_bd_net [get_bd_ports ap_clk] [get_bd_pins %s/ap_clk]" - % signature_name + "connect_bd_net [get_bd_ports ap_clk] [get_bd_pins %s/ap_clk]" % signature_name ) self.connect_cmds.append( - "connect_bd_net [get_bd_ports ap_rst_n] [get_bd_pins %s/ap_rst_n]" - % signature_name + "connect_bd_net [get_bd_ports ap_rst_n] [get_bd_pins %s/ap_rst_n]" % signature_name ) fclk_mhz = 1 / (self.clk_ns * 0.001) fclk_hz = fclk_mhz * 1000000 @@ -290,9 +275,7 @@ def insert_signature(self, checksum_count): self.connect_cmds.append( "make_bd_intf_pins_external [get_bd_intf_pins %s/s_axi]" % signature_name ) - self.connect_cmds.append( - "set_property name s_axilite_info [get_bd_intf_ports s_axi_0]" - ) + self.connect_cmds.append("set_property name s_axilite_info [get_bd_intf_ports s_axi_0]") self.connect_cmds.append("assign_bd_address") def apply(self, model): @@ -320,9 +303,7 @@ def apply(self, model): ) for node in model.graph.node: # ensure that all nodes are fpgadataflow, and that IPs are generated - assert is_fpgadataflow_node( - node - ), "All nodes must be FINN fpgadataflow nodes." + assert is_fpgadataflow_node(node), "All nodes must be FINN fpgadataflow nodes." node_inst = getCustomOp(node) ip_dir_value = node_inst.get_nodeattr("ip_path") assert os.path.isdir(ip_dir_value), "IP generation directory doesn't exist." 
@@ -337,12 +318,10 @@ def apply(self, model): if producer is None: continue j = list(producer.output).index(node.input[i]) - src_intf_name = getCustomOp( - producer - ).get_verilog_top_module_intf_names()["m_axis"][j][0] - dst_intf_name = node_inst.get_verilog_top_module_intf_names()[ - "s_axis" - ][i][0] + src_intf_name = getCustomOp(producer).get_verilog_top_module_intf_names()[ + "m_axis" + ][j][0] + dst_intf_name = node_inst.get_verilog_top_module_intf_names()["s_axis"][i][0] self.connect_cmds.append( "connect_bd_intf_net [get_bd_intf_pins %s/%s] " "[get_bd_intf_pins %s/%s]" @@ -382,8 +361,7 @@ def apply(self, model): tcl = [] # create vivado project tcl.append( - "create_project %s %s -part %s" - % (prjname, vivado_stitch_proj_dir, self.fpgapart) + "create_project %s %s -part %s" % (prjname, vivado_stitch_proj_dir, self.fpgapart) ) # no warnings on long module names tcl.append("set_msg_config -id {[BD 41-1753]} -suppress") @@ -399,9 +377,7 @@ def apply(self, model): fclk_mhz = 1 / (self.clk_ns * 0.001) fclk_hz = fclk_mhz * 1000000 model.set_metadata_prop("clk_ns", str(self.clk_ns)) - tcl.append( - "set_property CONFIG.FREQ_HZ %d [get_bd_ports /ap_clk]" % round(fclk_hz) - ) + tcl.append("set_property CONFIG.FREQ_HZ %d [get_bd_ports /ap_clk]" % round(fclk_hz)) tcl.append("validate_bd_design") tcl.append("save_bd_design") # create wrapper hdl (for rtlsim later on) @@ -419,8 +395,7 @@ def apply(self, model): # synthesize to DCP and export stub, DCP and constraints if self.vitis: tcl.append( - "set_property SYNTH_CHECKPOINT_MODE Hierarchical [ get_files %s ]" - % bd_filename + "set_property SYNTH_CHECKPOINT_MODE Hierarchical [ get_files %s ]" % bd_filename ) tcl.append( "set_property -name {STEPS.SYNTH_DESIGN.ARGS.MORE OPTIONS} " @@ -472,16 +447,9 @@ def apply(self, model): # if targeting Vitis, add some properties to the IP if self.vitis: # replace source code with dcp - tcl.append( - "set_property sdx_kernel true [ipx::find_open_core %s]" % block_vlnv - ) - tcl.append( - "set_property sdx_kernel_type rtl [ipx::find_open_core %s]" % block_vlnv - ) - tcl.append( - "set_property supported_families { } [ipx::find_open_core %s]" - % block_vlnv - ) + tcl.append("set_property sdx_kernel true [ipx::find_open_core %s]" % block_vlnv) + tcl.append("set_property sdx_kernel_type rtl [ipx::find_open_core %s]" % block_vlnv) + tcl.append("set_property supported_families { } [ipx::find_open_core %s]" % block_vlnv) tcl.append( "set_property xpm_libraries {XPM_CDC XPM_MEMORY XPM_FIFO} " "[ipx::find_open_core %s]" % block_vlnv @@ -496,32 +464,20 @@ def apply(self, model): "ipx::remove_all_file " "[ipx::get_file_groups xilinx_anylanguagebehavioralsimulation]" ) - tcl.append( - "ipx::remove_all_file " - "[ipx::get_file_groups xilinx_anylanguagesynthesis]" - ) + tcl.append("ipx::remove_all_file " "[ipx::get_file_groups xilinx_anylanguagesynthesis]") tcl.append( "ipx::remove_file_group " "xilinx_anylanguagebehavioralsimulation [ipx::current_core]" ) - tcl.append( - "ipx::remove_file_group " - "xilinx_anylanguagesynthesis [ipx::current_core]" - ) + tcl.append("ipx::remove_file_group " "xilinx_anylanguagesynthesis [ipx::current_core]") # remove sim and src folders tcl.append("file delete -force %s/ip/sim" % vivado_stitch_proj_dir) tcl.append("file delete -force %s/ip/src" % vivado_stitch_proj_dir) # copy and add DCP, stub, and xdc tcl.append("file mkdir %s/ip/dcp" % vivado_stitch_proj_dir) tcl.append("file mkdir %s/ip/impl" % vivado_stitch_proj_dir) - tcl.append( - "file copy -force %s.dcp %s/ip/dcp" - % 
(block_name, vivado_stitch_proj_dir) - ) - tcl.append( - "file copy -force %s.xdc %s/ip/impl" - % (block_name, vivado_stitch_proj_dir) - ) + tcl.append("file copy -force %s.dcp %s/ip/dcp" % (block_name, vivado_stitch_proj_dir)) + tcl.append("file copy -force %s.xdc %s/ip/impl" % (block_name, vivado_stitch_proj_dir)) tcl.append("ipx::add_file_group xilinx_implementation [ipx::current_core]") tcl.append( "ipx::add_file impl/%s.xdc [ipx::get_file_groups xilinx_implementation]" @@ -532,16 +488,12 @@ def apply(self, model): "[ipx::get_files impl/%s.xdc " "-of_objects [ipx::get_file_groups xilinx_implementation]]" % block_name ) - tcl.append( - "ipx::add_file_group " "xilinx_synthesischeckpoint [ipx::current_core]" - ) + tcl.append("ipx::add_file_group " "xilinx_synthesischeckpoint [ipx::current_core]") tcl.append( "ipx::add_file dcp/%s.dcp " "[ipx::get_file_groups xilinx_synthesischeckpoint]" % block_name ) - tcl.append( - "ipx::add_file_group xilinx_simulationcheckpoint [ipx::current_core]" - ) + tcl.append("ipx::add_file_group xilinx_simulationcheckpoint [ipx::current_core]") tcl.append( "ipx::add_file dcp/%s.dcp " "[ipx::get_file_groups xilinx_simulationcheckpoint]" % block_name diff --git a/src/finn/transformation/fpgadataflow/derive_characteristic.py b/src/finn/transformation/fpgadataflow/derive_characteristic.py index 67eb96995e..dc660f5fba 100644 --- a/src/finn/transformation/fpgadataflow/derive_characteristic.py +++ b/src/finn/transformation/fpgadataflow/derive_characteristic.py @@ -65,9 +65,7 @@ def applyNodeLocal(self, node): inst.derive_characteristic_fxns(period=self.period) except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (node, False) def apply(self, model: ModelWrapper): @@ -103,24 +101,16 @@ def apply(self, model: ModelWrapper): # for DuplicateStreams, use comp_branch_first's input characterization # for AddStreams, use comp_branch_last's output characterization period = comp_branch_first.get_nodeattr("io_chrc_period") - comp_branch_first_f = comp_branch_first.get_nodeattr("io_characteristic")[ - : 2 * period - ] - comp_branch_last_f = comp_branch_last.get_nodeattr("io_characteristic")[ - 2 * period : - ] + comp_branch_first_f = comp_branch_first.get_nodeattr("io_characteristic")[: 2 * period] + comp_branch_last_f = comp_branch_last.get_nodeattr("io_characteristic")[2 * period :] ds_node_inst = registry.getCustomOp(ds_node) addstrm_node_inst = registry.getCustomOp(addstrm_node) ds_node_inst.set_nodeattr("io_chrc_period", period) ds_node_inst.set_nodeattr("io_characteristic", comp_branch_first_f * 2) addstrm_node_inst.set_nodeattr("io_chrc_period", period) addstrm_node_inst.set_nodeattr("io_characteristic", comp_branch_last_f * 2) - warnings.warn( - f"Set {ds_node.name} chrc. from {comp_branch_first.onnx_node.name}" - ) - warnings.warn( - f"Set {addstrm_node.name} chrc. from {comp_branch_last.onnx_node.name}" - ) + warnings.warn(f"Set {ds_node.name} chrc. from {comp_branch_first.onnx_node.name}") + warnings.warn(f"Set {addstrm_node.name} chrc. 
from {comp_branch_last.onnx_node.name}") return (model, run_again) @@ -147,9 +137,7 @@ def applyNodeLocal(self, node): assert op_type != "StreamingFIFO", "Found existing FIFOs" period = prod.get_nodeattr("io_chrc_period") prod_chrc = prod.get_nodeattr("io_chrc_out")[0] - assert ( - len(prod_chrc) == 2 * period - ), "Found unexpected characterization attribute" + assert len(prod_chrc) == 2 * period, "Found unexpected characterization attribute" if any([x > 2 for x in prod.get_nodeattr("outFIFODepths")]): # FIFO depth already set, can skip this node return (node, False) @@ -186,14 +174,12 @@ def applyNodeLocal(self, node): # finally, check node inputs to ensure FIFOs are added to # any top-level inputs (at least self.io_fifo_depth deep) in_fifo_depths = prod.get_nodeattr("inFIFODepths") - for (i, input_name) in enumerate(node.input): + for i, input_name in enumerate(node.input): if input_name in [x.name for x in model.graph.input]: in_fifo_depths[i] = max(self.io_fifo_depth, in_fifo_depths[i]) prod.set_nodeattr("inFIFODepths", in_fifo_depths) except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (node, False) diff --git a/src/finn/transformation/fpgadataflow/externalize_params.py b/src/finn/transformation/fpgadataflow/externalize_params.py index 732b82c675..633db0c553 100644 --- a/src/finn/transformation/fpgadataflow/externalize_params.py +++ b/src/finn/transformation/fpgadataflow/externalize_params.py @@ -64,11 +64,7 @@ def filter_fc_extw(x): assert iodma_init is not None # remove output-side initializer to get correct dataflow partitioning model.graph.initializer.remove( - [ - x - for x in model.graph.initializer - if x.name == extw_tensor_name_out - ][0] + [x for x in model.graph.initializer if x.name == extw_tensor_name_out][0] ) graph_modified = True diff --git a/src/finn/transformation/fpgadataflow/floorplan.py b/src/finn/transformation/fpgadataflow/floorplan.py index 549b94d9f2..d43aabcf55 100644 --- a/src/finn/transformation/fpgadataflow/floorplan.py +++ b/src/finn/transformation/fpgadataflow/floorplan.py @@ -56,7 +56,6 @@ def __init__(self, floorplan=None): self.user_floorplan = floorplan def apply(self, model): - # read in a user-specified floorplan or generate a default one if self.user_floorplan is None: self.user_floorplan = model.analysis(floorplan_params) @@ -129,9 +128,7 @@ def apply(self, model): non_dma_nodes, ) ) - non_dma_nodes = list( - filter(lambda x: x not in dyn_tlastmarker_nodes, non_dma_nodes) - ) + non_dma_nodes = list(filter(lambda x: x not in dyn_tlastmarker_nodes, non_dma_nodes)) for node in dma_nodes: node_inst = getCustomOp(node) @@ -166,9 +163,7 @@ def apply(self, model): pre_inst = getCustomOp(pre_node) pre_slr = pre_inst.get_nodeattr("slr") if node_slr == pre_slr: - axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()[ - "axilite" - ] + axilite_intf_name = pre_inst.get_verilog_top_module_intf_names()["axilite"] if len(axilite_intf_name) != 0: node_inst.set_nodeattr("partition_id", partition_cnt) partition_cnt += 1 diff --git a/src/finn/transformation/fpgadataflow/hlssynth_ip.py b/src/finn/transformation/fpgadataflow/hlssynth_ip.py index c091dbd5ed..08069fa00f 100644 --- a/src/finn/transformation/fpgadataflow/hlssynth_ip.py +++ b/src/finn/transformation/fpgadataflow/hlssynth_ip.py @@ -64,11 +64,9 @@ def applyNodeLocal(self, node): ), """Node attribute "code_gen_dir_ipgen" is 
empty. Please run transformation PrepareIP first.""" - if not os.path.isdir( - inst.get_nodeattr("ipgen_path") - ) or not inst.get_nodeattr("code_gen_dir_ipgen") in inst.get_nodeattr( - "ipgen_path" - ): + if not os.path.isdir(inst.get_nodeattr("ipgen_path")) or not inst.get_nodeattr( + "code_gen_dir_ipgen" + ) in inst.get_nodeattr("ipgen_path"): # call the compilation function for this node inst.ipgen_singlenode_code() else: @@ -81,7 +79,5 @@ def applyNodeLocal(self, node): is empty.""" except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (node, False) diff --git a/src/finn/transformation/fpgadataflow/insert_dwc.py b/src/finn/transformation/fpgadataflow/insert_dwc.py index cff8b60267..140d154b1a 100644 --- a/src/finn/transformation/fpgadataflow/insert_dwc.py +++ b/src/finn/transformation/fpgadataflow/insert_dwc.py @@ -48,8 +48,7 @@ def apply(self, model): if consumers == []: continue assert len(consumers) == 1, ( - n.name - + ": HLS node with fan-out higher than 1 cannot be stitched" + n.name + ": HLS node with fan-out higher than 1 cannot be stitched" ) consumer = consumers[0] if _suitable_node(consumer) is True: diff --git a/src/finn/transformation/fpgadataflow/insert_fifo.py b/src/finn/transformation/fpgadataflow/insert_fifo.py index bfeee95e9b..f57c9e41b7 100644 --- a/src/finn/transformation/fpgadataflow/insert_fifo.py +++ b/src/finn/transformation/fpgadataflow/insert_fifo.py @@ -85,9 +85,7 @@ class InsertFIFO(Transformation): The other node attributes necessary to create a FIFO node are taken from the node the FIFO node is inserted after: 'folded_shape' and 'dtype'""" - def __init__( - self, create_shallow_fifos=False, max_qsrl_depth=None, vivado_ram_style="auto" - ): + def __init__(self, create_shallow_fifos=False, max_qsrl_depth=None, vivado_ram_style="auto"): super().__init__() self.create_shallow_fifos = create_shallow_fifos self.max_qsrl_depth = max_qsrl_depth @@ -151,10 +149,7 @@ def apply(self, model): graph.value_info.append(fifo_output_tensor) model.set_tensor_datatype(fifo_output_tensor.name, dtype) - if ( - self.max_qsrl_depth is None - or fifo_depth <= self.max_qsrl_depth - ): + if self.max_qsrl_depth is None or fifo_depth <= self.max_qsrl_depth: impl_style = "rtl" else: impl_style = "vivado" @@ -187,10 +182,7 @@ def apply(self, model): for graph_in_name in graph_in_names: first_node = model.find_consumer(graph_in_name) # insert FIFO as first node, except when first node is DMA - if ( - first_node.op_type != "StreamingFIFO" - and first_node.op_type != "IODMA" - ): + if first_node.op_type != "StreamingFIFO" and first_node.op_type != "IODMA": inp_ind = list(first_node.input).index(graph_in_name) n_input = first_node.input[inp_ind] n0 = getCustomOp(first_node) @@ -242,10 +234,7 @@ def apply(self, model): graph_out_names = [x.name for x in model.graph.output] for graph_out_name in graph_out_names: final_node = model.find_producer(graph_out_name) - if ( - final_node.op_type != "StreamingFIFO" - and final_node.op_type != "IODMA" - ): + if final_node.op_type != "StreamingFIFO" and final_node.op_type != "IODMA": assert ( final_node.op_type != "TLastMarker" ), """Insert tlast marker should be done diff --git a/src/finn/transformation/fpgadataflow/insert_hook.py b/src/finn/transformation/fpgadataflow/insert_hook.py index 21ec3f049f..14989efa75 100644 --- a/src/finn/transformation/fpgadataflow/insert_hook.py +++ 
b/src/finn/transformation/fpgadataflow/insert_hook.py @@ -74,8 +74,7 @@ def apply(self, model): for output_name in n.output: consumers = model.find_consumers(output_name) assert len(consumers) <= 1, ( - n.name - + ": HLS node with fan-out higher than 1 cannot be stitched" + n.name + ": HLS node with fan-out higher than 1 cannot be stitched" ) n0 = getCustomOp(n) n0_hook = n0.get_nodeattr("output_hook") diff --git a/src/finn/transformation/fpgadataflow/insert_iodma.py b/src/finn/transformation/fpgadataflow/insert_iodma.py index 28bcd9598a..90700d5726 100644 --- a/src/finn/transformation/fpgadataflow/insert_iodma.py +++ b/src/finn/transformation/fpgadataflow/insert_iodma.py @@ -51,9 +51,7 @@ def __init__( self.insert_input = insert_input self.insert_output = insert_output self.insert_extmemw = insert_extmemw - assert ( - 2 ** math.log2(max_intfwidth) == max_intfwidth - ), "max_intfwidth must be a power of 2" + assert 2 ** math.log2(max_intfwidth) == max_intfwidth, "max_intfwidth must be a power of 2" self.max_intfwidth = max_intfwidth def get_mem_init(self, weights, pe, simd): @@ -122,13 +120,9 @@ def apply(self, model): padded_instream_width = first_node_inst.get_instream_width_padded() padded_instream_bytes = padded_instream_width // 8 # determine the feasible interface width - transfer_bits = padded_instream_width * np.prod( - in_folded_shape[:-1] - ) + transfer_bits = padded_instream_width * np.prod(in_folded_shape[:-1]) intfwidth = math.gcd(transfer_bits, self.max_intfwidth) - assert ( - intfwidth % 8 == 0 - ), "No feasible interface width for transfer size" + assert intfwidth % 8 == 0, "No feasible interface width for transfer size" # make new buffer first_node_in = oh.make_tensor_value_info( model.make_new_valueinfo_name(), TensorProto.FLOAT, in_shape @@ -169,18 +163,12 @@ def apply(self, model): # take advantage of AXI stream width padding for DMA alignment # (AXI streams are always padded to 8 bits) # this is the width of stream input to DMA - padded_outstream_width = ( - final_node_inst.get_outstream_width_padded() - ) + padded_outstream_width = final_node_inst.get_outstream_width_padded() padded_outstream_bytes = padded_outstream_width // 8 # determine the feasible interface width - transfer_bits = padded_outstream_width * np.prod( - out_folded_shape[:-1] - ) + transfer_bits = padded_outstream_width * np.prod(out_folded_shape[:-1]) intfwidth = math.gcd(transfer_bits, self.max_intfwidth) - assert ( - intfwidth % 8 == 0 - ), "No feasible interface width for transfer size" + assert intfwidth % 8 == 0, "No feasible interface width for transfer size" # make new buffer final_node_out = oh.make_tensor_value_info( model.make_new_valueinfo_name(), TensorProto.FLOAT, out_shape @@ -211,8 +199,7 @@ def apply(self, model): # attached IODMA fc_extw_nodes = list( filter( - lambda x: x.op_type - in ["MatrixVectorActivation", "VectorVectorActivation"] + lambda x: x.op_type in ["MatrixVectorActivation", "VectorVectorActivation"] and getCustomOp(x).get_nodeattr("mem_mode") == "external" and model.find_producer(x.input[1]) is None, all_nodes, @@ -226,9 +213,7 @@ def apply(self, model): # determine the feasible interface width transfer_bits = np.prod(w_shape) * w_dtype.bitwidth() intfwidth = math.gcd(transfer_bits, self.max_intfwidth) - assert ( - intfwidth % 8 == 0 - ), "No feasible interface width for transfer size" + assert intfwidth % 8 == 0, "No feasible interface width for transfer size" # calculate width of stream output from DMA pe = get_by_name(fc_node.attribute, "PE").i simd = 
get_by_name(fc_node.attribute, "SIMD").i diff --git a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py index 1610916eb6..94f0b0eae1 100644 --- a/src/finn/transformation/fpgadataflow/insert_tlastmarker.py +++ b/src/finn/transformation/fpgadataflow/insert_tlastmarker.py @@ -54,10 +54,8 @@ def apply(self, model): graph_modified = False if final_node.op_type != "TLastMarker" and not ( final_node.op_type == "IODMA" - and get_by_name(final_node.attribute, "direction").s.decode("UTF-8") - == "out" + and get_by_name(final_node.attribute, "direction").s.decode("UTF-8") == "out" ): - custom_op = getCustomOp(final_node) num_iters = int(custom_op.get_number_output_values()) stream_width = int(custom_op.get_outstream_width()) @@ -113,18 +111,13 @@ def apply(self, model): # 2. node is either a TLastMarker or an input IODMA if first_node.op_type != "TLastMarker" and not ( first_node.op_type == "IODMA" - and get_by_name(first_node.attribute, "direction").s.decode("UTF-8") - == "in" + and get_by_name(first_node.attribute, "direction").s.decode("UTF-8") == "in" ): - custom_op = getCustomOp(first_node) num_iters = np.prod(custom_op.get_folded_input_shape()[1:-1]) inp_idx = list(first_node.input).index(graph_in_name) if inp_idx > 0: - if ( - first_node.op_type == "MatrixVectorActivation" - and inp_idx == 1 - ): + if first_node.op_type == "MatrixVectorActivation" and inp_idx == 1: stream_width = int(custom_op.get_weightstream_width()) elif first_node.op_type == "AddStreams_Batch" and inp_idx == 1: stream_width = int(custom_op.get_instream_width()) diff --git a/src/finn/transformation/fpgadataflow/make_pynq_driver.py b/src/finn/transformation/fpgadataflow/make_pynq_driver.py index dce98e54a3..5a0e47c130 100644 --- a/src/finn/transformation/fpgadataflow/make_pynq_driver.py +++ b/src/finn/transformation/fpgadataflow/make_pynq_driver.py @@ -56,14 +56,10 @@ def to_external_tensor(init, w_dtype): weight_width = init.shape[1] * w_dtype.bitwidth() weight_width_padded = roundup_to_integer_multiple(weight_width, 4) - hex_init = pack_innermost_dim_as_hex_string( - init, w_dtype, weight_width_padded, prefix="0x" - ) + hex_init = pack_innermost_dim_as_hex_string(init, w_dtype, weight_width_padded, prefix="0x") ext_weight = np.array([], dtype=np.uint8) for line in hex_init: - array_line = [ - x for x in reversed(hexstring2npbytearray(line, remove_prefix="0x")) - ] + array_line = [x for x in reversed(hexstring2npbytearray(line, remove_prefix="0x"))] ext_weight = np.append(ext_weight, array_line) return ext_weight @@ -88,7 +84,6 @@ def __init__(self, platform): self.platform = platform def apply(self, model): - # create a temporary folder for the generated driver pynq_driver_dir = make_build_dir(prefix="pynq_driver_") model.set_metadata_prop("pynq_driver_dir", pynq_driver_dir) @@ -115,9 +110,7 @@ def apply(self, model): files_to_copy.append( (qonnx_path + "/core/__init__.py", qonnx_target_path + "/core/__init__.py") ) - files_to_copy.append( - (qonnx_path + "/util/basic.py", qonnx_target_path + "/util/basic.py") - ) + files_to_copy.append((qonnx_path + "/util/basic.py", qonnx_target_path + "/util/basic.py")) files_to_copy.append( (qonnx_path + "/util/__init__.py", qonnx_target_path + "/util/__init__.py") ) @@ -133,7 +126,7 @@ def apply(self, model): finn_target_path + "/util/__init__.py", ) ) - for (src_file, target_file) in files_to_copy: + for src_file, target_file in files_to_copy: shutil.copy(src_file, target_file) # extract input-output shapes from 
the graph # TODO convert this to an analysis pass? @@ -165,13 +158,9 @@ def apply(self, model): first_node = successor_df_model.find_consumer( successor_df_model.graph.input[successor_input_num].name ) - i_tensor_shape_folded = tuple( - getCustomOp(first_node).get_folded_input_shape() - ) + i_tensor_shape_folded = tuple(getCustomOp(first_node).get_folded_input_shape()) # generate dummy folded i/o tensors and their packed versions - i_tensor_dummy_folded = gen_finn_dt_tensor( - i_tensor_dt, i_tensor_shape_folded - ) + i_tensor_dummy_folded = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape_folded) i_tensor_dummy_packed = dpk.finnpy_to_packed_bytearray( i_tensor_dummy_folded, i_tensor_dt ) @@ -201,24 +190,16 @@ def apply(self, model): ), """ Ensure CreateDataflowPartition called before driver creation.""" df_model = ModelWrapper(getCustomOp(o_producer).get_nodeattr("model")) - assert ( - df_model.graph.node[-1].op_type == "IODMA" - ), "Partition must hold output IODMA" + assert df_model.graph.node[-1].op_type == "IODMA", "Partition must hold output IODMA" predecessors = model.find_direct_predecessors(o_producer) - predecessor_output_num = list(predecessors[0].output).index( - o_producer.input[0] - ) + predecessor_output_num = list(predecessors[0].output).index(o_producer.input[0]) predecessor_sdp = getCustomOp(predecessors[0]) predecessor_df_model = ModelWrapper(predecessor_sdp.get_nodeattr("model")) last_node = predecessor_df_model.find_producer( predecessor_df_model.graph.output[predecessor_output_num].name ) - o_tensor_shape_folded = tuple( - getCustomOp(last_node).get_folded_output_shape() - ) - o_tensor_dummy_folded = gen_finn_dt_tensor( - o_tensor_dt, o_tensor_shape_folded - ) + o_tensor_shape_folded = tuple(getCustomOp(last_node).get_folded_output_shape()) + o_tensor_dummy_folded = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape_folded) o_tensor_dummy_packed = dpk.finnpy_to_packed_bytearray( o_tensor_dummy_folded, o_tensor_dt ) @@ -256,17 +237,11 @@ def apply(self, model): assert df_model.graph.node[0].op_type == "IODMA" iodma_node = getCustomOp(df_model.graph.node[0]) if iodma_node.get_nodeattr("burstMode") == "wrap": # input weights dma? 
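As an illustrative aside (not part of the patch itself): the insert_iodma.py hunk above computes a feasible AXI interface width as the gcd of the per-image transfer size and a configured maximum, then asserts byte alignment. The following is a minimal, self-contained sketch of that calculation; the example stream width, folded shape and 512-bit maximum below are assumptions chosen for illustration, not values taken from the patch.

import math
import numpy as np

def feasible_intf_width(stream_width_padded, folded_shape, max_intfwidth=512):
    # total bits moved per image: padded stream width times the number of stream words
    # (example folded shape is assumed; in the patch these come from the node's folded shape)
    transfer_bits = stream_width_padded * int(np.prod(folded_shape[:-1]))
    # largest divisor of the maximum width that also divides the transfer size
    intfwidth = math.gcd(transfer_bits, max_intfwidth)
    assert intfwidth % 8 == 0, "No feasible interface width for transfer size"
    return intfwidth

if __name__ == "__main__":
    # e.g. a stream padded to 32 bits with 1x32x32 stream words per image
    print(feasible_intf_width(32, (1, 32, 32, 3), 512))  # -> 512

Because the gcd divides the total transfer size, the chosen width always moves the image in a whole number of beats, which is why the byte-alignment assert is the only extra check needed.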
- init_tensor = df_model.get_initializer( - iodma_node.onnx_node.input[0] - ) + init_tensor = df_model.get_initializer(iodma_node.onnx_node.input[0]) ext_weight_dma_cnt += 1 - w_dtype = df_model.get_tensor_datatype( - iodma_node.onnx_node.input[0] - ) + w_dtype = df_model.get_tensor_datatype(iodma_node.onnx_node.input[0]) init_external_tensor = to_external_tensor(init_tensor, w_dtype) - np.save( - weights_dir + "/" + idma_name + ".npy", init_external_tensor - ) + np.save(weights_dir + "/" + idma_name + ".npy", init_external_tensor) idma_idx += 1 # fill in the driver template @@ -293,9 +268,7 @@ def apply(self, model): # add validate.py to run full top-1 test (only for suitable networks) validate_py = pynq_driver_dir + "/validate.py" - validate_template = pk.resource_filename( - "finn.qnn-data", "templates/driver/validate.py" - ) + validate_template = pk.resource_filename("finn.qnn-data", "templates/driver/validate.py") shutil.copy(validate_template, validate_py) # generate weight files for runtime-writable layers @@ -318,9 +291,7 @@ def apply(self, model): rt_layer_ind, node.name, ) - node_inst.make_weight_file( - fcl_w, "decoupled_runtime", w_filename - ) + node_inst.make_weight_file(fcl_w, "decoupled_runtime", w_filename) rt_layer_ind += 1 elif node.op_type == "StreamingDataflowPartition": warnings.warn( diff --git a/src/finn/transformation/fpgadataflow/make_zynq_proj.py b/src/finn/transformation/fpgadataflow/make_zynq_proj.py index f48566326e..989eb62a88 100644 --- a/src/finn/transformation/fpgadataflow/make_zynq_proj.py +++ b/src/finn/transformation/fpgadataflow/make_zynq_proj.py @@ -92,7 +92,6 @@ def __init__(self, platform, enable_debug=False): self.enable_debug = 1 if enable_debug else 0 def apply(self, model): - # create a config file and empty list of xo files config = [] idma_idx = 0 @@ -110,15 +109,12 @@ def apply(self, model): ipstitch_path = kernel_model.get_metadata_prop("vivado_stitch_proj") if ipstitch_path is None or (not os.path.isdir(ipstitch_path)): raise Exception( - "No stitched IPI design found for %s, apply CreateStitchedIP first." - % node.name + "No stitched IPI design found for %s, apply CreateStitchedIP first." % node.name ) vivado_stitch_vlnv = kernel_model.get_metadata_prop("vivado_stitch_vlnv") if vivado_stitch_vlnv is None: - raise Exception( - "No vlnv found for %s, apply CreateStitchedIP first." % node.name - ) + raise Exception("No vlnv found for %s, apply CreateStitchedIP first." 
% node.name) ip_dirs = ["list"] ip_dirs += collect_ip_dirs(kernel_model, ipstitch_path) @@ -170,9 +166,7 @@ def apply(self, model): "[get_bd_intf_pins smartconnect_0/S%02d_AXI]" % (instance_names[node.name], aximm_idx) ) - assert ( - len(ifnames["axilite"]) == 1 - ), "Must have 1 AXI lite interface on IODMA nodes" + assert len(ifnames["axilite"]) == 1, "Must have 1 AXI lite interface on IODMA nodes" axilite_intf_name = ifnames["axilite"][0] assert axilite_intf_name is not None config.append( @@ -182,8 +176,7 @@ def apply(self, model): ) # assign_bd_address with appropriate range/offset config.append( - "assign_axi_addr_proc %s/%s" - % (instance_names[node.name], axilite_intf_name) + "assign_axi_addr_proc %s/%s" % (instance_names[node.name], axilite_intf_name) ) aximm_idx += 1 @@ -269,23 +262,18 @@ def apply(self, model): bash_command = ["bash", synth_project_sh] process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE) process_compile.communicate() - bitfile_name = ( - vivado_pynq_proj_dir + "/finn_zynq_link.runs/impl_1/top_wrapper.bit" - ) + bitfile_name = vivado_pynq_proj_dir + "/finn_zynq_link.runs/impl_1/top_wrapper.bit" if not os.path.isfile(bitfile_name): raise Exception( - "Synthesis failed, no bitfile found. Check logs under %s" - % vivado_pynq_proj_dir + "Synthesis failed, no bitfile found. Check logs under %s" % vivado_pynq_proj_dir ) deploy_bitfile_name = vivado_pynq_proj_dir + "/resizer.bit" copy(bitfile_name, deploy_bitfile_name) # set bitfile attribute model.set_metadata_prop("bitfile", deploy_bitfile_name) hwh_name_alts = [ - vivado_pynq_proj_dir - + "/finn_zynq_link.srcs/sources_1/bd/top/hw_handoff/top.hwh", - vivado_pynq_proj_dir - + "/finn_zynq_link.gen/sources_1/bd/top/hw_handoff/top.hwh", + vivado_pynq_proj_dir + "/finn_zynq_link.srcs/sources_1/bd/top/hw_handoff/top.hwh", + vivado_pynq_proj_dir + "/finn_zynq_link.gen/sources_1/bd/top/hw_handoff/top.hwh", ] hwh_name = None for hwh_name_cand in hwh_name_alts: @@ -293,8 +281,7 @@ def apply(self, model): hwh_name = hwh_name_cand if not os.path.isfile(hwh_name): raise Exception( - "Synthesis failed, no bitfile found. Check logs under %s" - % vivado_pynq_proj_dir + "Synthesis failed, no bitfile found. 
Check logs under %s" % vivado_pynq_proj_dir ) deploy_hwh_name = vivado_pynq_proj_dir + "/resizer.hwh" copy(hwh_name, deploy_hwh_name) @@ -350,21 +337,15 @@ def apply(self, model): kernel_model = kernel_model.transform(InsertFIFO()) kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix)) kernel_model.save(dataflow_model_filename) - kernel_model = kernel_model.transform( - PrepareIP(self.fpga_part, self.period_ns) - ) + kernel_model = kernel_model.transform(PrepareIP(self.fpga_part, self.period_ns)) kernel_model = kernel_model.transform(HLSSynthIP()) kernel_model = kernel_model.transform( - CreateStitchedIP( - self.fpga_part, self.period_ns, sdp_node.onnx_node.name, False - ) + CreateStitchedIP(self.fpga_part, self.period_ns, sdp_node.onnx_node.name, False) ) kernel_model.set_metadata_prop("platform", "zynq-iodma") kernel_model.save(dataflow_model_filename) # Assemble design from IPs - model = model.transform( - MakeZYNQProject(self.platform, enable_debug=self.enable_debug) - ) + model = model.transform(MakeZYNQProject(self.platform, enable_debug=self.enable_debug)) # set platform attribute for correct remote execution model.set_metadata_prop("platform", "zynq-iodma") diff --git a/src/finn/transformation/fpgadataflow/prepare_cppsim.py b/src/finn/transformation/fpgadataflow/prepare_cppsim.py index 07021c1e8d..76c3f88310 100644 --- a/src/finn/transformation/fpgadataflow/prepare_cppsim.py +++ b/src/finn/transformation/fpgadataflow/prepare_cppsim.py @@ -49,9 +49,7 @@ def _codegen_single_node(node, model): code_gen_dir = inst.get_nodeattr("code_gen_dir_cppsim") # ensure that there is a directory if code_gen_dir == "" or not os.path.isdir(code_gen_dir): - code_gen_dir = make_build_dir( - prefix="code_gen_cppsim_" + str(node.name) + "_" - ) + code_gen_dir = make_build_dir(prefix="code_gen_cppsim_" + str(node.name) + "_") inst.set_nodeattr("code_gen_dir_cppsim", code_gen_dir) # ensure that there is generated code inside the dir inst.code_generation_cppsim(model) diff --git a/src/finn/transformation/fpgadataflow/prepare_ip.py b/src/finn/transformation/fpgadataflow/prepare_ip.py index 2ebd6310f0..5461bbd77c 100644 --- a/src/finn/transformation/fpgadataflow/prepare_ip.py +++ b/src/finn/transformation/fpgadataflow/prepare_ip.py @@ -47,9 +47,7 @@ def _codegen_single_node(node, model, fpgapart, clk): code_gen_dir = inst.get_nodeattr("code_gen_dir_ipgen") # ensure that there is a directory if code_gen_dir == "" or not os.path.isdir(code_gen_dir): - code_gen_dir = make_build_dir( - prefix="code_gen_ipgen_" + str(node.name) + "_" - ) + code_gen_dir = make_build_dir(prefix="code_gen_ipgen_" + str(node.name) + "_") inst.set_nodeattr("code_gen_dir_ipgen", code_gen_dir) # ensure that there is generated code inside the dir inst.code_generation_ipgen(model, fpgapart, clk) diff --git a/src/finn/transformation/fpgadataflow/prepare_rtlsim.py b/src/finn/transformation/fpgadataflow/prepare_rtlsim.py index 645d86cf14..8ba7cfd965 100644 --- a/src/finn/transformation/fpgadataflow/prepare_rtlsim.py +++ b/src/finn/transformation/fpgadataflow/prepare_rtlsim.py @@ -74,7 +74,5 @@ def applyNodeLocal(self, node): ), "Failed to prepare RTLSim, no rtlsim_so attribute found." except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." 
% op_type) return (node, False) diff --git a/src/finn/transformation/fpgadataflow/set_exec_mode.py b/src/finn/transformation/fpgadataflow/set_exec_mode.py index a08d153cb2..8488b4ef83 100644 --- a/src/finn/transformation/fpgadataflow/set_exec_mode.py +++ b/src/finn/transformation/fpgadataflow/set_exec_mode.py @@ -56,7 +56,5 @@ def apply(self, model): was not successful. Node attribute "exec_mode" is not set""" except KeyError: # exception if op_type is not supported - raise Exception( - "Custom op_type %s is currently not supported." % op_type - ) + raise Exception("Custom op_type %s is currently not supported." % op_type) return (model, False) diff --git a/src/finn/transformation/fpgadataflow/set_fifo_depths.py b/src/finn/transformation/fpgadataflow/set_fifo_depths.py index 35e7b9e6c9..da6099ab9a 100644 --- a/src/finn/transformation/fpgadataflow/set_fifo_depths.py +++ b/src/finn/transformation/fpgadataflow/set_fifo_depths.py @@ -262,9 +262,7 @@ def apply(self, model): modified_fc_nodes = [] for node in model.graph.node: # verify assumptions - assert is_fpgadataflow_node(node), "Found non-fpgadataflow node: " + str( - node - ) + assert is_fpgadataflow_node(node), "Found non-fpgadataflow node: " + str(node) assert node.op_type != "StreamingFIFO", "Found existing StreamingFIFO node" node = getCustomOp(node) ifd = node.get_nodeattr("inFIFODepths") @@ -289,8 +287,7 @@ def apply(self, model): node.set_nodeattr("mem_mode", "decoupled") reset_implementation(node) warnings.warn( - "Changed mem_mode from external to decoupled for " - + node.onnx_node.name + "Changed mem_mode from external to decoupled for " + node.onnx_node.name ) # insert stream infrastructure (DWC/FIFO) @@ -308,9 +305,7 @@ def apply(self, model): node.set_nodeattr("depth_monitor", 1) node.set_nodeattr("impl_style", "rtl") # check depths and fix as necessary - if (self.max_depth is not None) and ( - node.get_nodeattr("depth") != self.max_depth - ): + if (self.max_depth is not None) and (node.get_nodeattr("depth") != self.max_depth): node.set_nodeattr("depth", self.max_depth) # insert FIFOs and do all transformations for RTLsim @@ -373,15 +368,11 @@ def apply(self, model): ncycles = ncycles - 1 if not output_detected: - warnings.warn( - "No output detected, calculated FIFO depths may not be correct" - ) + warnings.warn("No output detected, calculated FIFO depths may not be correct") else: # do rtlsim in C++ for FIFO sizing # determine # inputs for FIFO sizing according to topology type - swg_nodes = [ - x for x in model.graph.node if "ConvolutionInputGenerator" in x.op_type - ] + swg_nodes = [x for x in model.graph.node if "ConvolutionInputGenerator" in x.op_type] if len(swg_nodes) == 0: # MLP, no layer overlap # assuming half the nodes are now FIFOs, use half the # of @@ -443,9 +434,7 @@ def apply(self, model): # handle custom sizing for SWG FIFOs if desired if self.swg_exception: - model = model.transform( - CapConvolutionFIFODepths(max_qsrl_depth=self.max_qsrl_depth) - ) + model = model.transform(CapConvolutionFIFODepths(max_qsrl_depth=self.max_qsrl_depth)) # remove shallow FIFOs model = model.transform(RemoveShallowFIFOs()) @@ -575,9 +564,7 @@ def apply(self, model): if node.op_type == "StreamingFIFO": n_inst = getCustomOp(node) depth = n_inst.get_nodeattr("depth") - cfgs = get_fifo_split_configs( - depth, self.max_qsrl_depth, self.max_vivado_depth - ) + cfgs = get_fifo_split_configs(depth, self.max_qsrl_depth, self.max_vivado_depth) if len(cfgs) > 1: fld_shape = n_inst.get_folded_output_shape() dtype = 
n_inst.get_nodeattr("dataType") diff --git a/src/finn/transformation/fpgadataflow/set_folding.py b/src/finn/transformation/fpgadataflow/set_folding.py index 0a466afe13..eca1053f8f 100644 --- a/src/finn/transformation/fpgadataflow/set_folding.py +++ b/src/finn/transformation/fpgadataflow/set_folding.py @@ -80,9 +80,7 @@ class SetFolding(Transformation): unfolded before SIMD is increased """ - def __init__( - self, target_cycles_per_frame=1000, mvau_wwidth_max=36, two_pass_relaxation=True - ): + def __init__(self, target_cycles_per_frame=1000, mvau_wwidth_max=36, two_pass_relaxation=True): super().__init__() self.target_cycles_per_frame = target_cycles_per_frame self.mvau_wwidth_max = mvau_wwidth_max @@ -142,8 +140,7 @@ def apply(self, model): # finish if target met break if ( - node_inst.get_weight_datatype().bitwidth() - * node_inst.get_nodeattr("SIMD") + node_inst.get_weight_datatype().bitwidth() * node_inst.get_nodeattr("SIMD") > self.mvau_wwidth_max ): # revert if we've gone above width threshold @@ -196,9 +193,7 @@ def apply(self, model): else: raise Exception("Undefined edge case for %s" % op_type) if ksize != 1: # pointwise vvau/pool lack a SWU - raise Exception( - "Expected SWU on DW op input, found " + swu_node.op_type - ) + raise Exception("Expected SWU on DW op input, found " + swu_node.op_type) elif op_type in simd_ops: if op_type.startswith("ConvolutionInputGenerator"): depthwise = node_inst.get_nodeattr("depthwise") @@ -224,9 +219,7 @@ def apply(self, model): max_simd = node_inst.get_nodeattr("NumChannels") self.optimize_attribute_val(node_inst, max_simd, "SIMD") else: - warnings.warn( - "SetFolding doesn't know how to handle op_type " + op_type - ) + warnings.warn("SetFolding doesn't know how to handle op_type " + op_type) model = model.transform(GiveUniqueNodeNames()) model = model.transform(AnnotateCycles()) diff --git a/src/finn/transformation/fpgadataflow/vitis_build.py b/src/finn/transformation/fpgadataflow/vitis_build.py index e0a5666000..2fc0b2f3bb 100644 --- a/src/finn/transformation/fpgadataflow/vitis_build.py +++ b/src/finn/transformation/fpgadataflow/vitis_build.py @@ -56,9 +56,7 @@ def _check_vitis_envvars(): assert "VITIS_PATH" in os.environ, "VITIS_PATH must be set for Vitis" - assert ( - "PLATFORM_REPO_PATHS" in os.environ - ), "PLATFORM_REPO_PATHS must be set for Vitis" + assert "PLATFORM_REPO_PATHS" in os.environ, "PLATFORM_REPO_PATHS must be set for Vitis" assert ( "XILINX_XRT" in os.environ ), "XILINX_XRT must be set for Vitis, ensure the XRT env is sourced" @@ -97,9 +95,7 @@ def apply(self, model): # NOTE: this assumes the graph is Vitis-compatible: max one axi lite interface # developed from instructions in UG1393 (v2019.2) and package_xo documentation # package_xo is responsible for generating the kernel xml - assert ( - len(interfaces["axilite"]) <= 1 - ), "CreateVitisXO supports max 1 AXI lite interface" + assert len(interfaces["axilite"]) <= 1, "CreateVitisXO supports max 1 AXI lite interface" axilite_intf_name = None if len(interfaces["axilite"]) == 1: axilite_intf_name = interfaces["axilite"][0] @@ -114,14 +110,12 @@ def apply(self, model): ) arg_id += 1 args_string.append( - "{numReps:0:%s:%s:0x4:0x1C:uint:0}" - % (str(arg_id), axilite_intf_name) + "{numReps:0:%s:%s:0x4:0x1C:uint:0}" % (str(arg_id), axilite_intf_name) ) arg_id += 1 else: args_string.append( - "{numReps:0:%s:%s:0x4:0x10:uint:0}" - % (str(arg_id), axilite_intf_name) + "{numReps:0:%s:%s:0x4:0x10:uint:0}" % (str(arg_id), axilite_intf_name) ) arg_id += 1 for intf in interfaces["s_axis"] 
+ interfaces["m_axis"]: @@ -139,9 +133,10 @@ def apply(self, model): model.set_metadata_prop("vitis_xo", xo_path) # generate the package_xo command in a tcl script - package_xo_string = ( - "package_xo -force -xo_path %s -kernel_name %s -ip_directory %s" - % (xo_path, self.ip_name, stitched_ip_dir) + package_xo_string = "package_xo -force -xo_path %s -kernel_name %s -ip_directory %s" % ( + xo_path, + self.ip_name, + stitched_ip_dir, ) for arg in args_string: package_xo_string += " -kernel_xml_args " + arg @@ -255,9 +250,7 @@ def apply(self, model): mem_type = "DDR" mem_idx = 1 node_mem_port = "%s[%d]" % (mem_type, mem_idx) - config.append( - "sp=%s.m_axi_gmem0:%s" % (instance_names[node.name], node_mem_port) - ) + config.append("sp=%s.m_axi_gmem0:%s" % (instance_names[node.name], node_mem_port)) # connect streams if producer is not None: for i in range(len(node.input)): @@ -281,14 +274,10 @@ def apply(self, model): # add Vivado physopt directives if desired if self.strategy == VitisOptStrategy.PERFORMANCE_BEST: config.append("[vivado]") - config.append( - "prop=run.impl_1.STEPS.OPT_DESIGN.ARGS.DIRECTIVE=ExploreWithRemap" - ) + config.append("prop=run.impl_1.STEPS.OPT_DESIGN.ARGS.DIRECTIVE=ExploreWithRemap") config.append("prop=run.impl_1.STEPS.PLACE_DESIGN.ARGS.DIRECTIVE=Explore") config.append("prop=run.impl_1.STEPS.PHYS_OPT_DESIGN.IS_ENABLED=true") - config.append( - "prop=run.impl_1.STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE=Explore" - ) + config.append("prop=run.impl_1.STEPS.PHYS_OPT_DESIGN.ARGS.DIRECTIVE=Explore") config.append("prop=run.impl_1.STEPS.ROUTE_DESIGN.ARGS.DIRECTIVE=Explore") config = "\n".join(config) + "\n" @@ -341,9 +330,7 @@ def apply(self, model): with open(gen_rep_xml_sh, "w") as f: f.write("#!/bin/bash \n") f.write("cd {}\n".format(link_dir)) - f.write( - "vivado -mode batch -source %s\n" % (link_dir + "/gen_report_xml.tcl") - ) + f.write("vivado -mode batch -source %s\n" % (link_dir + "/gen_report_xml.tcl")) f.write("cd {}\n".format(working_dir)) bash_command = ["bash", gen_rep_xml_sh] process_genxml = subprocess.Popen(bash_command, stdout=subprocess.PIPE) @@ -419,18 +406,12 @@ def apply(self, model): kernel_model = kernel_model.transform(RemoveUnusedTensors()) kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix)) kernel_model.save(dataflow_model_filename) - kernel_model = kernel_model.transform( - PrepareIP(self.fpga_part, self.period_ns) - ) + kernel_model = kernel_model.transform(PrepareIP(self.fpga_part, self.period_ns)) kernel_model = kernel_model.transform(HLSSynthIP()) kernel_model = kernel_model.transform( - CreateStitchedIP( - self.fpga_part, self.period_ns, sdp_node.onnx_node.name, True - ) - ) - kernel_model = kernel_model.transform( - CreateVitisXO(sdp_node.onnx_node.name) + CreateStitchedIP(self.fpga_part, self.period_ns, sdp_node.onnx_node.name, True) ) + kernel_model = kernel_model.transform(CreateVitisXO(sdp_node.onnx_node.name)) kernel_model.set_metadata_prop("platform", "alveo") kernel_model.save(dataflow_model_filename) # Assemble design from kernels diff --git a/src/finn/transformation/move_reshape.py b/src/finn/transformation/move_reshape.py index cec04a182b..ed553e7cee 100644 --- a/src/finn/transformation/move_reshape.py +++ b/src/finn/transformation/move_reshape.py @@ -54,9 +54,7 @@ def apply(self, model): fc_inst = getCustomOp(consumer) mw = fc_inst.get_nodeattr("MW") mh = fc_inst.get_nodeattr("MH") - (b, h, w, c) = model.get_tensor_shape( - transp_node.input[0] - ) + (b, h, w, c) = model.get_tensor_shape(transp_node.input[0]) # 
absorb transpose into weight matrix, # allowing FC layer to operate on the NHWC input W = model.get_initializer(consumer.input[1]) @@ -78,8 +76,6 @@ def apply(self, model): into subsequent node" ) else: - warnings.warn( - "Unsupported transpose node before flatten layer" - ) + warnings.warn("Unsupported transpose node before flatten layer") return (model, graph_modified) diff --git a/src/finn/transformation/qonnx/convert_qonnx_to_finn.py b/src/finn/transformation/qonnx/convert_qonnx_to_finn.py index 34f11d1e95..c921b3d472 100644 --- a/src/finn/transformation/qonnx/convert_qonnx_to_finn.py +++ b/src/finn/transformation/qonnx/convert_qonnx_to_finn.py @@ -66,9 +66,7 @@ class ConvertQONNXtoFINN(Transformation): def __init__( self, - filter_function=default_filter_function_generator( - max_multithreshold_bit_width=8 - ), + filter_function=default_filter_function_generator(max_multithreshold_bit_width=8), ): super().__init__() self._filter_function = filter_function diff --git a/src/finn/transformation/qonnx/fold_quant_weights.py b/src/finn/transformation/qonnx/fold_quant_weights.py index e8339ae244..e027010271 100644 --- a/src/finn/transformation/qonnx/fold_quant_weights.py +++ b/src/finn/transformation/qonnx/fold_quant_weights.py @@ -57,13 +57,9 @@ def apply(self, model): is_const_shape = (n.op_type == "Shape") and (ishape is not None) if is_all_constant_inputs or is_const_shape: # Check node validity - if ( - n.op_type == "Quant" - and not model.get_initializer(n.input[2]) == 0 - ): + if n.op_type == "Quant" and not model.get_initializer(n.input[2]) == 0: raise ValueError( - "Only Quant nodes with zero-point == 0 " - "are currently supported." + "Only Quant nodes with zero-point == 0 " "are currently supported." ) if model.is_fork_node(n): raise ValueError( @@ -73,8 +69,7 @@ def apply(self, model): target_node = model.find_direct_successors(n) if target_node is None: raise RuntimeError( - "Weights quantized with the Quant node must have " - "a successor node." + "Weights quantized with the Quant node must have " "a successor node." ) else: target_node = target_node[0] @@ -126,9 +121,7 @@ def apply(self, model): model.set_tensor_datatype(node_out, new_dtype) # Reshape scale for Conv if required - target_output_shape = model.get_tensor_shape( - target_node.output[0] - ) + target_output_shape = model.get_tensor_shape(target_node.output[0]) if target_node.op_type == "Conv" and len(scale.shape) > 0: conv_out_shape = [1] * len(target_output_shape) # only support per-output channel scaling @@ -160,9 +153,7 @@ def apply(self, model): "Can only constant fold scaled Quant weights " "if a successor exists." 
) - assert ( - len(successor) == 1 - ), "Only implemented for a single consumer" + assert len(successor) == 1, "Only implemented for a single consumer" successor = successor[0] succ_output_name = successor.output[0] diff --git a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py index d2aaee59a4..72d473419a 100644 --- a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py +++ b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py @@ -124,18 +124,10 @@ def apply(self, model): node_ind += 1 if n.op_type == "AveragePool": mul_node = model.find_direct_successors(n) - if ( - mul_node is not None - and len(mul_node) == 1 - and mul_node[0].op_type == "Mul" - ): + if mul_node is not None and len(mul_node) == 1 and mul_node[0].op_type == "Mul": mul_node = mul_node[0] t_node = model.find_direct_successors(mul_node) - if ( - t_node is not None - and len(t_node) == 1 - and t_node[0].op_type == "Trunc" - ): + if t_node is not None and len(t_node) == 1 and t_node[0].op_type == "Trunc": t_node = t_node[0] running_node_index = node_ind # Check node for compatibility @@ -143,27 +135,16 @@ def apply(self, model): k_s = get_by_name(n.attribute, "kernel_shape") if k_s is None or len(k_s.ints) != 2 or len(set(k_s.ints)) != 1: raise ValueError( - "FINN only supports average pooling with " - "2D square kernels." + "FINN only supports average pooling with " "2D square kernels." ) k_s = k_s.ints[0] pads = get_by_name(n.attribute, "pads") - if ( - pads is None - or len(set(pads.ints)) != 1 - or pads.ints[0] != 0 - ): - raise ValueError( - "FINN dosn't support padding for average pooling." - ) + if pads is None or len(set(pads.ints)) != 1 or pads.ints[0] != 0: + raise ValueError("FINN dosn't support padding for average pooling.") stride = get_by_name(n.attribute, "strides") - if ( - stride is None - or len(stride.ints) != 2 - or len(set(stride.ints)) != 1 - ): + if stride is None or len(stride.ints) != 2 or len(set(stride.ints)) != 1: raise ValueError( "FINN only supports 2D strides with equal values in " "each direction." @@ -172,11 +153,7 @@ def apply(self, model): # Mul node mul_val = model.get_initializer(mul_node.input[1]) - if ( - mul_val is None - or len(mul_val.shape) != 0 - or mul_val != k_s * k_s - ): + if mul_val is None or len(mul_val.shape) != 0 or mul_val != k_s * k_s: raise ValueError( f"The Mul node after the AveragePool node must have " f"static initialization at the second input, " @@ -190,8 +167,7 @@ def apply(self, model): rounding_mode = get_by_name(t_node.attribute, "rounding_mode") if rounding_mode is None or rounding_mode.s != b"FLOOR": raise ValueError( - "The Trunc node must have the rounding_mode " - "set to 'FLOOR'." + "The Trunc node must have the rounding_mode " "set to 'FLOOR'." ) for inp in t_node.input[1:]: if model.get_initializer(inp) is None: @@ -207,13 +183,8 @@ def apply(self, model): f"the Trunc node, it currently is {zero_pt}." ) trunc_in_bits = model.get_initializer(t_node.input[3]).flatten() - trunc_out_bits = model.get_initializer( - t_node.input[4] - ).flatten() - if ( - len(trunc_in_bits.shape) != 1 - or len(trunc_out_bits.shape) != 1 - ): + trunc_out_bits = model.get_initializer(t_node.input[4]).flatten() + if len(trunc_in_bits.shape) != 1 or len(trunc_out_bits.shape) != 1: raise ValueError( f"Finn only supports scalar bit widths " f"for the Trunc node. 
The input bit width " @@ -228,9 +199,7 @@ def apply(self, model): # https://github.com/Xilinx/finn-base/blob/ # 7c2603a95e90e4de2575020e575c24eab6a15889/src/finn/custom_op/ # general/quantavgpool2d.py#L94 - ibits = math.floor( - math.log(2**trunc_in_bits / (k_s * k_s), 2) - ) + ibits = math.floor(math.log(2**trunc_in_bits / (k_s * k_s), 2)) # Get sign signed = _get_signed_from_upstream(model, t_node) # ToDo: Change this to NHWC, diff --git a/src/finn/transformation/qonnx/qonnx_activation_handlers.py b/src/finn/transformation/qonnx/qonnx_activation_handlers.py index bbe5e1a0e3..323e391df4 100644 --- a/src/finn/transformation/qonnx/qonnx_activation_handlers.py +++ b/src/finn/transformation/qonnx/qonnx_activation_handlers.py @@ -351,13 +351,10 @@ def _calculate_thresholds(self): bit_width = 1.0 else: raise RuntimeError("Got an unexpected quantizer node type") - quant_scale = self._model.get_initializer(self._q_node.input[1]).astype( - np.float32 - ) + quant_scale = self._model.get_initializer(self._q_node.input[1]).astype(np.float32) act_node = self._model.find_direct_predecessors(self._q_node) act_node = act_node[0] if act_node.op_type == "Relu": - # Calculate thersholds, see: https://github.com/Xilinx/brevitas/blob/ # a5bfd6dc5e030f0047ac1ee47932b60e8e873e17/src/brevitas/export/ # onnx/finn/handler/act.py#L21 @@ -367,9 +364,7 @@ def _calculate_thresholds(self): num_scale_channels = flat_scale.shape[0] step = np.abs(flat_scale).astype(np.float32) min_threshold = step / 2 - thresholds = np.empty( - (num_scale_channels, num_thresholds), dtype=np_default_dtype - ) + thresholds = np.empty((num_scale_channels, num_thresholds), dtype=np_default_dtype) for c in range(num_scale_channels): for t in range(num_thresholds): thresholds[c][t] = min_threshold[c] + step[c] * t @@ -391,9 +386,7 @@ def _calculate_thresholds(self): # from https://pytorch.org/docs/stable/generated/torch.nn.SELU.html alpha = 1.6732632423543772848170429916717 selu_scale = 1.0507009873554804934193349852946 - thresholds = np.empty( - (num_scale_channels, num_thresholds), dtype=np_default_dtype - ) + thresholds = np.empty((num_scale_channels, num_thresholds), dtype=np_default_dtype) for c in range(num_scale_channels): for t in range(num_thresholds): step = -1.0 + half_scale + scale[c] * t @@ -424,8 +417,7 @@ def _remove_activation_node(self, multi_threshold_node): act_node = self._model.find_direct_predecessors(self._q_node) if act_node is None: raise RuntimeError( - "For handling of Relu activations a predecesor to " - "the Quant node must exist." + "For handling of Relu activations a predecesor to " "the Quant node must exist." ) act_node = act_node[0] if act_node.op_type not in self.valid_predecessor_op_types(): @@ -466,9 +458,7 @@ def _check_compatibility(self): q_inst = getCustomOp(self._q_node) signed = q_inst.get_nodeattr("signed") if not signed: - raise ValueError( - "FINN only supports signed Quant nodes for identity activations." 
- ) + raise ValueError("FINN only supports signed Quant nodes for identity activations.") if not self._model.get_initializer(self._q_node.input[2]) == 0: raise ValueError( "Only Quant nodes with zero-point == 0 " @@ -537,9 +527,7 @@ def _calculate_thresholds(self): num_scale_channels = flat_scale.shape[0] step = np.abs(flat_scale) half_step = step / 2.0 - thresholds = np.empty( - (num_scale_channels, num_thresholds), dtype=np_default_dtype - ) + thresholds = np.empty((num_scale_channels, num_thresholds), dtype=np_default_dtype) # compute the value of the smallest threshold, we'll neg-bias all # generated thresholds by this much min_threshold = -half_step - step * ((num_thresholds // 2) - 1) @@ -550,9 +538,7 @@ def _calculate_thresholds(self): thresholds[c][t] = min_threshold[c] + step[c] * t # ToDo: The index 1 needs to be changed to -1 for the channels last format - num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[ - 1 - ] + num_output_channels = self._model.get_tensor_shape(self._q_node.output[0])[1] final_shape = (num_output_channels, num_thresholds) if thresholds.shape != final_shape: thresholds = np.broadcast_to(thresholds, final_shape) @@ -574,9 +560,7 @@ def _calculate_act_scale(self): if bit_width != 1: scale = quant_scale else: - assert ( - quant_scale.flatten().shape[0] == 1 - ), "Unsupported BIPOLAR per channel scale" + assert quant_scale.flatten().shape[0] == 1, "Unsupported BIPOLAR per channel scale" assert quant_scale.flatten()[0] == 1.0, "Unsupported BIPOLAR scale != 1" scale = quant_scale * 2 return scale diff --git a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py index 48dda3820d..1b1aea1bab 100644 --- a/src/finn/transformation/qonnx/quant_act_to_multithreshold.py +++ b/src/finn/transformation/qonnx/quant_act_to_multithreshold.py @@ -87,9 +87,7 @@ class ConvertQuantActToMultiThreshold(Transformation): def __init__( self, - filter_function=default_filter_function_generator( - max_multithreshold_bit_width=8 - ), + filter_function=default_filter_function_generator(max_multithreshold_bit_width=8), ): super().__init__() self._filter_function = filter_function diff --git a/src/finn/transformation/streamline/absorb.py b/src/finn/transformation/streamline/absorb.py index 73df52f890..e3e2468bba 100644 --- a/src/finn/transformation/streamline/absorb.py +++ b/src/finn/transformation/streamline/absorb.py @@ -80,9 +80,7 @@ def apply(self, model): steps = T.shape[-1] new_min = bias new_max = steps + bias - odt = DataType.get_smallest_possible(steps).name.replace( - "UINT", "INT" - ) + odt = DataType.get_smallest_possible(steps).name.replace("UINT", "INT") odt = DataType[odt] assert odt.allowed(new_max) and odt.allowed( new_min @@ -112,11 +110,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Add" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Add" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if consumer is not None and consumer.op_type == "MultiThreshold": add_weight_name = n.input[1] @@ -153,11 +147,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Mul" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Mul" and not model.is_fork_node(n) and not model.is_join_node(n): mul_weight_name = n.input[1] A = 
model.get_initializer(mul_weight_name) assert A is not None, "Initializer for mul weights is not set." @@ -203,9 +193,7 @@ def apply(self, model): is_scalar = np.prod(A.shape) == 1 actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape))) is_1d = actual_ndims == 1 - is_not_bipolar = ( - model.get_tensor_datatype(mul_weight_name) != DataType["BIPOLAR"] - ) + is_not_bipolar = model.get_tensor_datatype(mul_weight_name) != DataType["BIPOLAR"] is_signed = (A < 0).any() if is_signed and (is_scalar or is_1d) and is_not_bipolar: start_name = n.input[0] @@ -219,9 +207,7 @@ def apply(self, model): model.set_tensor_datatype(sign_mul_param_name, DataType["BIPOLAR"]) # replace original mul weight by magnitudes model.set_initializer(mul_weight_name, np.abs(A)) - new_mul = oh.make_node( - "Mul", [start_name, sign_mul_param_name], [middle_name] - ) + new_mul = oh.make_node("Mul", [start_name, sign_mul_param_name], [middle_name]) n.input[0] = middle_name graph.node.insert(node_ind - 1, new_mul) graph_modified = True @@ -338,13 +324,9 @@ def apply(self, model): mt_cand.output[0] ) # Create a new ValueInfoProto and set the shape - model.set_tensor_shape( - intermediate_tensor_name, intermediate_tensor_shape - ) + model.set_tensor_shape(intermediate_tensor_name, intermediate_tensor_shape) # Set the tensor layout - model.set_tensor_layout( - intermediate_tensor_name, DataLayout.NHWC - ) + model.set_tensor_layout(intermediate_tensor_name, DataLayout.NHWC) # Set the tensor FINN datatype model.set_tensor_datatype( intermediate_tensor_name, intermediate_tensor_finn_dtype @@ -379,8 +361,7 @@ def apply(self, model): for n in graph.node: node_ind += 1 if ( - n.op_type == "Reshape" - and (model.get_initializer(n.input[1]) == [1, -1]).all() + n.op_type == "Reshape" and (model.get_initializer(n.input[1]) == [1, -1]).all() ) or n.op_type == "Flatten": prod = model.find_producer(n.input[0]) if ( @@ -556,23 +537,17 @@ def apply(self, model): if sizes is not None: ishape = model.get_tensor_shape(mt_cand.input[0]) ns, cs, hs, ws = sizes / np.asarray(ishape) - model.set_initializer( - mt_cand.input[2], np.asarray([ns, cs, hs, ws]) - ) + model.set_initializer(mt_cand.input[2], np.asarray([ns, cs, hs, ws])) mt_cand.input.remove(mt_cand.input[3]) # scales already specified, transpose indices to NHWC scales = model.get_initializer(mt_cand.input[2]) assert scales is not None ns, cs, hs, ws = scales - model.set_initializer( - mt_cand.input[2], np.asarray([ns, hs, ws, cs]) - ) + model.set_initializer(mt_cand.input[2], np.asarray([ns, hs, ws, cs])) # get rid of first tranpose node mt_cand.input[0] = node.input[0] graph.node.remove(node) - is_last_node = mt_cand.output[0] in [ - x.name for x in model.graph.output - ] + is_last_node = mt_cand.output[0] in [x.name for x in model.graph.output] new_tensor_name = model.make_new_valueinfo_name() if is_last_node: diff --git a/src/finn/transformation/streamline/reorder.py b/src/finn/transformation/streamline/reorder.py index 29eefacc32..2e6aebf093 100644 --- a/src/finn/transformation/streamline/reorder.py +++ b/src/finn/transformation/streamline/reorder.py @@ -53,11 +53,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Add" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Add" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -73,9 +69,7 @@ def apply(self, model): A = model.get_initializer(mul_weight_name) B = 
model.get_initializer(add_weight_name) if (A is None) or (B is None): - warnings.warn( - "Mul or add does not have constant params, skipping" - ) + warnings.warn("Mul or add does not have constant params, skipping") continue start_name = n.input[0] middle_name = n.output[0] @@ -116,11 +110,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Mul" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Mul" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -174,11 +164,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Add" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Add" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -235,11 +221,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Add" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Add" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -317,11 +299,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Mul" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Mul" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -370,11 +348,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Mul" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Mul" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -436,11 +410,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Mul" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Mul" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -465,9 +435,7 @@ def apply(self, model): maxpool_out_shape = model.get_tensor_shape(maxpool_out_name) # do not support non-2D MaxPool - kernel_shape = list( - get_by_name(maxpool_node.attribute, "kernel_shape").ints - ) + kernel_shape = list(get_by_name(maxpool_node.attribute, "kernel_shape").ints) if len(kernel_shape) != 2: continue @@ -675,9 +643,7 @@ def apply(self, model): if ceil_mode is not None: ceil_mode = ceil_mode.i else: - ceil_mode = ( - 0 # default to ceil_mode=0 (equivalent to np.floor) - ) + ceil_mode = 0 # default to ceil_mode=0 (equivalent to np.floor) n.op_type = "MaxPoolNHWC" n.domain = "qonnx.custom_op.general" start_name = n.input[0] @@ -702,9 +668,7 @@ def apply(self, model): if ceil_mode is not None: ceil_mode = ceil_mode.i else: - ceil_mode = ( - 0 # default to ceil_mode=0 (equivalent to np.floor) - ) + ceil_mode = 0 # default to ceil_mode=0 (equivalent to np.floor) n.op_type = "MaxPoolNHWC" n.domain = "qonnx.custom_op.general" start_name = producer.input[0] @@ -739,8 +703,7 @@ def apply(self, model): if n.op_type == "Upsample" or n.op_type == "Resize": if model.get_tensor_layout(n.input[0]) != 
DataLayout.NCHW: warnings.warn( - "%s: Input not NCHW. Can't operate transformation on node." - % n.name + "%s: Input not NCHW. Can't operate transformation on node." % n.name ) continue consumer = model.find_consumer(n.output[0]) @@ -818,7 +781,6 @@ def apply(self, model): and model.is_fork_node(n) and not model.is_join_node(n) ): - # Restrict this transform to operations with constant parameters # Assuming parameters is in input 1 if len(n.input) > 1: @@ -863,9 +825,7 @@ def apply(self, model): consumer_node.input[idx] = new_output_tensor_name break else: - raise Exception( - "Consumer should have the current node output as input" - ) + raise Exception("Consumer should have the current node output as input") graph.node.insert(node_ind, consumer_node) @@ -892,9 +852,7 @@ def __init__(self): class MoveTransposePastFork(MoveOpPastFork): def __init__(self): - super().__init__( - ["Transpose"], lambda x: {"perm": get_by_name(x.attribute, "perm").ints} - ) + super().__init__(["Transpose"], lambda x: {"perm": get_by_name(x.attribute, "perm").ints}) class MoveMaxPoolPastMultiThreshold(Transformation): @@ -918,9 +876,7 @@ def apply(self, model): mt_out = consumer.output[0] mt_odt = model.get_tensor_datatype(mt_out) if mt_odt.signed() and has_padding: - warnings.warn( - "Skipping padded MaxPool + signed-output MultiThreshold" - ) + warnings.warn("Skipping padded MaxPool + signed-output MultiThreshold") continue # check for non-decreasing thresholds and nonnegative # scale factor in MultiThreshold @@ -1031,11 +987,7 @@ def apply(self, model): node_ind = 0 for n in graph.node: node_ind += 1 - if ( - n.op_type == "Flatten" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Flatten" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None @@ -1121,11 +1073,7 @@ def apply(self, model): graph_modified = False for n in graph.node: node_ind += 1 - if ( - n.op_type == "Transpose" - and not model.is_fork_node(n) - and not model.is_join_node(n) - ): + if n.op_type == "Transpose" and not model.is_fork_node(n) and not model.is_join_node(n): consumer = model.find_consumer(n.output[0]) if ( consumer is not None diff --git a/src/finn/transformation/streamline/round_thresholds.py b/src/finn/transformation/streamline/round_thresholds.py index 601dab04cb..5ba5ee0ff5 100644 --- a/src/finn/transformation/streamline/round_thresholds.py +++ b/src/finn/transformation/streamline/round_thresholds.py @@ -57,8 +57,7 @@ def apply(self, model): model.set_tensor_datatype(n.input[1], idtype) graph_modified = True if idtype.is_integer() and ( - (Tnew < (idtype.min() - 1)).any() - or (Tnew > (idtype.max() + 1)).any() + (Tnew < (idtype.min() - 1)).any() or (Tnew > (idtype.max() + 1)).any() ): # clip any large thresholds to input range + 1 Tnew = np.clip(Tnew, idtype.min() - 1, idtype.max() + 1) diff --git a/src/finn/util/create.py b/src/finn/util/create.py index ed3e1a843e..af92d1cb8e 100644 --- a/src/finn/util/create.py +++ b/src/finn/util/create.py @@ -108,15 +108,11 @@ def hls_mlp_maker(layer_spec): odt = lyr["odt"] if i == 0: - global_in = helper.make_tensor_value_info( - current_in_name, TensorProto.FLOAT, [1, mw] - ) + global_in = helper.make_tensor_value_info(current_in_name, TensorProto.FLOAT, [1, mw]) model.graph.input.append(global_in) if i == len(layer_spec) - 1: - global_out = helper.make_tensor_value_info( - current_out_name, TensorProto.FLOAT, [1, mh] - ) + global_out = 
helper.make_tensor_value_info(current_out_name, TensorProto.FLOAT, [1, mh]) model.graph.output.append(global_out) # there are two ways to implement bipolar weights and inputs for diff --git a/src/finn/util/data_packing.py b/src/finn/util/data_packing.py index a41fe882e5..7698850029 100644 --- a/src/finn/util/data_packing.py +++ b/src/finn/util/data_packing.py @@ -149,9 +149,7 @@ def pack_innermost_dim_as_hex_string( ndarray = np.asarray(ndarray, dtype=np.float32) def fun(x): - return array2hexstring( - x, dtype, pad_to_nbits, reverse=reverse_inner, prefix=prefix - ) + return array2hexstring(x, dtype, pad_to_nbits, reverse=reverse_inner, prefix=prefix) return np.apply_along_axis(fun, ndarray.ndim - 1, ndarray) @@ -232,9 +230,7 @@ def unpack_innermost_dim_from_hex_string( return array -def numpy_to_hls_code( - ndarray, dtype, hls_var_name, pack_innermost_dim=True, no_decl=False -): +def numpy_to_hls_code(ndarray, dtype, hls_var_name, pack_innermost_dim=True, no_decl=False): """Return C++ code representation of a numpy ndarray with FINN DataType dtype, using hls_var_name as the resulting C++ variable name. If pack_innermost_dim is specified, the innermost dimension of the ndarray @@ -311,9 +307,7 @@ def npy_to_rtlsim_input(input_file, input_dtype, pad_to_nbits, reverse_inner=Tru return packed_data -def rtlsim_output_to_npy( - output, path, dtype, shape, packedBits, targetBits, reverse_inner=True -): +def rtlsim_output_to_npy(output, path, dtype, shape, packedBits, targetBits, reverse_inner=True): """Convert a flattened sequence of Python arbitrary-precision integers output into a NumPy array, saved as npy file at path. Each arbitrary-precision integer is assumed to be a packed array of targetBits-bit elements, which @@ -418,9 +412,7 @@ def packed_bytearray_to_finnpy( """ - if ( - not issubclass(type(packed_bytearray), np.ndarray) - ) or packed_bytearray.dtype != np.uint8: + if (not issubclass(type(packed_bytearray), np.ndarray)) or packed_bytearray.dtype != np.uint8: raise Exception("packed_bytearray_to_finnpy needs NumPy uint8 arrays") if packed_bytearray.ndim == 0: raise Exception("packed_bytearray_to_finnpy expects at least 1D ndarray") @@ -446,9 +438,7 @@ def packed_bytearray_to_finnpy( if reverse_endian: packed_bytearray = np.flip(packed_bytearray, axis=-1) # convert innermost dim of byte array to hex strings - packed_hexstring = np.apply_along_axis( - npbytearray2hexstring, packed_dim, packed_bytearray - ) + packed_hexstring = np.apply_along_axis(npbytearray2hexstring, packed_dim, packed_bytearray) ret = unpack_innermost_dim_from_hex_string( packed_hexstring, dtype, output_shape, packed_bits, reverse_inner ) diff --git a/src/finn/util/imagenet.py b/src/finn/util/imagenet.py index b4548bb352..1d63adf58b 100644 --- a/src/finn/util/imagenet.py +++ b/src/finn/util/imagenet.py @@ -137,8 +137,7 @@ def measure_topk(n_images, fxn_pre, fxn_exec, fxn_post, verbose=True, k=5): class_names = { 0: "tench, Tinca tinca", 1: "goldfish, Carassius auratus", - 2: "great white shark, white shark, man-eater, man-eating shark, " - "Carcharodon carcharias", + 2: "great white shark, white shark, man-eater, man-eating shark, " "Carcharodon carcharias", 3: "tiger shark, Galeocerdo cuvieri", 4: "hammerhead, hammerhead shark", 5: "electric ray, crampfish, numbfish, torpedo", @@ -184,8 +183,7 @@ def measure_topk(n_images, fxn_pre, fxn_exec, fxn_post, verbose=True, k=5): 45: "Gila monster, Heloderma suspectum", 46: "green lizard, Lacerta viridis", 47: "African chameleon, Chamaeleo chamaeleon", - 48: "Komodo dragon, 
Komodo lizard, dragon lizard, giant lizard, " - "Varanus komodoensis", + 48: "Komodo dragon, Komodo lizard, dragon lizard, giant lizard, " "Varanus komodoensis", 49: "African crocodile, Nile crocodile, Crocodylus niloticus", 50: "American alligator, Alligator mississipiensis", 51: "triceratops", @@ -286,8 +284,7 @@ def measure_topk(n_images, fxn_pre, fxn_exec, fxn_post, verbose=True, k=5): 144: "pelican", 145: "king penguin, Aptenodytes patagonica", 146: "albatross, mollymawk", - 147: "grey whale, gray whale, devilfish, Eschrichtius gibbosus, " - "Eschrichtius robustus", + 147: "grey whale, gray whale, devilfish, Eschrichtius gibbosus, " "Eschrichtius robustus", 148: "killer whale, killer, orca, grampus, sea wolf, Orcinus orca", 149: "dugong, Dugong dugon", 150: "sea lion", @@ -580,8 +577,7 @@ def measure_topk(n_images, fxn_pre, fxn_exec, fxn_post, verbose=True, k=5): 433: "bathing cap, swimming cap", 434: "bath towel", 435: "bathtub, bathing tub, bath, tub", - 436: "beach wagon, station wagon, wagon, estate car, beach waggon, " - "station waggon, waggon", + 436: "beach wagon, station wagon, wagon, estate car, beach waggon, " "station waggon, waggon", 437: "beacon, lighthouse, beacon light, pharos", 438: "beaker", 439: "bearskin, busby, shako", @@ -636,8 +632,7 @@ def measure_topk(n_images, fxn_pre, fxn_exec, fxn_post, verbose=True, k=5): 487: "cellular telephone, cellular phone, cellphone, cell, mobile phone", 488: "chain", 489: "chainlink fence", - 490: "chain mail, ring mail, mail, chain armor, chain armour, ring armor, " - "ring armour", + 490: "chain mail, ring mail, mail, chain armor, chain armour, ring armor, " "ring armour", 491: "chain saw, chainsaw", 492: "chest", 493: "chiffonier, commode", diff --git a/src/finn/util/platforms.py b/src/finn/util/platforms.py index 8212cb5712..77dc591445 100644 --- a/src/finn/util/platforms.py +++ b/src/finn/util/platforms.py @@ -104,9 +104,7 @@ def compute_resources(self): def guide_resources(self): guide = [] # TODO: assert limits is of correct size - guide_res = ( - np.tile(np.array(self.compute_resources), (self.ndevices, 1)) - ).astype(int) + guide_res = (np.tile(np.array(self.compute_resources), (self.ndevices, 1))).astype(int) for i in range(self.nslr * self.ndevices): # when in multi-FPGA mode, subtract cost of UDP connection from eth_slr local_slr = i % self.nslr @@ -159,9 +157,7 @@ def compute_connection_cost(self): xlocal[i][j] = 1 # tile connection cost matrices for entire system for i in range(self.ndevices): - x[ - i * self.nslr : (i + 1) * self.nslr, i * self.nslr : (i + 1) * self.nslr - ] = xlocal + x[i * self.nslr : (i + 1) * self.nslr, i * self.nslr : (i + 1) * self.nslr] = xlocal # set cost for ethernet connections, assuming daisy-chaining for i in range(self.ndevices - 1): x[i * self.nslr + self.eth_slr][(i + 1) * self.nslr + self.eth_slr] = 10 @@ -182,9 +178,7 @@ def compute_connection_resource(self): slllocal[i][j] = self.sll_count[i][j] # tile connection cost matrices for entire system for i in range(self.ndevices): - sll[ - i * self.nslr : (i + 1) * self.nslr, i * self.nslr : (i + 1) * self.nslr - ] = slllocal + sll[i * self.nslr : (i + 1) * self.nslr, i * self.nslr : (i + 1) * self.nslr] = slllocal # set cost for ethernet connections, assuming daisy-chaining eth = np.full((self.nslr * self.ndevices, self.nslr * self.ndevices), 0) # no Eth throughput constraints from one SLR to itself diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 7452394524..86cf2eed14 100644 --- 
a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -86,11 +86,7 @@ def file_to_basename(x): src_exts = [".v", ".sv"] all_verilog_files = list( - set( - filter( - lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs - ) - ) + set(filter(lambda x: any(map(lambda y: x.endswith(y), src_exts)), all_verilog_srcs)) ) verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" @@ -98,9 +94,7 @@ def file_to_basename(x): # use custom version of axis infrastructure vh # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) - custom_vh = pk.resource_filename( - "finn.qnn-data", "verilog/custom_axis_infrastructure.vh" - ) + custom_vh = pk.resource_filename("finn.qnn-data", "verilog/custom_axis_infrastructure.vh") shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") for fn in all_verilog_srcs: if fn.endswith(".vh"): @@ -137,9 +131,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" build_dir = make_build_dir("verilator_fifosim_") - fifosim_cpp_fname = pk.resource_filename( - "finn.qnn-data", "cpp/verilator_fifosim.cpp" - ) + fifosim_cpp_fname = pk.resource_filename("finn.qnn-data", "cpp/verilator_fifosim.cpp") with open(fifosim_cpp_fname, "r") as f: fifosim_cpp_template = f.read() assert len(model.graph.input) == 1, "Only a single input stream is supported" @@ -148,9 +140,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): first_node = model.find_consumer(iname) oname = model.graph.output[0].name last_node = model.find_producer(oname) - assert (first_node is not None) and ( - last_node is not None - ), "Failed to find first/last nodes" + assert (first_node is not None) and (last_node is not None), "Failed to find first/last nodes" fnode_inst = getCustomOp(first_node) lnode_inst = getCustomOp(last_node) ishape_folded = fnode_inst.get_folded_input_shape() @@ -177,7 +167,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): "FIFO_DEPTH_LOGGING": fifo_log, } - for (key, val) in template_dict.items(): + for key, val in template_dict.items(): fifosim_cpp_template = fifosim_cpp_template.replace(f"@{key}@", str(val)) with open(build_dir + "/verilator_fifosim.cpp", "w") as f: diff --git a/src/finn/util/test.py b/src/finn/util/test.py index 4250079ef3..1f36486048 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -137,9 +137,7 @@ def get_example_input(topology): onnx_tensor = onnx.load_tensor_from_string(raw_i) return nph.to_array(onnx_tensor) elif topology == "cnv": - fn = pk.resource_filename( - "finn.qnn-data", "cifar10/cifar10-test-data-class3.npz" - ) + fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") input_tensor = np.load(fn)["arr_0"].astype(np.float32) return input_tensor else: diff --git a/src/finn/util/vcd.py b/src/finn/util/vcd.py index 1f77276d5a..69dd82c5ea 100644 --- a/src/finn/util/vcd.py +++ b/src/finn/util/vcd.py @@ -69,7 +69,7 @@ def get_fifo_count_max(vcd_file, fifo_count_signal): assert len(d) != 0, "FIFO count signal not found" events = list(d.values())[0]["tv"] max = 0 - for (time, val) in events: + for time, val in events: current = int(val, base=2) if current > max: max = current @@ -140,7 +140,7 @@ def get_stream_if_stats(vcd_file, if_base_name): status = {"V": 0, "R": 0} last_time = 0 total_rising_clock_edges = 0 - for (sig, time, val) in events: + for sig, time, val in events: # pyverilator 
generates 5 time units per sample time = time / 5 # pyverilator generates 4 samples per clock period diff --git a/tests/brevitas/test_brevitas_avg_pool_export.py b/tests/brevitas/test_brevitas_avg_pool_export.py index 898f1fb732..053b632221 100644 --- a/tests/brevitas/test_brevitas_avg_pool_export.py +++ b/tests/brevitas/test_brevitas_avg_pool_export.py @@ -31,7 +31,7 @@ import os import torch from brevitas.export import export_qonnx -from brevitas.nn import TruncAvgPool2d, QuantIdentity, QuantReLU +from brevitas.nn import QuantIdentity, QuantReLU, TruncAvgPool2d from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_datatypes import InferDataTypes diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index b469b197fa..fa391efcab 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -79,9 +79,7 @@ def test_brevitas_mobilenet(): export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) # set input finn datatype to UINT8 - preproc_model.set_tensor_datatype( - preproc_model.graph.input[0].name, DataType["UINT8"] - ) + preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType["UINT8"]) preproc_model = preproc_model.transform(InferShapes()) preproc_model = preproc_model.transform(GiveUniqueNodeNames()) preproc_model = preproc_model.transform(GiveUniqueParameterTensors()) @@ -121,6 +119,4 @@ def test_brevitas_mobilenet(): produced = odict[model.graph.output[0].name] produced_prob = odict["TopK_0_out0"] * a0 assert (produced.flatten() == expected_top5).all() - assert np.isclose( - produced_prob.flatten(), expected_top5_prob, atol=2.2 * 1e-1 - ).all() + assert np.isclose(produced_prob.flatten(), expected_top5_prob, atol=2.2 * 1e-1).all() diff --git a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py index ad6a7e53de..2911303501 100644 --- a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py +++ b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py @@ -52,9 +52,7 @@ @pytest.mark.parametrize("narrow_range", [False, True]) @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7)]) @pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_act_export_qhardtanh_nonscaled( - abits, narrow_range, max_val, QONNX_export -): +def test_brevitas_act_export_qhardtanh_nonscaled(abits, narrow_range, max_val, QONNX_export): def get_quant_type(bit_width): if bit_width is None: return QuantType.FP @@ -86,9 +84,7 @@ def get_quant_type(bit_width): export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( - np.float32 - ) + inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py index 1ad52fb5df..551345f649 100644 --- a/tests/brevitas/test_brevitas_qlinear.py +++ b/tests/brevitas/test_brevitas_qlinear.py @@ -53,9 +53,7 @@ @pytest.mark.parametrize("w_bits", [4]) @pytest.mark.parametrize("i_dtype", [DataType["UINT4"]]) 
@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_qlinear( - bias, out_features, in_features, w_bits, i_dtype, QONNX_export -): +def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype, QONNX_export): i_shape = (1, in_features) w_shape = (out_features, in_features) b_linear = QuantLinear( @@ -67,9 +65,7 @@ def test_brevitas_qlinear( weight_quant_type=QuantType.INT, weight_scaling_per_output_channel=True, ) - weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype( - np.float32 - ) + weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype(np.float32) b_linear.weight.data = torch.from_numpy(weight_tensor_fp) b_linear.eval() if QONNX_export: diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index a4657d7924..9e1fcbdc2f 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -54,7 +54,6 @@ def test_brevitas_act_export_relu( ishape, QONNX_export, ): - b_act = QuantReLU( bit_width=abits, ) @@ -90,7 +89,6 @@ def test_brevitas_act_export_relu_channel( ishape, QONNX_export, ): - ch = ishape[1] b_act = QuantReLU( bit_width=abits, diff --git a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py index d35cc8d2dd..72a15810aa 100644 --- a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py +++ b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py @@ -52,9 +52,7 @@ @pytest.mark.parametrize("narrow_range", [False, True]) @pytest.mark.parametrize("min_val", [-1.0, -(1 - 2 ** (-7)), -2]) @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7), 2]) -@pytest.mark.parametrize( - "scaling_impl_type", [ScalingImplType.CONST, ScalingImplType.PARAMETER] -) +@pytest.mark.parametrize("scaling_impl_type", [ScalingImplType.CONST, ScalingImplType.PARAMETER]) @pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_qhardtanh_scaled( abits, narrow_range, min_val, max_val, scaling_impl_type, QONNX_export @@ -99,9 +97,7 @@ def get_quant_type(bit_width): export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) model = ModelWrapper(export_onnx_path) model = model.transform(InferShapes()) - inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype( - np.float32 - ) + inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} odict = oxe.execute_onnx(model, idict, True) produced = odict[model.graph.output[0].name] diff --git a/tests/brevitas/test_brevitas_selu_act_export.py b/tests/brevitas/test_brevitas_selu_act_export.py index 3f4807c5d7..c8d040dbee 100644 --- a/tests/brevitas/test_brevitas_selu_act_export.py +++ b/tests/brevitas/test_brevitas_selu_act_export.py @@ -48,9 +48,7 @@ @pytest.mark.parametrize("narrow", [True, False]) def test_brevitas_act_export_selu(abits, ishape, narrow): export_path = "test_brevitas_selu_act_export_%s.onnx" % str(abits) - b_act = torch.nn.Sequential( - torch.nn.SELU(), QuantIdentity(bit_width=abits, narrow=narrow) - ) + b_act = torch.nn.Sequential(torch.nn.SELU(), QuantIdentity(bit_width=abits, narrow=narrow)) export_qonnx( b_act, diff --git a/tests/brevitas/test_brevitas_validate_mobilenet.py b/tests/brevitas/test_brevitas_validate_mobilenet.py index 20e8ddad50..f3f7df0e3d 100644 --- a/tests/brevitas/test_brevitas_validate_mobilenet.py +++ 
b/tests/brevitas/test_brevitas_validate_mobilenet.py @@ -146,9 +146,7 @@ def test_brevitas_compare_exported_mobilenet(): model = model.transform(MergeONNXModels(preproc_model)) model.save(export_onnx_path + "/quant_mobilenet_v1_4b.onnx") - with open( - export_onnx_path + "/mobilenet_validation.csv", "w", newline="" - ) as csvfile: + with open(export_onnx_path + "/mobilenet_validation.csv", "w", newline="") as csvfile: writer = csv.writer(csvfile) writer.writerow( [ @@ -165,7 +163,7 @@ def test_brevitas_compare_exported_mobilenet(): workload = imagenet_util.get_val_images(n_images, interleave_classes=True) all_inds_ok = True all_probs_ok = True - for (img_path, target_id) in workload: + for img_path, target_id in workload: img_np = imagenet_util.load_resize_crop(img_path) img_torch = torch.from_numpy(img_np).float() # do forward pass in PyTorch/Brevitas diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 5edd77d95d..b08028e7cb 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -314,9 +314,7 @@ def test_export(self, topology, wbits, abits, QONNX_export): assert os.path.isfile(chkpt_name) def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "export" - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "export") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -324,9 +322,7 @@ def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) model = model.transform(RemoveStaticGraphInputs()) - chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "import_and_tidy" - ) + chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "import_and_tidy") model.save(chkpt) def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): @@ -338,9 +334,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): ishape = model.get_tensor_shape(global_inp_name) # preprocessing: torchvision's ToTensor divides uint8 inputs by 255 totensor_pyt = ToTensor() - chkpt_preproc_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "preproc" - ) + chkpt_preproc_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "preproc") export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name, opset_version=13) assert os.path.isfile(chkpt_preproc_name) # join preprocessing and core model @@ -353,9 +347,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): model.set_tensor_datatype(global_inp_name, DataType["UINT8"]) # postprocessing: insert Top-1 node at the end model = model.transform(InsertTopK(k=1)) - chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "pre_post" - ) + chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "pre_post") # tidy-up again model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -367,9 +359,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): assert os.path.isfile(chkpt_name) def test_streamline(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "pre_post" - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, 
abits, QONNX_export, "pre_post") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold()) # move past any reshapes to be able to streamline input scaling @@ -385,14 +375,10 @@ def test_streamline(self, topology, wbits, abits, QONNX_export): model = model.transform(absorb.AbsorbScalarMulAddIntoTopK()) model = model.transform(InferDataLayouts()) model = model.transform(RemoveUnusedTensors()) - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") - ) + model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline")) def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "streamline" - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") model = load_test_checkpoint_or_skip(prev_chkpt_name) if topology == "tfc" and wbits == 1 and abits == 1: # use standalone thresholds for tfc-w1a1 to also exercise that option @@ -415,9 +401,7 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferDataLayouts()) model.save( - get_checkpoint_name( - topology, wbits, abits, QONNX_export, "convert_to_hls_layers" - ) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "convert_to_hls_layers") ) exp_layer_counts = { "tfc": [ @@ -452,7 +436,7 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): else: exp_key = topology exp_layer_counts = exp_layer_counts[exp_key] - for (op_type, exp_count) in exp_layer_counts: + for op_type, exp_count in exp_layer_counts: assert len(model.get_nodes_by_op_type(op_type)) == exp_count def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): @@ -484,9 +468,7 @@ def test_fold(self, topology, wbits, abits, QONNX_export): model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold")) def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fold" - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(MinimizeWeightBitWidth()) @@ -505,13 +487,9 @@ def test_cppsim(self, topology, wbits, abits, QONNX_export): model = model.transform(PrepareCppSim()) model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) - cppsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "cppsim" - ) + cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "cppsim") model.save(cppsim_chkpt) - parent_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_parent" - ) + parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) @@ -524,35 +502,25 @@ def test_cppsim(self, topology, wbits, abits, QONNX_export): def test_ipgen(self, topology, wbits, abits, QONNX_export, kind): if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fold" - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, 
QONNX_export, "fold") model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind) - ) + model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind)) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq", "alveo"]) def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipgen_" + kind - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(InsertAndSetFIFODepths(test_fpga_part, target_clk_ns)) fifo_layers = model.get_nodes_by_op_type("StreamingFIFO") assert len(fifo_layers) > 0 - model.save( - get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind - ) - ) + model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fifodepth_" + kind)) @pytest.mark.slow @pytest.mark.vivado @@ -577,17 +545,13 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): model.set_metadata_prop("exec_mode", "rtlsim") os.environ["LIVENESS_THRESHOLD"] = str(int(latency * 1.1)) if rtlsim_trace: - model.set_metadata_prop( - "rtlsim_trace", "%s_w%da%d.vcd" % (topology, wbits, abits) - ) + model.set_metadata_prop("rtlsim_trace", "%s_w%da%d.vcd" % (topology, wbits, abits)) os.environ["RTLSIM_TRACE_DEPTH"] = "3" rtlsim_chkpt = get_checkpoint_name( topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind ) model.save(rtlsim_chkpt) - parent_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_parent" - ) + parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) @@ -619,18 +583,10 @@ def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): if "TEST_END2END_VALIDATE_TOP1" not in os.environ: pytest.skip("TEST_END2END_VALIDATE_TOP1 not set") - prepostproc_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "pre_post" - ) - streamline_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "streamline" - ) - parent_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_parent" - ) - cppsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "cppsim" - ) + prepostproc_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "pre_post") + streamline_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") + parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") + cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "cppsim") rtlsim_chkpt = get_checkpoint_name( topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind ) @@ -654,9 +610,7 @@ def test_build(self, topology, wbits, abits, QONNX_export, kind): cfg = get_build_env(kind, target_clk_ns) model = model.transform(cfg["build_fxn"]) model = 
model.transform(AnnotateResources("synth")) - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) - ) + model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind)) @pytest.mark.slow @pytest.mark.vivado @@ -665,12 +619,8 @@ def test_build(self, topology, wbits, abits, QONNX_export, kind): def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind): if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "build_" + kind - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) kind_to_driver_platform = {"zynq": "zynq-iodma", "alveo": "alveo"} model = model.transform(MakePYNQDriver(kind_to_driver_platform[kind])) - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind) - ) + model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind)) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index e6ca90b7b2..6e758d2d2d 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -109,9 +109,7 @@ def test_end2end_cybsec_mlp_export(QONNX_export): QuantReLU(bit_width=act_bit_width), QuantLinear(hidden3, num_classes, bias=True, weight_bit_width=weight_bit_width), ) - trained_state_dict = torch.load(assets_dir + "/state_dict.pth")[ - "models_state_dict" - ][0] + trained_state_dict = torch.load(assets_dir + "/state_dict.pth")["models_state_dict"][0] model.load_state_dict(trained_state_dict, strict=False) W_orig = model[0].weight.data.detach().numpy() # pad the second (593-sized) dimensions with 7 zeroes at the end @@ -132,9 +130,7 @@ def test_end2end_cybsec_mlp_export(QONNX_export): if QONNX_export: # With the onnx export from Brevitas we need to manually set # the FINN DataType at the input - export_qonnx( - model_for_export, torch.randn(input_shape), export_path=export_onnx_path - ) + export_qonnx(model_for_export, torch.randn(input_shape), export_path=export_onnx_path) model = ModelWrapper(export_onnx_path) model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"]) model.save(export_onnx_path) @@ -144,7 +140,10 @@ def test_end2end_cybsec_mlp_export(QONNX_export): model.save(export_onnx_path) else: export_finn_onnx( - model_for_export, export_path=export_onnx_path, input_t=input_qt, input_names=["onnx::Mul_0"] + model_for_export, + export_path=export_onnx_path, + input_t=input_qt, + input_names=["onnx::Mul_0"], ) assert os.path.isfile(export_onnx_path) # fix input datatype @@ -169,9 +168,7 @@ def test_end2end_cybsec_mlp_export(QONNX_export): assert finn_model.graph.node[3].op_type == "MatMul" assert finn_model.graph.node[-1].op_type == "MultiThreshold" # verify datatypes on some tensors - assert ( - finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"] - ) + assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"] first_matmul_w_name = finn_model.get_nodes_by_op_type("MatMul")[0].input[1] assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"] diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py index 3a3c0fe237..e53022e74b 100644 --- a/tests/end2end/test_end2end_mobilenet_v1.py +++ b/tests/end2end/test_end2end_mobilenet_v1.py @@ 
-98,9 +98,7 @@ def test_end2end_mobilenet_export(): export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) # set input finn datatype to UINT8 - preproc_model.set_tensor_datatype( - preproc_model.graph.input[0].name, DataType["UINT8"] - ) + preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType["UINT8"]) preproc_model = preproc_model.transform(InferShapes()) preproc_model = preproc_model.transform(FoldConstants()) preproc_model = preproc_model.transform(GiveUniqueNodeNames()) @@ -145,9 +143,7 @@ def test_end2end_mobilenet_export(): @pytest.mark.end2end def test_end2end_mobilenet_tidy_and_merge_with_preproc(): - preproc_model = load_test_checkpoint_or_skip( - build_dir + "/end2end_mobilenet_preproc.onnx" - ) + preproc_model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_preproc.onnx") model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_export.onnx") model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -191,17 +187,13 @@ def test_end2end_mobilenet_streamline(): model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) model.save(build_dir + "/end2end_mobilenet_streamlined.onnx") - assert ( - len(model.get_nodes_by_op_type("Add")) == 1 - ) # only final quantized bias Add op remains + assert len(model.get_nodes_by_op_type("Add")) == 1 # only final quantized bias Add op remains assert len(model.get_nodes_by_op_type("Mul")) == 0 # no Mul ops remain @pytest.mark.end2end def test_end2end_mobilenet_lowering(): - model = load_test_checkpoint_or_skip( - build_dir + "/end2end_mobilenet_streamlined.onnx" - ) + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_streamlined.onnx") model = model.transform(LowerConvsToMatMul()) model = model.transform(absorb.AbsorbTransposeIntoMultiThreshold()) model = model.transform(absorb.AbsorbConsecutiveTransposes()) @@ -229,9 +221,7 @@ def test_end2end_mobilenet_convert_to_hls_layers(): @pytest.mark.end2end def test_end2end_mobilenet_folding(): - model = load_test_checkpoint_or_skip( - build_dir + "/end2end_mobilenet_hls_layers.onnx" - ) + model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_hls_layers.onnx") # optional extra folding to use fewer resources # applied while setting the attributes on each node assert extra_fold in [1, 2, 4] diff --git a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py index 98a7c76ee4..2af0957e12 100644 --- a/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_1d_conv_layer.py @@ -96,12 +96,8 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_ out_chn = 20 conv_param_shape = [out_chn, in_chn, k_h, k_w] - out_feature_dim_h = compute_conv_output_dim( - in_feature_dim_h, k_h, stride_h, pad_h, dilation_h - ) - out_feature_dim_w = compute_conv_output_dim( - in_feature_dim_w, k_w, stride_w, pad_w, dilation_w - ) + out_feature_dim_h = compute_conv_output_dim(in_feature_dim_h, k_h, stride_h, pad_h, dilation_h) + out_feature_dim_w = compute_conv_output_dim(in_feature_dim_w, k_w, stride_w, pad_w, dilation_w) input_shape = [1, in_chn, in_feature_dim_h, in_feature_dim_w] output_shape = [1, out_chn, out_feature_dim_h, out_feature_dim_w] @@ -117,9 +113,7 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_ top_in = helper.make_tensor_value_info("top_in", 
TensorProto.FLOAT, input_shape) top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape) - value_info = [ - helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape) - ] + value_info = [helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape)] modelproto = qonnx_make_model( helper.make_graph( @@ -127,9 +121,7 @@ def test_convert_to_hls_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_ inputs=[top_in], outputs=[top_out], value_info=value_info, - nodes=[ - helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config) - ], + nodes=[helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config)], ) ) diff --git a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py index 089d1ae420..bb2c1d74c2 100644 --- a/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_channelwise_layer.py @@ -52,7 +52,6 @@ def prepare_inputs(input_tensor): def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape): - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, ishape) outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, ishape) p0 = helper.make_tensor_value_info("p0", TensorProto.FLOAT, pshape) @@ -76,13 +75,9 @@ def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape): # parameter datatype -@pytest.mark.parametrize( - "pdt", [DataType["BIPOLAR"], DataType["UINT4"], DataType["INT2"]] -) +@pytest.mark.parametrize("pdt", [DataType["BIPOLAR"], DataType["UINT4"], DataType["INT2"]]) # input datatype -@pytest.mark.parametrize( - "idt", [DataType["INT32"], DataType["UINT4"], DataType["INT4"]] -) +@pytest.mark.parametrize("idt", [DataType["INT32"], DataType["UINT4"], DataType["INT4"]]) # function @pytest.mark.parametrize("onnx_op_name", ["Add", "Mul"]) # vector parameter or scalar parameter (broadcast) @@ -92,9 +87,7 @@ def make_single_maxpool_modelwrapper(onnx_op_name, ishape, idt, pdt, pshape): @pytest.mark.fpgadataflow @pytest.mark.vivado @pytest.mark.slow -def test_convert_to_hls_channelwise_layer( - pdt, idt, onnx_op_name, scalar_param, exec_mode -): +def test_convert_to_hls_channelwise_layer(pdt, idt, onnx_op_name, scalar_param, exec_mode): ifm_ch = 16 ifm_dim = 5 ishape = (1, ifm_ch, ifm_dim, ifm_dim) @@ -134,9 +127,7 @@ def test_convert_to_hls_channelwise_layer( else: raise Exception("Unknown exec_mode") - ctx_produced = oxe.execute_onnx( - new_model, input_dict, return_full_exec_context=True - ) + ctx_produced = oxe.execute_onnx(new_model, input_dict, return_full_exec_context=True) y_produced = ctx_produced["outp"] assert (y_produced == y_expected).all() diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py index 3512c39cb3..94007bdd14 100755 --- a/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_fc_transition.py @@ -102,12 +102,8 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): out_chn = 8 conv_param_shape = [out_chn, in_chn, kernel_size_h, kernel_size_w] - output_size_h = compute_conv_output_dim( - input_size_h, kernel_size_h, stride_h, 2 * pad_h - ) - output_size_w = compute_conv_output_dim( - input_size_w, kernel_size_w, stride_w, 2 * pad_w - ) + output_size_h = compute_conv_output_dim(input_size_h, kernel_size_h, stride_h, 2 * pad_h) + output_size_w = 
compute_conv_output_dim(input_size_w, kernel_size_w, stride_w, 2 * pad_w) input_shape = [1, in_chn, input_size_h, input_size_w] fc_param_shape = [out_chn * output_size_h * output_size_w, fc_filters] @@ -120,34 +116,20 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): conv_config["pads"] = [pad_h, pad_w, pad_h, pad_w] conv_config["strides"] = [stride_h, stride_w] - global_in = helper.make_tensor_value_info( - "global_in", TensorProto.FLOAT, input_shape - ) - global_out = helper.make_tensor_value_info( - "global_out", TensorProto.FLOAT, output_shape - ) + global_in = helper.make_tensor_value_info("global_in", TensorProto.FLOAT, input_shape) + global_out = helper.make_tensor_value_info("global_out", TensorProto.FLOAT, output_shape) value_info = [ - helper.make_tensor_value_info( - "conv_param", TensorProto.FLOAT, conv_param_shape - ), + helper.make_tensor_value_info("conv_param", TensorProto.FLOAT, conv_param_shape), helper.make_tensor_value_info("thres1_param", TensorProto.FLOAT, (out_chn, 15)), - helper.make_tensor_value_info( - "matmul_param", TensorProto.FLOAT, fc_param_shape - ), - helper.make_tensor_value_info( - "thres2_param", TensorProto.FLOAT, (fc_filters, 15) - ), + helper.make_tensor_value_info("matmul_param", TensorProto.FLOAT, fc_param_shape), + helper.make_tensor_value_info("thres2_param", TensorProto.FLOAT, (fc_filters, 15)), helper.make_tensor_value_info("reshape_shape", TensorProto.INT64, []), ] if use_reshape: - flatten_node = helper.make_node( - "Reshape", ["thres1_out", "reshape_shape"], ["flatten_out"] - ) + flatten_node = helper.make_node("Reshape", ["thres1_out", "reshape_shape"], ["flatten_out"]) else: - flatten_node = helper.make_node( - "Flatten", ["thres1_out"], ["flatten_out"], axis=1 - ) + flatten_node = helper.make_node("Flatten", ["thres1_out"], ["flatten_out"], axis=1) modelproto = qonnx_make_model( helper.make_graph( @@ -156,9 +138,7 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): outputs=[global_out], value_info=value_info, nodes=[ - helper.make_node( - "Conv", ["global_in", "conv_param"], ["conv_out"], **conv_config - ), + helper.make_node("Conv", ["global_in", "conv_param"], ["conv_out"], **conv_config), helper.make_node( "MultiThreshold", ["conv_out", "thres1_param"], @@ -167,9 +147,7 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): out_dtype="UINT4", ), flatten_node, - helper.make_node( - "MatMul", ["flatten_out", "matmul_param"], ["matmul_out"] - ), + helper.make_node("MatMul", ["flatten_out", "matmul_param"], ["matmul_out"]), helper.make_node( "MultiThreshold", ["matmul_out", "thres2_param"], @@ -190,18 +168,10 @@ def test_convert_to_hls_conv_fc_transition(conv_config, depthwise, use_reshape): model.set_tensor_datatype("thres1_param", DataType["INT32"]) model.set_tensor_datatype("thres2_param", DataType["INT32"]) - model.set_initializer( - "conv_param", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape) - ) - model.set_initializer( - "thres1_param", get_multithreshold_rand_params(out_chn, 15, seed=0) - ) - model.set_initializer( - "thres2_param", get_multithreshold_rand_params(fc_filters, 15, seed=0) - ) - model.set_initializer( - "matmul_param", gen_finn_dt_tensor(fc_weight_dt, fc_param_shape) - ) + model.set_initializer("conv_param", gen_finn_dt_tensor(conv_weight_dt, conv_param_shape)) + model.set_initializer("thres1_param", get_multithreshold_rand_params(out_chn, 15, seed=0)) + model.set_initializer("thres2_param", 
get_multithreshold_rand_params(fc_filters, 15, seed=0)) + model.set_initializer("matmul_param", gen_finn_dt_tensor(fc_weight_dt, fc_param_shape)) model.set_initializer("reshape_shape", np.array([1, -1], dtype=np.int64)) model = model.transform(InferShapes()) diff --git a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py index 7b2793712d..95beffafac 100644 --- a/tests/fpgadataflow/test_convert_to_hls_conv_layer.py +++ b/tests/fpgadataflow/test_convert_to_hls_conv_layer.py @@ -82,9 +82,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod conv_param_shape = [out_chn, in_chn, kernel_size, kernel_size] total_pad = 2 * pad - out_feature_dim = compute_conv_output_dim( - in_feature_dim, kernel_size, stride, total_pad - ) + out_feature_dim = compute_conv_output_dim(in_feature_dim, kernel_size, stride, total_pad) input_shape = [1, in_chn, in_feature_dim, in_feature_dim] output_shape = [1, out_chn, out_feature_dim, out_feature_dim] @@ -100,9 +98,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape) top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, output_shape) - value_info = [ - helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape) - ] + value_info = [helper.make_tensor_value_info("p1", TensorProto.FLOAT, conv_param_shape)] modelproto = qonnx_make_model( helper.make_graph( @@ -110,9 +106,7 @@ def test_convert_to_hls_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mod inputs=[top_in], outputs=[top_out], value_info=value_info, - nodes=[ - helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config) - ], + nodes=[helper.make_node("Conv", ["top_in", "p1"], ["top_out"], **conv_config)], ) ) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 001c353c8e..296b4cf350 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -38,7 +38,11 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors +from qonnx.transformation.general import ( + GiveReadableTensorNames, + GiveUniqueNodeNames, + GiveUniqueParameterTensors, +) from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py index 0fa7155ac5..e9caeddb44 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py @@ -39,7 +39,11 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors +from qonnx.transformation.general import ( + GiveReadableTensorNames, + GiveUniqueNodeNames, + GiveUniqueParameterTensors, +) from 
qonnx.transformation.infer_shapes import InferShapes import finn.core.onnx_exec as oxe diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py index c837a46a7c..f8e566156b 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_synthetic.py @@ -91,21 +91,11 @@ def make_model(ch, ifmdim): add0_node = helper.make_node("Add", [inp.name, inp1_add0_ct.name], ["out_add0"]) add1_node = helper.make_node("Add", ["out_add0", inp1_add_ct.name], [inp1_add.name]) add2_node = helper.make_node("Add", ["out_add0", inp2_add_ct.name], [inp2_add.name]) - mul1_node = helper.make_node( - "Mul", [inp1_add.name, inp1_mul_ct.name], [inp1_mul.name] - ) - mul2_node = helper.make_node( - "Mul", [inp2_add.name, inp2_mul_ct.name], [inp2_mul.name] - ) - eltwise_add_node = helper.make_node( - "Add", [inp1_mul.name, inp2_mul.name], [eltwise_add.name] - ) - globalavgpool_node = helper.make_node( - "GlobalAveragePool", [eltwise_add.name], [pool.name] - ) - reshape_node = helper.make_node( - "Reshape", [pool.name, reshape_ct.name], [outp.name] - ) + mul1_node = helper.make_node("Mul", [inp1_add.name, inp1_mul_ct.name], [inp1_mul.name]) + mul2_node = helper.make_node("Mul", [inp2_add.name, inp2_mul_ct.name], [inp2_mul.name]) + eltwise_add_node = helper.make_node("Add", [inp1_mul.name, inp2_mul.name], [eltwise_add.name]) + globalavgpool_node = helper.make_node("GlobalAveragePool", [eltwise_add.name], [pool.name]) + reshape_node = helper.make_node("Reshape", [pool.name, reshape_ct.name], [outp.name]) graph = helper.make_graph( nodes=[ diff --git a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py index 6d628c9e53..417b4fbae2 100644 --- a/tests/fpgadataflow/test_convert_to_hls_pool_batch.py +++ b/tests/fpgadataflow/test_convert_to_hls_pool_batch.py @@ -48,9 +48,7 @@ from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def make_single_maxpool_modelwrapper( - k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt, use_1d=False -): +def make_single_maxpool_modelwrapper(k, stride, pad, ifm_ch, ifm_dim, ofm_dim, idt, use_1d=False): odt = idt if use_1d: ishape = [1, ifm_ch, 1, ifm_dim] @@ -74,9 +72,7 @@ def make_single_maxpool_modelwrapper( pads=pads, strides=strides, ) - graph = helper.make_graph( - nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) @@ -89,12 +85,8 @@ def make_single_maxpool_modelwrapper( def make_single_quantavpool_modelwrapper(k, stride, ifm_ch, ifm_dim, ofm_dim, idt, odt): - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim] - ) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim, ofm_dim] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim]) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim, ofm_dim]) mp_node = helper.make_node( "QuantAvgPool2d", @@ -108,9 +100,7 @@ def make_single_quantavpool_modelwrapper(k, stride, ifm_ch, ifm_dim, ofm_dim, id signed=1 if idt.signed() else 0, data_layout="NCHW", ) - graph = helper.make_graph( - nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[mp_node], 
name="mp_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) @@ -143,9 +133,7 @@ def prepare_inputs(input_tensor): @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_convert_to_hls_pool_batch( - idt, odt, pool_config, ifm_ch, pe, op_type, exec_mode -): +def test_convert_to_hls_pool_batch(idt, odt, pool_config, ifm_ch, pe, op_type, exec_mode): k, stride, pad, ifm_dim = pool_config if ifm_ch % pe != 0: @@ -184,9 +172,7 @@ def test_convert_to_hls_pool_batch( if idt.signed() != odt.signed(): pytest.skip("Skipping QuantAvgPool2d with idt.signed() != odt.signed()") - model = make_single_quantavpool_modelwrapper( - k, stride, ifm_ch, ifm_dim, ofm_dim, idt, odt - ) + model = make_single_quantavpool_modelwrapper(k, stride, ifm_ch, ifm_dim, ofm_dim, idt, odt) else: assert False, "{} is not a supported op_type".format(op_type) @@ -209,18 +195,14 @@ def test_convert_to_hls_pool_batch( if pad == 0: assert len(new_model.graph.node) == 4 assert new_model.graph.node[0].op_type == "Transpose" - assert new_model.graph.node[1].op_type.startswith( - "ConvolutionInputGenerator" - ) + assert new_model.graph.node[1].op_type.startswith("ConvolutionInputGenerator") assert new_model.graph.node[2].op_type == "Pool_Batch" assert new_model.graph.node[3].op_type == "Transpose" else: assert len(new_model.graph.node) == 5 assert new_model.graph.node[0].op_type == "Transpose" assert new_model.graph.node[1].op_type == "FMPadding_Batch" - assert new_model.graph.node[2].op_type.startswith( - "ConvolutionInputGenerator" - ) + assert new_model.graph.node[2].op_type.startswith("ConvolutionInputGenerator") assert new_model.graph.node[3].op_type == "Pool_Batch" assert new_model.graph.node[4].op_type == "Transpose" else: diff --git a/tests/fpgadataflow/test_depthwise_convolution.py b/tests/fpgadataflow/test_depthwise_convolution.py index 8ab22bcfdc..2ffd696528 100644 --- a/tests/fpgadataflow/test_depthwise_convolution.py +++ b/tests/fpgadataflow/test_depthwise_convolution.py @@ -57,7 +57,6 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): - # set up reference model consisting of Im2Col + MatMul (+ MultiThreshold) ofm_ch = ifm_ch total_pad = 2 * padding @@ -84,16 +83,10 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): ) # set up onnx model - inp = oh.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch] - ) - outp = oh.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, ofm_ch] - ) + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]) + outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, ofm_ch]) - W_sparse = oh.make_tensor_value_info( - "W_sparse", TensorProto.FLOAT, [ifm_ch * k * k, ofm_ch] - ) + W_sparse = oh.make_tensor_value_info("W_sparse", TensorProto.FLOAT, [ifm_ch * k * k, ofm_ch]) im2col_node = oh.make_node( "Im2Col", @@ -107,9 +100,7 @@ def set_up_reference_model(act, idt, wdt, k, ifm_dim, ifm_ch, stride, padding): depthwise=1, ) - matmul_node = oh.make_node( - "MatMul", inputs=["im2col_out", "W_sparse"], outputs=["outp"] - ) + matmul_node = oh.make_node("MatMul", inputs=["im2col_out", "W_sparse"], outputs=["outp"]) if act is None: node_list = [im2col_node, matmul_node] diff --git a/tests/fpgadataflow/test_fifosizing.py b/tests/fpgadataflow/test_fifosizing.py index 922232c2c2..f3716dea9b 100644 --- a/tests/fpgadataflow/test_fifosizing.py +++ 
b/tests/fpgadataflow/test_fifosizing.py @@ -84,8 +84,7 @@ def test_fifosizing_linear(method, topology): with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: sim_data = json.load(f) assert ( - float(sim_data["stable_throughput[images/s]"]) - / float(est_data["estimated_throughput_fps"]) + float(sim_data["stable_throughput[images/s]"]) / float(est_data["estimated_throughput_fps"]) > 0.9 ) # now run the same build using the generated folding and FIFO config @@ -98,12 +97,8 @@ def test_fifosizing_linear(method, topology): cfg_cmp.folding_config_file = tmp_output_dir + "/final_hw_config.json" build.build_dataflow_cfg(tmp_output_dir_cmp + "/model.onnx", cfg_cmp) - model0 = ModelWrapper( - tmp_output_dir + "/intermediate_models/step_create_stitched_ip.onnx" - ) - model1 = ModelWrapper( - tmp_output_dir_cmp + "/intermediate_models/step_create_stitched_ip.onnx" - ) + model0 = ModelWrapper(tmp_output_dir + "/intermediate_models/step_create_stitched_ip.onnx") + model1 = ModelWrapper(tmp_output_dir_cmp + "/intermediate_models/step_create_stitched_ip.onnx") assert len(model0.graph.node) == len(model1.graph.node) for i in range(len(model0.graph.node)): diff --git a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py index 13fab9a47f..186a6af42c 100644 --- a/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py +++ b/tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py @@ -51,9 +51,7 @@ def make_modelwrapper(C, pe, idt, odt, pdt, func, vecs): NumChannels = C.shape[0] inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, vecs + [NumChannels]) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, vecs + [NumChannels] - ) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, vecs + [NumChannels]) node_inp_list = ["inp", "const"] diff --git a/tests/fpgadataflow/test_fpgadataflow_checksum.py b/tests/fpgadataflow/test_fpgadataflow_checksum.py index cd404f5a63..403bb328ae 100644 --- a/tests/fpgadataflow/test_fpgadataflow_checksum.py +++ b/tests/fpgadataflow/test_fpgadataflow_checksum.py @@ -215,11 +215,7 @@ def write_drain(sim): ), """The second checksums do not match in cppsim vs. 
rtlsim""" - assert ( - checksum0_drain == 0 - ), "Drain read doesn't match drain write for first checksum" - assert ( - checksum1_drain == 0 - ), "Drain read doesn't match drain write for second checksum" + assert checksum0_drain == 0, "Drain read doesn't match drain write for first checksum" + assert checksum1_drain == 0, "Drain read doesn't match drain write for second checksum" # TODO: test for drain set to true diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py index 3cfff9ac34..d94b5d6399 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator.py @@ -46,13 +46,9 @@ from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode -def make_single_im2col_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt -): +def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt): odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch] ) @@ -86,9 +82,7 @@ def make_single_slidingwindow_modelwrapper( k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt, dw=0 ): odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim, ifm_dim, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim, ofm_dim, k * k * ifm_ch] ) @@ -152,9 +146,7 @@ def prepare_inputs(input_tensor): @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_slidingwindow( - idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw -): +def test_fpgadataflow_slidingwindow(idt, k, ifm_dim, ifm_ch, stride, dilation, exec_mode, simd, dw): ofm_dim = int(((ifm_dim - k) / stride) + 1) x = gen_finn_dt_tensor(idt, (1, ifm_dim, ifm_dim, ifm_ch)) @@ -187,9 +179,7 @@ def test_fpgadataflow_slidingwindow( if dw == 0: assert (y_produced == y_expected).all() else: - y_expected = y_expected.reshape( - 1, ofm_dim, ofm_dim, k * k, ifm_ch // simd, simd - ) + y_expected = y_expected.reshape(1, ofm_dim, ofm_dim, k * k, ifm_ch // simd, simd) y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) y_expected = y_expected.reshape(1, ofm_dim, ofm_dim, ifm_ch * k * k) assert (y_produced == y_expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py index f467f37618..aa89dde5e7 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator1d.py @@ -49,9 +49,7 @@ fpga_part = "xczu3eg-sbva484-1-e" -def make_single_im2col_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt -): +def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, simd, stride, dilation, idt): k_h, k_w = k ifm_dim_h, ifm_dim_w = ifm_dim stride_h, stride_w = stride @@ -59,9 +57,7 @@ def make_single_im2col_modelwrapper( ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, 
ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -101,9 +97,7 @@ def make_single_slidingwindow_modelwrapper( ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -259,9 +253,7 @@ def test_fpgadataflow_slidingwindow_1d( if dw == 0: assert (y_produced == y_expected).all() else: - y_expected = y_expected.reshape( - 1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd - ) + y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd) y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) assert (y_produced == y_expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py index e8236c0c6b..53d7be0ebb 100755 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl.py @@ -49,9 +49,7 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilatio ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -91,9 +89,7 @@ def make_single_slidingwindow_modelwrapper( ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -191,21 +187,13 @@ def test_fpgadataflow_slidingwindow_rtl( if ifm_ch % simd != 0: pytest.skip("SIMD must divide number of input channels") if kernel_height > ifm_dim_h or stride_h > ifm_dim_h: - pytest.skip( - "Illegal convolution configuration: kernel or stride > FM dimension" - ) + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") if kernel_width > ifm_dim_w or stride_w > ifm_dim_w: - pytest.skip( - "Illegal convolution configuration: kernel or stride > FM dimension" - ) + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") if (k_h == 1 and dilation_h != 1) or (k_w == 1 and dilation_w != 1): - pytest.skip( - "Illegal convolution configuration: dilation for unitary kernel dim" - ) + pytest.skip("Illegal convolution configuration: dilation for unitary kernel dim") if (stride_h > k_h) or (stride_w > k_w) and not parallel_window: - pytest.skip( - "Not all combinations for stride > k edge case supported in default mode" - ) + pytest.skip("Not all combinations for stride > k edge case supported in default mode") if k_h == 1 and k_w == 1 and simd != ifm_ch: pytest.skip("1x1 Kernel only supported in parallel mode (SIMD=C)") if parallel_window and simd != ifm_ch: @@ -253,9 +241,7 @@ def test_fpgadataflow_slidingwindow_rtl( if dw == 0: assert (y_produced == 
y_expected).all() else: - y_expected = y_expected.reshape( - 1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd - ) + y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, k_h * k_w, ifm_ch // simd, simd) y_expected = y_expected.transpose(0, 1, 2, 4, 3, 5) y_expected = y_expected.reshape(1, ofm_dim_h, ofm_dim_w, ifm_ch * k_h * k_w) assert (y_produced == y_expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py index e586984b31..f5a06316e2 100644 --- a/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py +++ b/tests/fpgadataflow/test_fpgadataflow_convinputgenerator_rtl_dynamic.py @@ -63,32 +63,18 @@ from finn.util.basic import pyverilate_get_liveness_threshold_cycles -def create_conv_model( - idim_h, idim_w, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise -): +def create_conv_model(idim_h, idim_w, ifm, k, stride, ofm, idt, wdt, pad_mode, depthwise): np.random.seed(0) group = ifm if depthwise else 1 group_str = str(group) ishp = (1, ifm, idim_h, idim_w) - pad_0 = _auto_pad_to_explicit_padding( - pad_mode, idim_h, idim_w, k, k, stride, stride, 2 - ) - int_dim_h = compute_conv_output_dim( - idim_h, k, stride, total_pad=pad_0[0] + pad_0[2] - ) - int_dim_w = compute_conv_output_dim( - idim_w, k, stride, total_pad=pad_0[1] + pad_0[3] - ) + pad_0 = _auto_pad_to_explicit_padding(pad_mode, idim_h, idim_w, k, k, stride, stride, 2) + int_dim_h = compute_conv_output_dim(idim_h, k, stride, total_pad=pad_0[0] + pad_0[2]) + int_dim_w = compute_conv_output_dim(idim_w, k, stride, total_pad=pad_0[1] + pad_0[3]) - pad_1 = _auto_pad_to_explicit_padding( - pad_mode, int_dim_h, int_dim_w, k, k, stride, stride, 2 - ) - odim_h = compute_conv_output_dim( - int_dim_h, k, stride, total_pad=pad_1[0] + pad_1[2] - ) - odim_w = compute_conv_output_dim( - int_dim_w, k, stride, total_pad=pad_1[1] + pad_1[3] - ) + pad_1 = _auto_pad_to_explicit_padding(pad_mode, int_dim_h, int_dim_w, k, k, stride, stride, 2) + odim_h = compute_conv_output_dim(int_dim_h, k, stride, total_pad=pad_1[0] + pad_1[2]) + odim_w = compute_conv_output_dim(int_dim_w, k, stride, total_pad=pad_1[1] + pad_1[3]) oshp = (1, ifm, odim_h, odim_w) if depthwise else (1, ofm, odim_h, odim_w) wshp = (ifm, 1, k, k) if depthwise else (ofm, ifm, k, k) wshp_1 = (ifm, 1, k, k) if depthwise else (ofm, ofm, k, k) @@ -263,15 +249,11 @@ def test_fpgadataflow_conv_dynamic(cfg): # convert to hardware and prepare simulation model = largest_model.transform(LowerConvsToMatMul()) model = model.transform(to_hls.InferConvInpGen(use_rtl_variant=True)) - model = model.transform( - to_hls.InferQuantizedMatrixVectorActivation(mem_mode="decoupled") - ) + model = model.transform(to_hls.InferQuantizedMatrixVectorActivation(mem_mode="decoupled")) model = model.transform(to_hls.InferVectorVectorActivation()) model = model.transform(absorb.AbsorbConsecutiveTransposes()) parent_model = model.transform(CreateDataflowPartition()) - sdp_inst = getCustomOp( - parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] - ) + sdp_inst = getCustomOp(parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0]) model = ModelWrapper(sdp_inst.get_nodeattr("model")) assert len(model.get_nodes_by_op_type("ConvolutionInputGenerator_rtl")) == 2 if pad_mode == "VALID": @@ -331,15 +313,11 @@ def test_fpgadataflow_conv_dynamic(cfg): pad_nodes = model.get_nodes_by_op_type("FMPadding_rtl") padder0 = getCustomOp(pad_nodes[0]) update_tensor_dim(model, 
padder0.onnx_node.input[0], (idim_h, idim_w)) - update_tensor_dim( - model, padder0.onnx_node.output[0], (conv0_idim_h, conv0_idim_w) - ) + update_tensor_dim(model, padder0.onnx_node.output[0], (conv0_idim_h, conv0_idim_w)) pad_config0 = padder0.get_dynamic_config((idim_h, idim_w), pad0) padder1 = getCustomOp(pad_nodes[1]) update_tensor_dim(model, padder1.onnx_node.input[0], (int_dim_h, int_dim_w)) - update_tensor_dim( - model, padder1.onnx_node.output[0], (conv1_idim_h, conv1_idim_w) - ) + update_tensor_dim(model, padder1.onnx_node.output[0], (conv1_idim_h, conv1_idim_w)) pad_config1 = padder1.get_dynamic_config((int_dim_h, int_dim_w), pad1) configs = [ ("s_axilite_0_", pad_config0), @@ -380,9 +358,7 @@ def make_single_im2col_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, stride, dilatio ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -422,9 +398,7 @@ def make_single_slidingwindow_modelwrapper( ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, k_h * k_w * ifm_ch] ) @@ -515,13 +489,9 @@ def test_fpgadataflow_slidingwindow_rtl_dynamic( if ifm_ch % simd != 0: pytest.skip("SIMD must divide number of input channels") if kernel_height > ifm_dim_h or stride_h > ifm_dim_h: - pytest.skip( - "Illegal convolution configuration: kernel or stride > FM dimension" - ) + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") if kernel_width > ifm_dim_w or stride_w > ifm_dim_w: - pytest.skip( - "Illegal convolution configuration: kernel or stride > FM dimension" - ) + pytest.skip("Illegal convolution configuration: kernel or stride > FM dimension") if (k_h == 1 and (stride_h != 1 or dilation_h != 1)) or ( k_w == 1 and (stride_w != 1 or dilation_w != 1) ): diff --git a/tests/fpgadataflow/test_fpgadataflow_downsampler.py b/tests/fpgadataflow/test_fpgadataflow_downsampler.py index 64da0a2368..8a3c1fe682 100644 --- a/tests/fpgadataflow/test_fpgadataflow_downsampler.py +++ b/tests/fpgadataflow/test_fpgadataflow_downsampler.py @@ -122,9 +122,7 @@ def test_fpgadataflow_downsampler(is_1d, flip_1d, exec_mode): stride = 2 dt_in = DataType["UINT8"] dt_w = DataType["INT2"] - model = build_model( - is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=flip_1d - ) + model = build_model(is_1d, in_dim, k, stride, dt_in, dt_w, pad_half=0, flip_1d=flip_1d) inp = gen_finn_dt_tensor(dt_in, model.get_tensor_shape("in0")) idict = {"in0": inp} y_expected = execute_onnx(model, idict)["out0"] diff --git a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py index 441bbce50a..27bab93fb6 100644 --- a/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py +++ b/tests/fpgadataflow/test_fpgadataflow_duplicatestreams.py @@ -56,9 +56,7 @@ def make_dupstreams_modelwrapper(ch, pe, idim, idt, n_dupl): for i in range(n_dupl): outp_name = "outp%d" % i out_names.append(outp_name) - out_vi.append( - helper.make_tensor_value_info(outp_name, TensorProto.FLOAT, shape) 
- ) + out_vi.append(helper.make_tensor_value_info(outp_name, TensorProto.FLOAT, shape)) dupstrm_node = helper.make_node( "DuplicateStreams_Batch", @@ -72,9 +70,7 @@ def make_dupstreams_modelwrapper(ch, pe, idim, idt, n_dupl): inputDataType=idt.name, numInputVectors=[1, idim, idim], ) - graph = helper.make_graph( - nodes=[dupstrm_node], name="graph", inputs=[inp], outputs=out_vi - ) + graph = helper.make_graph(nodes=[dupstrm_node], name="graph", inputs=[inp], outputs=out_vi) model = qonnx_make_model(graph, producer_name="addstreams-model") model = ModelWrapper(model) diff --git a/tests/fpgadataflow/test_fpgadataflow_dwc.py b/tests/fpgadataflow/test_fpgadataflow_dwc.py index 2bde148a14..eb6e0651d9 100644 --- a/tests/fpgadataflow/test_fpgadataflow_dwc.py +++ b/tests/fpgadataflow/test_fpgadataflow_dwc.py @@ -42,7 +42,6 @@ def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style): - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, shape) outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape) @@ -59,9 +58,7 @@ def make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_styl impl_style=impl_style, ) - graph = helper.make_graph( - nodes=[DWC_node], name="dwc_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[DWC_node], name="dwc_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="dwc-model") model = ModelWrapper(model) @@ -99,9 +96,7 @@ def test_fpgadataflow_dwc_rtlsim(config): x = gen_finn_dt_tensor(finn_dtype, shape) input_dict = prepare_inputs(x, finn_dtype) - model = make_single_dwc_modelwrapper( - shape, inWidth, outWidth, finn_dtype, impl_style - ) + model = make_single_dwc_modelwrapper(shape, inWidth, outWidth, finn_dtype, impl_style) model = model.transform(InsertFIFO(create_shallow_fifos=True)) model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, 5)) diff --git a/tests/fpgadataflow/test_fpgadataflow_fifo.py b/tests/fpgadataflow/test_fpgadataflow_fifo.py index efdb3bf6aa..27417a78e1 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fifo.py +++ b/tests/fpgadataflow/test_fpgadataflow_fifo.py @@ -47,7 +47,6 @@ def make_single_fifo_modelwrapper(Shape, Depth, fld_shape, finn_dtype): - inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, Shape) outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, Shape) @@ -62,9 +61,7 @@ def make_single_fifo_modelwrapper(Shape, Depth, fld_shape, finn_dtype): dataType=str(finn_dtype.name), ) - graph = helper.make_graph( - nodes=[FIFO_node], name="fifo_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[FIFO_node], name="fifo_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="fifo-model") model = ModelWrapper(model) @@ -91,7 +88,6 @@ def prepare_inputs(input_tensor, dt): @pytest.mark.slow @pytest.mark.vivado def test_fpgadataflow_fifo_rtlsim(Shape, folded_shape, depth, finn_dtype): - # generate input data x = gen_finn_dt_tensor(finn_dtype, Shape) input_dict = prepare_inputs(x, finn_dtype) diff --git a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py index b95409fda8..c871811c5e 100644 --- a/tests/fpgadataflow/test_fpgadataflow_fmpadding.py +++ b/tests/fpgadataflow/test_fpgadataflow_fmpadding.py @@ -62,12 +62,8 @@ def make_single_fmpadding_modelwrapper(optype, idim, padding, num_ch, simd, idt) odim_h = idim_h + pad_h odim_w = idim_w + pad_w - inp = 
helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, idim_h, idim_w, num_ch] - ) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, odim_h, odim_w, num_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, idim_h, idim_w, num_ch]) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, odim_h, odim_w, num_ch]) FMPadding = helper.make_node( optype, @@ -99,9 +95,7 @@ def make_single_fmpadding_modelwrapper(optype, idim, padding, num_ch, simd, idt) # input image dimension @pytest.mark.parametrize("idim", [[8, 8], [10, 8]]) # number of rows and number of cols to add -@pytest.mark.parametrize( - "pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3], [7, 0, 8, 0]] -) +@pytest.mark.parametrize("pad", [[1, 1, 1, 1], [1, 1, 2, 2], [1, 3, 2, 3], [7, 0, 8, 0]]) # number of channels @pytest.mark.parametrize("num_ch", [2, 4]) # Input parallelism @@ -149,9 +143,7 @@ def test_fpgadataflow_fmpadding(idim, pad, num_ch, simd, idt, mode, impl_style): expected_oshape = (1, odim_h, odim_w, num_ch) assert y_produced.shape == expected_oshape - y_expected = np.pad( - x, ((0, 0), (pad[0], pad[2]), (pad[1], pad[3]), (0, 0)), "constant" - ) + y_expected = np.pad(x, ((0, 0), (pad[0], pad[2]), (pad[1], pad[3]), (0, 0)), "constant") assert (y_produced == y_expected).all() diff --git a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py index a2c3d09a55..1b3d87c11f 100644 --- a/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_globalaccpool.py @@ -61,9 +61,7 @@ def make_accpool_modelwrapper(ch, pe, idim, idt): inputDataType=idt.name, numInputVectors=[1, idim, idim], ) - graph = helper.make_graph( - nodes=[accpool_node], name="graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[accpool_node], name="graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="thresholding-model") model = ModelWrapper(model) diff --git a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py index 7e4069f5c4..2d85cc98f4 100644 --- a/tests/fpgadataflow/test_fpgadataflow_ipstitch.py +++ b/tests/fpgadataflow/test_fpgadataflow_ipstitch.py @@ -96,9 +96,7 @@ def create_one_fc_model(mem_mode="const"): mem_mode=mem_mode, ) - graph = helper.make_graph( - nodes=[fc0], name="fclayer_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[fc0], name="fclayer_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="fclayer-model") model = ModelWrapper(model) @@ -212,9 +210,7 @@ def test_fpgadataflow_ipstitch_gen_model(mem_mode): model = model.transform(HLSSynthIP()) assert model.graph.node[0].op_type == "MatrixVectorActivation" assert model.graph.node[-1].op_type == "TLastMarker" - model.save( - ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_gen_model_%s.onnx" % mem_mode - ) + model.save(ip_stitch_model_dir + "/test_fpgadataflow_ipstitch_gen_model_%s.onnx" % mem_mode) @pytest.mark.parametrize("mem_mode", ["const", "decoupled"]) diff --git a/tests/fpgadataflow/test_fpgadataflow_labelselect.py b/tests/fpgadataflow/test_fpgadataflow_labelselect.py index 553f263ba2..efd093b0b3 100644 --- a/tests/fpgadataflow/test_fpgadataflow_labelselect.py +++ b/tests/fpgadataflow/test_fpgadataflow_labelselect.py @@ -81,9 +81,7 @@ def prepare_inputs(input_tensor, idt): return {"inp": input_tensor} -@pytest.mark.parametrize( - "idt", [DataType["UINT8"], 
DataType["UINT16"], DataType["INT16"]] -) +@pytest.mark.parametrize("idt", [DataType["UINT8"], DataType["UINT16"], DataType["INT16"]]) # labels @pytest.mark.parametrize("labels", [10, 100]) # folding diff --git a/tests/fpgadataflow/test_fpgadataflow_lookup.py b/tests/fpgadataflow/test_fpgadataflow_lookup.py index 3164f2b4a6..7951007045 100644 --- a/tests/fpgadataflow/test_fpgadataflow_lookup.py +++ b/tests/fpgadataflow/test_fpgadataflow_lookup.py @@ -57,9 +57,7 @@ def make_lookup_model(embeddings, ishape, idt, edt): class LookupModel(nn.Module): def __init__(self, num_embeddings, embedding_dim): super().__init__() - self.lookup = nn.Embedding( - num_embeddings=num_embeddings, embedding_dim=embedding_dim - ) + self.lookup = nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim) def forward(self, x): x = self.lookup(x) diff --git a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py index 628721b429..67a40d96f3 100644 --- a/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py +++ b/tests/fpgadataflow/test_fpgadataflow_streamingmaxpool.py @@ -53,9 +53,7 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_ ifm_dim_h, ifm_dim_w = ifm_dim ofm_dim_h, ofm_dim_w = ofm_dim odt = idt - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim_h, ifm_dim_w, ifm_ch]) outp = helper.make_tensor_value_info( "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] ) @@ -70,9 +68,7 @@ def make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_ ceil_mode=ceil_mode, pads=[0, 0, 0, 0], ) - graph = helper.make_graph( - nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[mp_node], name="mp_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="mp-model") model = ModelWrapper(model) @@ -106,9 +102,7 @@ def prepare_inputs(input_tensor): @pytest.mark.fpgadataflow @pytest.mark.slow @pytest.mark.vivado -def test_fpgadataflow_streamingmaxpool( - idt, dim_1d, k, ifm_dim, ifm_ch, pe, ceil_mode, exec_mode -): +def test_fpgadataflow_streamingmaxpool(idt, dim_1d, k, ifm_dim, ifm_ch, pe, ceil_mode, exec_mode): ifm_dim_h = ifm_dim k_h = k if dim_1d: @@ -138,9 +132,7 @@ def test_fpgadataflow_streamingmaxpool( # prepare input data input_dict = prepare_inputs(x) - golden = make_single_maxpoolnhwc_modelwrapper( - k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_mode - ) + golden = make_single_maxpoolnhwc_modelwrapper(k, ifm_ch, ifm_dim, ofm_dim, idt, ceil_mode) y_expected = oxe.execute_onnx(golden, input_dict)["outp"] model = golden.transform(InferStreamingMaxPool()) diff --git a/tests/fpgadataflow/test_fpgadataflow_thresholding.py b/tests/fpgadataflow/test_fpgadataflow_thresholding.py index 445afdf458..2b7bc28a10 100644 --- a/tests/fpgadataflow/test_fpgadataflow_thresholding.py +++ b/tests/fpgadataflow/test_fpgadataflow_thresholding.py @@ -56,17 +56,11 @@ target_clk_ns = 5 -def make_single_thresholding_modelwrapper( - T, pe, idt, odt, actval, mem_mode, n_inp_vecs -): +def make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode, n_inp_vecs): NumChannels = T.shape[0] - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, n_inp_vecs + [NumChannels] - ) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, n_inp_vecs + [NumChannels] - ) + inp = 
helper.make_tensor_value_info("inp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, n_inp_vecs + [NumChannels]) node_inp_list = ["inp", "thresh"] @@ -140,9 +134,7 @@ def test_fpgadataflow_thresholding(idt, act, nf, ich, exec_mode, mem_mode): else: actval = odt.min() - model = make_single_thresholding_modelwrapper( - T, pe, idt, odt, actval, mem_mode, n_inp_vecs - ) + model = make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode, n_inp_vecs) if exec_mode == "cppsim": model = model.transform(PrepareCppSim()) @@ -219,9 +211,7 @@ def test_runtime_thresholds_single_layer(): else: actval = odt.min() - model = make_single_thresholding_modelwrapper( - T, pe, idt, odt, actval, mem_mode, n_inp_vecs - ) + model = make_single_thresholding_modelwrapper(T, pe, idt, odt, actval, mem_mode, n_inp_vecs) op_inst = getCustomOp(model.graph.node[0]) op_inst.set_nodeattr("runtime_writeable_weights", 1) op_inst.make_weight_file(T, "decoupled_runtime", "old_weights.dat") @@ -248,9 +238,7 @@ def test_runtime_thresholds_single_layer(): def read_weights(sim): addr = 0 for i in range(len(old_weight_stream)): - extracted_weight_stream.append( - axilite_read(sim, addr, basename="s_axilite_0_") - ) + extracted_weight_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) addr += 4 rtlsim_exec(model, exec_ctx, pre_hook=read_weights) @@ -273,9 +261,7 @@ def read_weights(sim): expected += act.min() assert (y == expected).all() - new_weights = np.random.randint(idt.min(), idt.max() + 1, (ich, n_steps)).astype( - np.float32 - ) + new_weights = np.random.randint(idt.min(), idt.max() + 1, (ich, n_steps)).astype(np.float32) # provide non-decreasing thresholds new_weights = np.sort(T, axis=1) op_inst.make_weight_file(new_weights, "decoupled_runtime", "new_weights.dat") diff --git a/tests/fpgadataflow/test_fpgadataflow_vvau.py b/tests/fpgadataflow/test_fpgadataflow_vvau.py index 95501078d6..4208169c0b 100644 --- a/tests/fpgadataflow/test_fpgadataflow_vvau.py +++ b/tests/fpgadataflow/test_fpgadataflow_vvau.py @@ -135,9 +135,7 @@ def _make_single_vvau_modelwrapper( mem_mode=mem_mode, ) - graph = helper.make_graph( - nodes=[VVAU_node], name="vvau_graph", inputs=[inp], outputs=[outp] - ) + graph = helper.make_graph(nodes=[VVAU_node], name="vvau_graph", inputs=[inp], outputs=[outp]) model = qonnx_make_model(graph, producer_name="vvau-model") model = ModelWrapper(model) @@ -202,9 +200,7 @@ def test_fpgadataflow_vvau( # Generate weights in expected shape for ONNX and HLS node W = gen_finn_dt_tensor(wdt, (channels, 1, k_h, k_w)) # shape: [channels, 1, k, k] - W_onnx = _infer_sparse_weight_tensor( - W, k_h, k_w, channels - ) # shape: [k*k*channels, channels] + W_onnx = _infer_sparse_weight_tensor(W, k_h, k_w, channels) # shape: [k*k*channels, channels] # Generate inputs in expected format for ONNX and HLS node x = gen_finn_dt_tensor(idt, (1, dim_h, dim_w, k_h * k_w * channels)) @@ -273,9 +269,7 @@ def test_fpgadataflow_vvau( # signed offset y_expected += act.min() - y_produced = oxe.execute_onnx(model, input_dict, return_full_exec_context=False)[ - "outp" - ] + y_produced = oxe.execute_onnx(model, input_dict, return_full_exec_context=False)["outp"] assert (y_produced == y_expected).all(), "incorrect result" diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index dc4a076a18..805578018c 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ 
b/tests/fpgadataflow/test_minimize_bit_width.py @@ -228,11 +228,7 @@ def phi(x: float) -> float: # if not runtime-writable weights, then use the tighter bound on the accumulator # bit width as determined by the weight values themselves else: - beta = ( - np.log2(abs(weights).sum(axis=0).max()) - + idt.bitwidth() - - float(idt.signed()) - ) + beta = np.log2(abs(weights).sum(axis=0).max()) + idt.bitwidth() - float(idt.signed()) P = np.ceil(beta + phi(beta) + 1.0) # if the node is the last in the graph, then round up to the nearest 8 bits if model.find_direct_successors(inst.onnx_node) is None: @@ -262,9 +258,7 @@ def phi(x: float) -> float: @pytest.mark.parametrize("tdt", thresh_data_types) @pytest.mark.parametrize("rww", [True, False]) @pytest.mark.fpgadataflow -def test_minimize_accumulator_width( - wdt: DataType, idt: DataType, tdt: DataType, rww: bool -): +def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, rww: bool): """Testing MinimizeAccumulatorWidth for VVAU and MVAU. :param wdt: (DataType) The data type that we are testing for the weights @@ -272,9 +266,7 @@ def test_minimize_accumulator_width( :param tdt: (DataType) The data type that we are testing for the thresholds :param rww: (bool) Whether or not to use runtime-writeable weights""" if (not wdt.signed()) or isinstance(wdt, BipolarType): - pytest.skip( - "Closed-form accumulator calculation is designed to consider signed weights" - ) + pytest.skip("Closed-form accumulator calculation is designed to consider signed weights") # Create uniform-precision model model = make_unit_test_model(wdt, idt, tdt) @@ -286,9 +278,7 @@ def test_minimize_accumulator_width( if isinstance(inst, (MatrixVectorActivation, VectorVectorActivation)): inst.set_nodeattr("runtime_writeable_weights", int(rww)) cur_adt = DataType[inst.get_nodeattr("accDataType")] - assert ( - cur_adt.bitwidth() == def_adt.bitwidth() - ), "Default data type is incorrect" + assert cur_adt.bitwidth() == def_adt.bitwidth(), "Default data type is incorrect" # Apply the optimization model = model.transform(MinimizeAccumulatorWidth()) @@ -304,9 +294,7 @@ def test_minimize_accumulator_width( # bit width minimization logic in the MVAU and VVAU is exact and should be # less than or equal to this calculation exp_adt = calculate_accumulator_bit_width(inst, model) - assert ( - cur_adt.bitwidth() <= exp_adt.bitwidth() - ), "Mismatched accumulation data types" + assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" if model.find_direct_successors(inst.onnx_node) is None: assert ( cur_adt.bitwidth() % 8 @@ -315,6 +303,4 @@ def test_minimize_accumulator_width( cur_adt.bitwidth() == cur_odt.bitwidth() ), "outputDataType and accDataType should be equal" else: - assert ( - cur_odt.bitwidth() == idt.bitwidth() - ), "outputDataType should not be changed" + assert cur_odt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" diff --git a/tests/fpgadataflow/test_runtime_weights.py b/tests/fpgadataflow/test_runtime_weights.py index 16fed5c3cb..9b2f418776 100644 --- a/tests/fpgadataflow/test_runtime_weights.py +++ b/tests/fpgadataflow/test_runtime_weights.py @@ -96,9 +96,7 @@ def test_runtime_weights_single_layer(): def read_weights(sim): addr = 0 for i in range(len(old_weight_stream)): - extracted_weight_stream.append( - axilite_read(sim, addr, basename="s_axilite_0_") - ) + extracted_weight_stream.append(axilite_read(sim, addr, basename="s_axilite_0_")) addr += 4 rtlsim_exec(model, exec_ctx, pre_hook=read_weights) diff 
--git a/tests/fpgadataflow/test_set_folding.py b/tests/fpgadataflow/test_set_folding.py index 5355dd7044..ce9f4b12ed 100644 --- a/tests/fpgadataflow/test_set_folding.py +++ b/tests/fpgadataflow/test_set_folding.py @@ -45,7 +45,6 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes): - W = np.random.randint(wdt.min(), wdt.max() + 1, size=(ch, ch)) W = W.astype(np.float32) @@ -55,9 +54,7 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes): tensors = [] tensors.append(helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ch])) for i in range(1, nnodes): - inter = helper.make_tensor_value_info( - "inter_" + str(i), TensorProto.FLOAT, [1, ch] - ) + inter = helper.make_tensor_value_info("inter_" + str(i), TensorProto.FLOAT, [1, ch]) tensors.append(inter) tensors.append(helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ch])) @@ -115,10 +112,7 @@ def make_multi_fclayer_model(ch, wdt, adt, tdt, nnodes): @pytest.mark.parametrize("platform", ["Pynq-Z1", "Ultra96", "U200"]) @pytest.mark.fpgadataflow def test_set_folding(target_fps, platform): - - model = make_multi_fclayer_model( - 128, DataType["INT4"], DataType["INT2"], DataType["INT16"], 5 - ) + model = make_multi_fclayer_model(128, DataType["INT4"], DataType["INT2"], DataType["INT16"], 5) model = model.transform(GiveUniqueNodeNames()) parent_model = model.transform(CreateDataflowPartition()) diff --git a/tests/fpgadataflow/test_split_large_fifos.py b/tests/fpgadataflow/test_split_large_fifos.py index 0437d006cf..3061696a68 100644 --- a/tests/fpgadataflow/test_split_large_fifos.py +++ b/tests/fpgadataflow/test_split_large_fifos.py @@ -94,13 +94,9 @@ def test_split_large_fifos(depth, force_python_rtlsim): with open(tmp_output_dir + "/report/rtlsim_performance.json") as f: sim_data = json.load(f) assert ( - float(sim_data["throughput[images/s]"]) - / float(est_data["estimated_throughput_fps"]) - > 0.9 - ) - model = ModelWrapper( - tmp_output_dir + "/intermediate_models/step_set_fifo_depths.onnx" + float(sim_data["throughput[images/s]"]) / float(est_data["estimated_throughput_fps"]) > 0.9 ) + model = ModelWrapper(tmp_output_dir + "/intermediate_models/step_set_fifo_depths.onnx") # exclude final FIFO node (output FIFO, not part of test) fifo_nodes = model.get_nodes_by_op_type("StreamingFIFO")[:-1] golden_cfg = get_fifo_split_configs(depth, 256, 32768) diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index 836f1e059e..12f349b1e1 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -44,9 +44,7 @@ def test_notebook_exec(notebook): with open(notebook) as f: nb = nbformat.read(f, as_version=4) - ep = ExecutePreprocessor( - timeout=notebook_timeout_seconds, kernel_name="python3" - ) + ep = ExecutePreprocessor(timeout=notebook_timeout_seconds, kernel_name="python3") try: assert ep.preprocess(nb) is not None, f"Got empty notebook for {notebook}" except Exception: diff --git a/tests/transformation/streamline/test_absorb_mul_into_topk.py b/tests/transformation/streamline/test_absorb_mul_into_topk.py index 89ef74e0b3..1ca8fb06e9 100644 --- a/tests/transformation/streamline/test_absorb_mul_into_topk.py +++ b/tests/transformation/streamline/test_absorb_mul_into_topk.py @@ -71,18 +71,12 @@ def test_absorb_mul_into_topk(mul_positive, scalar): # initialize values # for mul if mul_positive is True: - a0_values = np.random.uniform(low=0.1, high=1, size=tuple(shape)).astype( - np.float32 - ) + a0_values = np.random.uniform(low=0.1, high=1, 
size=tuple(shape)).astype(np.float32) else: - a0_values = np.random.uniform(low=-1, high=-0.1, size=tuple(shape)).astype( - np.float32 - ) + a0_values = np.random.uniform(low=-1, high=-0.1, size=tuple(shape)).astype(np.float32) model.set_initializer("a0", a0_values) # for add - c0_values = np.random.uniform(low=-1, high=-0.1, size=tuple(shape)).astype( - np.float32 - ) + c0_values = np.random.uniform(low=-1, high=-0.1, size=tuple(shape)).astype(np.float32) model.set_initializer("c0", c0_values) model = model.transform(InsertTopK()) model = model.transform(InferShapes()) @@ -92,9 +86,7 @@ def test_absorb_mul_into_topk(mul_positive, scalar): model_transformed = model.transform(AbsorbScalarMulAddIntoTopK()) # compare execution results - inp_values = np.random.uniform(low=-10, high=10, size=(1, 1, 1, 1000)).astype( - np.float32 - ) + inp_values = np.random.uniform(low=-10, high=10, size=(1, 1, 1, 1000)).astype(np.float32) idict = {"global_in": inp_values} odict = oxe.execute_onnx(model, idict, True) y_indices = odict["global_out"] diff --git a/tests/transformation/streamline/test_absorb_transp_into_flatten.py b/tests/transformation/streamline/test_absorb_transp_into_flatten.py index 44b0c1d7e0..5b278bd552 100644 --- a/tests/transformation/streamline/test_absorb_transp_into_flatten.py +++ b/tests/transformation/streamline/test_absorb_transp_into_flatten.py @@ -65,9 +65,7 @@ def test_absorb_transp_into_flatten(perm, shape, ishape, data_layout): # model_transformed.save("test2.onnx") # verify transformation - inp_values = np.random.uniform(low=-1, high=1, size=tuple(ishape)).astype( - np.float32 - ) + inp_values = np.random.uniform(low=-1, high=1, size=tuple(ishape)).astype(np.float32) idict = {model.graph.input[0].name: inp_values} assert oxe.compare_execution(model, model_transformed, idict) diff --git a/tests/transformation/streamline/test_linear_past_eltwise.py b/tests/transformation/streamline/test_linear_past_eltwise.py index 4e5dcd6386..70fc395652 100644 --- a/tests/transformation/streamline/test_linear_past_eltwise.py +++ b/tests/transformation/streamline/test_linear_past_eltwise.py @@ -63,15 +63,9 @@ def make_model(shape): add1_node = helper.make_node("Add", [inp1.name, inp1_add_ct.name], [inp1_add.name]) add2_node = helper.make_node("Add", [inp2.name, inp2_add_ct.name], [inp2_add.name]) - mul1_node = helper.make_node( - "Mul", [inp1_add.name, inp1_mul_ct.name], [inp1_mul.name] - ) - mul2_node = helper.make_node( - "Mul", [inp2_add.name, inp2_mul_ct.name], [inp2_mul.name] - ) - eltwise_add_node = helper.make_node( - "Add", [inp1_mul.name, inp2_mul.name], [outp.name] - ) + mul1_node = helper.make_node("Mul", [inp1_add.name, inp1_mul_ct.name], [inp1_mul.name]) + mul2_node = helper.make_node("Mul", [inp2_add.name, inp2_mul_ct.name], [inp2_mul.name]) + eltwise_add_node = helper.make_node("Add", [inp1_mul.name, inp2_mul.name], [outp.name]) graph = helper.make_graph( nodes=[add1_node, add2_node, mul1_node, mul2_node, eltwise_add_node], name="graph", @@ -153,9 +147,7 @@ def test_linear_past_eltwise_add_multiple_forks(ch, ifmdim): num_of_params = 6 value_info = [] for i in range(num_of_params): - value_info += [ - helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape) - ] + value_info += [helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)] modelproto = qonnx_make_model( helper.make_graph( @@ -180,9 +172,7 @@ def test_linear_past_eltwise_add_multiple_forks(ch, ifmdim): np.random.seed(0) for i in range(num_of_params): - model.set_initializer( - "p" + 
str(i), np.random.rand(*input_shape).astype(np.float32) - ) + model.set_initializer("p" + str(i), np.random.rand(*input_shape).astype(np.float32)) # need equal mults: model.set_initializer("p2", model.get_initializer("p1")) diff --git a/tests/transformation/streamline/test_maxpool_nhwc.py b/tests/transformation/streamline/test_maxpool_nhwc.py index d61eedaaf5..77dbf3a971 100644 --- a/tests/transformation/streamline/test_maxpool_nhwc.py +++ b/tests/transformation/streamline/test_maxpool_nhwc.py @@ -14,21 +14,13 @@ def create_maxpool(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt): - ofm_dim_h = compute_pool_output_dim( - ifm_dim[0], kernel_shape[0], strides[0], pads[0], ceil_mode - ) - ofm_dim_w = compute_pool_output_dim( - ifm_dim[1], kernel_shape[1], strides[1], pads[1], ceil_mode - ) - inp = oh.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] - ) + ofm_dim_h = compute_pool_output_dim(ifm_dim[0], kernel_shape[0], strides[0], pads[0], ceil_mode) + ofm_dim_w = compute_pool_output_dim(ifm_dim[1], kernel_shape[1], strides[1], pads[1], ceil_mode) + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]]) outp_mp = oh.make_tensor_value_info( "outp_mp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] ) - outp = oh.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] - ) + outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch]) maxpool_node = oh.make_node( "MaxPool", @@ -83,9 +75,7 @@ def create_maxpool(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt) @pytest.mark.parametrize("idt", [DataType["INT4"]]) def test_maxpool_nhwc(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt): # create MaxPool node - maxpool_model = create_maxpool( - ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt - ) + maxpool_model = create_maxpool(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, idt) # generate input tensor for testing input_tensor = gen_finn_dt_tensor(idt, [1, ifm_ch, ifm_dim[0], ifm_dim[1]]) @@ -100,9 +90,7 @@ def test_maxpool_nhwc(ifm_dim, ifm_ch, kernel_shape, pads, strides, ceil_mode, i # execute transformed model output_node_name = maxpool_model.graph.output[0].name - output_dict = oxe.execute_onnx( - maxpool_model, input_dict, return_full_exec_context=False - ) + output_dict = oxe.execute_onnx(maxpool_model, input_dict, return_full_exec_context=False) output = output_dict[output_node_name] # compare outputs diff --git a/tests/transformation/streamline/test_move_chw_add_past_conv.py b/tests/transformation/streamline/test_move_chw_add_past_conv.py index e1b324a798..8b2f10b658 100644 --- a/tests/transformation/streamline/test_move_chw_add_past_conv.py +++ b/tests/transformation/streamline/test_move_chw_add_past_conv.py @@ -85,13 +85,9 @@ def test_move_chw_add_past_conv(idim, k, s, ich, och): model = ModelWrapper(model) # initialize model - a0_values = np.random.uniform(low=0, high=1, size=tuple(add_param_shape)).astype( - np.float32 - ) + a0_values = np.random.uniform(low=0, high=1, size=tuple(add_param_shape)).astype(np.float32) model.set_initializer("a0", a0_values) - a1_values = np.random.uniform(low=0, high=1, size=tuple(conv_param_shape)).astype( - np.float32 - ) + a1_values = np.random.uniform(low=0, high=1, size=tuple(conv_param_shape)).astype(np.float32) model.set_initializer("a1", a1_values) model = model.transform(InferShapes()) diff --git 
a/tests/transformation/streamline/test_move_identical_op_past_join_op.py b/tests/transformation/streamline/test_move_identical_op_past_join_op.py index 7be9763162..dd83681fc2 100644 --- a/tests/transformation/streamline/test_move_identical_op_past_join_op.py +++ b/tests/transformation/streamline/test_move_identical_op_past_join_op.py @@ -56,18 +56,10 @@ def create_model(perm): "Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"] ) - in_transpose1 = oh.make_tensor_value_info( - "in_transpose1", TensorProto.FLOAT, in_shape - ) - in_transpose2 = oh.make_tensor_value_info( - "in_transpose2", TensorProto.FLOAT, in_shape - ) - out_transpose1 = oh.make_tensor_value_info( - "out_transpose1", TensorProto.FLOAT, out_shape - ) - out_transpose2 = oh.make_tensor_value_info( - "out_transpose2", TensorProto.FLOAT, out_shape - ) + in_transpose1 = oh.make_tensor_value_info("in_transpose1", TensorProto.FLOAT, in_shape) + in_transpose2 = oh.make_tensor_value_info("in_transpose2", TensorProto.FLOAT, in_shape) + out_transpose1 = oh.make_tensor_value_info("out_transpose1", TensorProto.FLOAT, out_shape) + out_transpose2 = oh.make_tensor_value_info("out_transpose2", TensorProto.FLOAT, out_shape) out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape) graph = oh.make_graph( diff --git a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py index 6126acd9e3..2dee153545 100644 --- a/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py +++ b/tests/transformation/streamline/test_move_maxpool_past_multithreshold.py @@ -67,14 +67,10 @@ def test_move_maxpool_past_multithreshold(): value_info = [] thres1_shape = [1, 1] - value_info += [ - helper.make_tensor_value_info("thres1", TensorProto.FLOAT, thres1_shape) - ] + value_info += [helper.make_tensor_value_info("thres1", TensorProto.FLOAT, thres1_shape)] thres2_shape = [ch, 14] - value_info += [ - helper.make_tensor_value_info("thres2", TensorProto.FLOAT, thres2_shape) - ] + value_info += [helper.make_tensor_value_info("thres2", TensorProto.FLOAT, thres2_shape)] nodes = [] nodes += [helper.make_node("MaxPool", ["top_in"], ["t1"], **maxpool_config)] @@ -114,9 +110,7 @@ def test_move_maxpool_past_multithreshold(): model = model.transform(InferDataTypes()) model.set_initializer("thres1", np.array([[0]], dtype=np.float32)) - model.set_initializer( - "thres2", get_multithreshold_rand_params(*thres2_shape, seed=0) - ) + model.set_initializer("thres2", get_multithreshold_rand_params(*thres2_shape, seed=0)) # Transform new_model = model.transform(MoveMaxPoolPastMultiThreshold()) diff --git a/tests/transformation/streamline/test_move_mul_past_dw_conv.py b/tests/transformation/streamline/test_move_mul_past_dw_conv.py index 72a6650ec4..303b97c69f 100644 --- a/tests/transformation/streamline/test_move_mul_past_dw_conv.py +++ b/tests/transformation/streamline/test_move_mul_past_dw_conv.py @@ -65,14 +65,10 @@ def test_move_mul_past_dw_conv(ifm_dim, ifm_ch, k, stride, pad_amt, dw): ofm_dim = compute_conv_output_dim(ifm_dim, k, stride, total_pad) # set up onnx model - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim]) mul = helper.make_tensor_value_info("mul", TensorProto.FLOAT, [1, ifm_ch, 1, 1]) W = helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape) - outp = 
helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_ch, ofm_dim, ofm_dim] - ) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_ch, ofm_dim, ofm_dim]) Mul_node = helper.make_node("Mul", ["inp", "mul"], ["mul_out"]) diff --git a/tests/transformation/streamline/test_move_mul_past_maxpool.py b/tests/transformation/streamline/test_move_mul_past_maxpool.py index 3bae2905a0..61dddd56e9 100755 --- a/tests/transformation/streamline/test_move_mul_past_maxpool.py +++ b/tests/transformation/streamline/test_move_mul_past_maxpool.py @@ -65,13 +65,9 @@ def test_move_mul_past_maxpool(ifm_dim, ifm_ch, k, stride, pad, cw, negative): ofm_dim = compute_pool_output_dim(ifm_dim, k, stride, pad) # set up onnx model - inp = helper.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim] - ) + inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim, ifm_dim]) mul = helper.make_tensor_value_info("mul", TensorProto.FLOAT, mul_shape) - outp = helper.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_ch, ofm_dim, ofm_dim] - ) + outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_ch, ofm_dim, ofm_dim]) Mul_node = helper.make_node("Mul", ["inp", "mul"], ["mul_out"]) diff --git a/tests/transformation/streamline/test_move_past_fork.py b/tests/transformation/streamline/test_move_past_fork.py index 7e77d7f9b3..e9433178c8 100644 --- a/tests/transformation/streamline/test_move_past_fork.py +++ b/tests/transformation/streamline/test_move_past_fork.py @@ -64,9 +64,7 @@ def test_move_past_fork_transpose(): new_model = model.transform(MoveTransposePastFork()) new_model = new_model.transform(GiveUniqueNodeNames()) nodes = new_model.graph.node - assert oxe.compare_execution( - model, new_model, {"in0": np.random.rand(*shp).astype(np.float32)} - ) + assert oxe.compare_execution(model, new_model, {"in0": np.random.rand(*shp).astype(np.float32)}) assert len(nodes) == 5 assert not new_model.is_fork_node(get_by_name(nodes, "Transpose_0")) @@ -120,9 +118,7 @@ def test_move_past_fork_linear(ch, ifmdim): for tensor_name in model.get_all_tensor_names(): if tensor_name.endswith("_param"): pshape = model.get_tensor_shape(tensor_name) - model.set_initializer( - tensor_name, np.random.rand(*pshape).astype(np.float32) - ) + model.set_initializer(tensor_name, np.random.rand(*pshape).astype(np.float32)) model = model.transform(GiveUniqueNodeNames()) # Transform new_model = model.transform(MoveLinearPastFork()) diff --git a/tests/transformation/streamline/test_move_scalar_past_matmul.py b/tests/transformation/streamline/test_move_scalar_past_matmul.py index 6c788294bc..e4f4357fff 100644 --- a/tests/transformation/streamline/test_move_scalar_past_matmul.py +++ b/tests/transformation/streamline/test_move_scalar_past_matmul.py @@ -63,9 +63,7 @@ def test_move_scalar_mul_past_matmul(): model = ModelWrapper(modelproto) model = model.transform(InferShapes()) model.set_initializer("mul_param", np.asarray([[3]], dtype=np.float32)) - model.set_initializer( - "matmul_param", np.asarray([[2, 4], [-1, 1]], dtype=np.float32) - ) + model.set_initializer("matmul_param", np.asarray([[2, 4], [-1, 1]], dtype=np.float32)) new_model = model.transform(MoveScalarMulPastMatMul()) inp_dict = {"top_in": np.asarray([[-1.0, 1.0]], dtype=np.float32)} assert ox.compare_execution(model, new_model, inp_dict) @@ -95,9 +93,7 @@ def test_move_scalar_add_past_matmul(): model = ModelWrapper(modelproto) model = model.transform(InferShapes()) 
model.set_initializer("add_param", np.asarray([[3]], dtype=np.float32)) - model.set_initializer( - "matmul_param", np.asarray([[2, 4], [-1, 1]], dtype=np.float32) - ) + model.set_initializer("matmul_param", np.asarray([[2, 4], [-1, 1]], dtype=np.float32)) new_model = model.transform(MoveScalarAddPastMatMul()) inp_dict = {"top_in": np.asarray([[-1.0, 1.0]], dtype=np.float32)} assert ox.compare_execution(model, new_model, inp_dict) diff --git a/tests/transformation/streamline/test_scale_resize_nhwc.py b/tests/transformation/streamline/test_scale_resize_nhwc.py index 5e107448f8..350f5b3133 100644 --- a/tests/transformation/streamline/test_scale_resize_nhwc.py +++ b/tests/transformation/streamline/test_scale_resize_nhwc.py @@ -18,9 +18,7 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] - inp = oh.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] - ) + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]]) param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [4]) @@ -30,9 +28,7 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): outp_up = oh.make_tensor_value_info( "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] ) - outp = oh.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] - ) + outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch]) resize_node = oh.make_node( "Resize", @@ -73,18 +69,14 @@ def create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] - inp = oh.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] - ) + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch]) param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [4]) # Not actually used, only needed for compliance with the Resize node interface roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4]) - outp = oh.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] - ) + outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w]) outp_tr = oh.make_tensor_value_info( "outp_tr", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]] ) @@ -128,9 +120,7 @@ def create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt): def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): ofm_dim_h = ifm_dim[0] * scales[2] ofm_dim_w = ifm_dim[1] * scales[3] - inp = oh.make_tensor_value_info( - "inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch] - ) + inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_dim[0], ifm_dim[1], ifm_ch]) param = oh.make_tensor_value_info("scales", TensorProto.FLOAT, scales) @@ -144,9 +134,7 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt): outp_up = oh.make_tensor_value_info( "outp_up", TensorProto.FLOAT, [1, ifm_ch, ofm_dim_h, ofm_dim_w] ) - outp = oh.make_tensor_value_info( - "outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch] - ) + outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_dim_h, ofm_dim_w, ifm_ch]) transpose_node1 = onnx.helper.make_node( "Transpose", @@ -209,9 +197,7 @@ def check_transform(model): # input channels 
@pytest.mark.parametrize("ifm_ch", [3]) # scales -@pytest.mark.parametrize( - "scales", [[1, 1, i, j] for i in range(2, 5) for j in range(2, 5)] -) +@pytest.mark.parametrize("scales", [[1, 1, i, j] for i in range(2, 5) for j in range(2, 5)]) # mode @pytest.mark.parametrize("mode", ["nearest"]) # input datatype @@ -220,9 +206,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # create models resize_model1 = create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) resize_model2 = create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt) - resize_model3 = create_transpose_resize_transpose( - ifm_dim, ifm_ch, scales, mode, idt - ) + resize_model3 = create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt) # set initializers resize_model1.set_initializer("scales", np.array(scales, dtype=np.float32)) @@ -245,9 +229,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # execute transformed model output_node_name1 = resize_model1.graph.output[0].name - output_dict1 = oxe.execute_onnx( - resize_model1, input_dict_nchw, return_full_exec_context=False - ) + output_dict1 = oxe.execute_onnx(resize_model1, input_dict_nchw, return_full_exec_context=False) output1 = output_dict1[output_node_name1] # compare outputs @@ -264,9 +246,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # execute transformed model output_node_name2 = resize_model2.graph.output[0].name - output_dict2 = oxe.execute_onnx( - resize_model2, input_dict_nhwc, return_full_exec_context=False - ) + output_dict2 = oxe.execute_onnx(resize_model2, input_dict_nhwc, return_full_exec_context=False) output2 = output_dict2[output_node_name2] # compare outputs @@ -283,9 +263,7 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt): # execute transformed model output_node_name3 = resize_model3.graph.output[0].name - output_dict3 = oxe.execute_onnx( - resize_model3, input_dict_nhwc, return_full_exec_context=False - ) + output_dict3 = oxe.execute_onnx(resize_model3, input_dict_nhwc, return_full_exec_context=False) output3 = output_dict3[output_node_name3] # compare outputs diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py index 245980f958..a5a9d34aaf 100644 --- a/tests/transformation/test_infer_data_layouts_cnv.py +++ b/tests/transformation/test_infer_data_layouts_cnv.py @@ -35,7 +35,11 @@ from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, GiveUniqueParameterTensors +from qonnx.transformation.general import ( + GiveReadableTensorNames, + GiveUniqueNodeNames, + GiveUniqueParameterTensors, +) from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul @@ -105,9 +109,7 @@ def test_infer_data_layouts_cnv(): # note: im2col output isn't really NHWC or any other common layout # since the concept of channels changes with lowering... 
but it is # conceptually close to NHWC since the innermost dim gets multiplied - assert ( - model.get_tensor_layout("ConvolutionInputGenerator_0_out0") == DataLayout.NHWC - ) + assert model.get_tensor_layout("ConvolutionInputGenerator_0_out0") == DataLayout.NHWC assert model.get_tensor_layout("MatrixVectorActivation_3_out0") == DataLayout.NHWC assert model.get_tensor_layout("Reshape_0_out0") == DataLayout.NC assert model.get_tensor_layout("MatrixVectorActivation_6_out0") == DataLayout.NC diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index e5f1eefe12..10fcb79cc7 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -58,9 +58,7 @@ def get_brev_model_and_sample_inputs(model_name, wbits, abits): brev_model = get_test_model_trained(model_name, wbits, abits) elif model_name == "CNV": in_shape = (1, 3, 32, 32) - fn = pk.resource_filename( - "finn.qnn-data", "cifar10/cifar10-test-data-class3.npz" - ) + fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 brev_model = get_test_model_trained(model_name, wbits, abits) @@ -105,9 +103,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): # Get test config and model ATOL = 1e-7 - brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs( - model_name, wbits, abits - ) + brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs(model_name, wbits, abits) temp_dir = TemporaryDirectory() qonnx_base_path = temp_dir.name + "/qonnx_{}.onnx" finn_base_path = temp_dir.name + "/finn_{}.onnx" @@ -117,9 +113,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): brev_output = brev_model.forward(torch_input_tensor).detach().numpy() # Get "clean" FINN model and its output - _ = export_finn_onnx( - brev_model, torch.randn(in_shape), finn_base_path.format("raw") - ) + _ = export_finn_onnx(brev_model, torch.randn(in_shape), finn_base_path.format("raw")) model = ModelWrapper(finn_base_path.format("raw")) model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferShapes()) @@ -166,8 +160,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): output_dict = oxe.execute_onnx(model, input_dict, False) test_output = output_dict[model.graph.output[0].name] assert np.isclose(test_output, finn_export_output, atol=ATOL).all(), ( - "The output of the FINN model " - "and the QONNX -> FINN converted model should match." + "The output of the FINN model " "and the QONNX -> FINN converted model should match." 
) # Run analysis passes on the converted model diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index 39f0b0dc89..02136b31a2 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -57,9 +57,7 @@ def test_end2end_build_dataflow_directory(): assert os.path.isfile(output_dir + "/report/estimate_layer_cycles.json") assert os.path.isfile(output_dir + "/report/estimate_layer_resources.json") assert os.path.isfile(output_dir + "/report/rtlsim_perf_batch_1.vcd") - assert os.path.isfile( - output_dir + "/report/estimate_layer_config_alternatives.json" - ) + assert os.path.isfile(output_dir + "/report/estimate_layer_config_alternatives.json") assert os.path.isfile(output_dir + "/report/estimate_network_performance.json") assert os.path.isfile(output_dir + "/report/ooc_synth_and_timing.json") assert os.path.isfile(output_dir + "/report/rtlsim_performance.json") @@ -71,16 +69,8 @@ def test_end2end_build_dataflow_directory(): verif_batchsize = np.load(target_dir + "/input.npy").shape[0] for i in range(verif_batchsize): verify_out_dir = output_dir + "/verification_output" - assert os.path.isfile( - verify_out_dir + f"/verify_initial_python_{i}_SUCCESS.npy" - ) - assert os.path.isfile( - verify_out_dir + f"/verify_streamlined_python_{i}_SUCCESS.npy" - ) - assert os.path.isfile( - verify_out_dir + f"/verify_folded_hls_cppsim_{i}_SUCCESS.npy" - ) - assert os.path.isfile( - verify_out_dir + f"/verify_stitched_ip_rtlsim_{i}_SUCCESS.npy" - ) + assert os.path.isfile(verify_out_dir + f"/verify_initial_python_{i}_SUCCESS.npy") + assert os.path.isfile(verify_out_dir + f"/verify_streamlined_python_{i}_SUCCESS.npy") + assert os.path.isfile(verify_out_dir + f"/verify_folded_hls_cppsim_{i}_SUCCESS.npy") + assert os.path.isfile(verify_out_dir + f"/verify_stitched_ip_rtlsim_{i}_SUCCESS.npy") assert os.path.isfile(output_dir + f"/report/verify_rtlsim_{i}.vcd") diff --git a/tests/util/test_create.py b/tests/util/test_create.py index dc44e4bd45..b8b439cf18 100644 --- a/tests/util/test_create.py +++ b/tests/util/test_create.py @@ -34,9 +34,7 @@ @pytest.mark.util -@pytest.mark.parametrize( - "bitwidth", [DataType["BIPOLAR"], DataType["INT2"], DataType["INT4"]] -) +@pytest.mark.parametrize("bitwidth", [DataType["BIPOLAR"], DataType["INT2"], DataType["INT4"]]) def test_hls_random_mlp_maker(bitwidth): w = bitwidth a = bitwidth diff --git a/tests/util/test_data_packing_hls.py b/tests/util/test_data_packing_hls.py index 859b926543..b95bcd5d42 100644 --- a/tests/util/test_data_packing_hls.py +++ b/tests/util/test_data_packing_hls.py @@ -105,16 +105,12 @@ def test_npy2apintstream(test_shape, dtype): ) with open(test_dir + "/compile.sh", "w") as f: f.write(cmd_compile) - compile = subprocess.Popen( - ["sh", "compile.sh"], stdout=subprocess.PIPE, cwd=test_dir - ) + compile = subprocess.Popen(["sh", "compile.sh"], stdout=subprocess.PIPE, cwd=test_dir) (stdout, stderr) = compile.communicate() # make copy before saving the array ndarray = ndarray.copy() np.save(npy_in, ndarray) - execute = subprocess.Popen( - "./test_npy2apintstream", stdout=subprocess.PIPE, cwd=test_dir - ) + execute = subprocess.Popen("./test_npy2apintstream", stdout=subprocess.PIPE, cwd=test_dir) (stdout, stderr) = execute.communicate() produced = np.load(npy_out) success = (produced == ndarray).all() diff --git a/tutorials/fpga_flow/gen_tb_data.py b/tutorials/fpga_flow/gen_tb_data.py index a525d92bfc..e73fd65094 100755 --- a/tutorials/fpga_flow/gen_tb_data.py +++ 
b/tutorials/fpga_flow/gen_tb_data.py @@ -48,9 +48,7 @@ tb_data.write("{:02X}".format(test_x[i][j][k])) tb_data.write("\n") tb_data.write( - "ffffffffffffffffffffffffffffffffffffffffffffffffffffff{:02X}\n".format( - test_y[i] - ) + "ffffffffffffffffffffffffffffffffffffffffffffffffffffff{:02X}\n".format(test_y[i]) ) print("Testbench data generated at " + file_name) From 3497cfefeb29998f2b3b10c81e19c972a744cac7 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 28 Jun 2023 14:43:15 +0100 Subject: [PATCH 526/628] [deps/ci] Downgrade tool version for ci and update qonnx commit --- docker/jenkins/Jenkinsfile | 4 ++-- fetch-repos.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index d8fea0124c..2954877c2a 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -5,8 +5,8 @@ node { checkout scm } withEnv([ - "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2023.1_0507_1903/installs/lin64", - "FINN_XILINX_VERSION=2023.1", + "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.2_1014_8888/installs/lin64", + "FINN_XILINX_VERSION=2022.2", "FINN_DOCKER_TAG=xilinx/finn:jenkins", "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" diff --git a/fetch-repos.sh b/fetch-repos.sh index ddae4020ed..4416f87bfe 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="0c980ef410c7c99b33c5b96486233f5a723ca1bc" +QONNX_COMMIT="6ca8f8e0af84e49facac5cdc34735eaf6e938300" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="d30ba0d6b3db4a333072624fa3d10827a686488d" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" @@ -39,7 +39,7 @@ XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" KV260_BDF_COMMIT="98e0d3efc901f0b974006bc4370c2a7ad8856c79" EXP_BOARD_FILES_MD5="30eecc497c31050bd46d10ea20eba232" -QONNX_URL="https://github.com/iksnagreb/qonnx.git" +QONNX_URL="https://github.com/fastmachinelearning/qonnx.git" FINN_EXP_URL="https://github.com/Xilinx/finn-experimental.git" BREVITAS_URL="https://github.com/Xilinx/brevitas.git" PYVERILATOR_URL="https://github.com/maltanar/pyverilator.git" From b355a6cb530a2a7c0687b5164ac1417564f2a239 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 28 Jun 2023 16:22:54 +0100 Subject: [PATCH 527/628] Forgot to add test_support_board_map inclusion into test file Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 30bbadb6fc..14616522ec 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -91,7 +91,7 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.util.basic import get_finn_root, make_build_dir +from finn.util.basic import get_finn_root, make_build_dir, test_support_board_map from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, From c1b86d82fc8f1af2826e812c9c0f1b6971c6798e Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 28 Jun 2023 16:36:48 +0100 Subject: [PATCH 528/628] [Deps/tests] Update brevitas and delete finn_onnx export in brevitas tests --- fetch-repos.sh | 2 +- tests/brevitas/test_brevitas_cnv.py | 16 ++--- tests/brevitas/test_brevitas_debug.py | 58 +++++-------------- 
tests/brevitas/test_brevitas_fc.py | 19 ++---- ...revitas_non_scaled_quanthardtanh_export.py | 20 +++---- tests/brevitas/test_brevitas_qconv2d.py | 20 +++---- tests/brevitas/test_brevitas_qlinear.py | 20 +++---- .../brevitas/test_brevitas_relu_act_export.py | 36 ++++-------- .../test_brevitas_scaled_qhardtanh_export.py | 20 +++---- 9 files changed, 67 insertions(+), 144 deletions(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 4416f87bfe..0bfae82854 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -29,7 +29,7 @@ QONNX_COMMIT="6ca8f8e0af84e49facac5cdc34735eaf6e938300" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" -BREVITAS_COMMIT="d30ba0d6b3db4a333072624fa3d10827a686488d" +BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="c17aa478ae574971d115afa9fa4d9c215857d1ac" diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py index 1a96815105..c8adafdce9 100644 --- a/tests/brevitas/test_brevitas_cnv.py +++ b/tests/brevitas/test_brevitas_cnv.py @@ -33,7 +33,7 @@ import numpy as np import os import torch -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import GiveUniqueNodeNames, RemoveStaticGraphInputs @@ -50,21 +50,15 @@ @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [1, 2]) @pytest.mark.parametrize("wbits", [1, 2]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_cnv_export_exec(wbits, abits, QONNX_export): +def test_brevitas_cnv_export_exec(wbits, abits): if wbits > abits: pytest.skip("No wbits > abits cases at the moment") cnv = get_test_model_trained("CNV", wbits, abits) ishape = (1, 3, 32, 32) - if QONNX_export: - export_qonnx(cnv, torch.randn(ishape), export_onnx_path) - qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) - model = ModelWrapper(export_onnx_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(export_onnx_path) - else: - export_finn_onnx(cnv, torch.randn(ishape), export_onnx_path) + export_qonnx(cnv, torch.randn(ishape), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) diff --git a/tests/brevitas/test_brevitas_debug.py b/tests/brevitas/test_brevitas_debug.py index 547c026e21..d6879a727b 100644 --- a/tests/brevitas/test_brevitas_debug.py +++ b/tests/brevitas/test_brevitas_debug.py @@ -34,12 +34,9 @@ import onnx.numpy_helper as nph import os import torch -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper -from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import RemoveStaticGraphInputs -from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe @@ -48,41 +45,23 @@ @pytest.mark.brevitas_export -@pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.parametrize("QONNX_FINN_conversion", [False, True]) -def 
test_brevitas_debug(QONNX_export, QONNX_FINN_conversion): - if (not QONNX_export) and QONNX_FINN_conversion: - pytest.skip("This test configuration is not valid and is thus skipped.") +def test_brevitas_debug(QONNX_FINN_conversion): finn_onnx = "test_brevitas_debug.onnx" fc = get_test_model_trained("TFC", 2, 2) ishape = (1, 1, 28, 28) - if QONNX_export: - dbg_hook = bo.enable_debug(fc, proxy_level=True) - export_qonnx(fc, torch.randn(ishape), finn_onnx) - # DebugMarkers have the brevitas.onnx domain, so that needs adjusting - model = ModelWrapper(finn_onnx) - dbg_nodes = model.get_nodes_by_op_type("DebugMarker") - for dbg_node in dbg_nodes: - dbg_node.domain = "qonnx.custom_op.general" - model.save(finn_onnx) - qonnx_cleanup(finn_onnx, out_file=finn_onnx) - if QONNX_FINN_conversion: - model = ModelWrapper(finn_onnx) - model = model.transform(ConvertQONNXtoFINN()) - model.save(finn_onnx) - else: - dbg_hook = bo.enable_debug(fc) - export_finn_onnx(fc, torch.randn(ishape), finn_onnx) + dbg_hook = bo.enable_debug(fc, proxy_level=True) + export_qonnx(fc, torch.randn(ishape), finn_onnx) + # DebugMarkers have the brevitas.onnx domain, so that needs adjusting + model = ModelWrapper(finn_onnx) + dbg_nodes = model.get_nodes_by_op_type("DebugMarker") + for dbg_node in dbg_nodes: + dbg_node.domain = "qonnx.custom_op.general" + model.save(finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) + if QONNX_FINN_conversion: model = ModelWrapper(finn_onnx) - # DebugMarkers have the brevitas.onnx domain, so that needs adjusting - # ToDo: We should probably have transformation pass, which does this - # domain conversion for us? - dbg_nodes = model.get_nodes_by_op_type("DebugMarker") - for dbg_node in dbg_nodes: - dbg_node.domain = "qonnx.custom_op.general" - model = model.transform(InferShapes()) - model = model.transform(FoldConstants()) - model = model.transform(RemoveStaticGraphInputs()) + model = model.transform(ConvertQONNXtoFINN()) model.save(finn_onnx) model = ModelWrapper(finn_onnx) assert len(model.graph.input) == 1 @@ -106,17 +85,12 @@ def test_brevitas_debug(QONNX_export, QONNX_FINN_conversion): names_common = names_brevitas.intersection(names_finn) # The different exports return debug markers in different numbers and places print(len(names_common)) - if QONNX_export and not QONNX_FINN_conversion: + if not QONNX_FINN_conversion: assert len(names_common) == 12 - elif QONNX_export and QONNX_FINN_conversion: - assert len(names_common) == 8 else: - assert len(names_common) == 16 + assert len(names_common) == 8 for dbg_name in names_common: - if QONNX_export: - tensor_pytorch = dbg_hook.values[dbg_name].value.detach().numpy() - else: - tensor_pytorch = dbg_hook.values[dbg_name].detach().numpy() + tensor_pytorch = dbg_hook.values[dbg_name].value.detach().numpy() tensor_finn = output_dict[dbg_name] assert np.isclose(tensor_finn, tensor_pytorch, atol=1e-5).all() os.remove(finn_onnx) diff --git a/tests/brevitas/test_brevitas_fc.py b/tests/brevitas/test_brevitas_fc.py index 3aaa96f9a5..842d099f57 100644 --- a/tests/brevitas/test_brevitas_fc.py +++ b/tests/brevitas/test_brevitas_fc.py @@ -32,7 +32,7 @@ import onnx import onnx.numpy_helper as nph import torch -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -55,26 +55,19 @@ @pytest.mark.parametrize("wbits", [1, 2]) # network topology / size 
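As an aside, the brevitas export tests touched by this patch all converge on the same QONNX-only export flow, so a condensed reference sketch of that flow is given here. It is a minimal, self-contained example built around a small QuantReLU; the module, shape and scratch file name are illustrative and not part of the patch itself.

import torch
from brevitas.export import export_qonnx
from brevitas.nn import QuantReLU
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.util.cleanup import cleanup as qonnx_cleanup

from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN

export_path = "quant_relu_qonnx.onnx"  # arbitrary scratch file
b_act = QuantReLU(bit_width=4)         # stand-in for the trained test networks
ishape = (1, 15)
# 1) export the Brevitas module to QONNX
export_qonnx(b_act, torch.randn(ishape), export_path)
# 2) run the QONNX cleanup transformations in place
qonnx_cleanup(export_path, out_file=export_path)
# 3) lower the QONNX Quant nodes to FINN's internal representation
model = ModelWrapper(export_path)
model = model.transform(ConvertQONNXtoFINN())
model.save(export_path)

The per-test diffs in this patch are this same sequence specialized to each exported module.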
@pytest.mark.parametrize("size", ["TFC", "SFC", "LFC"]) -# QONNX export -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits, QONNX_export): +def test_brevitas_fc_onnx_export_and_exec(size, wbits, abits): if size == "LFC" and wbits == 2 and abits == 2: pytest.skip("No LFC-w2a2 present at the moment") if wbits > abits: pytest.skip("No wbits > abits cases at the moment") - nname = "%s_%dW%dA_QONNX-%d" % (size, wbits, abits, QONNX_export) + nname = "%s_%dW%dA" % (size, wbits, abits) finn_onnx = export_onnx_path + "/%s.onnx" % nname fc = get_test_model_trained(size, wbits, abits) ishape = (1, 1, 28, 28) - if QONNX_export: - export_qonnx(fc, torch.randn(ishape), finn_onnx) - qonnx_cleanup(finn_onnx, out_file=finn_onnx) - model = ModelWrapper(finn_onnx) - model = model.transform(ConvertQONNXtoFINN()) - model.save(finn_onnx) - else: - export_finn_onnx(fc, torch.randn(ishape), finn_onnx) + export_qonnx(fc, torch.randn(ishape), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) model = ModelWrapper(finn_onnx) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(RemoveStaticGraphInputs()) diff --git a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py index 2911303501..08a193714a 100644 --- a/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py +++ b/tests/brevitas/test_brevitas_non_scaled_quanthardtanh_export.py @@ -35,7 +35,7 @@ from brevitas.core.quant import QuantType from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantHardTanh from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -51,8 +51,7 @@ @pytest.mark.parametrize("abits", [1, 2, 4, 8]) @pytest.mark.parametrize("narrow_range", [False, True]) @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7)]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_act_export_qhardtanh_nonscaled(abits, narrow_range, max_val, QONNX_export): +def test_brevitas_act_export_qhardtanh_nonscaled(abits, narrow_range, max_val): def get_quant_type(bit_width): if bit_width is None: return QuantType.FP @@ -73,16 +72,11 @@ def get_quant_type(bit_width): scaling_impl_type=ScalingImplType.CONST, narrow_range=narrow_range, ) - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_act, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_act, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} diff --git a/tests/brevitas/test_brevitas_qconv2d.py b/tests/brevitas/test_brevitas_qconv2d.py index faeb3ff48e..4b27671891 100644 --- a/tests/brevitas/test_brevitas_qconv2d.py +++ 
b/tests/brevitas/test_brevitas_qconv2d.py @@ -35,7 +35,7 @@ from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType from brevitas.core.stats import StatsOp -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantConv2d from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -53,8 +53,7 @@ @pytest.mark.parametrize("dw", [False, True]) @pytest.mark.parametrize("bias", [True, False]) @pytest.mark.parametrize("in_channels", [32]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_QConv2d(dw, bias, in_channels, QONNX_export): +def test_brevitas_QConv2d(dw, bias, in_channels): ishape = (1, 32, 111, 111) if dw is True: groups = in_channels @@ -93,16 +92,11 @@ def test_brevitas_QConv2d(dw, bias, in_channels, QONNX_export): weight_tensor = gen_finn_dt_tensor(DataType["INT4"], w_shape) b_conv.weight = torch.nn.Parameter(torch.from_numpy(weight_tensor).float()) b_conv.eval() - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_conv, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_conv, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_conv, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=-1.0, high=1.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} diff --git a/tests/brevitas/test_brevitas_qlinear.py b/tests/brevitas/test_brevitas_qlinear.py index 551345f649..a6ea077e7a 100644 --- a/tests/brevitas/test_brevitas_qlinear.py +++ b/tests/brevitas/test_brevitas_qlinear.py @@ -32,7 +32,7 @@ import os import torch from brevitas.core.quant import QuantType -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantLinear from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -52,8 +52,7 @@ @pytest.mark.parametrize("in_features", [3]) @pytest.mark.parametrize("w_bits", [4]) @pytest.mark.parametrize("i_dtype", [DataType["UINT4"]]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype, QONNX_export): +def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype): i_shape = (1, in_features) w_shape = (out_features, in_features) b_linear = QuantLinear( @@ -68,16 +67,11 @@ def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype, QONN weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype(np.float32) b_linear.weight.data = torch.from_numpy(weight_tensor_fp) b_linear.eval() - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_linear, torch.randn(i_shape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_linear, torch.randn(i_shape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_linear, torch.randn(i_shape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + 
model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = gen_finn_dt_tensor(i_dtype, i_shape) idict = {model.graph.input[0].name: inp_tensor} diff --git a/tests/brevitas/test_brevitas_relu_act_export.py b/tests/brevitas/test_brevitas_relu_act_export.py index 9e1fcbdc2f..2254670202 100644 --- a/tests/brevitas/test_brevitas_relu_act_export.py +++ b/tests/brevitas/test_brevitas_relu_act_export.py @@ -33,7 +33,7 @@ import os import torch from brevitas.core.scaling import ScalingImplType -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantReLU from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -48,25 +48,18 @@ @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) @pytest.mark.parametrize("ishape", [(1, 15), (1, 32, 1, 1)]) -@pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_relu( abits, ishape, - QONNX_export, ): b_act = QuantReLU( bit_width=abits, ) - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_act, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_act, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} @@ -83,11 +76,9 @@ def test_brevitas_act_export_relu( @pytest.mark.brevitas_export @pytest.mark.parametrize("abits", [2, 4, 8]) @pytest.mark.parametrize("ishape", [(1, 15, 4, 4), (1, 32, 1, 1)]) -@pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_relu_channel( abits, ishape, - QONNX_export, ): ch = ishape[1] b_act = QuantReLU( @@ -97,16 +88,11 @@ def test_brevitas_act_export_relu_channel( scaling_per_output_channel=True, per_channel_broadcastable_shape=(1, ch, 1, 1), ) - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_act, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_act, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=-1.0, high=6.0, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} diff --git a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py index 72a15810aa..e7d87faed8 100644 --- a/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py +++ b/tests/brevitas/test_brevitas_scaled_qhardtanh_export.py @@ -35,7 +35,7 @@ from brevitas.core.quant import QuantType from brevitas.core.restrict_val import RestrictValueType from brevitas.core.scaling import ScalingImplType -from brevitas.export 
import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantHardTanh from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.infer_shapes import InferShapes @@ -53,9 +53,8 @@ @pytest.mark.parametrize("min_val", [-1.0, -(1 - 2 ** (-7)), -2]) @pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7), 2]) @pytest.mark.parametrize("scaling_impl_type", [ScalingImplType.CONST, ScalingImplType.PARAMETER]) -@pytest.mark.parametrize("QONNX_export", [False, True]) def test_brevitas_act_export_qhardtanh_scaled( - abits, narrow_range, min_val, max_val, scaling_impl_type, QONNX_export + abits, narrow_range, min_val, max_val, scaling_impl_type ): def get_quant_type(bit_width): if bit_width is None: @@ -86,16 +85,11 @@ def get_quant_type(bit_width): ) } b_act.load_state_dict(checkpoint) - if QONNX_export: - m_path = export_onnx_path - export_qonnx(b_act, torch.randn(ishape), m_path) - qonnx_cleanup(m_path, out_file=m_path) - model = ModelWrapper(m_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(m_path) - else: - export_finn_onnx(b_act, torch.randn(ishape), export_onnx_path) - model = ModelWrapper(export_onnx_path) + m_path = export_onnx_path + export_qonnx(b_act, torch.randn(ishape), m_path) + qonnx_cleanup(m_path, out_file=m_path) + model = ModelWrapper(m_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(np.float32) idict = {model.graph.input[0].name: inp_tensor} From f2872c7fbe3ced9c9441da7e5a07383e2b757fcf Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Thu, 29 Jun 2023 11:42:14 +0100 Subject: [PATCH 529/628] Add missing itertools library import Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 14616522ec..564a1ee7cb 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -30,6 +30,8 @@ import numpy as np +import itertools + # as of Feb'20 there is a bug that segfaults ONNX shape inference if we # import pytorch before onnx, so we make sure to import onnx first import onnx # NOQA From 14192c643cdee0cfe26cbf070a443845750ff746 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 4 Jul 2023 08:59:25 +0100 Subject: [PATCH 530/628] [Tests] Remove finn onnx export from end2end bnn tests --- tests/end2end/test_end2end_bnn_pynq.py | 159 ++++++++++--------------- 1 file changed, 66 insertions(+), 93 deletions(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index b08028e7cb..87c1d6005c 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -36,7 +36,7 @@ import os import torch import warnings -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from dataset_loading import cifar, mnist from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -106,12 +106,11 @@ rtlsim_trace = False -def get_checkpoint_name(topology, wbits, abits, QONNX_export, step): - return build_dir + "/end2end_%s_w%da%d_QONNX-%d_%s.onnx" % ( +def get_checkpoint_name(topology, wbits, abits, step): + return build_dir + "/end2end_%s_w%da%d_%s.onnx" % ( topology, wbits, abits, - QONNX_export, step, ) @@ -293,28 +292,24 @@ def topology2dataset(topology): 
@pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("abits", [1, 2]) @pytest.mark.parametrize("topology", ["lfc", "tfc", "cnv"]) -@pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.end2end class TestEnd2End: - def test_export(self, topology, wbits, abits, QONNX_export): + def test_export(self, topology, wbits, abits): if wbits > abits: pytest.skip("No wbits > abits end2end network configs for now") if topology == "lfc" and not (wbits == 1 and abits == 1): pytest.skip("Skipping certain lfc configs") (model, ishape) = get_trained_network_and_ishape(topology, wbits, abits) - chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "export") - if QONNX_export: - export_qonnx(model, torch.randn(ishape), chkpt_name, opset_version=13) - qonnx_cleanup(chkpt_name, out_file=chkpt_name) - model = ModelWrapper(chkpt_name) - model = model.transform(ConvertQONNXtoFINN()) - model.save(chkpt_name) - else: - export_finn_onnx(model, torch.randn(ishape), chkpt_name) + chkpt_name = get_checkpoint_name(topology, wbits, abits, "export") + export_qonnx(model, torch.randn(ishape), chkpt_name, opset_version=13) + qonnx_cleanup(chkpt_name, out_file=chkpt_name) + model = ModelWrapper(chkpt_name) + model = model.transform(ConvertQONNXtoFINN()) + model.save(chkpt_name) assert os.path.isfile(chkpt_name) - def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "export") + def test_import_and_tidy(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "export") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -322,20 +317,22 @@ def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): model = model.transform(GiveReadableTensorNames()) model = model.transform(InferDataTypes()) model = model.transform(RemoveStaticGraphInputs()) - chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "import_and_tidy") + chkpt = get_checkpoint_name(topology, wbits, abits, "import_and_tidy") model.save(chkpt) - def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "import_and_tidy" - ) + def test_add_pre_and_postproc(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "import_and_tidy") model = load_test_checkpoint_or_skip(prev_chkpt_name) global_inp_name = model.graph.input[0].name ishape = model.get_tensor_shape(global_inp_name) # preprocessing: torchvision's ToTensor divides uint8 inputs by 255 totensor_pyt = ToTensor() - chkpt_preproc_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "preproc") - export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name, opset_version=13) + chkpt_preproc_name = get_checkpoint_name(topology, wbits, abits, "preproc") + export_qonnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name, opset_version=13) + qonnx_cleanup(chkpt_preproc_name, out_file=chkpt_preproc_name) + pre_model = ModelWrapper(chkpt_preproc_name) + pre_model = pre_model.transform(ConvertQONNXtoFINN()) + pre_model.save(chkpt_preproc_name) assert os.path.isfile(chkpt_preproc_name) # join preprocessing and core model pre_model = ModelWrapper(chkpt_preproc_name) @@ -347,7 +344,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): 
model.set_tensor_datatype(global_inp_name, DataType["UINT8"]) # postprocessing: insert Top-1 node at the end model = model.transform(InsertTopK(k=1)) - chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "pre_post") + chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post") # tidy-up again model = model.transform(InferShapes()) model = model.transform(FoldConstants()) @@ -358,8 +355,8 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): model.save(chkpt_name) assert os.path.isfile(chkpt_name) - def test_streamline(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "pre_post") + def test_streamline(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "pre_post") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(absorb.AbsorbSignBiasIntoMultiThreshold()) # move past any reshapes to be able to streamline input scaling @@ -375,10 +372,10 @@ def test_streamline(self, topology, wbits, abits, QONNX_export): model = model.transform(absorb.AbsorbScalarMulAddIntoTopK()) model = model.transform(InferDataLayouts()) model = model.transform(RemoveUnusedTensors()) - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline")) + model.save(get_checkpoint_name(topology, wbits, abits, "streamline")) - def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") + def test_convert_to_hls_layers(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "streamline") model = load_test_checkpoint_or_skip(prev_chkpt_name) if topology == "tfc" and wbits == 1 and abits == 1: # use standalone thresholds for tfc-w1a1 to also exercise that option @@ -400,9 +397,7 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): model = model.transform(absorb.AbsorbConsecutiveTransposes()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(InferDataLayouts()) - model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "convert_to_hls_layers") - ) + model.save(get_checkpoint_name(topology, wbits, abits, "convert_to_hls_layers")) exp_layer_counts = { "tfc": [ ("Reshape", 1), @@ -439,57 +434,45 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): for op_type, exp_count in exp_layer_counts: assert len(model.get_nodes_by_op_type(op_type)) == exp_count - def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "convert_to_hls_layers" - ) + def test_create_dataflow_partition(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "convert_to_hls_layers") model = load_test_checkpoint_or_skip(prev_chkpt_name) parent_model = model.transform(CreateDataflowPartition()) - parent_model_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_parent" - ) + parent_model_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_parent") parent_model.save(parent_model_chkpt) sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) dataflow_model_filename = sdp_node.get_nodeattr("model") dataflow_model = load_test_checkpoint_or_skip(dataflow_model_filename) - dataflow_model_chkpt = 
get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_model" - ) + dataflow_model_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_model") dataflow_model.save(dataflow_model_chkpt) - def test_fold(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "dataflow_model" - ) + def test_fold(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "dataflow_model") model = load_test_checkpoint_or_skip(prev_chkpt_name) folding_fxn = get_folding_function(topology, wbits, abits) model = folding_fxn(model) - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold")) + model.save(get_checkpoint_name(topology, wbits, abits, "fold")) - def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold") + def test_minimize_bit_width(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fold") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(MinimizeAccumulatorWidth()) model = model.transform(MinimizeWeightBitWidth()) - curr_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "minimize_bit_width" - ) + curr_chkpt_name = get_checkpoint_name(topology, wbits, abits, "minimize_bit_width") model.save(curr_chkpt_name) @pytest.mark.slow @pytest.mark.vivado - def test_cppsim(self, topology, wbits, abits, QONNX_export): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "minimize_bit_width" - ) + def test_cppsim(self, topology, wbits, abits): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "minimize_bit_width") model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(PrepareCppSim()) model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) - cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "cppsim") + cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, "cppsim") model.save(cppsim_chkpt) - parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") + parent_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_parent") (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) @@ -499,36 +482,34 @@ def test_cppsim(self, topology, wbits, abits, QONNX_export): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_ipgen(self, topology, wbits, abits, QONNX_export, kind): + def test_ipgen(self, topology, wbits, abits, kind): if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold") + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fold") model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(GiveUniqueNodeNames()) model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) model = model.transform(HLSSynthIP()) - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind)) + model.save(get_checkpoint_name(topology, wbits, abits, "ipgen_" + kind)) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_set_fifo_depths(self, topology, wbits, 
abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind) + def test_set_fifo_depths(self, topology, wbits, abits, kind): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "ipgen_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(InsertAndSetFIFODepths(test_fpga_part, target_clk_ns)) fifo_layers = model.get_nodes_by_op_type("StreamingFIFO") assert len(fifo_layers) > 0 - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fifodepth_" + kind)) + model.save(get_checkpoint_name(topology, wbits, abits, "fifodepth_" + kind)) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq"]) - def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind - ) + def test_ipstitch_rtlsim(self, topology, wbits, abits, kind): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(InsertDWC()) @@ -547,11 +528,9 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): if rtlsim_trace: model.set_metadata_prop("rtlsim_trace", "%s_w%da%d.vcd" % (topology, wbits, abits)) os.environ["RTLSIM_TRACE_DEPTH"] = "3" - rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind - ) + rtlsim_chkpt = get_checkpoint_name(topology, wbits, abits, "ipstitch_rtlsim_" + kind) model.save(rtlsim_chkpt) - parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") + parent_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_parent") (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) @@ -561,10 +540,8 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq"]) - def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind - ) + def test_throughput_rtlsim(self, topology, wbits, abits, kind): + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "ipstitch_rtlsim_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) n_nodes = len(model.graph.node) perf_est = model.analysis(dataflow_performance) @@ -580,16 +557,14 @@ def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.parametrize("kind", ["zynq"]) - def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): + def test_validate_top1(self, topology, wbits, abits, kind): if "TEST_END2END_VALIDATE_TOP1" not in os.environ: pytest.skip("TEST_END2END_VALIDATE_TOP1 not set") - prepostproc_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "pre_post") - streamline_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") - parent_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "dataflow_parent") - cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, QONNX_export, "cppsim") - rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, 
"ipstitch_rtlsim_" + kind - ) + prepostproc_chkpt = get_checkpoint_name(topology, wbits, abits, "pre_post") + streamline_chkpt = get_checkpoint_name(topology, wbits, abits, "streamline") + parent_chkpt = get_checkpoint_name(topology, wbits, abits, "dataflow_parent") + cppsim_chkpt = get_checkpoint_name(topology, wbits, abits, "cppsim") + rtlsim_chkpt = get_checkpoint_name(topology, wbits, abits, "ipstitch_rtlsim_" + kind) dataset = topology2dataset(topology) assert measure_top1_accuracy(prepostproc_chkpt, dataset) > 80 assert measure_top1_accuracy(streamline_chkpt, dataset) > 80 @@ -600,27 +575,25 @@ def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.vivado @pytest.mark.vitis @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_build(self, topology, wbits, abits, QONNX_export, kind): + def test_build(self, topology, wbits, abits, kind): if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) cfg = get_build_env(kind, target_clk_ns) model = model.transform(cfg["build_fxn"]) model = model.transform(AnnotateResources("synth")) - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind)) + model.save(get_checkpoint_name(topology, wbits, abits, "build_" + kind)) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind): + def test_make_pynq_driver(self, topology, wbits, abits, kind): if kind == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") - prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "build_" + kind) model = load_test_checkpoint_or_skip(prev_chkpt_name) kind_to_driver_platform = {"zynq": "zynq-iodma", "alveo": "alveo"} model = model.transform(MakePYNQDriver(kind_to_driver_platform[kind])) - model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind)) + model.save(get_checkpoint_name(topology, wbits, abits, "driver_" + kind)) From 41d6056b4962594251347d2369b5d9cce43d8d26 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Jul 2023 09:13:13 +0100 Subject: [PATCH 531/628] [GHA] Update docker image workflow to only target dev --- .github/workflows/docker-image.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 00c25a4a31..f9a251a8c7 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -1,8 +1,6 @@ name: DockerImage on: - pull_request: - branches: [ dev ] push: branches: [ dev ] From 85f37d4a56f4c2c255ab778bb224135345dda919 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Jul 2023 12:18:42 +0100 Subject: [PATCH 532/628] [Tests/Deps] Update qonnx commit and update tests --- fetch-repos.sh | 2 +- .../qonnx/infer_quant_avg_pool_2d.py | 3 +- tests/end2end/test_end2end_cybsec_mlp.py | 94 +++++++------------ .../test_convert_to_hls_layers_cnv.py | 11 ++- .../test_convert_to_hls_layers_fc.py | 13 ++- tests/transformation/test_qonnx_to_finn.py | 31 +----- 6 files changed, 56 insertions(+), 98 deletions(-) diff --git a/fetch-repos.sh 
b/fetch-repos.sh index 0bfae82854..651f06452b 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="6ca8f8e0af84e49facac5cdc34735eaf6e938300" +QONNX_COMMIT="0aec35a16948155e81c1640b71650206e733db3e" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" diff --git a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py index 72d473419a..52eb55355a 100644 --- a/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py +++ b/src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py @@ -165,7 +165,8 @@ def apply(self, model): # Trunc node rounding_mode = get_by_name(t_node.attribute, "rounding_mode") - if rounding_mode is None or rounding_mode.s != b"FLOOR": + normalized_mode_string = rounding_mode.s.upper() + if rounding_mode is None or normalized_mode_string != b"FLOOR": raise ValueError( "The Trunc node must have the rounding_mode " "set to 'FLOOR'." ) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 6e758d2d2d..7b73700909 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -37,9 +37,8 @@ import torch import torch.nn as nn from brevitas.core.quant import QuantType -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from brevitas.nn import QuantIdentity, QuantLinear, QuantReLU -from brevitas.quant_tensor import QuantTensor from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.util.cleanup import cleanup as qonnx_cleanup @@ -55,13 +54,13 @@ build_dir = os.environ["FINN_BUILD_DIR"] -def get_checkpoint_name(step, QONNX_export): +def get_checkpoint_name(step): if step == "build": # checkpoint for build step is an entire dir - return build_dir + "/end2end_cybsecmlp_build_QONNX-%d" % (QONNX_export) + return build_dir + "/end2end_cybsecmlp_build" else: # other checkpoints are onnx files - return build_dir + "/end2end_cybsecmlp_QONNX-%d_%s.onnx" % (QONNX_export, step) + return build_dir + "/end2end_cybsecmlp_%s.onnx" % step class CybSecMLPForExport(nn.Module): @@ -82,9 +81,8 @@ def forward(self, x): return out_final -@pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.end2end -def test_end2end_cybsec_mlp_export(QONNX_export): +def test_end2end_cybsec_mlp_export(): assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/") # load up trained net in Brevitas input_size = 593 @@ -116,72 +114,45 @@ def test_end2end_cybsec_mlp_export(QONNX_export): W_new = np.pad(W_orig, [(0, 0), (0, 7)]) model[0].weight.data = torch.from_numpy(W_new) model_for_export = CybSecMLPForExport(model) - export_onnx_path = get_checkpoint_name("export", QONNX_export) + export_onnx_path = get_checkpoint_name("export") input_shape = (1, 600) - # create a QuantTensor instance to mark the input as bipolar during export - input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32) - input_a = 2 * input_a - 1 - scale = 1.0 - input_t = torch.from_numpy(input_a * scale) - input_qt = QuantTensor( - input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True - ) - if QONNX_export: - # With the onnx export from Brevitas we need to 
manually set - # the FINN DataType at the input - export_qonnx(model_for_export, torch.randn(input_shape), export_path=export_onnx_path) - model = ModelWrapper(export_onnx_path) - model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"]) - model.save(export_onnx_path) - qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) - model = ModelWrapper(export_onnx_path) - model = model.transform(ConvertQONNXtoFINN()) - model.save(export_onnx_path) - else: - export_finn_onnx( - model_for_export, - export_path=export_onnx_path, - input_t=input_qt, - input_names=["onnx::Mul_0"], - ) + # With the onnx export from Brevitas we need to manually set + # the FINN DataType at the input + export_qonnx(model_for_export, torch.randn(input_shape), export_path=export_onnx_path) + model = ModelWrapper(export_onnx_path) + model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"]) + model.save(export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) + model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) assert os.path.isfile(export_onnx_path) # fix input datatype - finn_model = ModelWrapper(export_onnx_path) - finnonnx_in_tensor_name = finn_model.graph.input[0].name - assert tuple(finn_model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600) + finnonnx_in_tensor_name = model.graph.input[0].name + assert tuple(model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600) # verify a few exported ops - if QONNX_export: - # The first "Mul" node doesn't exist in the QONNX export, - # because the QuantTensor scale is not exported. - # However, this node would have been unity scale anyways and - # the models are still equivalent. - assert finn_model.graph.node[0].op_type == "Add" - assert finn_model.graph.node[1].op_type == "Div" - assert finn_model.graph.node[2].op_type == "MatMul" - assert finn_model.graph.node[-1].op_type == "MultiThreshold" - else: - assert finn_model.graph.node[0].op_type == "Mul" - assert finn_model.get_initializer(finn_model.graph.node[0].input[1]) == 1.0 - assert finn_model.graph.node[1].op_type == "Add" - assert finn_model.graph.node[2].op_type == "Div" - assert finn_model.graph.node[3].op_type == "MatMul" - assert finn_model.graph.node[-1].op_type == "MultiThreshold" + # The first "Mul" node doesn't exist in the QONNX export, + # because the QuantTensor scale is not exported. + # However, this node would have been unity scale anyways and + # the models are still equivalent. 
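Earlier in this hunk the exported graph is annotated by hand, since the QONNX export does not carry the bipolar input datatype. As a reference, that annotation step in isolation looks roughly like the sketch below; the checkpoint file name is only illustrative.

from qonnx.core.datatype import DataType
from qonnx.core.modelwrapper import ModelWrapper

chkpt = "end2end_cybsecmlp_export.onnx"  # illustrative checkpoint path
model = ModelWrapper(chkpt)
inp_name = model.graph.input[0].name
# annotate the graph input as bipolar so downstream transformations see it
model.set_tensor_datatype(inp_name, DataType["BIPOLAR"])
assert model.get_tensor_datatype(inp_name) == DataType["BIPOLAR"]
model.save(chkpt)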
+ assert model.graph.node[0].op_type == "Add" + assert model.graph.node[1].op_type == "Div" + assert model.graph.node[2].op_type == "MatMul" + assert model.graph.node[-1].op_type == "MultiThreshold" # verify datatypes on some tensors - assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"] - first_matmul_w_name = finn_model.get_nodes_by_op_type("MatMul")[0].input[1] - assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"] + assert model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"] + first_matmul_w_name = model.get_nodes_by_op_type("MatMul")[0].input[1] + assert model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"] @pytest.mark.slow @pytest.mark.vivado @pytest.mark.end2end -@pytest.mark.parametrize("QONNX_export", [False, True]) -def test_end2end_cybsec_mlp_build(QONNX_export): - model_file = get_checkpoint_name("export", QONNX_export) +def test_end2end_cybsec_mlp_build(): + model_file = get_checkpoint_name("export") load_test_checkpoint_or_skip(model_file) build_env = get_build_env(build_kind, target_clk_ns) - output_dir = make_build_dir(f"test_end2end_cybsec_mlp_build_QONNX-{QONNX_export}") + output_dir = make_build_dir("test_end2end_cybsec_mlp_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, @@ -219,4 +190,5 @@ def test_end2end_cybsec_mlp_build(QONNX_export): est_res_dict = json.load(f) assert est_res_dict["total"]["LUT"] == 7904.0 assert est_res_dict["total"]["BRAM_18K"] == 36.0 - shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build", QONNX_export)) + shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build")) + shutil.rmtree(get_checkpoint_name("build")) diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index 296b4cf350..c4f3807aa0 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -33,7 +33,7 @@ import numpy as np import os import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount @@ -46,6 +46,7 @@ from qonnx.transformation.infer_data_layouts import InferDataLayouts from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -53,6 +54,7 @@ from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.transformation.streamline.reorder import MakeMaxPoolNHWC from finn.util.test import get_test_model_trained @@ -66,8 +68,10 @@ @pytest.mark.parametrize("fused_activation", [True, False]) def test_convert_to_hls_layers_cnv_w1a1(fused_activation): cnv = get_test_model_trained("CNV", 1, 1) - export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) + export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) + qonnx_cleanup(export_onnx_path_cnv, 
out_file=export_onnx_path_cnv) model = ModelWrapper(export_onnx_path_cnv) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -81,7 +85,6 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): model = model.transform(ConvertBipolarMatMulToXnorPopcount()) model = model.transform(Streamline()) model = model.transform(InferDataLayouts()) - # model.save("golden.onnx") # load one of the test vectors fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") input_tensor = np.load(fn)["arr_0"].astype(np.float32) @@ -134,11 +137,9 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): assert len(swg_nodes) == 6 mp_nodes = model.get_nodes_by_op_type("StreamingMaxPool_Batch") assert len(mp_nodes) == 2 - # model.save("cnv-pre-compile.onnx") model = model.transform(PrepareCppSim()) model = model.transform(CompileCppSim()) model = model.transform(SetExecMode("cppsim")) - # model.save("cnv-post-compile.onnx") produced_ctx = oxe.execute_onnx(model, input_dict, True) produced = produced_ctx[model.graph.output[0].name] assert np.isclose(expected, produced, atol=1e-3).all() diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py index e9caeddb44..8a7b2509a4 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_fc.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_fc.py @@ -33,7 +33,7 @@ import onnx.numpy_helper as nph import os import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -45,6 +45,7 @@ GiveUniqueParameterTensors, ) from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls @@ -52,6 +53,7 @@ from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds from finn.util.test import get_test_model_trained @@ -63,8 +65,10 @@ @pytest.mark.vivado def test_convert_to_hls_layers_tfc_w1a1(): tfc = get_test_model_trained("TFC", 1, 1) - export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) + export_qonnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) @@ -135,8 +139,11 @@ def test_convert_to_hls_layers_tfc_w1a1(): @pytest.mark.vivado def test_convert_to_hls_layers_tfc_w1a2(): tfc = get_test_model_trained("TFC", 1, 2) - export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) + export_qonnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) 
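These conversion tests pair the export with a numerical cross-check of the converted FINN model against the original Brevitas module. A condensed sketch of that kind of check follows, reusing `model` (the ModelWrapper after ConvertQONNXtoFINN) and `tfc` (the Brevitas module) from the surrounding test; the random input and tolerance here are illustrative, not fixed by the patch.

import numpy as np
import torch

import finn.core.onnx_exec as oxe

ishape = (1, 1, 28, 28)
inp = np.random.uniform(low=-1.0, high=1.0, size=ishape).astype(np.float32)
idict = {model.graph.input[0].name: inp}
# execute the converted model; False returns only the graph outputs
odict = oxe.execute_onnx(model, idict, False)
produced = odict[model.graph.output[0].name]
# reference output from the original Brevitas module
expected = tfc.forward(torch.from_numpy(inp).float()).detach().numpy()
assert np.isclose(produced, expected, atol=1e-3).all()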
+ model.save(export_onnx_path) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 10fcb79cc7..0c68bd44b4 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -35,12 +35,9 @@ import onnx import onnx.numpy_helper as nph import torch -from brevitas.export import export_finn_onnx, export_qonnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper -from qonnx.transformation.fold_constants import FoldConstants -from qonnx.transformation.general import GiveUniqueNodeNames, RemoveStaticGraphInputs -from qonnx.transformation.infer_shapes import InferShapes from qonnx.util.cleanup import cleanup from tempfile import TemporaryDirectory @@ -106,32 +103,12 @@ def test_QONNX_to_FINN(model_name, wbits, abits): brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs(model_name, wbits, abits) temp_dir = TemporaryDirectory() qonnx_base_path = temp_dir.name + "/qonnx_{}.onnx" - finn_base_path = temp_dir.name + "/finn_{}.onnx" # Get Brevitas output torch_input_tensor = torch.from_numpy(input_tensor).float() brev_output = brev_model.forward(torch_input_tensor).detach().numpy() - # Get "clean" FINN model and its output - _ = export_finn_onnx(brev_model, torch.randn(in_shape), finn_base_path.format("raw")) - model = ModelWrapper(finn_base_path.format("raw")) - model = model.transform(GiveUniqueNodeNames()) - model = model.transform(InferShapes()) - model = model.transform(FoldConstants()) - model = model.transform(RemoveStaticGraphInputs()) - model.save(finn_base_path.format("clean")) - - model = ModelWrapper(finn_base_path.format("clean")) - input_dict = {model.graph.input[0].name: input_tensor} - output_dict = oxe.execute_onnx(model, input_dict, False) - finn_export_output = output_dict[model.graph.output[0].name] - # This test always fails on MobileNet for some reason - if model_name != "mobilenet": - assert np.isclose( - brev_output, finn_export_output, atol=ATOL - ).all(), "The output of the Brevitas model and the FINN model should match." - - # Get the equivalent QONNX model + # Get QONNX model _ = export_qonnx(brev_model, torch.randn(in_shape), qonnx_base_path.format("raw")) cleanup(qonnx_base_path.format("raw"), out_file=qonnx_base_path.format("clean")) @@ -146,7 +123,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): # This test always fails on MobileNet for some reason if model_name != "mobilenet": assert np.isclose( - qonnx_export_output, finn_export_output, atol=ATOL + brev_output, qonnx_export_output, atol=ATOL ).all(), "The output of the FINN model and the QONNX model should match." # Run QONNX to FINN conversion @@ -159,7 +136,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): input_dict = {model.graph.input[0].name: input_tensor} output_dict = oxe.execute_onnx(model, input_dict, False) test_output = output_dict[model.graph.output[0].name] - assert np.isclose(test_output, finn_export_output, atol=ATOL).all(), ( + assert np.isclose(test_output, qonnx_export_output, atol=ATOL).all(), ( "The output of the FINN model " "and the QONNX -> FINN converted model should match." 
) From 90468e7f69fcb9ade5ee4b2fcce9cd52ab4a696f Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 5 Jul 2023 14:47:42 +0100 Subject: [PATCH 533/628] [Tests] Fix mobilenet qonnx to finn onnx conversion test --- tests/transformation/test_qonnx_to_finn.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 0c68bd44b4..345aba6016 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -88,9 +88,6 @@ def analysis_testing_for_no_quant_nodes(model): @pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("model_name", ["TFC", "SFC", "LFC", "CNV", "mobilenet"]) def test_QONNX_to_FINN(model_name, wbits, abits): - if model_name == "mobilenet": - pytest.xfail("MobileNet test is temporarily excluded from QONNX testing.") - if wbits > abits: pytest.skip("No wbits > abits cases at the moment") if model_name == "LFC" and wbits == 2 and abits == 2: @@ -99,7 +96,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits): pytest.skip("Mobilenet only runs at W2A2, though it's technically W4A4.") # Get test config and model - ATOL = 1e-7 + ATOL = 1e-6 brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs(model_name, wbits, abits) temp_dir = TemporaryDirectory() qonnx_base_path = temp_dir.name + "/qonnx_{}.onnx" @@ -120,11 +117,6 @@ def test_QONNX_to_FINN(model_name, wbits, abits): assert np.isclose( brev_output, qonnx_export_output, atol=ATOL ).all(), "The output of the Brevitas model and the QONNX model should match." - # This test always fails on MobileNet for some reason - if model_name != "mobilenet": - assert np.isclose( - brev_output, qonnx_export_output, atol=ATOL - ).all(), "The output of the FINN model and the QONNX model should match." 
# Run QONNX to FINN conversion model = ModelWrapper(qonnx_base_path.format("clean")) From 0cd757fbdabea18779f5374842b45a4fd755db10 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 6 Jul 2023 15:50:01 +0100 Subject: [PATCH 534/628] [Tests] Mark mobilenet export as xfail --- tests/transformation/test_qonnx_to_finn.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 345aba6016..5bbcb1f9d4 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -88,6 +88,8 @@ def analysis_testing_for_no_quant_nodes(model): @pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("model_name", ["TFC", "SFC", "LFC", "CNV", "mobilenet"]) def test_QONNX_to_FINN(model_name, wbits, abits): + if model_name == "mobilenet": + pytest.xfail("MobileNet test is temporarily excluded from QONNX testing.") if wbits > abits: pytest.skip("No wbits > abits cases at the moment") if model_name == "LFC" and wbits == 2 and abits == 2: From a48b5037871468e8a3e890b4719258c7dd1736e2 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 6 Jul 2023 16:50:29 +0100 Subject: [PATCH 535/628] [Tests] Update tests to only use qonnx export --- tests/brevitas/test_brevitas_mobilenet.py | 14 ++++++++++---- .../brevitas/test_brevitas_validate_mobilenet.py | 15 +++++++++------ tests/end2end/test_end2end_mobilenet_v1.py | 12 +++++++++--- .../streamline/test_sign_to_thres.py | 8 ++++++-- .../streamline/test_streamline_cnv.py | 8 ++++++-- .../streamline/test_streamline_fc.py | 8 ++++++-- .../test_batchnorm_to_affine_bnn_pynq.py | 12 +++++++++--- .../transformation/test_infer_data_layouts_cnv.py | 8 ++++++-- tests/transformation/test_infer_datatypes_lfc.py | 8 ++++++-- 9 files changed, 67 insertions(+), 26 deletions(-) diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index fa391efcab..f98e85bb85 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -30,7 +30,7 @@ import numpy as np import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from PIL import Image from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -45,16 +45,17 @@ from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.insert_topk import InsertTopK from qonnx.transformation.merge_onnx_models import MergeONNXModels +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe import finn.transformation.streamline.absorb as absorb +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.basic import get_finn_root, make_build_dir from finn.util.pytorch import NormalizePreProc from finn.util.test import crop_center, get_test_model_trained, resize_smaller_side @pytest.mark.brevitas_export -@pytest.mark.xfail def test_brevitas_mobilenet(): # get single image as input and prepare image img = Image.open(get_finn_root() + "/tests/brevitas/king_charles.jpg") @@ -76,8 +77,10 @@ def test_brevitas_mobilenet(): std = 0.226 ch = 3 preproc = NormalizePreProc(mean, std, ch) - export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + export_qonnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + qonnx_cleanup(preproc_onnx, out_file=preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) + preproc_model = preproc_model.transform(ConvertQONNXtoFINN()) # set 
input finn datatype to UINT8 preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType["UINT8"]) preproc_model = preproc_model.transform(InferShapes()) @@ -87,7 +90,8 @@ def test_brevitas_mobilenet(): finn_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_exported.onnx" mobilenet = get_test_model_trained("mobilenet", 4, 4) - export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) + export_qonnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) # do forward pass in PyTorch/Brevitas input_tensor = preproc.forward(img_torch) @@ -98,7 +102,9 @@ def test_brevitas_mobilenet(): expected_top5_prob = [] for index in expected_top5: expected_top5_prob.append(expected_topk[index]) + model = ModelWrapper(finn_onnx) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(InsertTopK()) diff --git a/tests/brevitas/test_brevitas_validate_mobilenet.py b/tests/brevitas/test_brevitas_validate_mobilenet.py index f3f7df0e3d..18f8fa9a41 100644 --- a/tests/brevitas/test_brevitas_validate_mobilenet.py +++ b/tests/brevitas/test_brevitas_validate_mobilenet.py @@ -35,7 +35,7 @@ import torch import torchvision.datasets as datasets import torchvision.transforms as transforms -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import ( @@ -49,10 +49,12 @@ from qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.insert_topk import InsertTopK from qonnx.transformation.merge_onnx_models import MergeONNXModels +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe import finn.transformation.streamline.absorb as absorb import finn.util.imagenet as imagenet_util +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.basic import make_build_dir from finn.util.pytorch import NormalizePreProc from finn.util.test import get_test_model_trained @@ -102,9 +104,6 @@ def test_brevitas_mobilenet_preproc(): @pytest.mark.brevitas_export @pytest.mark.slow -# marked as XFAIL until Brevitas export issues are resolved: -# https://github.com/Xilinx/brevitas/issues/173 -@pytest.mark.xfail def test_brevitas_compare_exported_mobilenet(): if "IMAGENET_VAL_PATH" not in os.environ.keys(): pytest.skip("Can't do validation without IMAGENET_VAL_PATH") @@ -114,8 +113,10 @@ def test_brevitas_compare_exported_mobilenet(): # export preprocessing preproc_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_preproc.onnx" preproc = NormalizePreProc(mean, std, ch) - export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + export_qonnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + qonnx_cleanup(preproc_onnx, out_file=preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) + preproc_model = preproc_model.transform(ConvertQONNXtoFINN()) preproc_model = preproc_model.transform(InferShapes()) preproc_model = preproc_model.transform(GiveUniqueNodeNames()) preproc_model = preproc_model.transform(GiveUniqueParameterTensors()) @@ -125,8 +126,10 @@ def test_brevitas_compare_exported_mobilenet(): mobilenet = get_test_model_trained("mobilenet", 4, 4) if debug_mode: dbg_hook = bo.enable_debug(mobilenet) - export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) + export_qonnx(mobilenet, 
torch.randn(1, 3, 224, 224), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) model = ModelWrapper(finn_onnx) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(RemoveStaticGraphInputs()) diff --git a/tests/end2end/test_end2end_mobilenet_v1.py b/tests/end2end/test_end2end_mobilenet_v1.py index e53022e74b..2d25a2bf0d 100644 --- a/tests/end2end/test_end2end_mobilenet_v1.py +++ b/tests/end2end/test_end2end_mobilenet_v1.py @@ -31,7 +31,7 @@ import os import time import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from PIL import Image from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper @@ -52,6 +52,7 @@ from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.transformation.merge_onnx_models import MergeONNXModels from qonnx.transformation.remove import RemoveIdentityOps +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb @@ -63,6 +64,7 @@ ) from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.transformation.streamline.collapse_repeated import CollapseRepeatedMul from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds @@ -95,8 +97,10 @@ def test_end2end_mobilenet_export(): std = 0.226 ch = 3 preproc = NormalizePreProc(mean, std, ch) - export_finn_onnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + export_qonnx(preproc, torch.randn(1, 3, 224, 224), preproc_onnx) + qonnx_cleanup(preproc_onnx, out_file=preproc_onnx) preproc_model = ModelWrapper(preproc_onnx) + preproc_model = preproc_model.transform(ConvertQONNXtoFINN()) # set input finn datatype to UINT8 preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType["UINT8"]) preproc_model = preproc_model.transform(InferShapes()) @@ -109,7 +113,8 @@ def test_end2end_mobilenet_export(): # export mobilenet finn_onnx = build_dir + "/end2end_mobilenet_export.onnx" mobilenet = get_test_model_trained("mobilenet", 4, 4) - export_finn_onnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) + export_qonnx(mobilenet, torch.randn(1, 3, 224, 224), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) # calculate golden output with pytorch/brevitas and save as .npy # get single image as input and prepare image @@ -145,6 +150,7 @@ def test_end2end_mobilenet_export(): def test_end2end_mobilenet_tidy_and_merge_with_preproc(): preproc_model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_preproc.onnx") model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_export.onnx") + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(InsertTopK()) diff --git a/tests/transformation/streamline/test_sign_to_thres.py b/tests/transformation/streamline/test_sign_to_thres.py index 72e400346d..1386592563 100644 --- a/tests/transformation/streamline/test_sign_to_thres.py +++ b/tests/transformation/streamline/test_sign_to_thres.py @@ -32,13 +32,15 @@ import onnx.numpy_helper as nph import os import torch -from 
brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import ConvertSignToThres from finn.util.test import get_test_model_trained @@ -48,8 +50,10 @@ @pytest.mark.streamline def test_sign_to_thres(): lfc = get_test_model_trained("LFC", 1, 1) - export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + export_qonnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) new_model = model.transform(ConvertSignToThres()) diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py index c5d8e2517f..86e4356ae4 100644 --- a/tests/transformation/streamline/test_streamline_cnv.py +++ b/tests/transformation/streamline/test_streamline_cnv.py @@ -32,7 +32,7 @@ import numpy as np import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import ( @@ -43,8 +43,10 @@ RemoveUnusedTensors, ) from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.util.basic import make_build_dir from finn.util.test import get_test_model_trained @@ -65,8 +67,10 @@ def test_streamline_cnv(size, wbits, abits): nname = "%s_%dW%dA" % (size, wbits, abits) finn_onnx = export_onnx_path + "/%s.onnx" % nname fc = get_test_model_trained(size, wbits, abits) - export_finn_onnx(fc, torch.randn(1, 3, 32, 32), finn_onnx) + export_qonnx(fc, torch.randn(1, 3, 32, 32), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) model = ModelWrapper(finn_onnx) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/transformation/streamline/test_streamline_fc.py b/tests/transformation/streamline/test_streamline_fc.py index 07c3a0f3cb..edc4a96fe2 100644 --- a/tests/transformation/streamline/test_streamline_fc.py +++ b/tests/transformation/streamline/test_streamline_fc.py @@ -32,7 +32,7 @@ import onnx import onnx.numpy_helper as nph import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants @@ -44,8 +44,10 @@ RemoveUnusedTensors, ) from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import 
Streamline from finn.util.basic import make_build_dir from finn.util.test import get_test_model_trained @@ -68,8 +70,10 @@ def test_streamline_fc(size, wbits, abits): nname = "%s_%dW%dA" % (size, wbits, abits) finn_onnx = export_onnx_path + "/%s.onnx" % nname fc = get_test_model_trained(size, wbits, abits) - export_finn_onnx(fc, torch.randn(1, 1, 28, 28), finn_onnx) + export_qonnx(fc, torch.randn(1, 1, 28, 28), finn_onnx) + qonnx_cleanup(finn_onnx, out_file=finn_onnx) model = ModelWrapper(finn_onnx) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py index 60e81ffe81..b95c26d25f 100644 --- a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py +++ b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py @@ -35,14 +35,16 @@ import onnx.numpy_helper as nph import os import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from pkgutil import get_data from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.batchnorm_to_affine import BatchNormToAffine from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.core.onnx_exec as oxe +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.test import get_test_model_trained export_onnx_path = "test_output_bn2affine.onnx" @@ -51,8 +53,10 @@ @pytest.mark.transform def test_batchnorm_to_affine_cnv_w1a1(): lfc = get_test_model_trained("CNV", 1, 1) - export_finn_onnx(lfc, torch.randn(1, 3, 32, 32), export_onnx_path) + export_qonnx(lfc, torch.randn(1, 3, 32, 32), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") @@ -76,8 +80,10 @@ def test_batchnorm_to_affine_cnv_w1a1(): @pytest.mark.transform def test_batchnorm_to_affine_lfc_w1a1(): lfc = get_test_model_trained("LFC", 1, 1) - export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + export_qonnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) new_model = model.transform(BatchNormToAffine()) diff --git a/tests/transformation/test_infer_data_layouts_cnv.py b/tests/transformation/test_infer_data_layouts_cnv.py index a5a9d34aaf..25bf890271 100644 --- a/tests/transformation/test_infer_data_layouts_cnv.py +++ b/tests/transformation/test_infer_data_layouts_cnv.py @@ -31,7 +31,7 @@ import os import qonnx.core.data_layout as DataLayout import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount from qonnx.transformation.fold_constants import FoldConstants @@ -43,9 +43,11 @@ from qonnx.transformation.infer_data_layouts import InferDataLayouts from 
qonnx.transformation.infer_shapes import InferShapes from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul +from qonnx.util.cleanup import cleanup as qonnx_cleanup import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.transformation.streamline import Streamline from finn.transformation.streamline.reorder import MakeMaxPoolNHWC from finn.util.test import get_test_model_trained @@ -56,8 +58,10 @@ @pytest.mark.transform def test_infer_data_layouts_cnv(): cnv = get_test_model_trained("CNV", 1, 1) - export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) + export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv) + qonnx_cleanup(export_onnx_path_cnv, out_file=export_onnx_path_cnv) model = ModelWrapper(export_onnx_path_cnv) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) diff --git a/tests/transformation/test_infer_datatypes_lfc.py b/tests/transformation/test_infer_datatypes_lfc.py index 173532cb76..b9d9dc558f 100644 --- a/tests/transformation/test_infer_datatypes_lfc.py +++ b/tests/transformation/test_infer_datatypes_lfc.py @@ -30,14 +30,16 @@ import os import torch -from brevitas.export import export_finn_onnx +from brevitas.export import export_qonnx from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.transformation.fold_constants import FoldConstants from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames from qonnx.transformation.infer_datatypes import InferDataTypes from qonnx.transformation.infer_shapes import InferShapes +from qonnx.util.cleanup import cleanup as qonnx_cleanup +from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.test import get_test_model_trained export_onnx_path = "test_infer_datatypes.onnx" @@ -46,8 +48,10 @@ @pytest.mark.transform def test_infer_datatypes_lfc(): lfc = get_test_model_trained("LFC", 1, 1) - export_finn_onnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + export_qonnx(lfc, torch.randn(1, 1, 28, 28), export_onnx_path) + qonnx_cleanup(export_onnx_path, out_file=export_onnx_path) model = ModelWrapper(export_onnx_path) + model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) model = model.transform(GiveUniqueNodeNames()) From 391cd76ee3edb6e802d9b565a99993c775cc2194 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Jul 2023 12:07:42 +0100 Subject: [PATCH 536/628] [deps] Bump clize to 5.0.1 and sigtools to 4.0.1 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e3f74c23f9..1427d4f1ee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ bitstring==3.1.7 -clize==4.1.1 +clize==5.0.1 dataclasses-json==0.5.7 gspread==3.6.0 ipython==8.12.2 @@ -13,7 +13,7 @@ psutil==5.9.4 pyscaffold==4.4 scipy==1.10.1 setupext-janitor>=1.1.2 -sigtools==2.0.3 +sigtools==4.0.1 toposort==1.7.0 vcdvcd==1.0.5 wget==3.2 From 7924bf7271b41dd808feac0e8c5017222490f553 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Jul 2023 14:31:14 +0100 Subject: [PATCH 537/628] [NBs] Update notebooks to only use QONNX export --- ...1_brevitas_network_import_via_QONNX.ipynb} | 4 +- 
...revitas_network_import_via_FINN-ONNX.ipynb | 321 ------------------ .../bnn-pynq/cnv_end2end_example.ipynb | 21 +- .../bnn-pynq/tfc_end2end_example.ipynb | 23 +- .../1-train-mlp-with-brevitas.ipynb | 29 +- .../2-import-into-finn-and-verify.ipynb | 2 +- tests/brevitas/test_brevitas_mobilenet.py | 1 + tests/notebooks/test_jupyter_notebooks.py | 3 +- 8 files changed, 52 insertions(+), 352 deletions(-) rename notebooks/basics/{1b_brevitas_network_import_via_QONNX.ipynb => 1_brevitas_network_import_via_QONNX.ipynb} (97%) delete mode 100644 notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb diff --git a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb b/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb similarity index 97% rename from notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb rename to notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb index 58fa3fc7e1..f15f716e7f 100644 --- a/notebooks/basics/1b_brevitas_network_import_via_QONNX.ipynb +++ b/notebooks/basics/1_brevitas_network_import_via_QONNX.ipynb @@ -6,7 +6,7 @@ "source": [ "# Importing Brevitas networks into FINN with the QONNX interchange format\n", "\n", - "**Note: This notebook is very similar to the 1a notebook, in that it shows the same concepts for the QONNX ingestion as 1a does for FINN-ONNX. Section 1 is identical in both notebooks.**\n", + "**Note: Previously it was possible to directly export the FINN-ONNX interchange format from Brevitas to pass to the FINN compiler. This support is deprecated and FINN uses the export to the QONNX format as a front end, internally FINN uses still the FINN-ONNX format.**\n", "\n", "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", "\n", @@ -318,7 +318,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb b/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb deleted file mode 100644 index 756faf149d..0000000000 --- a/notebooks/basics/1a_brevitas_network_import_via_FINN-ONNX.ipynb +++ /dev/null @@ -1,321 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Importing Brevitas networks into FINN with the FINN-ONNX interchange format\n", - "\n", - "**Note: This notebook is very similar to the 1b notebook, in that it shows the same concepts for the FINN-ONNX ingestion as 1b does for QONNX. Section 1 is identical in both notebooks.**\n", - "\n", - "In this notebook we'll go through an example of how to import a Brevitas-trained QNN into FINN. The steps will be as follows:\n", - "\n", - "1. Load up the trained PyTorch model\n", - "2. Call Brevitas FINN-ONNX export and visualize with Netron\n", - "3. Import into FINN and call cleanup transformations\n", - "\n", - "We'll use the following utility functions to print the source code for function calls (`showSrc()`) and to visualize a network using netron (`showInNetron()`) in the Jupyter notebook:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import onnx\n", - "from finn.util.visualization import showSrc, showInNetron" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 
Load up the trained PyTorch model\n", - "\n", - "The FINN Docker image comes with several [example Brevitas networks](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq), and we'll use the LFC-w1a1 model as the example network here. This is a binarized fully connected network trained on the MNIST dataset. Let's start by looking at what the PyTorch network definition looks like:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from brevitas_examples import bnn_pynq\n", - "showSrc(bnn_pynq.models.FC)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the network topology is constructed using a few helper functions that generate the quantized linear layers and quantized activations. The bitwidth of the layers is actually parametrized in the constructor, so let's instantiate a 1-bit weights and activations version of this network. We also have pretrained weights for this network, which we will load into the model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from finn.util.test import get_test_model\n", - "lfc = get_test_model(netname = \"LFC\", wbits = 1, abits = 1, pretrained = True)\n", - "lfc" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have now instantiated our trained PyTorch network. Let's try to run an example MNIST image through the network using PyTorch." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import matplotlib.pyplot as plt\n", - "from pkgutil import get_data\n", - "import onnx\n", - "import onnx.numpy_helper as nph\n", - "raw_i = get_data(\"qonnx.data\", \"onnx/mnist-conv/test_data_set_0/input_0.pb\")\n", - "input_tensor = onnx.load_tensor_from_string(raw_i)\n", - "input_tensor_npy = nph.to_array(input_tensor)\n", - "input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()\n", - "imgplot = plt.imshow(input_tensor_npy.reshape(28,28), cmap='gray')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.nn.functional import softmax\n", - "# do forward pass in PyTorch/Brevitas\n", - "produced = lfc.forward(input_tensor_pyt).detach()\n", - "probabilities = softmax(produced, dim=-1).flatten()\n", - "probabilities" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "objects = [str(x) for x in range(10)]\n", - "y_pos = np.arange(len(objects))\n", - "plt.bar(y_pos, probabilities, align='center', alpha=0.5)\n", - "plt.xticks(y_pos, objects)\n", - "plt.ylabel('Predicted Probability')\n", - "plt.title('LFC-w1a1 Predictions for Image')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Call Brevitas FINN-ONNX export and visualize with Netron\n", - "\n", - "Brevitas comes with built-in FINN-ONNX export functionality. This is similar to the regular ONNX export capabilities of PyTorch, with a few differences:\n", - "\n", - "1. The weight quantization logic is not exported as part of the graph; rather, the quantized weights themselves are exported.\n", - "2. Special quantization annotations are used to preserve the low-bit quantization information. 
ONNX (at the time of writing) supports 8-bit quantization as the minimum bitwidth, whereas FINN-ONNX quantization annotations can go down to binary/bipolar quantization.\n", - "3. Low-bit quantized activation functions are exported as MultiThreshold operators.\n", - "\n", - "It's actually quite straightforward to export ONNX from our Brevitas model as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from brevitas.export import export_finn_onnx\n", - "export_onnx_path = \"/tmp/LFCW1A1_finn-onnx.onnx\"\n", - "input_shape = (1, 1, 28, 28)\n", - "export_finn_onnx(lfc, torch.randn(input_shape), export_onnx_path);" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's examine what the exported ONNX model looks like. For this, we will use the Netron visualizer:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "showInNetron(export_onnx_path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When running this notebook in the FINN Docker container, you should be able to see an interactive visualization of the imported network above, and click on individual nodes to inspect their parameters. If you look at any of the MatMul nodes, you should be able to see that the weights are all {-1, +1} values, and the activations are Sign functions." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Import into FINN and call cleanup transformations\n", - "\n", - "We will now import this ONNX model into FINN using the ModelWrapper, and examine some of the graph attributes from Python." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(export_onnx_path)\n", - "model.graph.node[8]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ModelWrapper exposes a range of other useful functions as well. For instance, by convention the second input of the MatMul node will be a pre-initialized weight tensor, which we can view using the following:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_initializer(model.graph.node[8].input[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also examine the quantization annotations and shapes of various tensors using the convenience functions provided by ModelWrapper." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_tensor_datatype(model.graph.node[8].input[1]).name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "model.get_tensor_shape(model.graph.node[8].input[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we want to operate further on this model in FINN, it is a good idea to execute certain \"cleanup\" transformations on this graph. Here, we will run shape inference and constant folding on this graph, and visualize the resulting graph in Netron again." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from qonnx.transformation.fold_constants import FoldConstants\n", - "from qonnx.transformation.infer_shapes import InferShapes\n", - "model = model.transform(InferShapes())\n", - "model = model.transform(FoldConstants())\n", - "export_onnx_path_transformed = \"/tmp/LFCW1A1-finn-onnx-clean.onnx\"\n", - "model.save(export_onnx_path_transformed)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "showInNetron(export_onnx_path_transformed)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that the resulting graph has become smaller and simpler. Specifically, the input reshaping is now a single Reshape node instead of the Shape -> Gather -> Unsqueeze -> Concat -> Reshape sequence. We can now use the internal ONNX execution capabilities of FINN to ensure that we still get the same output from this model as we did with PyTorch." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import finn.core.onnx_exec as oxe\n", - "input_dict = {\"0\": nph.to_array(input_tensor)}\n", - "output_dict = oxe.execute_onnx(model, input_dict)\n", - "produced_finn = output_dict[list(output_dict.keys())[0]]\n", - "\n", - "produced_finn" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.isclose(produced, produced_finn).all()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have succesfully verified that the transformed and cleaned-up FINN graph still produces the same output, and can now use this model for further processing in FINN." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index 73e9f4e6e1..a0dbbf4834 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -72,7 +72,7 @@ "source": [ "## 1. Brevitas Export, FINN Import and Tidy-Up\n", "\n", - "Similar to what we did in the TFC-w1a1 end-to-end notebook, we will start by exporting the [pretrained CNV-w1a1 network](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq) to ONNX, importing that into FINN and running the \"tidy-up\" transformations to have a first look at the topology." + "Similar to what we did in the TFC-w1a1 end-to-end notebook, we will start by exporting the [pretrained CNV-w1a1 network](https://github.com/Xilinx/brevitas/tree/master/src/brevitas_examples/bnn_pynq) to ONNX, importing that into FINN and running the \"tidy-up\" transformations to have a first look at the topology. The network will be exported in QONNX format and then converted into the FINN-ONNX format to prepare it for the FINN compiler." 
] }, { @@ -84,15 +84,20 @@ "import torch\n", "import onnx\n", "from finn.util.test import get_test_model_trained\n", - "from brevitas.export import export_finn_onnx\n", + "from brevitas.export import export_qonnx\n", + "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", "from qonnx.core.modelwrapper import ModelWrapper\n", + "from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN\n", "from qonnx.transformation.infer_shapes import InferShapes\n", "from qonnx.transformation.fold_constants import FoldConstants\n", "from qonnx.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames, RemoveStaticGraphInputs\n", "\n", "cnv = get_test_model_trained(\"CNV\", 1, 1)\n", - "export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), build_dir + \"/end2end_cnv_w1a1_export.onnx\")\n", - "model = ModelWrapper(build_dir + \"/end2end_cnv_w1a1_export.onnx\")\n", + "export_onnx_path = build_dir + \"/end2end_cnv_w1a1_export.onnx\"\n", + "export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path)\n", + "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)\n", + "model = ModelWrapper(export_onnx_path)\n", + "model = model.transform(ConvertQONNXtoFINN())\n", "model = model.transform(InferShapes())\n", "model = model.transform(FoldConstants())\n", "model = model.transform(GiveUniqueNodeNames())\n", @@ -149,10 +154,12 @@ "# preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", "totensor_pyt = ToTensor()\n", "chkpt_preproc_name = build_dir+\"/end2end_cnv_w1a1_preproc.onnx\"\n", - "export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", + "export_qonnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", + "qonnx_cleanup(chkpt_preproc_name, out_file=chkpt_preproc_name)\n", + "pre_model = ModelWrapper(chkpt_preproc_name)\n", + "pre_model = pre_model.transform(ConvertQONNXtoFINN())\n", "\n", "# join preprocessing and core model\n", - "pre_model = ModelWrapper(chkpt_preproc_name)\n", "model = model.transform(MergeONNXModels(pre_model))\n", "# add input quantization annotation: UINT8 for all BNN-PYNQ models\n", "global_inp_name = model.graph.input[0].name\n", @@ -633,7 +640,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb index f99944e31f..a5c97328a5 100644 --- a/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb +++ b/notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb @@ -84,17 +84,20 @@ "import torch\n", "import onnx\n", "from finn.util.test import get_test_model_trained\n", - "from brevitas.export import export_finn_onnx\n", + "from brevitas.export import export_qonnx\n", + "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", "\n", "tfc = get_test_model_trained(\"TFC\", 1, 1)\n", - "export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\"); # semicolon added to suppress log" + "export_onnx_path = build_dir+\"/tfc_w1_a1.onnx\"\n", + "export_qonnx(tfc, torch.randn(1, 1, 28, 28), build_dir+\"/tfc_w1_a1.onnx\"); # semicolon added to suppress log\n", + "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The model was now exported, loaded with the pretrained weights and saved under the name \"tfc_w1_a1.onnx\".\n", + "The model was now exported in QONNX format, loaded with the 
pretrained weights and saved under the name \"tfc_w1_a1.onnx\".\n", "To visualize the exported model, Netron can be used. Netron is a visualizer for neural networks and allows interactive investigation of network properties. For example, you can click on the individual nodes and view the properties." ] }, @@ -111,7 +114,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now that we have the model in .onnx format, we can work with it using FINN. For that, `ModelWrapper` is used. It is a wrapper around the ONNX model which provides several helper functions to make it easier to work with the model. 'ModelWrapper' is imported from the [QONNX repo](https://github.com/fastmachinelearning/qonnx), this repository contains several functionality that is used in FINN." + "Now that we have the model in .onnx format, we can work with it using FINN. For that, `ModelWrapper` is used. It is a wrapper around the ONNX model which provides several helper functions to make it easier to work with the model. 'ModelWrapper' is imported from the [QONNX repo](https://github.com/fastmachinelearning/qonnx), this repository contains several functionality that is used in FINN. The model was exported in QONNX format, to feed it into the FINN flow, our first step is to convert it to the FINN-ONNX format." ] }, { @@ -121,7 +124,9 @@ "outputs": [], "source": [ "from qonnx.core.modelwrapper import ModelWrapper\n", - "model = ModelWrapper(build_dir+\"/tfc_w1_a1.onnx\")" + "from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN\n", + "model = ModelWrapper(build_dir+\"/tfc_w1_a1.onnx\")\n", + "model = model.transform(ConvertQONNXtoFINN())" ] }, { @@ -268,10 +273,12 @@ "# preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", "totensor_pyt = ToTensor()\n", "chkpt_preproc_name = build_dir+\"/tfc_w1_a1_preproc.onnx\"\n", - "export_finn_onnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", + "export_qonnx(totensor_pyt, torch.randn(ishape), chkpt_preproc_name)\n", + "qonnx_cleanup(chkpt_preproc_name, out_file=chkpt_preproc_name)\n", + "pre_model = ModelWrapper(chkpt_preproc_name)\n", + "pre_model = pre_model.transform(ConvertQONNXtoFINN())\n", "\n", "# join preprocessing and core model\n", - "pre_model = ModelWrapper(chkpt_preproc_name)\n", "model = model.transform(MergeONNXModels(pre_model))\n", "# add input quantization annotation: UINT8 for all BNN-PYNQ models\n", "global_inp_name = model.graph.input[0].name\n", @@ -1007,7 +1014,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb index 0f90b8ee78..2885100512 100644 --- a/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb +++ b/notebooks/end2end_example/cybersecurity/1-train-mlp-with-brevitas.ipynb @@ -53,7 +53,7 @@ " * [(Option 1) Train the Model from Scratch](#train_scratch)\n", " * [(Option 2) Load Pre-Trained Parameters](#load_pretrained)\n", "* [Network Surgery Before Export](#network_surgery)\n", - "* [Export to FINN-ONNX](#export_finn_onnx)" + "* [Export to QONNX and Conversion to FINN-ONNX](#export_qonnx)" ] }, { @@ -667,12 +667,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Export to FINN-ONNX \n", + "# Export to QONNX and Conversion to FINN-ONNX \n", "\n", "\n", "[ONNX](https://onnx.ai/) is an open format built to represent 
machine learning models, and the FINN compiler expects an ONNX model as input. We'll now export our network into ONNX to be imported and used in FINN for the next notebooks. Note that the particular ONNX representation used for FINN differs from standard ONNX, you can read more about this [here](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-finn-onnx).\n", "\n", - "You can see below how we export a trained network in Brevitas into a FINN-compatible ONNX representation. Note how we create a `QuantTensor` instance with dummy data to tell Brevitas how our inputs look like, which will be used to set the input quantization annotation on the exported model." + "You can see below how we export a trained network in Brevitas into a FINN-compatible ONNX representation (QONNX). QONNX is the format we can export from Brevitas, to feed it into the FINN compiler, we will need to make a conversion to the FINN-ONNX format which is the intermediate representation the compiler works on. The conversion of the FINN-ONNX format is a FINN compiler transformation and to be able to apply it to our model, we will need to wrap it into [ModelWrapper](https://finn.readthedocs.io/en/latest/internals.html#modelwrapper). This is a wrapper around the ONNX model which provides several helper functions to make it easier to work with the model. Then we can call the conversion function to obtain the model in FINN-ONNX format." ] }, { @@ -681,8 +681,10 @@ "metadata": {}, "outputs": [], "source": [ - "from brevitas.export import export_finn_onnx\n", - "from brevitas.quant_tensor import QuantTensor\n", + "from brevitas.export import export_qonnx\n", + "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", + "from qonnx.core.modelwrapper import ModelWrapper\n", + "from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN\n", "\n", "ready_model_filename = model_dir + \"/cybsec-mlp-ready.onnx\"\n", "input_shape = (1, 600)\n", @@ -692,18 +694,23 @@ "input_a = 2 * input_a - 1\n", "scale = 1.0\n", "input_t = torch.from_numpy(input_a * scale)\n", - "input_qt = QuantTensor(\n", - " input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True\n", - ")\n", "\n", "#Move to CPU before export\n", "model_for_export.cpu()\n", "\n", "# Export to ONNX\n", - "export_finn_onnx(\n", - " model_for_export, export_path=ready_model_filename, input_t=input_qt\n", + "export_qonnx(\n", + " model_for_export, export_path=ready_model_filename, input_t=input_t\n", ")\n", "\n", + "# clean-up\n", + "qonnx_cleanup(ready_model_filename, out_file=ready_model_filename)\n", + "\n", + "# ModelWrapper\n", + "model = ModelWrapper(ready_model_filename)\n", + "model = model.transform(ConvertQONNXtoFINN())\n", + "model.save(ready_model_filename)\n", + "\n", "print(\"Model saved to %s\" % ready_model_filename)" ] }, @@ -759,7 +766,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.10.6" } }, "nbformat": 4, diff --git a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb index 5f4924b309..a5bc165573 100644 --- a/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb +++ b/notebooks/end2end_example/cybersecurity/2-import-into-finn-and-verify.ipynb @@ -399,7 +399,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.5" + "version": "3.10.6" } }, "nbformat": 4, 
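Taken together, these notebook and test updates replace the old export_finn_onnx call with the same three-step flow everywhere: QONNX export, QONNX cleanup, then conversion to FINN-ONNX inside the compiler. A minimal standalone sketch of that recurring sequence, using the LFC-w1a1 test model and a placeholder output path chosen only for illustration:

import torch
from brevitas.export import export_qonnx
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.util.cleanup import cleanup as qonnx_cleanup

from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
from finn.util.test import get_test_model_trained

export_path = "lfc_w1a1_qonnx.onnx"  # placeholder filename

# Export the trained Brevitas model in the QONNX interchange format
lfc = get_test_model_trained("LFC", 1, 1)
export_qonnx(lfc, torch.randn(1, 1, 28, 28), export_path)

# Clean up the exported QONNX graph in place before handing it to FINN
qonnx_cleanup(export_path, out_file=export_path)

# Import into FINN and convert QONNX to the FINN-ONNX dialect, after which the
# usual InferShapes/FoldConstants and streamlining transformations can follow
model = ModelWrapper(export_path)
model = model.transform(ConvertQONNXtoFINN())
model.save(export_path)

The preprocessing models (NormalizePreProc, ToTensor) go through the same export-cleanup-convert sequence in these diffs before being merged into the core model via MergeONNXModels.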
diff --git a/tests/brevitas/test_brevitas_mobilenet.py b/tests/brevitas/test_brevitas_mobilenet.py index f98e85bb85..be200f6cd4 100644 --- a/tests/brevitas/test_brevitas_mobilenet.py +++ b/tests/brevitas/test_brevitas_mobilenet.py @@ -56,6 +56,7 @@ @pytest.mark.brevitas_export +@pytest.mark.xfail def test_brevitas_mobilenet(): # get single image as input and prepare image img = Image.open(get_finn_root() + "/tests/brevitas/king_charles.jpg") diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index 12f349b1e1..c2542380f1 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -13,8 +13,7 @@ basics_notebooks = [ pytest.param(notebook_basic_dir + "0_how_to_work_with_onnx.ipynb"), - pytest.param(notebook_basic_dir + "1a_brevitas_network_import_via_FINN-ONNX.ipynb"), - pytest.param(notebook_basic_dir + "1b_brevitas_network_import_via_QONNX.ipynb"), + pytest.param(notebook_basic_dir + "1_brevitas_network_import_via_QONNX.ipynb"), ] advanced_notebooks = [ From 96fc4f57670811fafe1753a63bf0ccfc521da077 Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 7 Jul 2023 15:54:13 +0100 Subject: [PATCH 538/628] [Deps] Update qonnx version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 651f06452b..67a2832b3d 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -QONNX_COMMIT="0aec35a16948155e81c1640b71650206e733db3e" +QONNX_COMMIT="90f2936e72cc689873e03a4b882bfeb857d51306" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From 3873325a31897b8ccbde9a211f90d5184338368e Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 11 Jul 2023 09:44:30 +0100 Subject: [PATCH 539/628] [AlveoBuild] Set axilite address range to a minimum of 4K --- src/finn/transformation/fpgadataflow/create_stitched_ip.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index 6e40f39687..c9db69400b 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -536,6 +536,7 @@ def apply(self, model): puts "CRITICAL WARNING: Unable to construct address map for $port." } { set range [expr 2**$awidth] + set range [expr $range < 4096 ? 
4096 : $range] puts "INFO: Building address map for $port: 0+:$range" set name [get_property NAME $port] set addr_block [ipx::add_address_block Reg0 [ipx::add_memory_map $name $core]] From 1d6d5ee3d45deacc9700fb188af6284b94a136e1 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 11 Jul 2023 11:40:52 +0100 Subject: [PATCH 540/628] Remove reference to get_build_env Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_cybsec_mlp.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index a1681dc6fa..e31c86c985 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -50,6 +50,7 @@ from finn.util.test import load_test_checkpoint_or_skip target_clk_ns = 10 +build_board = "Pynq-Z1" build_dir = os.environ["FINN_BUILD_DIR"] @@ -150,14 +151,13 @@ def test_end2end_cybsec_mlp_export(): def test_end2end_cybsec_mlp_build(): model_file = get_checkpoint_name("export") load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) output_dir = make_build_dir("test_end2end_cybsec_mlp_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, target_fps=1000000, synth_clk_period_ns=target_clk_ns, - board="Pynq-Z1", + board=build_board, shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, From 1e898d83737331a1346dcd9b802a20ef5ba8c58d Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 17 Jul 2023 21:22:52 +0100 Subject: [PATCH 541/628] Adjust how deployment dirs are created for sanity_bnn suite Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 8198538388..6b288bd382 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -294,7 +294,8 @@ def topology2dataset(topology): def deploy_based_on_board(model, model_title, topology, wbits, abits, board): - if os.environ.get('FINN_DEPLOY_DIR') is not None: + # Check if a deployment directory for this board type already exists + if ("FINN_DEPLOY_DIR" in os.environ) and (board in os.environ["FINN_DEPLOY_DIR"]): deploy_dir_root = os.environ["FINN_DEPLOY_DIR"] else: deploy_dir_root = make_build_dir(prefix="hw_deployment_" + board + "_") From a641f011945d79a4a0028b1ea40a1b169ef15efe Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Mon, 17 Jul 2023 21:24:02 +0100 Subject: [PATCH 542/628] Latest dev changes has affected what tests pass or are destructive, adjust test scripts to workaround these changes Signed-off-by: Fionn O'Donohoe --- docker/jenkins/hack_driver_script.py | 8 ++------ docker/jenkins/test_bnn_hw_pytest.py | 18 ++---------------- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py index cd3becf7cf..568c62150d 100755 --- a/docker/jenkins/hack_driver_script.py +++ b/docker/jenkins/hack_driver_script.py @@ -18,13 +18,9 @@ def hack_driver_script(board, test_dir): # Specify the line to be replaced and the new line line_to_replace = "ishape_normal" if "cnv" in test_dir: - new_line = " \"ishape_normal\" : [(1, 32, 32, 3)]," + new_line = " \"ishape_normal\" : [(1, 3, 32, 32)]," else: - # Usually a size of (1, 784) to being with - if board == "Pynq-Z1": - new_line = " \"ishape_normal\" : 
[(1, 28, 28, 1)]," - else: - new_line = " \"ishape_normal\" : [(1, 1, 28, 28)]," + new_line = " \"ishape_normal\" : [(1, 1, 28, 28)]," # Iterate over the lines and replace the specified line for i in range(len(lines)): diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index f2b437e800..1d1e22ed2c 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -25,17 +25,6 @@ def remove_cache_dirs(dir_list): del tmp_list[i] return tmp_list -def remove_destructive_board_tests(board, test_list): - tmp_list = list(test_list) - if "Pynq" in board: - # both tests are destructive to the Pynq-Z1 board and require a board reboot - for i in range(len(tmp_list)-1, -1, -1): - if "bnn_w2_a2_cnv_QE-True" in tmp_list[i]: - del tmp_list[i] - elif "bnn_w1_a1_tfc_QE-True" in tmp_list[i]: - del tmp_list[i] - return tmp_list - def delete_file(file_path): # Check if the file exists before deleting it if os.path.exists(file_path): @@ -78,11 +67,8 @@ def pytest_generate_tests(metafunc): test_dirs = remove_cache_dirs(test_dirs) for marker in all_markers_used: - platform = get_platform(marker) - if "Pynq" in marker: - remove_destructive_board_tests("Pynq", test_dirs) - scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) - elif "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: + if "Pynq" in marker or "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: + platform = get_platform(marker) scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) if len(scenarios) > 0: From eb5faa77dbbdcad2cb192fcd1b419391c8324ad5 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 18 Jul 2023 15:25:55 +0100 Subject: [PATCH 543/628] [Deps] Update qonnx version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 67a2832b3d..49d8621bb9 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="90f2936e72cc689873e03a4b882bfeb857d51306" +QONNX_COMMIT="8755423377e9c01dd2d2358c320484399b5d6625" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From ba0d58f6cbe671adcee74c1bc83d775d9f201e9a Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 14:47:50 +0100 Subject: [PATCH 544/628] remove additional spacing Signed-off-by: Fionn O'Donohoe --- docker/jenkins/test_bnn_hw_pytest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index 1d1e22ed2c..961efd1cc1 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -94,7 +94,7 @@ def test_type_execute(self, test_dir, batch_size, platform): bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) assert result.returncode == 0 - + # Load the output and reference arrays output_array = np.load(output_execute_results_file) reference_array = np.load(execute_results_reference_file) @@ -159,8 +159,8 @@ def test_type_throughput(self, test_dir, batch_size, platform): ) ret_str += "\n" + "-----------------------------" largest_bsize = bsize_range[-1] - + # Dump the metrics to a text file with open(throughput_results_formatted_file, "w") as f: f.write(ret_str) - assert os.path.exists(throughput_results_formatted_file) \ No newline at end of file + assert os.path.exists(throughput_results_formatted_file) From 111c873027cf7eb918b8da93cc8e41c350fab0b6 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:06:41 +0100 Subject: [PATCH 545/628] No need for buildDiscarder function in the pipeline itself Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 12725594df..f73fd78baa 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,7 +1,4 @@ pipeline { - options { - buildDiscarder(logRotator(numToKeepStr: '30', artifactNumToKeepStr: '30')) - } agent { node { label 'finn-build' } } environment { FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" From 746315c3c533df717b7def757aee0e186d7f9562 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:08:08 +0100 Subject: [PATCH 546/628] Env variables are controlled by external CI system and can be removed from the pipeline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f73fd78baa..9d9d6ebabb 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,12 +1,5 @@ pipeline { agent { node { label 'finn-build' } } - environment { - FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" - FINN_XILINX_VERSION="2022.1" - FINN_DOCKER_TAG="xilinx/finn:jenkins" - FINN_HOST_BUILD_DIR="/scratch/users/finn_ci" - PLATFORM_REPO_PATHS="/opt/xilinx/platforms" - } stages { stage('Quicktest') { steps { From 91a5437fdbc06bbd9fc63c3a6fddda04e3b6f865 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 
Jul 2023 15:08:54 +0100 Subject: [PATCH 547/628] Specific agent not required when setting up pipeline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 9d9d6ebabb..6f01b06e55 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,5 +1,5 @@ pipeline { - agent { node { label 'finn-build' } } + agent none stages { stage('Quicktest') { steps { From 8b7d7812292f98e23083c866c0f2352bdda6b153 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:10:41 +0100 Subject: [PATCH 548/628] Add boolean build parameters in order to select tests Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 6f01b06e55..9100e3ed0d 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,5 +1,9 @@ pipeline { agent none + parameters { + booleanParam(name: 'fpgadataflow', defaultValue: true, description: 'Run fpgadataflow tests') + booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') + } stages { stage('Quicktest') { steps { From 88462e1f8d3138b109d10ede04a1fc5acec96095 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:22:08 +0100 Subject: [PATCH 549/628] Add sanity suite unit and fpgadataflow tests The fpgadataflow tests were placed in their own stage with their own build parameter as the test takes longer than a day to run. This means that this suite cannot sensibly be used in daily CI test runs. Some notes on the stages and their setup: - the when{} block is used as an 'if' statement, checking if a certain input parameter to the pipeline has been set. By default - the fpgadataflow stage will not run unless explicitly set to true by the tester/CI system - FINN_HOST_BUILD_DIR is set to a unique directory per stage for ease of use/test cleanup - catchError is used in order to allow the pipeline to continue to possible future stages if a stage along the way fails. 
Otherwise the first failed stage found would end the test run Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 79 +++++++++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 9100e3ed0d..eb94885362 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,20 +1,79 @@ pipeline { agent none parameters { - booleanParam(name: 'fpgadataflow', defaultValue: true, description: 'Run fpgadataflow tests') + booleanParam(name: 'fpgadataflow', defaultValue: false, description: 'Run fpgadataflow tests') booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') } stages { - stage('Quicktest') { - steps { - sh 'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh './run-docker.sh quicktest' + stage('Sanity Tests') { + parallel { + stage('Sanity - Unit Tests') { + when { + expression { params['sanity'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "sanity_ut" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Multiple markers with pytest needs its own script + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") + sh './run-docker.sh ./run-tests.sh' + } + } + } + } + stage('Sanity - fpgadataflow Tests') { + when { + expression { params['fpgadataflow'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "fpgadataflow" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + } + } + } + } } } } } + +void cleanPreviousBuildFiles(String buildDir) { + // Delete any build files from a previous build + // Previous build folders affect findCopyZip() and can cause the stage to fail + sh "rm -rf ${buildDir}/*" +} + +void createMultiMarkerScript(String markers, String testResultsFilename) { + // Passing multiple markers when running ./run-docker.sh does not work with bash. 
+ // Therefore, create a script to maintain the single quotes that surround the markers + sh """echo "#!/bin/bash +python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tests.sh + """ + + // Give permissions to script + sh 'chmod 777 run-tests.sh' +} + +void runDockerPytestWithMarker(String marker, String testResultsFilename) { + sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" +} From 80029f1b0a603e76e855d96c21009d0ec6ad886c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:30:50 +0100 Subject: [PATCH 550/628] Add sanity bitstream build tests Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index eb94885362..1b1f4fc92e 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -7,6 +7,30 @@ pipeline { stages { stage('Sanity Tests') { parallel { + stage('Sanity - Build Hardware') { + when { + expression { return params['sanity'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "bnn_build_sanity" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + } + } + } + } stage('Sanity - Unit Tests') { when { expression { params['sanity'] } From 3900428317634ee06f5fee549e46047057ecab78 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:35:31 +0100 Subject: [PATCH 551/628] Collect all files needed for HW testing, adding a stage to collect test scripts Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 39 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1b1f4fc92e..06a1910b16 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,6 +27,12 @@ pipeline { // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + + // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves + findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") + findCopyZip("ZCU104", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_ZCU104_zip") + findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") + findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") } } } @@ -78,6 +84,22 @@ pipeline { } } } + stage('Sanity - Setup Hardware Tests') { + when { + expression { return params['sanity'] } + } + agent { + label 'finn-build' + } + steps { + script { + // Stash the HW test scripts to be used on slave nodes + dir('docker/jenkins') { + stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' + } + } + } + } } } @@ -101,3 +123,20 @@ python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tes void runDockerPytestWithMarker(String marker, String testResultsFilename) { sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" } + +void findBoardBuildFiles(String board, String 
searchDir, String dirToFind) { + def result = sh(script: "find $searchDir -type d -name \"$dirToFind*\"", returnStdout: true).trim() + if (result.empty) { + error "Directory containing '$dirToFind' not found." + } + return result +} + +void findCopyZip(String board, String findDir, String copyDir, String stashName) { + def buildDir = findBoardBuildFiles(board, findDir, "hw_deployment_${board}") + sh "cp -r ${buildDir}/${board} ${copyDir}/" + dir(copyDir) { + sh "zip -r ${board}.zip ${board}/" + stash name: stashName, includes: "${board}.zip" + } +} From 31ef8d616047bc601f278821c92c0b920b58cebc Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:42:26 +0100 Subject: [PATCH 552/628] Add hw testing stages - only run if build stage was successful Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 175 +++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 06a1910b16..2b2a5786c6 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -33,6 +33,8 @@ pipeline { findCopyZip("ZCU104", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_ZCU104_zip") findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") + + env.BNN_BUILD_SANITY = "SUCCESS" } } } @@ -100,6 +102,159 @@ pipeline { } } } + stage('Sanity - Run Hardware Tests') { + parallel { + stage('BNN Sanity - U250') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (&& params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-u250' + } + environment { + BOARD = 'U250' + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script + sh './run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - Pynq-Z1') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-pynq' + } + environment { + BOARD = 'Pynq-Z1' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_PynqZ1_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + // The marker here omits the '-Z1' as '-' is a special character + // that will not work with Pytest + createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - ZCU104') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return 
(params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-zcu104' + } + environment { + BOARD = 'ZCU104' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - KV260_SOM') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-kv260' + } + environment { + BOARD = 'KV260_SOM' + USER_CREDENTIALS = credentials('user-ubuntu-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + } + } } } @@ -140,3 +295,23 @@ void findCopyZip(String board, String findDir, String copyDir, String stashName) stash name: stashName, includes: "${board}.zip" } } + +void createTestScript(String board, String marker, String testResultsFilename) { + if(board == "U250") + sh """echo "#!/bin/bash +. /opt/xilinx/xrt/setup.sh +. ${CONDA_ENV_ACTIVATE} +python hack_driver_script.py +python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh + """ + else + sh """echo "#!/bin/bash +. /etc/profile.d/pynq_venv.sh +. 
/etc/profile.d/xrt_setup.sh +python hack_driver_script.py +python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh + """ + + // Give permissions to script + sh 'chmod 777 run-tests.sh' +} From 674ef2669feedfa68bd84cd822d7a714971446b3 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:45:36 +0100 Subject: [PATCH 553/628] Only run HW tests if board is online first, fail the pipeline if board is offline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 72 +++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2b2a5786c6..60c9e47370 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -95,6 +95,12 @@ pipeline { } steps { script { + // Check which boards are online before running HW tests + env.ALVEO_HOST_ONLINE = isNodeOnline('finn-u250') + env.PYNQ_ONLINE = isNodeOnline('finn-pynq') + env.ZCU104_ONLINE = isNodeOnline('finn-zcu104') + env.KV260_ONLINE = isNodeOnline('finn-kv260') + // Stash the HW test scripts to be used on slave nodes dir('docker/jenkins') { stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' @@ -108,7 +114,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (&& params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.ALVEO_HOST_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-u250' @@ -144,7 +150,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.PYNQ_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-pynq' @@ -183,7 +189,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.ZCU104_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-zcu104' @@ -220,7 +226,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.KV260_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-kv260' @@ -255,6 +261,18 @@ pipeline { } } } + stage('Check Stage Results') { + agent { + label 'finn-build' + } + steps { + catchError(buildResult: 'SUCCESS') { + script { + checkAllBoards() + } + } + } + } } } @@ -315,3 +333,49 @@ python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh // Give permissions to script sh 'chmod 777 run-tests.sh' } + +void isNodeOnline(String labelName) { + Label label = Jenkins.instance.getLabel(labelName) + def agentOnline = false + + if (label) { + List nodes = Jenkins.instance.getNodes() + + nodes.each { node -> + if (node.getAssignedLabels().contains(label)) { + def computer = node.toComputer() + if (computer && computer.isOnline()) { + agentOnline = true + } else { + echo """Agent ${node.displayName} is offline""" + } + } + } + } else { + echo """Node with label ${labelName} not found""" + } + + return agentOnline +} + +def 
checkAllBoards() { + def overallResult = true + + if (env.PYNQ_ONLINE == 'false') { + overallResult = false + } + + if (env.ALVEO_HOST_ONLINE == 'false') { + overallResult = false + } + + if (env.KV260_ONLINE == 'false') { + overallResult = false + } + + if (env.ZCU104_ONLINE == 'false') { + overallResult = false + } + + return overallResult +} From 507a97bdca4f3b2f202295e4bb9225e57cec7ea1 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:58:24 +0100 Subject: [PATCH 554/628] Collect test result files in final stage and plot with JUnit plugin - only if that test stage ran successfully Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 86 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 60c9e47370..6402fcde6c 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -34,6 +34,10 @@ pipeline { findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") + // Stash the test results file(s) + stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_SANITY = "SUCCESS" } } @@ -58,6 +62,12 @@ pipeline { // Multiple markers with pytest needs its own script createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") sh './run-docker.sh ./run-tests.sh' + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.SANITY_UT = "SUCCESS" } } } @@ -80,6 +90,12 @@ pipeline { // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.FPGADATAFLOW = "SUCCESS" } } } @@ -139,12 +155,23 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_U250 = "SUCCESS" + // Execute the script sh './run-tests.sh' } } } } + post { + always { + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - Pynq-Z1') { when { @@ -178,12 +205,24 @@ pipeline { // that will not work with Pytest createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_PYNQZ1 = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - ZCU104') { when { @@ -215,12 +254,24 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env 
variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_ZCU104 = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - KV260_SOM') { when { @@ -252,12 +303,24 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_KV260_SOM = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } } } @@ -272,6 +335,23 @@ pipeline { } } } + post { + always { + script { + // Only unstash for stages that ran + unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") + unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") + unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") + unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") + unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") + unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + + // Plot what XML files were created during the test run + junit '**/*.xml' + } + } + } } } } @@ -379,3 +459,9 @@ def checkAllBoards() { return overallResult } + +void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) { + if (stageEnvVariableSet) { + unstash stashName + } +} From 06a6b3d5c58f97fbfcbb9e93744807c9cedeabf9 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:59:55 +0100 Subject: [PATCH 555/628] Add post success/failure stage messages Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 6402fcde6c..e757cb7710 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -171,6 +171,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - Pynq-Z1') { @@ -222,6 +228,12 @@ pipeline { stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - ZCU104') { @@ -271,6 +283,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - KV260_SOM') { @@ -320,6 +338,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + 
postFailure(env.BOARD) + } } } } @@ -465,3 +489,11 @@ void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) { unstash stashName } } + +void postFailure(String board) { + echo "Failed to run ${board} tests" +} + +void postSuccess(String board) { + echo "${board} tests passed" +} From be6ed941c76370b20b54e44db4e717920ba9ae0c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:20:01 +0100 Subject: [PATCH 556/628] Add file archiving - for XML test result files Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index e757cb7710..2f7eab1190 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -373,6 +373,9 @@ pipeline { // Plot what XML files were created during the test run junit '**/*.xml' + + // Archive the XML test results + archiveArtifacts artifacts: "*.xml" } } } From d31ffcaef305d7d099f227cedb3d64061acfaa9d Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:28:55 +0100 Subject: [PATCH 557/628] Add end2end build tests - collecting results as well Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 159 +++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2f7eab1190..c15e686d16 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -3,6 +3,7 @@ pipeline { parameters { booleanParam(name: 'fpgadataflow', defaultValue: false, description: 'Run fpgadataflow tests') booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') + booleanParam(name: 'end2end', defaultValue: false, description: 'Run end2end tests') } stages { stage('Sanity Tests') { @@ -102,6 +103,159 @@ pipeline { } } } + stage('End2end - Build Hardware') { + parallel { + stage('End2end') { + when { + expression { params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "end2end" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Delete any build files from a previous build + sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}.xml") + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.END2END = "SUCCESS" + } + } + } + } + stage('BNN end2end - U250') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "U250" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + 
env.BNN_BUILD_U250 = "SUCCESS" + } + } + } + stage('BNN end2end - Pynq-Z1') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "Pynq-Z1" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_PYNQZ1 = "SUCCESS" + } + } + } + stage('BNN end2end - ZCU104') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "ZCU104" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_ZCU104 = "SUCCESS" + } + } + } + stage('BNN end2end - KV260_SOM') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "KV260_SOM" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_KV260_SOM = "SUCCESS" + } + } + } + } + } stage('Sanity - Setup Hardware Tests') { when { expression { return params['sanity'] } @@ -370,6 +524,11 @@ pipeline { unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") + unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") + unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") + unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, 
"bnn_build_full_KV260_SOM") // Plot what XML files were created during the test run junit '**/*.xml' From df81b048314d5d2e5ad6db4e9b580edcdf6bb34f Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:40:21 +0100 Subject: [PATCH 558/628] Add end2end hardware tests - collecting results as well Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 230 ++++++++++++++++++++++++++++++++++++- 1 file changed, 229 insertions(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index c15e686d16..a117625230 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -256,7 +256,7 @@ pipeline { } } } - stage('Sanity - Setup Hardware Tests') { + stage('Sanity & BNN end2end - Setup Hardware Tests') { when { expression { return params['sanity'] } } @@ -502,6 +502,230 @@ pipeline { } } } + stage('End2end - Run Hardware Tests') { + parallel { + stage('BNN end2end - U250') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.ALVEO_HOST_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_U250 == 'SUCCESS') } + } + agent { + label 'finn-u250' + } + environment { + BOARD = 'U250' + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_U250 = "SUCCESS" + + // Execute the script + sh './run-tests.sh' + } + } + } + } + post { + always { + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - Pynq-Z1') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.PYNQ_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_PYNQZ1 == 'SUCCESS') } + } + agent { + label 'finn-pynq' + } + environment { + BOARD = 'Pynq-Z1' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "PynqZ1_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + // The marker here omits the '-Z1' as '-' is a special character + // that will not work with Pytest + createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_PYNQZ1 = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_PynqZ1", includes: 
"bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - ZCU104') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.ZCU104_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_ZCU104 == 'SUCCESS') } + } + agent { + label 'finn-zcu104' + } + environment { + BOARD = 'ZCU104' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_ZCU104 = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - KV260_SOM') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.KV260_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_KV260_SOM == 'SUCCESS') } + } + agent { + label 'finn-kv260' + } + environment { + BOARD = 'KV260_SOM' + USER_CREDENTIALS = credentials('user-ubuntu-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_KV260_SOM = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + } + } stage('Check Stage Results') { agent { label 'finn-build' @@ -529,6 +753,10 @@ pipeline { unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") + unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") + unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") + unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") 
+ unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") // Plot what XML files were created during the test run junit '**/*.xml' From feb4b277c679c96e1528e8753e85431a336881cb Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:42:33 +0100 Subject: [PATCH 559/628] Add catchError for end2end bnn build stages to allow pipeline to continue on error Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 96 +++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index a117625230..1fc80a6feb 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -147,20 +147,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_U250 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_U250 = "SUCCESS" + } } } } @@ -177,20 +179,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_PYNQZ1 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_PYNQZ1 = 
"SUCCESS" + } } } } @@ -207,20 +211,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_ZCU104 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_ZCU104 = "SUCCESS" + } } } } @@ -237,20 +243,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_KV260_SOM = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_KV260_SOM = "SUCCESS" + } } } } From 7e258a84e79980484156f29701a768d835597524 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:19:35 +0100 Subject: [PATCH 560/628] Add pytest-html library and add to all tests in Jenkinsfile. 
Archive the results as well Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 76 +++++++++++++++++++------------------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index d69ccc9725..69425df1ee 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -102,6 +102,7 @@ RUN pip install pandas==1.5.3 RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading +RUN pip install pytest-html==3.2.0 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1fc80a6feb..d8869eeb5b 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,7 +27,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}") // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") @@ -36,7 +36,7 @@ pipeline { findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml" + stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_SANITY = "SUCCESS" @@ -61,11 +61,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Multiple markers with pytest needs its own script - createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}") sh './run-docker.sh ./run-tests.sh' // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.SANITY_UT = "SUCCESS" @@ -90,10 +90,10 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}") // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.FPGADATAFLOW = "SUCCESS" @@ -123,10 +123,10 @@ pipeline { sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}.xml") + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}") // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.END2END = "SUCCESS" @@ 
-154,11 +154,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_U250 = "SUCCESS" @@ -186,11 +186,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_PYNQZ1 = "SUCCESS" @@ -218,11 +218,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_ZCU104 = "SUCCESS" @@ -250,11 +250,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_KV260_SOM = "SUCCESS" @@ -315,7 +315,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_U250 = "SUCCESS" @@ -330,7 +330,7 @@ pipeline { always { dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: 
"xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -371,7 +371,7 @@ pipeline { // Create test script // The marker here omits the '-Z1' as '-' is a special character // that will not work with Pytest - createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_PYNQZ1 = "SUCCESS" @@ -387,7 +387,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -426,7 +426,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_ZCU104 = "SUCCESS" @@ -442,7 +442,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -481,7 +481,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_KV260_SOM = "SUCCESS" @@ -497,7 +497,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -539,7 +539,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_U250 = "SUCCESS" @@ -554,7 +554,7 @@ pipeline { always { dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -595,7 +595,7 @@ pipeline { // Create test script // The marker here omits the '-Z1' as '-' is a special character // that will not work with Pytest - createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_PYNQZ1 = "SUCCESS" @@ -611,7 +611,7 @@ pipeline { // Get 
test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -650,7 +650,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_ZCU104 = "SUCCESS" @@ -666,7 +666,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -705,7 +705,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_KV260_SOM = "SUCCESS" @@ -721,7 +721,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -769,8 +769,8 @@ pipeline { // Plot what XML files were created during the test run junit '**/*.xml' - // Archive the XML test results - archiveArtifacts artifacts: "*.xml" + // Archive the XML & HTML test results + archiveArtifacts artifacts: "*.xml *.html" } } } @@ -788,7 +788,7 @@ void createMultiMarkerScript(String markers, String testResultsFilename) { // Passing multiple markers when running ./run-docker.sh does not work with bash. // Therefore, create a script to maintain the single quotes that surround the markers sh """echo "#!/bin/bash -python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ // Give permissions to script @@ -796,7 +796,7 @@ python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tes } void runDockerPytestWithMarker(String marker, String testResultsFilename) { - sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" + sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html""" } void findBoardBuildFiles(String board, String searchDir, String dirToFind) { @@ -822,14 +822,14 @@ void createTestScript(String board, String marker, String testResultsFilename) { . /opt/xilinx/xrt/setup.sh . ${CONDA_ENV_ACTIVATE} python hack_driver_script.py -python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ else sh """echo "#!/bin/bash . /etc/profile.d/pynq_venv.sh . 
/etc/profile.d/xrt_setup.sh python hack_driver_script.py -python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ // Give permissions to script From 6b5e7680781fde8fbbcd7a529cbf1ca6c52f1b58 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:39:59 +0100 Subject: [PATCH 561/628] Add pytest-html-merger library to combine individual HTML files created in the jenkins pipeline Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 50 ++++++++++++++++++++++---------------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 69425df1ee..91a22952ff 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -103,6 +103,7 @@ RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading RUN pip install pytest-html==3.2.0 +RUN pip install pytest-html-merger==0.0.8 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index d8869eeb5b..f782569643 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -748,29 +748,37 @@ pipeline { post { always { script { - // Only unstash for stages that ran - unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") - unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") - unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") - unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") - unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") - unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") - unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") - unstashSuccessfulStage(env.END2END, "end2end") - unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") - unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") - unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") - unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") - unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") - unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") - unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") - unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") - - // Plot what XML files were created during the test run - junit '**/*.xml' + sh 'mkdir -p reports' + cleanPreviousBuildFiles('reports') + dir('reports') { + // Only unstash for stages that ran + unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") + unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") + unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") + unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") + unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") + unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") + unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, 
"bnn_build_full_PynqZ1") + unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") + unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") + unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") + unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") + unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") + unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") + } + + // Combine individual HTML files to one single report + sh './run-docker.sh pytest_html_merger -i reports/ -o reports/test_report_final.html' // Archive the XML & HTML test results - archiveArtifacts artifacts: "*.xml *.html" + archiveArtifacts artifacts: "reports/*.xml" + archiveArtifacts artifacts: "reports/*.html" + + // Plot what XML files were created during the test run + junit 'reports/*.xml' } } } From fb9218e15b8ad0b8bacf4af610c5df1fb50e52c0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:56:08 +0100 Subject: [PATCH 562/628] Add code coverage for sanity unit tests and for fpgadataflow tests. Archive the results Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 31 ++++++++++++++++++++----------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 91a22952ff..e11e8136fd 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -104,6 +104,7 @@ RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading RUN pip install pytest-html==3.2.0 RUN pip install pytest-html-merger==0.0.8 +RUN pip install pytest-cov==4.1.0 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f782569643..b7998ae5b9 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,7 +27,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}") + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}", '') // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") @@ -61,7 +61,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Multiple markers with pytest needs its own script - createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}") + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}", "--cov --cov-report=html:coverage_sanity_ut") sh './run-docker.sh ./run-tests.sh' // Stash the test results file(s) @@ -90,7 +90,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}") + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}", "--cov --cov-report=html:coverage_fpgadataflow") // Stash the test results file(s) stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" @@ -123,7 +123,7 @@ pipeline { sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker(env.TEST_NAME, 
"${env.TEST_NAME}") + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}", '') // Stash the test results file(s) stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" @@ -154,7 +154,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -186,7 +186,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") // Stash the test results file(s) @@ -218,7 +218,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -250,7 +250,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -777,6 +777,9 @@ pipeline { archiveArtifacts artifacts: "reports/*.xml" archiveArtifacts artifacts: "reports/*.html" + archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") + archiveSuccessfulStage(env.FPGADATAFLOW, "coverage_fpgadataflow") + // Plot what XML files were created during the test run junit 'reports/*.xml' } @@ -796,15 +799,15 @@ void createMultiMarkerScript(String markers, String testResultsFilename) { // Passing multiple markers when running ./run-docker.sh does not work with bash. 
// Therefore, create a script to maintain the single quotes that surround the markers sh """echo "#!/bin/bash -python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh +python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}" >> run-tests.sh """ // Give permissions to script sh 'chmod 777 run-tests.sh' } -void runDockerPytestWithMarker(String marker, String testResultsFilename) { - sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html""" +void runDockerPytestWithMarker(String marker, String testResultsFilename, String additionalOptions) { + sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}""" } void findBoardBuildFiles(String board, String searchDir, String dirToFind) { @@ -896,6 +899,12 @@ void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) { } } +void archiveSuccessfulStage(String stageEnvVariableSet, String folder) { + if (stageEnvVariableSet) { + archiveArtifacts artifacts: "${folder}/**/*" + } +} + void postFailure(String board) { echo "Failed to run ${board} tests" } From c28e8f026d64b871dc4cedf349b4f990b5ddc4df Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 25 Jul 2023 13:09:15 +0100 Subject: [PATCH 563/628] Forgot to add additionalOptions as a function input Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index b7998ae5b9..98baad74ec 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -795,7 +795,7 @@ void cleanPreviousBuildFiles(String buildDir) { sh "rm -rf ${buildDir}/*" } -void createMultiMarkerScript(String markers, String testResultsFilename) { +void createMultiMarkerScript(String markers, String testResultsFilename, String additionalOptions) { // Passing multiple markers when running ./run-docker.sh does not work with bash. // Therefore, create a script to maintain the single quotes that surround the markers sh """echo "#!/bin/bash From 0a2b850da0b957db687615565de8f44f98ef4718 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 25 Jul 2023 13:18:09 +0100 Subject: [PATCH 564/628] Remove postFailure() and postSuccess() functions. This is an attempt to reduce the method count used in the pipeline as the current size causes the "groovyjarjarasm.asm.MethodTooLargeException: Method too large" error. As a result the pipeline does not run at all. This is a well known limitation. Removing unneccessary functions shrinks the method count and allows the Jenkinsfile to run. 
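For reference, a condensed sketch of the per-board pattern this change deletes (not a stage copied verbatim from the file; the stage name and echo step are placeholders), assembled from the postSuccess()/postFailure() helpers and the post blocks removed below:

    stage('BNN end2end - <board>') {
        steps {
            echo "run the board tests here"      // placeholder step
        }
        post {
            success { postSuccess(env.BOARD) }   // helper just echoes "<board> tests passed"
            failure { postFailure(env.BOARD) }   // helper just echoes "Failed to run <board> tests"
        }
    }

Each board stage carried one of these post blocks, so dropping them together with the two helpers trims the method count of the generated pipeline script.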
Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 56 -------------------------------------- 1 file changed, 56 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 98baad74ec..1ab8e81f46 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -333,12 +333,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - Pynq-Z1') { @@ -390,12 +384,6 @@ pipeline { stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - ZCU104') { @@ -445,12 +433,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - KV260_SOM') { @@ -500,12 +482,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } } @@ -557,12 +533,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - Pynq-Z1') { @@ -614,12 +584,6 @@ pipeline { stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - ZCU104') { @@ -669,12 +633,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - KV260_SOM') { @@ -724,12 +682,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } } @@ -904,11 +856,3 @@ void archiveSuccessfulStage(String stageEnvVariableSet, String folder) { archiveArtifacts artifacts: "${folder}/**/*" } } - -void postFailure(String board) { - echo "Failed to run ${board} tests" -} - -void postSuccess(String board) { - echo "${board} tests passed" -} From 61cba651c155c258fe5a529a2be5d2b3fdf2d3d0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 26 Jul 2023 15:46:15 +0100 Subject: [PATCH 565/628] Remove driver hack from BNN testing Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 4 +-- docker/jenkins/hack_driver_script.py | 49 ---------------------------- 2 files changed, 1 insertion(+), 52 deletions(-) delete mode 100755 docker/jenkins/hack_driver_script.py diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2f7eab1190..c19cb97dec 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { // Stash the HW test scripts to be used on slave nodes dir('docker/jenkins') { - stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' + 
stash name: 'bnn_test_files', includes: 'test_bnn_hw_pytest.py' } } } @@ -426,14 +426,12 @@ void createTestScript(String board, String marker, String testResultsFilename) { sh """echo "#!/bin/bash . /opt/xilinx/xrt/setup.sh . ${CONDA_ENV_ACTIVATE} -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh """ else sh """echo "#!/bin/bash . /etc/profile.d/pynq_venv.sh . /etc/profile.d/xrt_setup.sh -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh """ diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py deleted file mode 100755 index 568c62150d..0000000000 --- a/docker/jenkins/hack_driver_script.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -def remove_cache_dirs(dir_list): - tmp_list = list(dir_list) - for i in range(len(tmp_list)-1, -1, -1): - if ".pytest_cache" in tmp_list[i]: - del tmp_list[i] - elif "__pycache__" in tmp_list[i]: - del tmp_list[i] - return tmp_list - -def hack_driver_script(board, test_dir): - test_script_file = "driver.py" - # Read the contents of the test script file - with open(test_script_file, "r") as f: - lines = f.readlines() - - # Specify the line to be replaced and the new line - line_to_replace = "ishape_normal" - if "cnv" in test_dir: - new_line = " \"ishape_normal\" : [(1, 3, 32, 32)]," - else: - new_line = " \"ishape_normal\" : [(1, 1, 28, 28)]," - - # Iterate over the lines and replace the specified line - for i in range(len(lines)): - if line_to_replace in lines[i]: - lines[i] = new_line + "\n" - break # Only replace the first occurrence - - # Write the modified contents back to the test script file - with open(test_script_file, "w") as f: - f.writelines(lines) - -if __name__ == "__main__": - current_dir = os.getcwd() - board = os.path.basename(current_dir) - - # Get list of local directories - removing the Python cache directories - local_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] - local_dirs = remove_cache_dirs(local_dirs) - - # Now create the full paths for each relative path - local_dirs_full_path = [os.path.join(current_dir, name) for name in local_dirs if os.path.isdir(os.path.join(current_dir, name))] - - # Change the driver.py script for each of the test directories - for dir in local_dirs_full_path: - os.chdir(dir) - hack_driver_script(board, dir) From 10d34b5fea8904f949c4ddab98cd5c0a1321fa10 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 26 Jul 2023 15:49:32 +0100 Subject: [PATCH 566/628] Add input tensor data reshaping and transposing for BNN networks Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 6b288bd382..59fbb0c1cb 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -321,7 +321,22 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) - np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy) + + # Some changes are required in order to prepare the input tensor data for hardware + # testing. The ONNX graphs for these models contain nodes that manipulate the input + # tensor shape which FINN considers when creating the model. 
The same input tensor + # shaping needs to be done here on the input data. + # For the convolutional models, the graph contains the Transpose node. The Brevitas + # model works in NCHW layout but the FINN kernels are optimized for NHWC. + # The FC models contain a Reshape node, which FINN uses, so we therefore have to + # reshape the input tensor data to match the reshaping in the model + if topology == "cnv": + input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1) + else: + input_shape = input_tensor_npy.shape + input_tensor_npy = (input_shape[0], np.prod(input_shape[1:])) + + np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy.copy()) np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy) # driver.py and python libraries From 363a0874b6ab42e746f7cb89d36fcebf485a4a03 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 26 Jul 2023 18:22:16 +0100 Subject: [PATCH 567/628] [Deps] Update omx version --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 49d8621bb9..9e3ee3ef99 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -33,7 +33,7 @@ BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" CNPY_COMMIT="4e8810b1a8637695171ed346ce68f6984e585ef4" HLSLIB_COMMIT="c17aa478ae574971d115afa9fa4d9c215857d1ac" -OMX_COMMIT="d1065a788219ca0eb54d5e57600b1f9d7f67d4cc" +OMX_COMMIT="0b59762f9e4c4f7e5aa535ee9bc29f292434ca7a" AVNET_BDF_COMMIT="2d49cfc25766f07792c0b314489f21fe916b639b" XIL_BDF_COMMIT="8cf4bb674a919ac34e3d99d8d71a9e60af93d14e" KV260_BDF_COMMIT="98e0d3efc901f0b974006bc4370c2a7ad8856c79" From fe0915258bc9278a7d83ddbe27fc811ce604ae67 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 28 Jul 2023 15:10:26 +0100 Subject: [PATCH 568/628] Add markers for BNN test suites to quiesce warnings when running pytest Signed-off-by: Fionn O'Donohoe --- setup.cfg | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setup.cfg b/setup.cfg index fb070a436e..a70eaeb2f3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -130,6 +130,11 @@ markers = fpgadataflow: mark tests related to hls layers end2end: mark tests that run the end2end flow notebooks: mark tests that execute all Jupyter notebooks + sanity_bnn: mark tests that execute the sanity BNN test + bnn_u250: mark tests that execute U250 BNN tests + bnn_kv260: mark tests that execute KV260 BNN tests + bnn_pynq: mark tests that execute Pynq-Z1 BNN tests + bnn_zcu104: mark tests that execute ZCU104 BNN tests norecursedirs = dist build From d7370db4fcde4cfb41af7a34e2494a7482fee6af Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 28 Jul 2023 15:49:12 +0100 Subject: [PATCH 569/628] Add pytest library version and associates plugins for HTML report capturing Newer version of pytest caused an issue when gathering HTML reports: ModuleNotFoundError: No module named 'py.xml'; 'py' is not a package Apparently this is not a pytest bug but due to a related plugin and is caused by depending on the py package but not declaring it as a dependency. The exact versions of the libraries specified in this commit allow for HTML report gathering. 
This was tested in docker and on hardware in 2 virtual environments: virtual_env and conda (zynq and alveo environments respectively) Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index e11e8136fd..06dc109808 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -102,7 +102,11 @@ RUN pip install pandas==1.5.3 RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading -RUN pip install pytest-html==3.2.0 +# these versions of pytest and associated plugins allow for stable collection of +# test reports and code coverage reports in HTML +RUN pip install pytest==6.2.5 +RUN pip install pytest-metadata==1.7.0 +RUN pip install pytest-html==3.0.0 RUN pip install pytest-html-merger==0.0.8 RUN pip install pytest-cov==4.1.0 From 5615d8d3b89f1b11d90cc3225a2703d7e2f3e8e9 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:25:17 +0100 Subject: [PATCH 570/628] [custom op]: set output datatype MVAU given no activation function --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 204a41e21c..b125745708 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -664,6 +664,8 @@ def minimize_accumulator_width(self, model): # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) + if self.get_nodeattr("noActivation"): + self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 153c2d4e8f15bfab81d6dca4261fee72739419b8 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:25:43 +0100 Subject: [PATCH 571/628] [custom op]: update tensor datatype for consistency --- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 3bcc5c05cf..72ee2f7af6 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -211,6 +211,8 @@ def minimize_accumulator_width(self, model): threshold_tensor ).all(), "Thresholds can't be expressed with type %s" % str(tdt) self.set_nodeattr("weightDataType", tdt.name) + # Update QONNX DataType of tensor for consistency + model.set_tensor_datatype(self.onnx_node.input[1], tdt) return DataType[self.get_nodeattr("weightDataType")] def get_instream_width(self, ind=0): From f367a5aa3f2fc1bafe17ae5982057830964dffc0 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:26:32 +0100 Subject: [PATCH 572/628] [minimize acc width]: apply InferDataTypes to propagate changes in each loop iteration --- .../fpgadataflow/minimize_accumulator_width.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py b/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py index bc020ca428..8d04d5b817 100644 --- a/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py +++ 
b/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py @@ -28,6 +28,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.base import Transformation +from qonnx.transformation.infer_datatypes import InferDataTypes from finn.util.fpgadataflow import is_fpgadataflow_node @@ -41,9 +42,15 @@ def __init__(self): super().__init__() def apply(self, model): - for node in model.graph.node: + for node_id in range(len(model.graph.node)): + # Since InferDataTypes potentially changes node attributes in each loop iterations, + # the for-loop cannot loop over a list of a snapshot of the graph's node protos + node = model.graph.node[node_id] if is_fpgadataflow_node(node) is True: inst = getCustomOp(node) if hasattr(inst, "minimize_accumulator_width"): inst.minimize_accumulator_width(model) + # Since this transformation is applied iteratively, we have to ensure that + # we propagate the new datatype to other layers + model = model.transform(InferDataTypes()) return (model, False) From 763fa48bbef716c1ff15cdb2423c073d2aa52aef Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 17:57:57 +0100 Subject: [PATCH 573/628] [custom op]: set outputDataType in case of no activation --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index f817751852..9a9c6714fe 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -190,6 +190,8 @@ def minimize_accumulator_width(self, model): adt = DataType[new_adt_name] # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) + if self.get_nodeattr("noActivation"): + self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From 04fd18e3ebe2e3240474f5208258e2fd8ea48dc8 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 1 Aug 2023 10:50:58 +0100 Subject: [PATCH 574/628] [CustomOp] Remove outdated stream depth pragma from decoupled mode --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 3 --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 3 --- 2 files changed, 6 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 204a41e21c..7c180534b1 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -1274,9 +1274,6 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=weights_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=8 variable=weights_" + self.hls_sname() - ) else: raise Exception( diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index f817751852..58a85b29ee 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -979,9 +979,6 @@ def pragmas(self): self.code_gen_dict["$PRAGMAS$"].append( "#pragma HLS INTERFACE axis port=weights_" + self.hls_sname() ) - self.code_gen_dict["$PRAGMAS$"].append( - "#pragma HLS stream depth=8 variable=weights_" + self.hls_sname() - ) else: raise Exception( """Please set mem_mode to 
"const", "decoupled", or external, From 2c929b959ab1e5696a4d982bf6f35a40d72a61eb Mon Sep 17 00:00:00 2001 From: Felix Jentzsch Date: Tue, 1 Aug 2023 19:33:49 +0200 Subject: [PATCH 575/628] Fix verilator_fifosim for RTL SWG component --- src/finn/util/pyverilator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 86cf2eed14..73c8755bfb 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -188,7 +188,8 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): xpm_memory = f"{vivado_path}/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" xpm_cdc = f"{vivado_path}/data/ip/xpm/xpm_cdc/hdl/xpm_cdc.sv" xpm_fifo = f"{vivado_path}/data/ip/xpm/xpm_fifo/hdl/xpm_fifo.sv" - verilog_file_arg = ["finn_design_wrapper.v", xpm_memory, xpm_cdc, xpm_fifo] + swg_pkg = os.environ["FINN_ROOT"] + "/finn-rtllib/swg/swg_pkg.sv" + verilog_file_arg = [swg_pkg, "finn_design_wrapper.v", xpm_memory, xpm_cdc, xpm_fifo] verilator_args = [ "perl", From d2c682759de2c874116946000fdf207a2cdab5c9 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 2 Aug 2023 16:06:54 +0100 Subject: [PATCH 576/628] [Util] Update Alveo platforms --- src/finn/util/basic.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 3bc5b803db..05f748d3bb 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -62,10 +62,10 @@ alveo_part_map["U280"] = "xcu280-fsvh2892-2L-e" alveo_default_platform = dict() -alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_201920_3" -alveo_default_platform["U200"] = "xilinx_u200_xdma_201830_2" -alveo_default_platform["U250"] = "xilinx_u250_gen3x16_xdma_2_1_202010_1" -alveo_default_platform["U280"] = "xilinx_u280_xdma_201920_3" +alveo_default_platform["U50"] = "xilinx_u50_gen3x16_xdma_5_202210_1" +alveo_default_platform["U200"] = "xilinx_u200_gen3x16_xdma_2_202110_1" +alveo_default_platform["U250"] = "xilinx_u250_gen3x16_xdma_4_1_202210_1" +alveo_default_platform["U280"] = "xilinx_u280_gen3x16_xdma_1_202211_1" def get_rtlsim_trace_depth(): From 121e893f73edabc370a102f3227b322db918e253 Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 9 May 2023 14:11:09 -0700 Subject: [PATCH 577/628] [MVAU] Handling minimize acc bw for no-activation nodes --- .../fpgadataflow/matrixvectoractivation.py | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 7c180534b1..2f99ddca77 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -655,14 +655,28 @@ def minimize_accumulator_width(self, model): adt = DataType.get_smallest_possible(-acc_max - 1) else: adt = DataType.get_smallest_possible(acc_max) - # if this is the last node in the graph, then ensure the datatype is - # divisibly by 8 bits + # if this is the last node in the graph, then ensure the datatype of the + # output is divisible by 8 if model.find_direct_successors(self.onnx_node) is None: - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + if self.get_nodeattr("noActivation"): + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = 
adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) + else: + odt = DataType[self.get_nodeattr("outputDataType")] + bw = roundup_to_integer_multiple(odt.bitwidth(), 8) + new_odt_name = odt.name.replace(str(odt.bitwidth()), str(bw)) + if bw != odt.bitwidth(): + warn_str = "outputDataType changing for %s: %s -> %s " % ( + self.onnx_node.name, + odt.name, + new_odt_name, + ) + warnings.warn(warn_str) + odt = DataType[new_odt_name] + self.set_nodeattr("outputDataType", odt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From 93d9cdb3b38d706222cc122d17638b9a9828a24e Mon Sep 17 00:00:00 2001 From: icolbert Date: Tue, 9 May 2023 14:11:35 -0700 Subject: [PATCH 578/628] [VVAU] Handling minimize acc bw for no-activation nodes --- .../fpgadataflow/vectorvectoractivation.py | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 58a85b29ee..773c49915f 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -182,14 +182,28 @@ def minimize_accumulator_width(self, model): adt = DataType.get_smallest_possible(-acc_max - 1) else: adt = DataType.get_smallest_possible(acc_max) - # if this is the last node in the graph, then ensure the datatype is - # divisibly by 8 bits + # if this is the last node in the graph, then ensure the datatype of the + # output is divisible by 8 if model.find_direct_successors(self.onnx_node) is None: - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) + if self.get_nodeattr("noActivation"): + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt + self.set_nodeattr("outputDataType", adt.name) + else: + odt = DataType[self.get_nodeattr("outputDataType")] + bw = roundup_to_integer_multiple(odt.bitwidth(), 8) + new_odt_name = odt.name.replace(str(odt.bitwidth()), str(bw)) + if bw != odt.bitwidth(): + warn_str = "outputDataType changing for %s: %s -> %s " % ( + self.onnx_node.name, + odt.name, + new_odt_name, + ) + warnings.warn(warn_str) + odt = DataType[new_odt_name] + self.set_nodeattr("outputDataType", odt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From 0123d2629d0c89f384a5243b22a96a5a33daeac7 Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 31 May 2023 09:36:11 -0700 Subject: [PATCH 579/628] [MVAU] Fixing to maintain prior functionality --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 2f99ddca77..cef336bdd4 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -667,7 +667,10 @@ def minimize_accumulator_width(self, model): else: odt = DataType[self.get_nodeattr("outputDataType")] bw = 
roundup_to_integer_multiple(odt.bitwidth(), 8) - new_odt_name = odt.name.replace(str(odt.bitwidth()), str(bw)) + # NOTE: keeping previous functionality of converting outputDataType + # to accDataType on the last node. May want to preserve outputDataType + # in the future by replacing adt with odt below. + new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) if bw != odt.bitwidth(): warn_str = "outputDataType changing for %s: %s -> %s " % ( self.onnx_node.name, From aa345e333df5c81ab28f134063b67d4ca7ccb14f Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 31 May 2023 09:36:24 -0700 Subject: [PATCH 580/628] [VVAU] Fixing to maintain prior functionality --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 773c49915f..29bf9651f0 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -194,7 +194,10 @@ def minimize_accumulator_width(self, model): else: odt = DataType[self.get_nodeattr("outputDataType")] bw = roundup_to_integer_multiple(odt.bitwidth(), 8) - new_odt_name = odt.name.replace(str(odt.bitwidth()), str(bw)) + # NOTE: keeping previous functionality of converting outputDataType + # to accDataType on the last node. May want to preserve outputDataType + # in the future by replacing adt with odt below. + new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) if bw != odt.bitwidth(): warn_str = "outputDataType changing for %s: %s -> %s " % ( self.onnx_node.name, From 3f05b634b6a837159bb4a0f1ccc83b97f1705e8f Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 31 May 2023 09:38:25 -0700 Subject: [PATCH 581/628] Updating unit test to check correct functionality --- tests/fpgadataflow/test_minimize_bit_width.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index 805578018c..ad7b1cdf86 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -297,10 +297,11 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" if model.find_direct_successors(inst.onnx_node) is None: assert ( - cur_adt.bitwidth() % 8 - ) == 0, "bit width of last node needs to be divisible by 8" - assert ( - cur_adt.bitwidth() == cur_odt.bitwidth() - ), "outputDataType and accDataType should be equal" + cur_odt.bitwidth() % 8 + ) == 0, "output bit width of last node needs to be divisible by 8" + if inst.get_nodeattr("noActivation"): + assert ( + cur_adt.bitwidth() == cur_odt.bitwidth() + ), "outputDataType and accDataType should be equal" else: assert cur_odt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" From d853ab50ab2c2811f9001b003d9f527082100bfe Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 7 Jun 2023 10:54:46 -0700 Subject: [PATCH 582/628] [MVAU] updating minimize_accumulator logic --- .../fpgadataflow/matrixvectoractivation.py | 50 +++++++++++-------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index cef336bdd4..1d6a6f5576 100644 --- 
a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -655,31 +655,37 @@ def minimize_accumulator_width(self, model): adt = DataType.get_smallest_possible(-acc_max - 1) else: adt = DataType.get_smallest_possible(acc_max) - # if this is the last node in the graph, then ensure the datatype of the - # output is divisible by 8 - if model.find_direct_successors(self.onnx_node) is None: - if self.get_nodeattr("noActivation"): + + is_last_node = model.find_direct_successors(self.onnx_node) is None + + # if no activation, output and accumulator datatypes are the same + if self.get_nodeattr("noActivation"): + # if last node, we need to round the accumulator datatype (adt) + # up to the nearest 8 and set the output datatype (odt) + if is_last_node: bw = roundup_to_integer_multiple(adt.bitwidth(), 8) new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) adt = DataType[new_adt_name] - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) - else: - odt = DataType[self.get_nodeattr("outputDataType")] - bw = roundup_to_integer_multiple(odt.bitwidth(), 8) - # NOTE: keeping previous functionality of converting outputDataType - # to accDataType on the last node. May want to preserve outputDataType - # in the future by replacing adt with odt below. - new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - if bw != odt.bitwidth(): - warn_str = "outputDataType changing for %s: %s -> %s " % ( - self.onnx_node.name, - odt.name, - new_odt_name, - ) - warnings.warn(warn_str) - odt = DataType[new_odt_name] - self.set_nodeattr("outputDataType", odt.name) + self.set_nodeattr("outputDataType", adt.name) + + # if last node has activation, then ensure the output datatype is divisible by 8 + if not self.get_nodeattr("noActivation") and is_last_node: + odt = DataType[self.get_nodeattr("outputDataType")] + bw = roundup_to_integer_multiple(odt.bitwidth(), 8) + # NOTE: keeping previous functionality of converting odt to adt on the last + # node, could preserve odt in the future by replacing adt with odt. 
This + # may yield unfavorable functionality for Bipolar and/or Ternary datatypes + new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + if bw != odt.bitwidth(): + warn_str = "outputDataType changing for %s: %s -> %s " % ( + self.onnx_node.name, + odt.name, + new_odt_name, + ) + warnings.warn(warn_str) + odt = DataType[new_odt_name] + self.set_nodeattr("outputDataType", odt.name) + self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From a3ee3a37289ceef2395c1867aca5edbe8813a27b Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 7 Jun 2023 10:54:57 -0700 Subject: [PATCH 583/628] [VVAU] updating minimize_accumulator logic --- .../fpgadataflow/vectorvectoractivation.py | 51 ++++++++++--------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 29bf9651f0..09e749be57 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -182,32 +182,37 @@ def minimize_accumulator_width(self, model): adt = DataType.get_smallest_possible(-acc_max - 1) else: adt = DataType.get_smallest_possible(acc_max) - # if this is the last node in the graph, then ensure the datatype of the - # output is divisible by 8 - if model.find_direct_successors(self.onnx_node) is None: - if self.get_nodeattr("noActivation"): + + is_last_node = model.find_direct_successors(self.onnx_node) is None + + # if no activation, output and accumulator datatypes are the same + if self.get_nodeattr("noActivation"): + # if last node, we need to round the accumulator datatype (adt) + # up to the nearest 8 and set the output datatype (odt) + if is_last_node: bw = roundup_to_integer_multiple(adt.bitwidth(), 8) new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) adt = DataType[new_adt_name] - # for no-activation nodes, output dt = acc dt - self.set_nodeattr("outputDataType", adt.name) - else: - odt = DataType[self.get_nodeattr("outputDataType")] - bw = roundup_to_integer_multiple(odt.bitwidth(), 8) - # NOTE: keeping previous functionality of converting outputDataType - # to accDataType on the last node. May want to preserve outputDataType - # in the future by replacing adt with odt below. - new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - if bw != odt.bitwidth(): - warn_str = "outputDataType changing for %s: %s -> %s " % ( - self.onnx_node.name, - odt.name, - new_odt_name, - ) - warnings.warn(warn_str) - odt = DataType[new_odt_name] - self.set_nodeattr("outputDataType", odt.name) - self.set_nodeattr("accDataType", adt.name) + self.set_nodeattr("outputDataType", adt.name) + + # if last node has activation, then ensure the output datatype is divisible by 8 + if not self.get_nodeattr("noActivation") and is_last_node: + odt = DataType[self.get_nodeattr("outputDataType")] + bw = roundup_to_integer_multiple(odt.bitwidth(), 8) + # NOTE: keeping previous functionality of converting odt to adt on the last + # node, could preserve odt in the future by replacing adt with odt. 
This + # may yield unfavorable functionality for Bipolar and/or Ternary datatypes + new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + if bw != odt.bitwidth(): + warn_str = "outputDataType changing for %s: %s -> %s " % ( + self.onnx_node.name, + odt.name, + new_odt_name, + ) + warnings.warn(warn_str) + odt = DataType[new_odt_name] + self.set_nodeattr("outputDataType", odt.name) + return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From f172adc8fcc19338756b8b936a78a2de5620e142 Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 8 Jun 2023 10:37:14 -0700 Subject: [PATCH 584/628] [VVAU] fixing bug with setting accDataType --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 09e749be57..af2591f703 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -213,6 +213,7 @@ def minimize_accumulator_width(self, model): odt = DataType[new_odt_name] self.set_nodeattr("outputDataType", odt.name) + self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 1e5cbc8d65690f8c3c684506fd5f3778e017a027 Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 8 Jun 2023 10:37:59 -0700 Subject: [PATCH 585/628] Fixing test_minimize_bit_width unit test --- tests/fpgadataflow/test_minimize_bit_width.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index ad7b1cdf86..0427bbd4d8 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -294,14 +294,12 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, # bit width minimization logic in the MVAU and VVAU is exact and should be # less than or equal to this calculation exp_adt = calculate_accumulator_bit_width(inst, model) - assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" - if model.find_direct_successors(inst.onnx_node) is None: + assert ( + cur_adt.bitwidth() <= exp_adt.bitwidth() + ), "Mismatched accumulation data types" + + # if there is no activation, outputDataType = accDataType + if inst.get_nodeattr("noActivation"): assert ( - cur_odt.bitwidth() % 8 - ) == 0, "output bit width of last node needs to be divisible by 8" - if inst.get_nodeattr("noActivation"): - assert ( - cur_adt.bitwidth() == cur_odt.bitwidth() - ), "outputDataType and accDataType should be equal" - else: - assert cur_odt.bitwidth() == idt.bitwidth(), "outputDataType should not be changed" + cur_adt.bitwidth() == cur_odt.bitwidth() + ), "outputDataType and accDataType should be equal" From d9e4654c79b4461189fe6381163f17eaf8037597 Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 14 Jun 2023 18:20:37 -0700 Subject: [PATCH 586/628] [MVAU] Updating minimize_accumulator_width logic --- .../fpgadataflow/matrixvectoractivation.py | 92 ++++++------------- 1 file changed, 29 insertions(+), 63 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 1d6a6f5576..ccf5b00918 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ 
b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -589,11 +589,14 @@ def minimize_accumulator_width(self, model): # for the bipolar case they need to be converted to bipolar if self.get_nodeattr("binaryXnorMode"): weights = 2 * weights - 1 + + thresholds = None if len(self.onnx_node.input) > 2: thresholds = model.get_initializer(self.onnx_node.input[2]) - else: - thresholds = None + idt = self.get_input_datatype() + + (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) # if runtime-writeable weights, then the values of the weights can # change and we need to use the worst-case values from the datatypes if self.get_nodeattr("runtime_writeable_weights"): @@ -604,11 +607,7 @@ def minimize_accumulator_width(self, model): upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) - # if not runtime-writeable weights, then we can calculate the min - # and max values of the accumulation range using knowledge of the - # weights and input data types since they are fixed - else: - (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) + # if the thresholds can be used to determine range, then adjust the range # according to the known values of the thresholds if thresholds is not None: @@ -617,76 +616,43 @@ def minimize_accumulator_width(self, model): min_threshold = thresholds.min() max_threshold = thresholds.max() # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): + if max_threshold > acc_max or min_threshold < acc_min: warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) - thresholds = np.clip(thresholds, clip_lower, clip_upper) + thresholds = np.clip(thresholds, acc_min, acc_max) model.set_initializer(self.onnx_node.input[2], thresholds) threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) min_threshold = thresholds.min() max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) - else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) - else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( + acc_min = min(min_threshold, acc_min) + acc_max = max(max_threshold, acc_max) + + # if the acc_range is always greater than 0, then acc_max <= 2^P - 1 + if acc_min >= 0: + acc_bit_width = np.log2(acc_max + 1) + acc_bit_width = math.ceil(acc_bit_width) + adt = DataType[f"UINT{acc_bit_width}"] + # if the acc_range is signed, then acc_min >= -2^{P-1} and acc_max <= + # 2^{P - 1} - 1, which means 2^{P - 1} >= max(-acc_min, 1 + acc_max) + else: + _acc_max = max(-acc_min, 1 + acc_max) + acc_bit_width = np.log2(_acc_max) + 1 + acc_bit_width = math.ceil(acc_bit_width) + adt = DataType[f"INT{acc_bit_width}"] + + # if activation, assert that the thresholds can be expressed with adt + if thresholds is not None: + assert np.vectorize(adt.allowed)( threshold_tensor ).all(), "Thresholds in %s can't be expressed with type %s" % ( self.onnx_node.name, - str(tdt), + str(adt), ) - adt = tdt # Set activation datatype to the threshold datatype - else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = 
DataType.get_smallest_possible(acc_min) - else: - adt = DataType.get_smallest_possible(-acc_max - 1) - else: - adt = DataType.get_smallest_possible(acc_max) - - is_last_node = model.find_direct_successors(self.onnx_node) is None # if no activation, output and accumulator datatypes are the same if self.get_nodeattr("noActivation"): - # if last node, we need to round the accumulator datatype (adt) - # up to the nearest 8 and set the output datatype (odt) - if is_last_node: - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] self.set_nodeattr("outputDataType", adt.name) - - # if last node has activation, then ensure the output datatype is divisible by 8 - if not self.get_nodeattr("noActivation") and is_last_node: - odt = DataType[self.get_nodeattr("outputDataType")] - bw = roundup_to_integer_multiple(odt.bitwidth(), 8) - # NOTE: keeping previous functionality of converting odt to adt on the last - # node, could preserve odt in the future by replacing adt with odt. This - # may yield unfavorable functionality for Bipolar and/or Ternary datatypes - new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - if bw != odt.bitwidth(): - warn_str = "outputDataType changing for %s: %s -> %s " % ( - self.onnx_node.name, - odt.name, - new_odt_name, - ) - warnings.warn(warn_str) - odt = DataType[new_odt_name] - self.set_nodeattr("outputDataType", odt.name) - self.set_nodeattr("accDataType", adt.name) + return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 4dc169463ec0586380d2f19b1ae39e3c7d64955d Mon Sep 17 00:00:00 2001 From: icolbert Date: Wed, 14 Jun 2023 18:20:48 -0700 Subject: [PATCH 587/628] [VVAU] Updating minimize_accumulator_width logic --- .../fpgadataflow/vectorvectoractivation.py | 87 ++++++------------- 1 file changed, 26 insertions(+), 61 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index af2591f703..035b6f28ec 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -121,6 +121,8 @@ def minimize_accumulator_width(self, model): else: thresholds = None idt = self.get_input_datatype() + + (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) # if runtime-writeable weights, then the values of the weights can # change and we need to use the worst-case values from the datatypes if self.get_nodeattr("runtime_writeable_weights"): @@ -131,11 +133,7 @@ def minimize_accumulator_width(self, model): upper_range = calculate_matvec_accumulator_range(upper_worst, idt) acc_min = min(min(lower_range), min(upper_range)) acc_max = max(max(upper_range), max(upper_range)) - # if not runtime-writeable weights, then we can calculate the min - # and max values of the accumulation range using knowledge of the - # weights and input data types since they are fixed - else: - (acc_min, acc_max) = calculate_matvec_accumulator_range(weights, idt) + # if the thresholds can be used to determine range, then adjust the range # according to the known values of the thresholds if thresholds is not None: @@ -144,76 +142,43 @@ def minimize_accumulator_width(self, model): min_threshold = thresholds.min() max_threshold = thresholds.max() # clip threshold values - clip_upper = None - clip_lower = None - if max_threshold > acc_max + 1: - clip_upper = acc_max + 1 - if min_threshold < acc_min: - 
clip_lower = acc_min - if (clip_lower is not None) or (clip_upper is not None): + if max_threshold > acc_max or min_threshold < acc_min: warnings.warn("Clipping some thresholds in %s" % self.onnx_node.name) - thresholds = np.clip(thresholds, clip_lower, clip_upper) + thresholds = np.clip(thresholds, acc_min, acc_max) model.set_initializer(self.onnx_node.input[2], thresholds) threshold_tensor = self.get_hls_compatible_threshold_tensor(thresholds) min_threshold = thresholds.min() max_threshold = thresholds.max() - # get range required by threshold values - tdt_min = min(acc_min, min_threshold) - tdt_max = max(acc_max, max_threshold) - if tdt_min < 0: - if abs(tdt_min) > tdt_max: - tdt = DataType.get_smallest_possible(tdt_min) - else: - tdt = DataType.get_smallest_possible(-tdt_max - 1) - else: - tdt = DataType.get_smallest_possible(tdt_max) - assert np.vectorize(tdt.allowed)( + acc_min = min(min_threshold, acc_min) + acc_max = max(max_threshold, acc_max) + + # if the acc_range is always greater than 0, then acc_max <= 2^P - 1 + if acc_min >= 0: + acc_bit_width = np.log2(acc_max + 1) + acc_bit_width = math.ceil(acc_bit_width) + adt = DataType[f"UINT{acc_bit_width}"] + # if the acc_range is signed, then acc_min >= -2^{P-1} and acc_max <= + # 2^{P - 1} - 1, which means 2^{P - 1} >= max(-acc_min, 1 + acc_max) + else: + _acc_max = max(-acc_min, 1 + acc_max) + acc_bit_width = np.log2(_acc_max) + 1 + acc_bit_width = math.ceil(acc_bit_width) + adt = DataType[f"INT{acc_bit_width}"] + + # if activation, assert that the thresholds can be expressed with adt + if thresholds is not None: + assert np.vectorize(adt.allowed)( threshold_tensor ).all(), "Thresholds in %s can't be expressed with type %s" % ( self.onnx_node.name, - str(tdt), + str(adt), ) - adt = tdt # Set activation datatype to the threshold datatype - else: - if acc_min < 0: - if abs(acc_min) > acc_max: - adt = DataType.get_smallest_possible(acc_min) - else: - adt = DataType.get_smallest_possible(-acc_max - 1) - else: - adt = DataType.get_smallest_possible(acc_max) - - is_last_node = model.find_direct_successors(self.onnx_node) is None # if no activation, output and accumulator datatypes are the same if self.get_nodeattr("noActivation"): - # if last node, we need to round the accumulator datatype (adt) - # up to the nearest 8 and set the output datatype (odt) - if is_last_node: - bw = roundup_to_integer_multiple(adt.bitwidth(), 8) - new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - adt = DataType[new_adt_name] self.set_nodeattr("outputDataType", adt.name) - - # if last node has activation, then ensure the output datatype is divisible by 8 - if not self.get_nodeattr("noActivation") and is_last_node: - odt = DataType[self.get_nodeattr("outputDataType")] - bw = roundup_to_integer_multiple(odt.bitwidth(), 8) - # NOTE: keeping previous functionality of converting odt to adt on the last - # node, could preserve odt in the future by replacing adt with odt. 
This - # may yield unfavorable functionality for Bipolar and/or Ternary datatypes - new_odt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) - if bw != odt.bitwidth(): - warn_str = "outputDataType changing for %s: %s -> %s " % ( - self.onnx_node.name, - odt.name, - new_odt_name, - ) - warnings.warn(warn_str) - odt = DataType[new_odt_name] - self.set_nodeattr("outputDataType", odt.name) - self.set_nodeattr("accDataType", adt.name) + return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 7d5a8b6b2103b6ebd931a0b9d0479808007d5c4d Mon Sep 17 00:00:00 2001 From: icolbert Date: Thu, 3 Aug 2023 07:53:34 -0700 Subject: [PATCH 588/628] Pre-commit fixes --- tests/fpgadataflow/test_minimize_bit_width.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index 0427bbd4d8..4be0a260b7 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -294,9 +294,7 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, # bit width minimization logic in the MVAU and VVAU is exact and should be # less than or equal to this calculation exp_adt = calculate_accumulator_bit_width(inst, model) - assert ( - cur_adt.bitwidth() <= exp_adt.bitwidth() - ), "Mismatched accumulation data types" + assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" # if there is no activation, outputDataType = accDataType if inst.get_nodeattr("noActivation"): From f11856f3c5db5825722bfd977f644c01f5ad6139 Mon Sep 17 00:00:00 2001 From: aziz bahri Date: Fri, 4 Aug 2023 10:27:30 +0100 Subject: [PATCH 589/628] [Deps] Update qonnx version to include qcdq2qonnx changes Signed-off-by: aziz bahri --- fetch-repos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fetch-repos.sh b/fetch-repos.sh index 9e3ee3ef99..5b07d11273 100755 --- a/fetch-repos.sh +++ b/fetch-repos.sh @@ -27,7 +27,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-QONNX_COMMIT="8755423377e9c01dd2d2358c320484399b5d6625" +QONNX_COMMIT="04e24583fb5c1895744801480db3ced8a5b6a914" FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c" BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5" PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f" From d07655968644a0c7a19a04986fca3984a2ea43ab Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 4 Aug 2023 14:14:48 +0100 Subject: [PATCH 590/628] [MVAU/VVAU] DataType divisibility by 8 for last node if no activation --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 7 +++++++ src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index ccf5b00918..7eb56db382 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -650,6 +650,13 @@ def minimize_accumulator_width(self, model): # if no activation, output and accumulator datatypes are the same if self.get_nodeattr("noActivation"): + # if this is the last node in the graph, then ensure the datatype is + # divisibly by 8 bits + if model.find_direct_successors(self.onnx_node) is None: + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index 035b6f28ec..bd5bb75f1d 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -176,6 +176,13 @@ def minimize_accumulator_width(self, model): # if no activation, output and accumulator datatypes are the same if self.get_nodeattr("noActivation"): + # if this is the last node in the graph, then ensure the datatype is + # divisibly by 8 bits + if model.find_direct_successors(self.onnx_node) is None: + bw = roundup_to_integer_multiple(adt.bitwidth(), 8) + new_adt_name = adt.name.replace(str(adt.bitwidth()), str(bw)) + adt = DataType[new_adt_name] + # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) From f713ab09794a66e9cddae605055666064b855caf Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 4 Aug 2023 16:39:16 +0100 Subject: [PATCH 591/628] [Tests] Include divisibility by 8 in minimize bit width testing --- tests/fpgadataflow/test_minimize_bit_width.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/fpgadataflow/test_minimize_bit_width.py b/tests/fpgadataflow/test_minimize_bit_width.py index 4be0a260b7..0e704230e7 100644 --- a/tests/fpgadataflow/test_minimize_bit_width.py +++ b/tests/fpgadataflow/test_minimize_bit_width.py @@ -296,8 +296,13 @@ def test_minimize_accumulator_width(wdt: DataType, idt: DataType, tdt: DataType, exp_adt = calculate_accumulator_bit_width(inst, model) assert cur_adt.bitwidth() <= exp_adt.bitwidth(), "Mismatched accumulation data types" - # if there is no activation, outputDataType = accDataType + # if there is no activation, outputDataType = accDataType and if it is the last node + # it needs to be divisible by 8 if inst.get_nodeattr("noActivation"): assert ( 
cur_adt.bitwidth() == cur_odt.bitwidth() ), "outputDataType and accDataType should be equal" + if model.find_direct_successors(inst.onnx_node) is None: + assert ( + cur_adt.bitwidth() % 8 + ) == 0, "bit width of last node needs to be divisible by 8" From f52871dfe71df725ef85eeb66b6ff9ca7dff1d2d Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 4 Aug 2023 17:11:37 +0100 Subject: [PATCH 592/628] [Custom Op] Delete obsolete lines after merging with dev --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index db31090f44..bd5bb75f1d 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -184,8 +184,6 @@ def minimize_accumulator_width(self, model): adt = DataType[new_adt_name] # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) - if self.get_nodeattr("noActivation"): - self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From 8357c102633a1ce25666f600d30b66ad6f94dfdf Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 8 Aug 2023 10:42:52 +0100 Subject: [PATCH 593/628] [Lint] Run pre-commit over files --- docker/jenkins/test_bnn_hw_pytest.py | 76 +++++++++++++++++------ tests/end2end/test_end2end_bnn_pynq.py | 83 +++++++++++++++++++------- 2 files changed, 122 insertions(+), 37 deletions(-) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index 961efd1cc1..c8f4fbf74d 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -1,14 +1,15 @@ -import os -import numpy as np -from scipy.stats import linregress -import subprocess import pytest + import itertools import logging +import numpy as np +import os +import subprocess +from scipy.stats import linregress # no __init__ constructors allowed in Pytest - so use global variables instead base_dir_global = os.getcwd() -default_test_run_timeout = 30 # seconds +default_test_run_timeout = 30 # seconds output_execute_results_file = "output.npy" execute_results_reference_file = "output_reference.npy" output_throughput_results_file = "nw_metrics.txt" @@ -18,13 +19,14 @@ def remove_cache_dirs(dir_list): tmp_list = list(dir_list) - for i in range(len(tmp_list)-1, -1, -1): + for i in range(len(tmp_list) - 1, -1, -1): if ".pytest_cache" in tmp_list[i]: del tmp_list[i] elif "__pycache__" in tmp_list[i]: del tmp_list[i] return tmp_list + def delete_file(file_path): # Check if the file exists before deleting it if os.path.exists(file_path): @@ -36,16 +38,21 @@ def delete_file(file_path): else: logger.info(f"File '{file_path}' does not exist. 
Continuing with the script.") + def get_platform(board_str): return "alveo" if "U250" in board_str else "zynq-iodma" + def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, platform_list): test_cases = [ - (f'{marker}_{param1}_batchSize-{param2}_platform-{param3}', { - 'test_dir': param1, - 'batch_size': param2, - 'platform': param3, - }) + ( + f"{marker}_{param1}_batchSize-{param2}_platform-{param3}", + { + "test_dir": param1, + "batch_size": param2, + "platform": param3, + }, + ) for param1, param2, param3 in itertools.product( test_dir_list, batch_size_list, @@ -54,6 +61,7 @@ def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, pla ] return test_cases + def pytest_generate_tests(metafunc): idlist = [] argvalues = [] @@ -61,15 +69,21 @@ def pytest_generate_tests(metafunc): # Separate the full list of markers used on command line. # This allows a user to select multiple markers - all_markers_used = metafunc.config.getoption("-m").split(" ") + all_markers_used = metafunc.config.getoption("-m").split(" ") current_dir = os.getcwd() - test_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + test_dirs = [ + name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name)) + ] test_dirs = remove_cache_dirs(test_dirs) for marker in all_markers_used: if "Pynq" in marker or "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: platform = get_platform(marker) - scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + scenarios.extend( + get_full_parameterized_test_list( + marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform] + ) + ) if len(scenarios) > 0: for scenario in scenarios: @@ -92,7 +106,21 @@ def test_type_execute(self, test_dir, batch_size, platform): # Run test option: execute bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" - result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + result = subprocess.run( + [ + "python", + "driver.py", + "--exec_mode=execute", + f"--batchsize={batch_size}", + f"--bitfile={bitfile}", + "--inputfile=input.npy", + "--outputfile=output.npy", + f"--platform={platform}", + ], + capture_output=True, + text=True, + timeout=default_test_run_timeout, + ) assert result.returncode == 0 # Load the output and reference arrays @@ -112,7 +140,21 @@ def test_type_throughput(self, test_dir, batch_size, platform): # Run test option: throughput bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" - result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + result = subprocess.run( + [ + "python", + "driver.py", + "--exec_mode=throughput_test", + f"--batchsize={batch_size}", + f"--bitfile={bitfile}", + "--inputfile=input.npy", + "--outputfile=output.npy", + f"--platform={platform}", + ], + capture_output=True, + text=True, + timeout=default_test_run_timeout, + ) assert result.returncode == 0 # Check if nw_metrics.txt now exists after test run @@ -158,7 +200,7 @@ def test_type_throughput(self, 
test_dir, batch_size, platform): np.round(v["DRAM_out_bandwidth[MB/s]"], 2), ) ret_str += "\n" + "-----------------------------" - largest_bsize = bsize_range[-1] + # largest_bsize = bsize_range[-1] # Dump the metrics to a text file with open(throughput_results_formatted_file, "w") as f: diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 59fbb0c1cb..07e977a266 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -28,9 +28,8 @@ import pytest -import numpy as np - import itertools +import numpy as np # as of Feb'20 there is a bug that segfaults ONNX shape inference if we # import pytorch before onnx, so we make sure to import onnx first @@ -41,7 +40,6 @@ from brevitas.export import export_qonnx from dataset_loading import cifar, mnist from distutils.dir_util import copy_tree -from shutil import copy from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -60,6 +58,7 @@ from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.transformation.merge_onnx_models import MergeONNXModels from qonnx.util.cleanup import cleanup as qonnx_cleanup +from shutil import copy import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb @@ -348,12 +347,15 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): # parameters that make up inputs to test case(s) def get_full_parameterized_test_list(marker, wbits_list, abits_list, topology_list, board_list): test_cases = [ - (f'{marker}_w{param1}_a{param2}_{param3}_{param4}', { - 'wbits': param1, - 'abits': param2, - 'topology': param3, - 'board': param4, - }) + ( + f"{marker}_w{param1}_a{param2}_{param3}_{param4}", + { + "wbits": param1, + "abits": param2, + "topology": param3, + "board": param4, + }, + ) for param1, param2, param3, param4 in itertools.product( wbits_list, abits_list, @@ -376,21 +378,63 @@ def pytest_generate_tests(metafunc): # Separate the full list of markers used on command line. 
# This allows a user to select multiple markers - all_markers_used = metafunc.config.getoption("-m").split(" ") + all_markers_used = metafunc.config.getoption("-m").split(" ") for marker in all_markers_used: if "sanity_bnn" in marker: - # Define a set of sanity tests that target each of the supported boards with fixed parameters - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[1], topology_list=["lfc"], board_list=[test_support_board_map[0]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[2], topology_list=["cnv"], board_list=[test_support_board_map[1]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["tfc"], board_list=[test_support_board_map[2]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["cnv"], board_list=[test_support_board_map[3]])) + # Define a set of sanity tests that target each of + # the supported boards with fixed parameters + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[1], + abits_list=[1], + topology_list=["lfc"], + board_list=[test_support_board_map[0]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[1], + abits_list=[2], + topology_list=["cnv"], + board_list=[test_support_board_map[1]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[2], + abits_list=[2], + topology_list=["tfc"], + board_list=[test_support_board_map[2]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[2], + abits_list=[2], + topology_list=["cnv"], + board_list=[test_support_board_map[3]], + ) + ) if "bnn_" in marker: # Target the full set of parameters for a single board # Extract the board name from the marker used, as it is in the form of 'bnn_' - bnn_board = next((element for element in test_support_board_map if marker.split("_")[1] in element.lower()), None) - test_cases = get_full_parameterized_test_list("bnn", wbits, abits, topology, [bnn_board]) + bnn_board = next( + ( + element + for element in test_support_board_map + if marker.split("_")[1] in element.lower() + ), + None, + ) + test_cases = get_full_parameterized_test_list( + "bnn", wbits, abits, topology, [bnn_board] + ) scenarios.extend(test_cases) if len(scenarios) > 0: @@ -401,6 +445,7 @@ def pytest_generate_tests(metafunc): argvalues.append([x[1] for x in items]) metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") + @pytest.mark.sanity_bnn @pytest.mark.bnn_pynq @pytest.mark.bnn_zcu104 @@ -706,9 +751,7 @@ def test_make_pynq_driver(self, topology, wbits, abits, board): model.save(get_checkpoint_name(topology, wbits, abits, "driver_" + board)) def test_deploy(self, topology, wbits, abits, board): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, "driver_" + board - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "driver_" + board) model = load_test_checkpoint_or_skip(prev_chkpt_name) model_title = "%s_w%d_a%d_%s" % ("bnn", wbits, abits, topology) deploy_based_on_board(model, model_title, topology, wbits, abits, board) From 3df0c17191cafd2c5e90f0aa2f310626b1297e67 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 8 Aug 2023 11:39:08 +0100 Subject: [PATCH 594/628] [GHA] exclude bnn_pynq from quicktest --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docker/quicktest.sh b/docker/quicktest.sh index 466fcfb09d..814cec03d1 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,7 +6,7 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL" + python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks or bnn_pynq)' --dist=loadfile -n $PYTEST_PARALLEL" elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL" From 066d0277ff70bb7cf990baacc563788467a5c836 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 14:34:29 +0100 Subject: [PATCH 595/628] [CI] Split Jenkinsfiles into CI and testing --- docker/jenkins/Jenkinsfile_CI | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 docker/jenkins/Jenkinsfile_CI diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI new file mode 100644 index 0000000000..2954877c2a --- /dev/null +++ b/docker/jenkins/Jenkinsfile_CI @@ -0,0 +1,46 @@ +node { + def app + stage('Clone repository') { + /* Let's make sure we have the repository cloned to our workspace */ + checkout scm + } + withEnv([ + "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.2_1014_8888/installs/lin64", + "FINN_XILINX_VERSION=2022.2", + "FINN_DOCKER_TAG=xilinx/finn:jenkins", + "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", + "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" + ]){ + parallel firstBranch: { + stage('Brevitas export') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") + } + } + }, secondBranch: { + stage('Streamlining transformations') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mstreamline") + } + } + }, thirdBranch: { + stage('Util functions') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mutil") + } + } + }, fourthBranch: { + stage('General transformations') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mtransform") + } + } + }, fifthBranch: { + stage('Fpgadataflow transformations and simulations') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") + } + } + } + } +} From 6f84ed9466f5cc8ef1041236c0c3369b786a90dc Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 14:44:51 +0100 Subject: [PATCH 596/628] [Tests] Rename board map for tests --- src/finn/util/basic.py | 4 ++-- tests/end2end/test_end2end_bnn_pynq.py | 16 ++++++---------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 7dd04996ba..a184a53862 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -31,8 +31,8 @@ import sys import tempfile -# supported boards -test_support_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] +# test boards +test_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] # mapping from PYNQ board names to FPGA part names pynq_part_map = dict() diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 07e977a266..0343b9082b 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ 
-92,7 +92,7 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.util.basic import get_finn_root, make_build_dir, test_support_board_map +from finn.util.basic import get_finn_root, make_build_dir, test_board_map from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -390,7 +390,7 @@ def pytest_generate_tests(metafunc): wbits_list=[1], abits_list=[1], topology_list=["lfc"], - board_list=[test_support_board_map[0]], + board_list=[test_board_map[0]], ) ) scenarios.extend( @@ -399,7 +399,7 @@ def pytest_generate_tests(metafunc): wbits_list=[1], abits_list=[2], topology_list=["cnv"], - board_list=[test_support_board_map[1]], + board_list=[test_board_map[1]], ) ) scenarios.extend( @@ -408,7 +408,7 @@ def pytest_generate_tests(metafunc): wbits_list=[2], abits_list=[2], topology_list=["tfc"], - board_list=[test_support_board_map[2]], + board_list=[test_board_map[2]], ) ) scenarios.extend( @@ -417,7 +417,7 @@ def pytest_generate_tests(metafunc): wbits_list=[2], abits_list=[2], topology_list=["cnv"], - board_list=[test_support_board_map[3]], + board_list=[test_board_map[3]], ) ) @@ -425,11 +425,7 @@ def pytest_generate_tests(metafunc): # Target the full set of parameters for a single board # Extract the board name from the marker used, as it is in the form of 'bnn_' bnn_board = next( - ( - element - for element in test_support_board_map - if marker.split("_")[1] in element.lower() - ), + (element for element in test_board_map if marker.split("_")[1] in element.lower()), None, ) test_cases = get_full_parameterized_test_list( From e080625d0b41f1ad8972ab2b1bf7b0ae899be174 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 17:16:09 +0100 Subject: [PATCH 597/628] [Tests] Fix bug in reshaping input npy for remote execution --- tests/end2end/test_end2end_bnn_pynq.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 0343b9082b..d98c06f7d0 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -333,7 +333,8 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1) else: input_shape = input_tensor_npy.shape - input_tensor_npy = (input_shape[0], np.prod(input_shape[1:])) + new_input_shape = (input_shape[0], np.prod(input_shape[1:])) + input_tensor_npy = input_tensor_npy.reshape(new_input_shape) np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy.copy()) np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy) From fe09f06d9cf35994269db2f667472167f05d6165 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 23 Aug 2023 09:51:42 +0100 Subject: [PATCH 598/628] [CI] Fix bug with build parameters and result flags sharing common names --- docker/jenkins/Jenkinsfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 7ca9aedafc..f4f0533c3f 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -96,7 +96,7 @@ pipeline { stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline - env.FPGADATAFLOW = "SUCCESS" + env.FPGADATAFLOW_RESULT = "SUCCESS" } } } @@ -129,7 +129,7 @@ pipeline { stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect 
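# Illustrative sketch (an assumption for clarity, not introduced by this commit):
# the HLS_init.tcl / Vivado_init.tcl files handled below are ordinary Tcl init
# scripts, so enabling a hypothetical beta device family would reduce to a single
# line such as
#     enable_beta_device xcvp*
# placed in .Xilinx/Vivado/Vivado_init.tcl (and likewise in .Xilinx/HLS_init.tcl),
# with the .Xilinx directory mounted into the container next to the FINN sources,
# e.g. `docker run -v <host_dir>/.Xilinx:$FINN_ROOT/.Xilinx ...`; the exact mount
# path is an assumption and depends on how run-docker.sh maps FINN_ROOT.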
test results later in pipeline - env.END2END = "SUCCESS" + env.END2END_RESULT = "SUCCESS" } } } @@ -705,13 +705,13 @@ pipeline { dir('reports') { // Only unstash for stages that ran unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") - unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.FPGADATAFLOW_RESULT, "fpgadataflow") unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") - unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.END2END_RESULT, "end2end") unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") @@ -730,7 +730,7 @@ pipeline { archiveArtifacts artifacts: "reports/*.html" archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") - archiveSuccessfulStage(env.FPGADATAFLOW, "coverage_fpgadataflow") + archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") // Plot what XML files were created during the test run junit 'reports/*.xml' From db99ec811957310b68c5818e506c3374402dd16f Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 2 Aug 2023 11:13:31 +0100 Subject: [PATCH 599/628] Add support to pull in a .Xilinx directory to allow beta devices to be enabled inside docker container See https://docs.xilinx.com/r/en-US/ug835-vivado-tcl-commands/Tcl-Initialization-Scripts for information on using tcl init scripts --- docker/finn_entrypoint.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index 4e0266ca6b..b441c9359a 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -114,6 +114,27 @@ else yecho "If you need Vitis HLS, ensure HLS_PATH is set correctly and mounted into the Docker container." fi +if [ -d "$FINN_ROOT/.Xilinx" ]; then + mkdir "$HOME/.Xilinx" + if [ -f "$FINN_ROOT/.Xilinx/HLS_init.tcl" ]; then + cp "$FINN_ROOT/.Xilinx/HLS_init.tcl" "$HOME/.Xilinx/" + else + yecho "Unable to find $FINN_ROOT/.Xilinx/HLS_init.tcl" + fi + + if [ -f "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" ]; then + mkdir "$HOME/.Xilinx/Vivado/" + cp "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" "$HOME/.Xilinx/Vivado/" + else + yecho "Unable to find $FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" + fi +else + yecho "Unable to find $FINN_ROOT/.Xilinx" + yecho "Functionality dependent on beta devices will not be available." + yecho "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl " + yecho "are set correctly and mounted into the Docker container." 
+fi + export PATH=$PATH:$HOME/.local/bin # execute the provided command(s) as root exec "$@" From 26e3306796d3d0daac94b87c9d4d01676ecf134e Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 24 Aug 2023 15:40:48 +0100 Subject: [PATCH 600/628] [NBs] Add first draft of advanced builder settings notebook --- .../4_advanced_builder_settings.ipynb | 789 ++++++++++++++++++ 1 file changed, 789 insertions(+) create mode 100644 notebooks/advanced/4_advanced_builder_settings.ipynb diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb new file mode 100644 index 0000000000..ce02ab618e --- /dev/null +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -0,0 +1,789 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8fcff912", + "metadata": {}, + "source": [ + "# Advanced Builder settings\n", + "\n", + "**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".\n", + "\n", + "\"drawing\"\n", + "\n", + "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", + "\n", + "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput." + ] + }, + { + "cell_type": "markdown", + "id": "a830e730", + "metadata": {}, + "source": [ + "In this tutorial, we will have a more detailed look into the FINN builder tool and explore different options to customize your FINN design. We assume that you have already completed the [Cybersecurity notebooks](../end2end_example/cybersecurity) and that you have a basic understanding of how the FINN compiler works and how to use the FINN builder tool." + ] + }, + { + "cell_type": "markdown", + "id": "5ec9a0db", + "metadata": {}, + "source": [ + "## Outline\n", + "---------------\n", + "\n", + "1. [Introduction to the CNV-w2a2 network](#intro_cnv)\n", + "2. [Recap default builder flow](#recap_builder)\n", + "3. [How to make a custom build step](#custom_step)\n", + "4. [Folding configuration json](#folding_config)\n", + "5. [Additional builder arguments](#builder_arg)\n", + " 1. [Verification steps](#verify)\n", + " 2. [Examples for additional builder arguments](#example_args)\n", + " 3. [Other builder arguments](#other_args)" + ] + }, + { + "cell_type": "markdown", + "id": "5dbed63f", + "metadata": {}, + "source": [ + "## Introduction to the CNV-w2a2 network \n", + "\n", + "The particular quantized neural network (QNN) we will be targeting in this notebook is referred to as CNV-w2a2 and it classifies 32x32 RGB images into one of ten CIFAR-10 classes. 
All weights and activations in this network are quantized to two bit, with the exception of the input (which is RGB with 8 bits per channel) and the final output (which is 32-bit numbers). It is similar to the convolutional neural network used in the [cnv_end2end_example](../end2end_example/bnn-pynq/cnv_end2end_example.ipynb) Jupyter notebook.\n", + "\n", + "\n", + "You'll have a chance to interactively examine the layers that make up the network in Netron in a moment, so that's enough about the network for now. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce459f3c", + "metadata": {}, + "outputs": [], + "source": [ + "from finn.util.basic import make_build_dir\n", + "from finn.util.visualization import showInNetron, showSrc\n", + "import os\n", + " \n", + "build_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe262964", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from finn.util.test import get_test_model_trained\n", + "from brevitas.export import export_qonnx\n", + "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", + "from qonnx.core.modelwrapper import ModelWrapper\n", + "from qonnx.core.datatype import DataType\n", + "\n", + "cnv = get_test_model_trained(\"CNV\", 2, 2)\n", + "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path)\n", + "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)\n", + "#model = ModelWrapper(export_onnx_path)\n", + "#model.set_tensor_datatype(model.graph.input[0].name, DataType[\"UINT8\"])\n", + "#model.save(build_dir + \"/end2end_cnv_w2a2_tidy.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87f59da6", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/end2end_cnv_w2a2_export.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "c764ed76", + "metadata": {}, + "source": [ + "## Quick recap, how to setup up default builder flow for resource estimations " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9007705a", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow as build\n", + "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", + "\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_estimates_only\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_cfg.estimate_only_dataflow_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02e4c0f0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "72de8d4c", + "metadata": {}, + "outputs": [], + "source": [ + 
"showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3fe1186", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\n\".join(build_cfg.estimate_only_dataflow_steps))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "029da0da", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "showSrc(build_dataflow_steps.step_tidy_up)" + ] + }, + { + "cell_type": "markdown", + "id": "e9c2c97f", + "metadata": {}, + "source": [ + "## How to make a custom build step " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b9d43cc8", + "metadata": {}, + "outputs": [], + "source": [ + "from finn.util.pytorch import ToTensor\n", + "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", + "\n", + "def custom_step_add_pre_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):\n", + " ishape = model.get_tensor_shape(model.graph.input[0].name)\n", + " # preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", + " preproc = ToTensor()\n", + " export_qonnx(preproc, torch.randn(ishape), \"preproc.onnx\", opset_version=11)\n", + " preproc_model = ModelWrapper(\"preproc.onnx\")\n", + " # set input finn datatype to UINT8\n", + " preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType[\"UINT8\"])\n", + " model = model.transform(MergeONNXModels(preproc_model))\n", + " return model\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f00b465", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_pre_proc\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3a2bcea", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87e5651e", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_proc/intermediate_models/custom_step_add_pre_proc.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8c6f1bd0", + "metadata": {}, + "outputs": [], + "source": [ + "from qonnx.transformation.insert_topk import InsertTopK\n", + "\n", + "def custom_step_add_post_proc(model: ModelWrapper, cfg: 
build.DataflowBuildConfig):\n", + " model = model.transform(InsertTopK(k=1))\n", + " return model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "57adbb44", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_pre_and_post_proc\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0598b81", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44127417", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/step_convert_to_hls.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "5ffbadd1", + "metadata": {}, + "source": [ + "## Folding configuration json " + ] + }, + { + "cell_type": "markdown", + "id": "c164040f", + "metadata": {}, + "source": [ + "To learn about the influence of folding factors/parallelism in FINN, please have a look at this notebook: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f75f5634", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object, indent=1))" + ] + }, + { + "cell_type": "markdown", + "id": "ba856c28", + "metadata": {}, + "source": [ + "Hardware configuration for each layer\n", + "\n", + "FIFO depths\n", + "\n", + "Type of memory/compute resources to be used\n", + "\n", + "Parallelism along different dimensions (“PE”, ”SIMD”)\n", + "\n", + "Baked-in, decoupled or external parameters\n", + "\n", + "Influences almost all flows\n", + "\n", + "step_apply_folding_config\n", + "\n", + "Values tuned for performance & footprint\n", + "\n", + "Many additional constraints not visible from .json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7f42774", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_pre_and_post_proc/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cdd9f706", + 
"metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_all_lutram\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " folding_config_file = \"folding_config_all_lutram.json\",\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99b647c0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc680178", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_all_lutram/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "695ecfb1", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_all_lutram/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59e8aaaa", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_all_bram\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " folding_config_file = \"folding_config_all_bram.json\",\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2cdc1aa0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "cd0388fd", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_all_bram/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e60a3efb", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_all_bram/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "markdown", + "id": "4a675834", + "metadata": {}, + "source": [ + "## Additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "e0c167f4", + "metadata": {}, + "source": [ + "### Verification steps " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4fe7318e", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "showSrc(build_dataflow_steps.step_tidy_up)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce1aa025", + "metadata": {}, + "outputs": [], + "source": [ + "showSrc(build_cfg.VerificationStepType)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e157d03c", + "metadata": {}, + "outputs": [], + "source": [ + "# Get golden io pair from Brevitas and save as .npy files\n", + "from finn.util.test import get_trained_network_and_ishape, get_example_input, get_topk\n", + "import numpy as np\n", + "\n", + "\n", + "(brevitas_model, ishape) = get_trained_network_and_ishape(\"cnv\", 2, 2)\n", + "input_tensor_npy = get_example_input(\"cnv\")\n", + "input_tensor_torch = torch.from_numpy(input_tensor_npy).float()\n", + "input_tensor_torch = ToTensor().forward(input_tensor_torch).detach()\n", + "output_tensor_npy = brevitas_model.forward(input_tensor_torch).detach().numpy()\n", + "output_tensor_npy = get_topk(output_tensor_npy, k=1)\n", + "\n", + "np.save(\"input.npy\", input_tensor_npy)\n", + "np.save(\"expected_output.npy\", output_tensor_npy)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cd3032b", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_with_verification\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + " verify_steps=[\n", + " build_cfg.VerificationStepType.QONNX_TO_FINN_PYTHON,\n", + " build_cfg.VerificationStepType.TIDY_UP_PYTHON,\n", + " 
build_cfg.VerificationStepType.STREAMLINED_PYTHON,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a3a46e76", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "markdown", + "id": "f0b30546", + "metadata": {}, + "source": [ + "### Examples for additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "ddfb40e4", + "metadata": {}, + "source": [ + "#### Standalone Thresholds" + ] + }, + { + "cell_type": "markdown", + "id": "b710fd28", + "metadata": {}, + "source": [ + "#### RTL Convolutional Input Generator" + ] + }, + { + "cell_type": "markdown", + "id": "4609f94d", + "metadata": {}, + "source": [ + "### Other builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "37b6853d", + "metadata": {}, + "source": [ + "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9f6aa29", + "metadata": {}, + "outputs": [], + "source": [ + "# Filter out methods\n", + "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", + "print(\"\\n\".join(builder_args))" + ] + }, + { + "cell_type": "markdown", + "id": "b12ab370", + "metadata": {}, + "source": [ + "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", + "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 316d23a03dc70b260093a3811e7156e1ca1a7c06 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 24 Aug 2023 16:59:11 +0100 Subject: [PATCH 601/628] [NBs] Checking in advanced nb --- .../4_advanced_builder_settings.ipynb | 269 +++++++++++++++++- 1 file changed, 256 insertions(+), 13 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index ce02ab618e..5936118089 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -34,7 +34,8 @@ "\n", "1. [Introduction to the CNV-w2a2 network](#intro_cnv)\n", "2. [Recap default builder flow](#recap_builder)\n", - "3. [How to make a custom build step](#custom_step)\n", + "3. [Build steps](#build_step)\n", + " 1. [How to make a custom build step](#custom_step)\n", "4. [Folding configuration json](#folding_config)\n", "5. [Additional builder arguments](#builder_arg)\n", " 1. 
[Verification steps](#verify)\n", @@ -86,10 +87,7 @@ "cnv = get_test_model_trained(\"CNV\", 2, 2)\n", "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path)\n", - "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)\n", - "#model = ModelWrapper(export_onnx_path)\n", - "#model.set_tensor_datatype(model.graph.input[0].name, DataType[\"UINT8\"])\n", - "#model.save(build_dir + \"/end2end_cnv_w2a2_tidy.onnx\")" + "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)" ] }, { @@ -154,7 +152,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -167,6 +165,14 @@ "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "7e561a91", + "metadata": {}, + "source": [ + "## Build steps " + ] + }, { "cell_type": "code", "execution_count": null, @@ -177,6 +183,25 @@ "print(\"\\n\".join(build_cfg.estimate_only_dataflow_steps))" ] }, + { + "cell_type": "markdown", + "id": "dd3ef987", + "metadata": {}, + "source": [ + "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "313fac18", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "print(build_dataflow_steps.step_tidy_up.__doc__)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -193,7 +218,7 @@ "id": "e9c2c97f", "metadata": {}, "source": [ - "## How to make a custom build step " + "### How to make a custom build step " ] }, { @@ -349,7 +374,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -388,9 +413,9 @@ "import json\n", "\n", "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", - " json_object = json.load(json_file)\n", + " folding_config = json.load(json_file)\n", "\n", - "print(json.dumps(json_object, indent=1))" + "print(json.dumps(folding_config, indent=1))" ] }, { @@ -430,6 +455,38 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "d4d177dc", + "metadata": {}, + "source": [ + "You can manually change, here we generate two new folding configurations with either all lutram or all bram" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "112af6fd", + "metadata": {}, + "outputs": [], + "source": [ + "# Set all ram_style to LUT RAM\n", + "for key in folding_config:\n", + " if \"ram_style\" in folding_config[key]:\n", + " folding_config[key][\"ram_style\"] = \"distributed\" \n", + "# Save as .json \n", + "with open(\"folding_config_all_lutram.json\", \"w\") as jsonFile:\n", + " json.dump(folding_config, jsonFile)\n", + " \n", + "# Set all ram_style to BRAM\n", + "for key in folding_config:\n", + " if \"ram_style\" in folding_config[key]:\n", + " folding_config[key][\"ram_style\"] = \"block\" \n", + "# Save as .json \n", + "with open(\"folding_config_all_bram.json\", \"w\") as jsonFile:\n", + " json.dump(folding_config, jsonFile)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -481,7 +538,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + 
"build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -558,7 +615,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -669,6 +726,7 @@ " \"step_streamline\",\n", " \"step_convert_to_hls\",\n", " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", " \"step_apply_folding_config\",\n", " \"step_minimize_bit_width\",\n", " \"step_generate_estimate_reports\",\n", @@ -700,7 +758,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -719,6 +777,80 @@ "#### Standalone Thresholds" ] }, + { + "cell_type": "markdown", + "id": "bddbd686", + "metadata": {}, + "source": [ + " picture of im2col + matmul + multithreshold" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de55871e", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_standalone_thresholds\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " standalone_thresholds = True,\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c143f97a", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba36f07b", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, { "cell_type": "markdown", "id": "b710fd28", @@ -727,6 +859,72 @@ "#### RTL Convolutional Input Generator" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "8249280d", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_rtl_swg\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " 
\"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " force_rtl_conv_inp_gen = True,\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64e83b16", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "09c45dcd", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, { "cell_type": "markdown", "id": "4609f94d", @@ -763,6 +961,51 @@ "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" ] + }, + { + "cell_type": "markdown", + "id": "9aba0493", + "metadata": {}, + "source": [ + "So far, in this notebook, we only looked at configurations up to the generation of estimate reports so far, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec39b9f2", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" + ] + }, + { + "cell_type": "markdown", + "id": "76df000f", + "metadata": {}, + "source": [ + "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "caf49f03", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ec10985", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 033fdc30267ed34c6aee2ccb88c4828acc995aa7 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 28 Aug 2023 22:04:43 +0100 Subject: [PATCH 602/628] [NB] First two sections of advanced nb --- .../4_advanced_builder_settings.ipynb | 268 +++++++++++++++++- 1 file changed, 256 insertions(+), 12 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 5936118089..63f69a6385 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -11,8 +11,7 @@ "\n", "\"drawing\"\n", "\n", - "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", - "\n", + "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput." ] }, @@ -53,7 +52,7 @@ "The particular quantized neural network (QNN) we will be targeting in this notebook is referred to as CNV-w2a2 and it classifies 32x32 RGB images into one of ten CIFAR-10 classes. All weights and activations in this network are quantized to two bit, with the exception of the input (which is RGB with 8 bits per channel) and the final output (which is 32-bit numbers). 
It is similar to the convolutional neural network used in the [cnv_end2end_example](../end2end_example/bnn-pynq/cnv_end2end_example.ipynb) Jupyter notebook.\n", "\n", "\n", - "You'll have a chance to interactively examine the layers that make up the network in Netron in a moment, so that's enough about the network for now. \n" + "You'll have a chance to interactively examine the layers that make up the network in Netron. We start by setting the build directory to the directory this notebook is in and importing helper functions to use in the notebook to examine ONNX graphs and source code." ] }, { @@ -63,13 +62,21 @@ "metadata": {}, "outputs": [], "source": [ - "from finn.util.basic import make_build_dir\n", + "#from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showInNetron, showSrc\n", "import os\n", " \n", "build_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"" ] }, + { + "cell_type": "markdown", + "id": "7fc6444c", + "metadata": {}, + "source": [ + "In the next step, we will export the trained network directly from Brevitas to the QONNX format. QONNX is the intermediate representation (IR) that is used as the frontend to the FINN compiler. Please note that the internal representation of the network is still the FINN-ONNX format. [QONNX and FINN-ONNX](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-qonnx-and-finn-onnx) are extensions to the ONNX format to represent quantization, especially below 8 bit, in ONNX graphs. The main difference is that quantization in QONNX graphs is represented using dedicated quantization nodes ([more about QONNX](https://github.com/fastmachinelearning/qonnx)) while the quantization in FINN-ONNX is an annotation attached to the tensors." + ] + }, { "cell_type": "code", "execution_count": null, @@ -81,8 +88,6 @@ "from finn.util.test import get_test_model_trained\n", "from brevitas.export import export_qonnx\n", "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", - "from qonnx.core.modelwrapper import ModelWrapper\n", - "from qonnx.core.datatype import DataType\n", "\n", "cnv = get_test_model_trained(\"CNV\", 2, 2)\n", "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", @@ -90,6 +95,14 @@ "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)" ] }, + { + "cell_type": "markdown", + "id": "d24b632f", + "metadata": {}, + "source": [ + "After the export, we call a clean up function on the model. This makes sure, that for example all shapes in the network are inferred, constant folding was applied and all tensors and nodes have unique names. In the next step, we can visualize the graph using Netron. When scrolling through the graph, you can see the Quant nodes that indicate the quantization in the network. In the [first step](https://github.com/Xilinx/finn/blob/main/src/finn/builder/build_dataflow_steps.py#L260) of the FINN builder flow, the network gets converted from the QONNX format to the FINN-ONNX format. That means these Quant nodes will not be present in the graph anymore and instead the quantization will be attached as an annotation to the tensors." + ] + }, { "cell_type": "code", "execution_count": null, @@ -108,6 +121,14 @@ "## Quick recap, how to setup up default builder flow for resource estimations " ] }, + { + "cell_type": "markdown", + "id": "a26e5418", + "metadata": {}, + "source": [ + "As a quick recap, let's set up the builder like we have done in the cybersecurity example to get the resource estimates for our example network." 
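Relating to the QONNX to FINN-ONNX discussion above, the ModelWrapper helper functions can be used to inspect the datatype annotation of any tensor. A small sketch on the exported model (no annotation has been set at this point, so a floating point input is assumed by the compiler):

```python
import os
from qonnx.core.modelwrapper import ModelWrapper

# Sketch: check the datatype annotation of the exported model's global input.
build_dir = os.environ["FINN_ROOT"] + "/notebooks/advanced"
model = ModelWrapper(build_dir + "/end2end_cnv_w2a2_export.onnx")
print(model.get_tensor_datatype(model.graph.input[0].name))
```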
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -155,16 +176,130 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "4fa0b9f5", + "metadata": {}, + "source": [ + "The output directory was created and we can extract information about our model and also how it was processed in the FINN compiler from the generated files. Let's focus on the intermediate models for now. You can find them in the output directory in the folder \"intermediate_models\"." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05a941ef", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_estimates_only/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "d746eff3", + "metadata": {}, + "source": [ + "After each FINN builder step, the graph is saved as .onnx file. In the cell above we sort the intermediate models by time in descending order (`ls -t -r`) to visualize the builder flow. As you can see after the conversion to the FINN-ONNX format (`step_qonnx_to_finn`), the graph is prepared by tidy up and streamlining (`step_tidy_up` and `step_streamline`) and then the high level nodes are converted to HLS layers (`step_convert_to_hls`). Then there is a partition created from all layers that were converted to HLS layers (`step_create_dataflow_partition`), then optimizations are applied (`step_target_fps_parallelization`, `step_apply_folding_config` and `step_minimize_bit_width`). In the final step of this example we generate resource and performance reports for the network (`step_generate_estimate_reports`). Use the code below to investigate the network after each step." + ] + }, { "cell_type": "code", "execution_count": null, "id": "72de8d4c", "metadata": {}, "outputs": [], + "source": [ + "model_to_investigate = \"step_qonnx_to_finn.onnx\"\n", + "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/\"+model_to_investigate)" + ] + }, + { + "cell_type": "markdown", + "id": "bccebd0d", + "metadata": {}, + "source": [ + "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there is layers that were not converted. We can see this by clicking on the different nodes. HLS layers have the module `finn.custom_op.fpgadataflow`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d86463a", + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "2719cc09", + "metadata": {}, + "source": [ + "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer only operations into HLS layers, this means only when the input, output & weights are quantized the node will be converted." + ] + }, + { + "cell_type": "markdown", + "id": "ff7fa549", + "metadata": {}, + "source": [ + "
    \n", + "Important notice: We are working on supporting additional data types and this limitation might disappear in the near future.\n", + "
    " + ] + }, + { + "cell_type": "markdown", + "id": "6e6d942e", + "metadata": {}, + "source": [ + "When we click on the `global_in` in the graph, we can see that the quantization annotation does not contain a data type. If no data type is set and it can not be derived from the preceeding node, the FINN compiler automatically assumes that the data type is floating point. This is why the first node does not get converted into an HLS layer, the input is assumed to be floating point." + ] + }, + { + "cell_type": "markdown", + "id": "8b8994e6", + "metadata": {}, + "source": [ + "The solution to the problem depends on the actual data input.\n", + "1. The data set is quantized and `global_in` is an integer: We set the data type of the tensor `global_in` before passing the model to the FINN compiler using [helper functions of ModelWrapper](https://finn.readthedocs.io/en/latest/internals.html#helper-functions-for-tensors).\n", + "2. The data set is not quantized: we can either execute the first layer in software (e.g. as part of the Python driver) or we can add a preprocessing step into the graph." + ] + }, + { + "cell_type": "markdown", + "id": "7504dce7", + "metadata": {}, + "source": [ + "Even though in the example of the CNVw2a2, the inputs are 32x32 RGB images, so the input values are 8 bit (UINT8) \"quantized\", the input to the exported model is floating point. For training in Brevitas, these values were normalized between 0 and 1.0 and so the exported model expects floating point values as input. \n", + "This means we are in scenario 2. In the next section we will develop a custom step for the FINN builder flow to add preprocessing to our network.\n", + "\n", + "But before we move to the next section, let's take a look at the last two nodes in the graph that were not converted to HLS layers." + ] + }, + { + "cell_type": "markdown", + "id": "f9c2696b", + "metadata": {}, + "source": [ + "We have two nodes at the end of the graph that we were not able to convert: a floating poing scalar multiplication and addition. These operations are \"left-over\" from streamlining and cannot be merged into a succeeding thresholding operation. \n", + "\n", + "Our example is a network for image classification, so that we know that the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", + "\n", + "Since the last two layers are scalar operations, they have the same influence on all probability values in the output vector and we can safely merge them into the TopK node. " + ] + }, + { + "cell_type": "markdown", + "id": "4fc8fbf5", + "metadata": {}, + "source": [ + "These pre-processing and post-processing steps are network dependent and we will need to write **custom steps** that can then be executed using the FINN builder tool.\n", + "\n", + "In the next section we will first look into how a standard build step inside FINN looks like and then we will write our own custom steps for pre- and post-processing and add them to the builder configuration." + ] + }, { "cell_type": "markdown", "id": "7e561a91", @@ -173,6 +308,14 @@ "## Build steps " ] }, + { + "cell_type": "markdown", + "id": "fb18b21d", + "metadata": {}, + "source": [ + "The following steps are executed when using the `estimates_only`-flow." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -213,6 +356,14 @@ "showSrc(build_dataflow_steps.step_tidy_up)" ] }, + { + "cell_type": "markdown", + "id": "2809f6a7", + "metadata": {}, + "source": [ + "Each steps gets the model and the build configuration as input arguments. Then a certain sequence of transformations is applied to the model. In some of the steps, verification can be run to ensure that the applied transformations have not changed the behaviour of the network. In the end the modified model is returned." + ] + }, { "cell_type": "markdown", "id": "e9c2c97f", @@ -221,6 +372,14 @@ "### How to make a custom build step " ] }, + { + "cell_type": "markdown", + "id": "537a44e7", + "metadata": {}, + "source": [ + "When writing our own custom steps, we use the same pattern. See below the code for the pre-processing for the example network." + ] + }, { "cell_type": "code", "execution_count": null, @@ -230,6 +389,8 @@ "source": [ "from finn.util.pytorch import ToTensor\n", "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", + "from qonnx.core.modelwrapper import ModelWrapper\n", + "from qonnx.core.datatype import DataType\n", "\n", "def custom_step_add_pre_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):\n", " ishape = model.get_tensor_shape(model.graph.input[0].name)\n", @@ -239,11 +400,22 @@ " preproc_model = ModelWrapper(\"preproc.onnx\")\n", " # set input finn datatype to UINT8\n", " preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType[\"UINT8\"])\n", + " # merge pre-processing onnx model with cnv model (passed as input argument)\n", " model = model.transform(MergeONNXModels(preproc_model))\n", " return model\n", " " ] }, + { + "cell_type": "markdown", + "id": "7a6798aa", + "metadata": {}, + "source": [ + "In the next step we can modify the builder configuration to execute a custom sequence of builder steps, including the newly implemented pre-processing custom step.\n", + "\n", + "For that we create a list `build_steps` which contains next to the standard steps from the `estimate_only` flow, also the new custom step to add the pre-processing. This list then gets passed in the build configuration." + ] + }, { "cell_type": "code", "execution_count": null, @@ -254,11 +426,11 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_pre_proc\"\n", + "output_dir = \"output_pre_proc\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -275,7 +447,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -298,6 +470,24 @@ "build.build_dataflow_cfg(model_file, cfg_estimates)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "51b7dbd5", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_pre_proc/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "4690049f", + "metadata": {}, + "source": [ + "An intermediate .onnx file after the execution of the custom step was automatically created, let's have a look at the graph." 
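Before opening the graph in Netron, here is what the post-processing counterpart announced earlier might look like. This is only a sketch (the actual cell appears later in the notebook and may differ); it assumes qonnx provides the `InsertTopK` transformation with a `k` parameter.

```python
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.insert_topk import InsertTopK

import finn.builder.build_dataflow as build

# Sketch: insert a TopK (k=1) node at the network output; the trailing scalar
# Mul/Add can then be absorbed into it during streamlining.
def custom_step_add_post_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):
    model = model.transform(InsertTopK(k=1))
    return model
```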
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -308,6 +498,16 @@ "showInNetron(build_dir+\"/output_pre_proc/intermediate_models/custom_step_add_pre_proc.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "90c6bef9", + "metadata": {}, + "source": [ + "The graph is in QONNX format and a division by 255 is inserted in the beginning. We can now use the CIFAR-10 images directly as input to the graph and the new `global_in` tensor is UINT8.\n", + "\n", + "You can already have a look on how the intermediate models have changed by modifying the code in the cell above. Before we go into more detail, we will add another custom step to insert the post-processing. In this case this means the insertion of a TopK node." + ] + }, { "cell_type": "code", "execution_count": null, @@ -332,7 +532,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_pre_and_post_proc\"\n", + "output_dir = \"output_pre_and_post_proc\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -354,7 +554,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -377,16 +577,60 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "95230896", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_pre_and_post_proc/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "3a0263b1", + "metadata": {}, + "source": [ + "You can use the code in the cell below to investigate the generated intermediate models. " + ] + }, { "cell_type": "code", "execution_count": null, "id": "44127417", "metadata": {}, "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/custom_step_add_post_proc.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "5cc97505", + "metadata": {}, + "source": [ + "Let's have a look at the model after the conversion to hls, to verify that now all layers are correctly converted." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63131e3e", + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "8fd0af6b", + "metadata": {}, + "source": [ + "The model contains now a `Thresholding` layer in the beginning and a `LabelSelect_Batch` layer at the end. Please note, that there is still a `Transpose` node as the first layer of the graph, but we can solve this by converting the input data to the NHWC format before streaming it into the FINN accelerator." 
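That host-side layout conversion is a one-liner with numpy. A small sketch, with array shapes chosen to match the CIFAR-10 input used here:

```python
import numpy as np

# Sketch: rearrange an NCHW input (1, 3, 32, 32) into the NHWC layout
# (1, 32, 32, 3) expected at the accelerator input stream.
img_nchw = np.random.randint(0, 256, size=(1, 3, 32, 32), dtype=np.uint8)
img_nhwc = img_nchw.transpose(0, 2, 3, 1)
print(img_nhwc.shape)  # (1, 32, 32, 3)
```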
+ ] + }, { "cell_type": "markdown", "id": "5ffbadd1", From 68726ac8a329473bc183af993eacb66c17a0c88a Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 29 Aug 2023 18:15:58 +0100 Subject: [PATCH 603/628] [NB] Add section about folding configurations to advanced nb --- .../4_advanced_builder_settings.ipynb | 135 ++++++++++++++---- 1 file changed, 104 insertions(+), 31 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 63f69a6385..1e17f640ef 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -11,7 +11,7 @@ "\n", "\"drawing\"\n", "\n", - "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", + "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from a small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput." ] }, @@ -62,7 +62,6 @@ "metadata": {}, "outputs": [], "source": [ - "#from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showInNetron, showSrc\n", "import os\n", " \n", @@ -218,7 +217,7 @@ "id": "bccebd0d", "metadata": {}, "source": [ - "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there is layers that were not converted. We can see this by clicking on the different nodes. HLS layers have the module `finn.custom_op.fpgadataflow`." + "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there are layers that were not converted. We can see this by clicking on the different nodes. 
HLS layers have the module `finn.custom_op.fpgadataflow`." ] }, { @@ -236,7 +235,7 @@ "id": "2719cc09", "metadata": {}, "source": [ - "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer only operations into HLS layers, this means only when the input, output & weights are quantized the node will be converted." + "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer only operations into HLS layers, this means only when the input, output & weights are quantized to integer the node will be converted." ] }, { @@ -285,7 +284,7 @@ "source": [ "We have two nodes at the end of the graph that we were not able to convert: a floating poing scalar multiplication and addition. These operations are \"left-over\" from streamlining and cannot be merged into a succeeding thresholding operation. \n", "\n", - "Our example is a network for image classification, so that we know that the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", + "Our example is a network for image classification, so the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", "\n", "Since the last two layers are scalar operations, they have the same influence on all probability values in the output vector and we can safely merge them into the TopK node. " ] @@ -361,7 +360,7 @@ "id": "2809f6a7", "metadata": {}, "source": [ - "Each steps gets the model and the build configuration as input arguments. Then a certain sequence of transformations is applied to the model. In some of the steps, verification can be run to ensure that the applied transformations have not changed the behaviour of the network. In the end the modified model is returned." + "Each steps gets the model (`model: ModelWrapper`) and the build configuration (`cfg: DataflowBuildConfig`) as input arguments. Then a certain sequence of transformations is applied to the model. In some of the steps, verification can be run to ensure that the applied transformations have not changed the behaviour of the network. In the end the modified model is returned." ] }, { @@ -602,7 +601,8 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/custom_step_add_post_proc.onnx\")" + "model_to_investigate = \"custom_step_add_post_proc.onnx\"\n", + "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/\"+model_to_investigate)" ] }, { @@ -644,7 +644,17 @@ "id": "c164040f", "metadata": {}, "source": [ - "To learn about the influence of folding factors/parallelism in FINN, please have a look at this notebook: " + "The FINN compiler allows the user to implement a network in streaming dataflow architecture, this means every layer is implemented individually and the data is streamed through the accelerator. 
We can customize each layer for specific performance and resource requirements by adjusting the parallelism and resource type of each layer. In the FINN context we refer to this customization of parallelism in each layer as folding. To learn more details about the influence of folding factors/parallelism in FINN, please have a look at our [folding tutorial](3_folding.ipynb).\n", + "\n", + "In this section, we will look into the interface over which we can influence the customization of each layer using the FINN builder tool: A json file containing the folding configuration." + ] + }, + { + "cell_type": "markdown", + "id": "1299b86d", + "metadata": {}, + "source": [ + "Depending on the invoked step, the FINN compiler can produce or consume a .json file containing the folding configuration for each layer. In the cell below, we will have a look at the automatically generated .json file, which is produced by `step_target_fps_parallelization`. We use this then as starting point to manipulate the folding configuration and feed it back into the builder tool." ] }, { @@ -664,26 +674,28 @@ }, { "cell_type": "markdown", - "id": "ba856c28", + "id": "8de787a7", "metadata": {}, "source": [ - "Hardware configuration for each layer\n", - "\n", - "FIFO depths\n", - "\n", - "Type of memory/compute resources to be used\n", - "\n", - "Parallelism along different dimensions (“PE”, ”SIMD”)\n", - "\n", - "Baked-in, decoupled or external parameters\n", - "\n", - "Influences almost all flows\n", - "\n", - "step_apply_folding_config\n", - "\n", - "Values tuned for performance & footprint\n", - "\n", - "Many additional constraints not visible from .json" + "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. For each of the layers, some node attributes are listed:\n", + "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", + "* `ram_style` determines which memory resource will be used for the layer.\n", + " * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n", + " * `distributed`: LUTRAM will be used\n", + " * `block`: BRAM will be used\n", + " * `ultra`: URAM will be used, if available on the selected board\n", + "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "* `resType`: This is a node attribute for the MVAU layer and can be set to `lut` or `dsp`. Please note that selecting `dsp` will not enable the optimized RTL variant of the MVAU but rather generate HLS code utilizing DSPs, this is not optimal yet but can give an additional parameter for design space exploration.\n", + "* `runtime_writeable_weights`: FINN offers the option to implement the weights as \"runtime writable\", this means you can write the weight values from the driver via an axilite interface." 
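To make the file format concrete, the snippet below writes a minimal configuration in the same shape as the generated auto_folding_config.json. The layer name, the attribute values and the top-level "Defaults" entry are illustrative assumptions rather than values taken from this network.

```python
import json

# Illustrative only: a minimal hand-written folding configuration.
folding_example = {
    "Defaults": {},
    "MatrixVectorActivation_0": {
        "PE": 16,
        "SIMD": 3,
        "mem_mode": "decoupled",
        "ram_style": "block",
        "resType": "lut",
        "runtime_writeable_weights": 0,
    },
}
with open("folding_config_example.json", "w") as f:
    json.dump(folding_example, f, indent=2)
```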
+ ] + }, + { + "cell_type": "markdown", + "id": "fd1519fe", + "metadata": {}, + "source": [ + "In the following part of the tutorial, we will use the auto generated json file as starting point to create two new json files which explore the `ram_style` attribute. We will use one of the generated reports from the FINN builder to see the impact of these changes.\n", + "For that, we will extract the total resources from the *estimate_layer_resources.json* report in the following cell." ] }, { @@ -699,12 +711,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "0be3b0e1", + "metadata": {}, + "source": [ + "The FINN compiler estimates the network to use ~500 BRAM blocks and ~100k LUTs." + ] + }, { "cell_type": "markdown", "id": "d4d177dc", "metadata": {}, "source": [ - "You can manually change, here we generate two new folding configurations with either all lutram or all bram" + "We will use the `auto_folding_config.json` and create two folding configuration from that file:\n", + "* All `ram_style` attributes set to `distributed`\n", + "* All `ram_style` attributes set to `block`" ] }, { @@ -714,6 +736,9 @@ "metadata": {}, "outputs": [], "source": [ + "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", + " folding_config = json.load(json_file)\n", + "\n", "# Set all ram_style to LUT RAM\n", "for key in folding_config:\n", " if \"ram_style\" in folding_config[key]:\n", @@ -731,6 +756,14 @@ " json.dump(folding_config, jsonFile)" ] }, + { + "cell_type": "markdown", + "id": "0e64a499", + "metadata": {}, + "source": [ + "After generating these files, we will invoke the builder flow. To enable the FINN builder to take the generated folding configuration as input, we will need to set the additional builder argument `folding_config_file` and we will change the `build_steps` to not run `step_target_fps_parallelization`. The build step does not necessarily need to be excluded, but since we pass a separate folding configuration, the output from that step would be overwritten anyways, so we skip it for a faster execution." + ] + }, { "cell_type": "code", "execution_count": null, @@ -741,7 +774,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_all_lutram\"\n", + "output_dir = \"output_all_lutram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -762,7 +795,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", @@ -785,6 +818,14 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "e705767d", + "metadata": {}, + "source": [ + "We can now have a look at the produced model, when clicking on the individual nodes, you can see that all layers have the node attribute `ram_style` set to `distributed`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -808,6 +849,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "55208c70", + "metadata": {}, + "source": [ + "The estimation report shows that BRAM utilization is down to zero and the LUT count went up to around 150k." 
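A small comparison loop can put these numbers next to the baseline run. This is a sketch and assumes the reports live in the builder's default report/ subfolder of each output directory.

```python
import json

# Sketch: print the "total" resource estimates of the baseline run and the
# all-LUTRAM run side by side.
for run in ["output_pre_and_post_proc", "output_all_lutram"]:
    with open(run + "/report/estimate_layer_resources.json") as f:
        print(run, json.load(f)["total"])
```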
+ ] + }, + { + "cell_type": "markdown", + "id": "11b8430a", + "metadata": {}, + "source": [ + "Let's do the same with the folding configuration which sets all memory resources to use BRAM." + ] + }, { "cell_type": "code", "execution_count": null, @@ -818,7 +875,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_all_bram\"\n", + "output_dir = \"output_all_bram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -839,7 +896,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", @@ -885,6 +942,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "97f87780", + "metadata": {}, + "source": [ + "The initial implementation already had a high utilization of BRAM, but the estimations went now up to 522 BRAMs while the LUT count went down to ~99k." + ] + }, + { + "cell_type": "markdown", + "id": "e65a8ded", + "metadata": {}, + "source": [ + "You can use this example as a starting point to manipulate the folding configuration yourself. Instead of using the above code, you can also manually open one of the example .json files and set the values differently. Please be aware that the node attributes can not be set to arbitrary values. Especially the folding factors need to fulfil [certain constraints](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer). The other settings for node attributes, can be best looked up in the individual custom operator classes: [e.g. for MVAU](https://github.com/Xilinx/finn/blob/dev/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py#L64)" + ] + }, { "cell_type": "markdown", "id": "4a675834", From 45e8c37faa7d542dddb0a6439f3085aaf83e4c96 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 29 Aug 2023 21:25:17 +0100 Subject: [PATCH 604/628] [nb] Add details about verification section in advanced nb --- .../4_advanced_builder_settings.ipynb | 180 +++++++++++++++--- 1 file changed, 151 insertions(+), 29 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 1e17f640ef..16c4e1a8fa 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -7,8 +7,6 @@ "source": [ "# Advanced Builder settings\n", "\n", - "**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".\n", - "\n", "\"drawing\"\n", "\n", "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from a small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. 
This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", @@ -135,6 +133,8 @@ "metadata": {}, "outputs": [], "source": [ + "## Quick recap on how to setup the default builder flow for resource estimations\n", + "\n", "import finn.builder.build_dataflow as build\n", "import finn.builder.build_dataflow_config as build_cfg\n", "import os\n", @@ -422,6 +422,8 @@ "metadata": {}, "outputs": [], "source": [ + "## Builder flow with custom step for pre-processing\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", @@ -528,14 +530,16 @@ "metadata": {}, "outputs": [], "source": [ + "## Builder flow with custom step for pre-processing and post-processing\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_pre_and_post_proc\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -771,14 +775,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with custom folding configuration\n", + "## folding_config_file = \"folding_config_all_lutram.json\"\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_all_lutram\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -872,14 +879,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with custom folding configuration\n", + "## folding_config_file = \"folding_config_all_bram.json\"\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_all_bram\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -966,6 +976,22 @@ "## Additional builder arguments " ] }, + { + "cell_type": "markdown", + "id": "f7012b9a", + "metadata": {}, + "source": [ + "In this section, we will have a peak into additional builder arguments the FINN compiler exposes. We will not be able to cover all but you will be able to have a look at a list and we encourage you to take your time to look into the different options there are to customize the FINN builder configuration." + ] + }, + { + "cell_type": "markdown", + "id": "467d8829", + "metadata": {}, + "source": [ + "We start by enabling the verification flow in the builder. The FINN compiler applies multiple transformations to the model before it gets turned into hardware, so we need to make sure that the functional behavior of the network does not change." 
+ ] + }, { "cell_type": "markdown", "id": "e0c167f4", @@ -974,6 +1000,14 @@ "### Verification steps " ] }, + { + "cell_type": "markdown", + "id": "308d52ba", + "metadata": {}, + "source": [ + "Earlier in the tutorial, we had a look at how build steps are written. When investigating the `step_tidy_up`, we can see that before the changed model is returned a verification step can be run. In the case of `step_tidy_up` it is the step `\"initial python\"` that can be initiated by setting `VerificationStepType.TIDY_UP_PYTHON`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -985,6 +1019,14 @@ "showSrc(build_dataflow_steps.step_tidy_up)" ] }, + { + "cell_type": "markdown", + "id": "2bbb84fb", + "metadata": {}, + "source": [ + "Some of the default build steps have automatic verification enabled, when the corresponding verification step is set." + ] + }, { "cell_type": "code", "execution_count": null, @@ -995,6 +1037,14 @@ "showSrc(build_cfg.VerificationStepType)" ] }, + { + "cell_type": "markdown", + "id": "da1a2b88", + "metadata": {}, + "source": [ + "In the cells below, we will use an example input from the CIFAR-10 data set and use the forward pass in Brevitas to generate a reference output. We save the input as `input.npy` and the reference output as `expected_output.npy`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1018,6 +1068,14 @@ "np.save(\"expected_output.npy\", output_tensor_npy)" ] }, + { + "cell_type": "markdown", + "id": "d03450e7", + "metadata": {}, + "source": [ + "In the next step we set up the builder flow again, this time we will set the build argument `verify_steps` and pass a list of verification steps." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1025,14 +1083,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## verification steps\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_with_verification\"\n", + "output_dir = \"output_with_verification\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1050,7 +1111,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1067,6 +1128,14 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "1d05b985", + "metadata": {}, + "source": [ + "When execution the code below, the verification will be invoked in the background. After the execution we can check if the verification was successful by investigating the output directory." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1078,6 +1147,61 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "ca1d571d", + "metadata": {}, + "source": [ + "The output directory has now an additional directory called `verification_output`." 
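For reference, since the diff context above does not show them, the verification-related arguments set in the configuration for this run look roughly as follows. Argument and enum names are taken from build_dataflow_config.py and this should be read as a sketch, not as the exact cell contents.

```python
import finn.builder.build_dataflow as build
import finn.builder.build_dataflow_config as build_cfg

# Sketch: verification-related builder arguments (default steps are used here
# for brevity; the notebook passes its own build_steps list as well).
cfg_verify = build.DataflowBuildConfig(
    output_dir="output_with_verification",
    synth_clk_period_ns=10.0,
    fpga_part="xc7z020clg400-1",
    target_fps=1000000,
    verify_steps=[
        build_cfg.VerificationStepType.TIDY_UP_PYTHON,
        build_cfg.VerificationStepType.STREAMLINED_PYTHON,
    ],
    verify_input_npy="input.npy",
    verify_expected_output_npy="expected_output.npy",
    generate_outputs=[build_cfg.DataflowOutputType.ESTIMATE_REPORTS],
)
```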
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca74d537", + "metadata": {}, + "outputs": [], + "source": [ + "!ls output_with_verification" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "908ecda4", + "metadata": {}, + "outputs": [], + "source": [ + "!ls output_with_verification/verification_output" + ] + }, + { + "cell_type": "markdown", + "id": "bcbc6f49", + "metadata": {}, + "source": [ + "The directory contains three .npy files. These files are the saved output files from the different verification steps. The suffix indicates if the array matches with the expected output. In our case, the suffix is for all verification steps `_SUCCESS`. Since the outputs are saved as .npy, we can open and investigate the files simply in Python." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7a1b6ca9", + "metadata": {}, + "outputs": [], + "source": [ + "verify_initial_python = np.load(\"output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n", + "print(\"The output of the verification step after the step_tidy_up is: \" + str(verify_initial_python))" + ] + }, + { + "cell_type": "markdown", + "id": "6558e19e", + "metadata": {}, + "source": [ + "If the generated output does not match the expected output, these files can be used for debugging." + ] + }, { "cell_type": "markdown", "id": "f0b30546", @@ -1109,14 +1233,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## standalone_thresholds = True\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_standalone_thresholds\"\n", + "output_dir = \"output_standalone_thresholds\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1134,7 +1261,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1183,14 +1310,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## force_rtl_conv_inp_gen = True\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_rtl_swg\"\n", + "output_dir = \"output_rtl_swg\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1208,7 +1338,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1275,7 +1405,7 @@ "id": "b12ab370", "metadata": {}, "source": [ - "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. 
Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", + "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" ] }, @@ -1315,14 +1445,6 @@ "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1ec10985", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { From e72c9dd0f3274833536c319ce791076811d4989b Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 31 Aug 2023 15:50:19 +0100 Subject: [PATCH 605/628] [nb] Clean up advanced nb --- .../4_advanced_builder_settings.ipynb | 179 +++++++++--------- 1 file changed, 86 insertions(+), 93 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 16c4e1a8fa..1136dba9f4 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -36,8 +36,8 @@ "4. [Folding configuration json](#folding_config)\n", "5. [Additional builder arguments](#builder_arg)\n", " 1. [Verification steps](#verify)\n", - " 2. [Examples for additional builder arguments](#example_args)\n", - " 3. [Other builder arguments](#other_args)" + " 2. [Other builder arguments](#other_args)\n", + " 3. [Examples for additional builder arguments](#example_args)" ] }, { @@ -284,9 +284,9 @@ "source": [ "We have two nodes at the end of the graph that we were not able to convert: a floating poing scalar multiplication and addition. These operations are \"left-over\" from streamlining and cannot be merged into a succeeding thresholding operation. \n", "\n", - "Our example is a network for image classification, so the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", + "Our example is a network for image classification, so the output is a vector of 10 values that give a predicition score for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", "\n", - "Since the last two layers are scalar operations, they have the same influence on all probability values in the output vector and we can safely merge them into the TopK node. " + "Since the last two layers are scalar operations, they have the same influence on all predicition scores in the output vector and we can safely merge them into the TopK node. " ] }, { @@ -683,12 +683,13 @@ "source": [ "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. 
For each of the layers, some node attributes are listed:\n", "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", - "* `ram_style` determines which memory resource will be used for the layer.\n", + "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to determine which memory resource will be used for the layer. The argument `ram_style` is set to the selected memory type:\n", " * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n", " * `distributed`: LUTRAM will be used\n", " * `block`: BRAM will be used\n", " * `ultra`: URAM will be used, if available on the selected board\n", - "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "\n", "* `resType`: This is a node attribute for the MVAU layer and can be set to `lut` or `dsp`. Please note that selecting `dsp` will not enable the optimized RTL variant of the MVAU but rather generate HLS code utilizing DSPs, this is not optimal yet but can give an additional parameter for design space exploration.\n", "* `runtime_writeable_weights`: FINN offers the option to implement the weights as \"runtime writable\", this means you can write the weight values from the driver via an axilite interface." ] @@ -1204,32 +1205,98 @@ }, { "cell_type": "markdown", - "id": "f0b30546", + "id": "4609f94d", "metadata": {}, "source": [ - "### Examples for additional builder arguments " + "### Other builder arguments " ] }, { "cell_type": "markdown", - "id": "ddfb40e4", + "id": "37b6853d", "metadata": {}, "source": [ - "#### Standalone Thresholds" + "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9f6aa29", + "metadata": {}, + "outputs": [], + "source": [ + "# Filter out methods\n", + "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", + "print(\"\\n\".join(builder_args))" + ] + }, + { + "cell_type": "markdown", + "id": "b12ab370", + "metadata": {}, + "source": [ + "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. 
target_fps, fpga_part, folding_config_file, ...\n", + "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" + ] + }, + { + "cell_type": "markdown", + "id": "9aba0493", + "metadata": {}, + "source": [ + "So far, in this notebook, we only looked at configurations up to the generation of estimate reports, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec39b9f2", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" ] }, { "cell_type": "markdown", - "id": "bddbd686", + "id": "76df000f", "metadata": {}, "source": [ - " picture of im2col + matmul + multithreshold" + "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "caf49f03", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" + ] + }, + { + "cell_type": "markdown", + "id": "3b98eb65", + "metadata": {}, + "source": [ + "### Examples for additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "0dbdab42", + "metadata": {}, + "source": [ + "#### Standalone Thresholds" ] }, { "cell_type": "code", "execution_count": null, - "id": "de55871e", + "id": "2619ebde", "metadata": {}, "outputs": [], "source": [ @@ -1277,7 +1344,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c143f97a", + "id": "b2e9bc42", "metadata": {}, "outputs": [], "source": [ @@ -1288,7 +1355,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ba36f07b", + "id": "32ae296e", "metadata": {}, "outputs": [], "source": [ @@ -1297,7 +1364,7 @@ }, { "cell_type": "markdown", - "id": "b710fd28", + "id": "074d8253", "metadata": {}, "source": [ "#### RTL Convolutional Input Generator" @@ -1306,7 +1373,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8249280d", + "id": "ab0c4974", "metadata": {}, "outputs": [], "source": [ @@ -1354,7 +1421,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64e83b16", + "id": "19fe4d85", "metadata": {}, "outputs": [], "source": [ @@ -1365,86 +1432,12 @@ { "cell_type": "code", "execution_count": null, - "id": "09c45dcd", + "id": "4c1f1ce9", "metadata": {}, "outputs": [], "source": [ "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" ] - }, - { - "cell_type": "markdown", - "id": "4609f94d", - "metadata": {}, - "source": [ - "### Other builder arguments " - ] - }, - { - "cell_type": "markdown", - "id": "37b6853d", - "metadata": {}, - "source": [ - "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e9f6aa29", - "metadata": {}, - "outputs": [], - "source": [ - "# Filter out methods\n", - "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", - "print(\"\\n\".join(builder_args))" - ] - }, - { - "cell_type": "markdown", - "id": "b12ab370", - "metadata": {}, - "source": [ - "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. 
Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", - "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" - ] - }, - { - "cell_type": "markdown", - "id": "9aba0493", - "metadata": {}, - "source": [ - "So far, in this notebook, we only looked at configurations up to the generation of estimate reports so far, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec39b9f2", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" - ] - }, - { - "cell_type": "markdown", - "id": "76df000f", - "metadata": {}, - "source": [ - "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "caf49f03", - "metadata": {}, - "outputs": [], - "source": [ - "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", - "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" - ] } ], "metadata": { From 79212877f4818eb322b76066741a7ac31a62a7fb Mon Sep 17 00:00:00 2001 From: auphelia Date: Sat, 2 Sep 2023 17:43:35 +0100 Subject: [PATCH 606/628] [NB] Rework end part of advanced builder tutorial --- .../4_advanced_builder_settings.ipynb | 182 ++++++++++++++++-- 1 file changed, 168 insertions(+), 14 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 1136dba9f4..aa244e4983 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -37,7 +37,7 @@ "5. [Additional builder arguments](#builder_arg)\n", " 1. [Verification steps](#verify)\n", " 2. [Other builder arguments](#other_args)\n", - " 3. [Examples for additional builder arguments](#example_args)" + " 3. [Examples for additional builder arguments & bitfile generation](#example_args)" ] }, { @@ -684,7 +684,7 @@ "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. For each of the layers, some node attributes are listed:\n", "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", - "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to determine which memory resource will be used for the layer. The argument `ram_style` is set to the selected memory type:\n", + "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to choose which memory resource will be used for the layer. 
The argument `ram_style` is set to the selected memory type:\n",
     "    * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n",
     "    * `distributed`: LUTRAM will be used\n",
     "    * `block`: BRAM will be used\n",
@@ -1216,7 +1216,8 @@
    "id": "37b6853d",
    "metadata": {},
    "source": [
-    "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments."
+    "In addition to enabling the verification flows, the FINN builder has numerous additional builder arguments to further customize your network. \n",
+    "Let's have a look at the options for the arguments. We want to only filter out the FINN specific arguments."
    ]
   },
   {
@@ -1236,8 +1237,9 @@
    "id": "b12ab370",
    "metadata": {},
    "source": [
-    "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n",
-    "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155"
+    "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. This class is used for the implementation of the FINN builder. In this tutorial, we are mainly interested in the FINN specific arguments. \n",
+    "\n",
+    "Some of these arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part and folding_config_file. In the code of the FINN builder, the function of each builder argument is documented; you can have a look [here](https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155) and scroll through the available builder arguments."
    ]
   },
   {
@@ -1245,7 +1247,9 @@
    "id": "9aba0493",
    "metadata": {},
    "source": [
-    "So far, in this notebook, we only looked at configurations up to the generation of estimate reports, a lot of these builder arguments actually become relevant at a later stage in the FINN flow."
+    "So far, in this notebook, we only looked at configurations up to the generation of estimate reports; a lot of these builder arguments actually become relevant at a later stage in the FINN flow.\n",
+    "\n",
+    "Let's have a look at the default build dataflow steps for the complete FINN flow."
    ]
   },
   {
@@ -1258,6 +1262,15 @@
    "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "b9bc5715",
+   "metadata": {},
+   "source": [
+    "You can see that after the generation of the estimate reports, the code generation and the IP generation are invoked (`step_hls_codegen` and `step_hls_ipgen`). The FIFO depths are determined and the FIFOs are inserted in the network (`step_set_fifo_depths`); we can then create an IP design of our whole network by stitching the IPs from each layer together (`step_create_stitched_ip`). At this point we have an implementation of the neural network that we can integrate within a bigger FPGA design, and we can run performance measurements using simulation (`step_measure_rtlsim_performance`) and out-of-context synthesis (`step_out_of_context_synthesis`) for it.\n",
+    "The FINN builder also provides automatic system integration for Zynq and Alveo devices; this can be invoked by running `step_synthesize_bitfile`, `step_make_pynq_driver` and `step_deployment_package`."
+ ] + }, { "cell_type": "markdown", "id": "76df000f", @@ -1274,7 +1287,25 @@ "outputs": [], "source": [ "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", - "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" + "print(build_dataflow_steps.step_hls_codegen.__doc__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c84a9fbc", + "metadata": {}, + "outputs": [], + "source": [ + "showSrc(build_dataflow_steps.step_hls_codegen)" + ] + }, + { + "cell_type": "markdown", + "id": "c249f141", + "metadata": {}, + "source": [ + "This concludes the advanced builder settings tutorial. Below you can find code that can help you investigating more of the builder arguments and invoking the whole flow to generate a bitfile." ] }, { @@ -1282,7 +1313,7 @@ "id": "3b98eb65", "metadata": {}, "source": [ - "### Examples for additional builder arguments " + "### Examples for additional builder arguments & bitfile generation " ] }, { @@ -1293,6 +1324,21 @@ "#### Standalone Thresholds" ] }, + { + "cell_type": "markdown", + "id": "e21ff36f", + "metadata": {}, + "source": [ + "In FINN, convolutions are expressed with three components:\n", + "* An Im2Col operation\n", + "* A matrix multiplication\n", + "* A MultiThreshold operation\n", + "\n", + "When converting these nodes into HLS layers, by default the MatMul and the MultiThreshold gets converted into **one** component called Matrix-Vector-Activation Unit (MVAU). But the FINN compiler allows us to implement the activation separately. This gives an additional possibility for customization because we can adjust the folding parameters of the standalone threshold unit independently. \n", + "\n", + "If you would like to enable this feature, you can set the build argument `standalone_thresholds` to `True`. In the code below this feature is enabled and you can have a look at the generated .onnx file. Please note that you need to uncomment the code first." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1348,8 +1394,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -1359,7 +1405,7 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" + "#showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" ] }, { @@ -1370,6 +1416,26 @@ "#### RTL Convolutional Input Generator" ] }, + { + "cell_type": "markdown", + "id": "b85e5ac7", + "metadata": {}, + "source": [ + "Recently, we have worked on the *Operator Hardening* in the FINN compiler. This means that we implement core building blocks in RTL instead of using HLS.\n", + "One of these components is already available in the FINN compiler, you can enable the RTL implementation of the ConvolutionInputGenerator (aka Sliding Window Generator) by setting the build argument `force_rtl_conv_inp_gen` to `True`.\n", + "In the code below this feature is enabled and you can have a look at the generated .onnx file. Please note that you need to uncomment the code first." + ] + }, + { + "cell_type": "markdown", + "id": "2a90b63f", + "metadata": {}, + "source": [ + "
    \n", + "Important notice: We are actively working on the integration of RTL components in the FINN flow, the enablement like shown below might change in the future.\n", + "
    " + ] + }, { "cell_type": "code", "execution_count": null, @@ -1425,8 +1491,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -1436,7 +1502,95 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + "#showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "601eb5f8", + "metadata": {}, + "source": [ + "#### Run the whole flow" + ] + }, + { + "cell_type": "markdown", + "id": "42aa929b", + "metadata": {}, + "source": [ + "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take up to an hour. Please note that you need to uncomment the code first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4efd46f4", + "metadata": {}, + "outputs": [], + "source": [ + "## Build flow with hardware build\n", + "\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "output_dir = \"output_bitfile\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + " \"step_hls_codegen\",\n", + " \"step_hls_ipgen\",\n", + " \"step_set_fifo_depths\",\n", + " \"step_create_stitched_ip\",\n", + " \"step_measure_rtlsim_performance\",\n", + " \"step_out_of_context_synthesis\",\n", + " \"step_synthesize_bitfile\",\n", + " \"step_make_pynq_driver\",\n", + " \"step_deployment_package\",\n", + "]\n", + "\n", + "cfg_build = build.DataflowBuildConfig(\n", + " output_dir = output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " build_cfg.DataflowOutputType.STITCHED_IP,\n", + " build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE,\n", + " build_cfg.DataflowOutputType.OOC_SYNTH,\n", + " build_cfg.DataflowOutputType.BITFILE,\n", + " build_cfg.DataflowOutputType.PYNQ_DRIVER,\n", + " build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c7ff6c19", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_build);" ] } ], From 3295c9bdd60fa1e8a99ae32de456e84ff7decda6 Mon Sep 17 00:00:00 2001 From: auphelia Date: Sat, 2 Sep 2023 22:30:47 +0100 Subject: [PATCH 607/628] [nb] Comment last build flow run --- notebooks/advanced/4_advanced_builder_settings.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb 
b/notebooks/advanced/4_advanced_builder_settings.ipynb index aa244e4983..8e0e3ef8cf 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -1518,7 +1518,7 @@ "id": "42aa929b", "metadata": {}, "source": [ - "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take up to an hour. Please note that you need to uncomment the code first." + "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take over an hour. Please note that you need to uncomment the code first." ] }, { @@ -1566,7 +1566,7 @@ "cfg_build = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 100,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1589,8 +1589,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_build);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_build);" ] } ], From c7cbe5e5f478fe73caf7aa3c1ffac53a519dc33e Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 10:27:51 +0100 Subject: [PATCH 608/628] [nb] Update final build flow --- .../4_advanced_builder_settings.ipynb | 40 +++++++++++++++---- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 8e0e3ef8cf..38bc19a6ca 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -154,7 +154,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = estimates_output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_cfg.estimate_only_dataflow_steps,\n", @@ -450,7 +450,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -559,7 +559,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1114,7 +1114,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1376,7 +1376,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " standalone_thresholds = True,\n", @@ -1473,7 +1473,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = 
\"xc7z020clg400-1\",\n", " force_rtl_conv_inp_gen = True,\n", @@ -1521,6 +1521,24 @@ "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take over an hour. Please note that you need to uncomment the code first." ] }, + { + "cell_type": "markdown", + "id": "ffa2a352", + "metadata": {}, + "source": [ + "For an optimized design, we download the folding configuration for cnv-w2a2 on the Pynq-Z1 board from [finn-examples](https://github.com/Xilinx/finn-examples). And will pass it to the build flow. Please also note below that we now pass the board as argument to the builder (`board = \"Pynq-Z1\"`) instead of just the fpga part. This time we will select all possible outputs to generate. Please be aware that running the full build might take a few hours." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "765e5ee7", + "metadata": {}, + "outputs": [], + "source": [ + "!wget https://raw.githubusercontent.com/Xilinx/finn-examples/main/build/bnn-pynq/folding_config/cnv-w2a2_folding_config.json" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1528,6 +1546,11 @@ "metadata": {}, "outputs": [], "source": [ + "import finn.builder.build_dataflow as build\n", + "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", + "\n", "## Build flow with hardware build\n", "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", @@ -1566,9 +1589,10 @@ "cfg_build = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 100,\n", " synth_clk_period_ns = 10.0,\n", - " fpga_part = \"xc7z020clg400-1\",\n", + " folding_config_file = \"cnv-w2a2_folding_config.json\",\n", + " board = \"Pynq-Z1\",\n", + " shell_flow_type = build_cfg.ShellFlowType.VIVADO_ZYNQ,\n", " steps = build_steps,\n", " generate_outputs=[\n", " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", From ed163af32f0a43382f19145138432a042840bc55 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 10:37:34 +0100 Subject: [PATCH 609/628] [Tests] Integrate advanced notebook into test suite --- tests/notebooks/test_jupyter_notebooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index c2542380f1..e1415b9066 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -21,6 +21,7 @@ pytest.param(notebook_advanced_dir + "1_custom_transformation_pass.ipynb"), pytest.param(notebook_advanced_dir + "2_custom_op.ipynb"), pytest.param(notebook_advanced_dir + "3_folding.ipynb"), + pytest.param(notebook_advanced_dir + "4_advanced_builder_settings.ipynb"), ] cyber_notebooks = [ From 2d42e9b8650942aad6a52fb7378548238fcc43ff Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 11:01:15 +0100 Subject: [PATCH 610/628] [NBs] Make paths in advanced notebook absolute for testing --- notebooks/advanced/4_advanced_builder_settings.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 38bc19a6ca..4af48ac233 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -190,7 +190,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r 
output_estimates_only/intermediate_models" + "!ls -t -r {build_dir}/output_estimates_only/intermediate_models" ] }, { @@ -478,7 +478,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r output_pre_proc/intermediate_models" + "!ls -t -r {build_dir}/output_pre_proc/intermediate_models" ] }, { @@ -587,7 +587,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r output_pre_and_post_proc/intermediate_models" + "!ls -t -r {build_dir}/output_pre_and_post_proc/intermediate_models" ] }, { @@ -1163,7 +1163,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls output_with_verification" + "!ls {build_dir}/output_with_verification" ] }, { @@ -1173,7 +1173,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls output_with_verification/verification_output" + "!ls {build_dir}/output_with_verification/verification_output" ] }, { From bbda540140427aa1d43a7f78c7e79332bc4e7bbe Mon Sep 17 00:00:00 2001 From: johnnoel Date: Fri, 22 Sep 2023 16:03:46 +0100 Subject: [PATCH 611/628] Update .Xilinx messaging --- docker/finn_entrypoint.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index b441c9359a..6b33a4c9bc 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -118,6 +118,7 @@ if [ -d "$FINN_ROOT/.Xilinx" ]; then mkdir "$HOME/.Xilinx" if [ -f "$FINN_ROOT/.Xilinx/HLS_init.tcl" ]; then cp "$FINN_ROOT/.Xilinx/HLS_init.tcl" "$HOME/.Xilinx/" + gecho "Found HLS_init.tcl and copied to $HOME/.Xilinx/HLS_init.tcl" else yecho "Unable to find $FINN_ROOT/.Xilinx/HLS_init.tcl" fi @@ -125,14 +126,13 @@ if [ -d "$FINN_ROOT/.Xilinx" ]; then if [ -f "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" ]; then mkdir "$HOME/.Xilinx/Vivado/" cp "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" "$HOME/.Xilinx/Vivado/" + gecho "Found Vivado_init.tcl and copied to $HOME/.Xilinx/Vivado/Vivado_init.tcl" else yecho "Unable to find $FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" fi else - yecho "Unable to find $FINN_ROOT/.Xilinx" - yecho "Functionality dependent on beta devices will not be available." - yecho "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl " - yecho "are set correctly and mounted into the Docker container." 
+ echo "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl are set correctly and mounted" + echo "See https://docs.xilinx.com/r/en-US/ug835-vivado-tcl-commands/Tcl-Initialization-Scripts" fi export PATH=$PATH:$HOME/.local/bin From 161544ae8765b6fe29ef37e5184ab8eca6eee7a1 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Fri, 29 Sep 2023 11:16:57 +0100 Subject: [PATCH 612/628] Move successful archive step to parallel stage instead of post --- docker/jenkins/Jenkinsfile | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f4f0533c3f..1f86ac1ef6 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -69,6 +69,9 @@ pipeline { // Use an env variable to help collect test results later in pipeline env.SANITY_UT = "SUCCESS" + + // Archive coverage report if successful + archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") } } } @@ -97,6 +100,9 @@ pipeline { // Use an env variable to help collect test results later in pipeline env.FPGADATAFLOW_RESULT = "SUCCESS" + + // Archive coverage report if successful + archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") } } } @@ -729,9 +735,6 @@ pipeline { archiveArtifacts artifacts: "reports/*.xml" archiveArtifacts artifacts: "reports/*.html" - archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") - archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") - // Plot what XML files were created during the test run junit 'reports/*.xml' } From dd7806eff7b80212440d115886f16c26773de1a6 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Mon, 2 Oct 2023 16:37:06 +0100 Subject: [PATCH 613/628] [CI] Append a space to FINN_DOCKER_EXTRA to avoid malformed docker commands Jenkins unexpectedly trims trailing spaces from env variables. This leads to badly formed inputs for docker. Appending an extra space solves this issues and causes no further problems. 
--- run-docker.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/run-docker.sh b/run-docker.sh index c24dcec724..8df03636bb 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -100,6 +100,9 @@ SCRIPTPATH=$(dirname "$SCRIPT") DOCKER_INTERACTIVE="" +# Catch FINN_DOCKER_EXTRA options being passed in without a trailing space +FINN_DOCKER_EXTRA+=" " + if [ "$1" = "test" ]; then gecho "Running test suite (all tests)" DOCKER_CMD="python setup.py test" From 56b155fb60651ac8d9bf1d68603808ce78bb0fee Mon Sep 17 00:00:00 2001 From: johnnoel Date: Thu, 5 Oct 2023 15:31:39 +0100 Subject: [PATCH 614/628] [CI] Address PR comments --- docker/jenkins/Jenkinsfile | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1f86ac1ef6..2d7ea5e918 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -126,7 +126,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Delete any build files from a previous build - sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}", '') @@ -310,7 +310,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "rm -rf ${env.BOARD}*" + cleanPreviousBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -358,7 +358,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_PynqZ1_zip" @@ -409,7 +409,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -458,7 +458,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -510,7 +510,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "rm -rf ${env.BOARD}*" + cleanPreviousBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -558,7 +558,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "PynqZ1_zip" @@ -609,7 +609,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -658,7 +658,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -747,7 +747,17 @@ pipeline { void cleanPreviousBuildFiles(String 
buildDir) { // Delete any build files from a previous build // Previous build folders affect findCopyZip() and can cause the stage to fail - sh "rm -rf ${buildDir}/*" + if (!buildDir.empty) { + sh "rm -rf ${buildDir}" + } +} + +void cleanPreviousBoardBuildFiles(String boardDir) { + // Delete any board build files + // Specifically used on Pynq boards which require sudo to delete + if (!boardDir.empty) { + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${boardDir}*" + } } void createMultiMarkerScript(String markers, String testResultsFilename, String additionalOptions) { @@ -765,7 +775,7 @@ void runDockerPytestWithMarker(String marker, String testResultsFilename, String sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}""" } -void findBoardBuildFiles(String board, String searchDir, String dirToFind) { +def findBoardBuildFiles(String searchDir, String dirToFind) { def result = sh(script: "find $searchDir -type d -name \"$dirToFind*\"", returnStdout: true).trim() if (result.empty) { error "Directory containing '$dirToFind' not found." @@ -774,7 +784,7 @@ void findBoardBuildFiles(String board, String searchDir, String dirToFind) { } void findCopyZip(String board, String findDir, String copyDir, String stashName) { - def buildDir = findBoardBuildFiles(board, findDir, "hw_deployment_${board}") + def buildDir = findBoardBuildFiles(findDir, "hw_deployment_${board}") sh "cp -r ${buildDir}/${board} ${copyDir}/" dir(copyDir) { sh "zip -r ${board}.zip ${board}/" @@ -802,7 +812,7 @@ python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${tes sh 'chmod 777 run-tests.sh' } -void isNodeOnline(String labelName) { +def isNodeOnline(String labelName) { Label label = Jenkins.instance.getLabel(labelName) def agentOnline = false From 206737f9bbb2ff90a8ead03422cc7aac2e3dc7ac Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 14:21:55 +0100 Subject: [PATCH 615/628] [Fix] Deprecate pkg-resources and update setuptools --- .isort.cfg | 2 +- .../bnn-pynq/cnv_end2end_example.ipynb | 11 ++++++----- requirements.txt | 2 ++ setup.py | 10 ---------- .../transformation/fpgadataflow/create_stitched_ip.py | 4 +--- .../transformation/fpgadataflow/make_pynq_driver.py | 11 +++++------ src/finn/util/pyverilator.py | 6 ++---- src/finn/util/test.py | 8 ++++---- tests/brevitas/test_brevitas_cnv.py | 8 ++++---- tests/end2end/test_end2end_cybsec_mlp.py | 4 +--- tests/end2end/test_ext_weights.py | 7 ++----- tests/fpgadataflow/test_convert_to_hls_layers_cnv.py | 8 ++++---- .../transformation/streamline/test_streamline_cnv.py | 8 ++++---- .../test_batchnorm_to_affine_bnn_pynq.py | 8 ++++---- tests/transformation/test_qonnx_to_finn.py | 8 ++++---- tests/util/test_build_dataflow.py | 4 +--- 16 files changed, 45 insertions(+), 64 deletions(-) diff --git a/.isort.cfg b/.isort.cfg index 6cfe1c8919..5378b88fad 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -2,7 +2,7 @@ line_length=88 indent=' ' skip=.tox,.venv,build,dist -known_standard_library=setuptools,pkg_resources +known_standard_library=setuptools known_test=pytest known_first_party=finn sections=FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index a0dbbf4834..9e9d52e476 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ 
b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -516,12 +516,13 @@ "metadata": {}, "outputs": [], "source": [ - "import pkg_resources as pk\n", + "import importlib_resources\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", - "fn = pk.resource_filename(\"finn.qnn-data\", \"cifar10/cifar10-test-data-class3.npz\")\n", - "x = np.load(fn)[\"arr_0\"]\n", + "ref = importlib_resources.files(\"finn.qnn-data\") / \"cifar10/cifar10-test-data-class3.npz\"\n", + "with importlib_resources.as_file(ref) as fn:\n", + " x = np.load(fn)[\"arr_0\"]\n", "x = x.reshape(3, 32,32).transpose(1, 2, 0)\n", "plt.imshow(x)" ] @@ -640,9 +641,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.10.12" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/requirements.txt b/requirements.txt index 1427d4f1ee..e03eff2c98 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ bitstring==3.1.7 clize==5.0.1 dataclasses-json==0.5.7 gspread==3.6.0 +importlib-resources==6.1.0 ipython==8.12.2 numpy==1.24.1 onnx==1.13.0 @@ -13,6 +14,7 @@ psutil==5.9.4 pyscaffold==4.4 scipy==1.10.1 setupext-janitor>=1.1.2 +setuptools==68.2.2 sigtools==4.0.1 toposort==1.7.0 vcdvcd==1.0.5 diff --git a/setup.py b/setup.py index 8fd781462c..7457bb9b38 100644 --- a/setup.py +++ b/setup.py @@ -35,17 +35,7 @@ PyScaffold helps you to put up the scaffold of your new Python project. Learn more under: https://pyscaffold.org/ """ -from pkg_resources import VersionConflict, require from setuptools import setup -import sys - -try: - require("setuptools>=38.3") -except VersionConflict: - print("Error: version of setuptools is too old (<38.3)!") - sys.exit(1) - - if __name__ == "__main__": setup(use_pyscaffold=True) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index c9db69400b..9a653fe404 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import json import multiprocessing as mp import os @@ -499,7 +497,7 @@ def apply(self, model): "[ipx::get_file_groups xilinx_simulationcheckpoint]" % block_name ) # add a rudimentary driver mdd to get correct ranges in xparameters.h later on - example_data_dir = pk.resource_filename("finn.qnn-data", "mdd-data/") + example_data_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/mdd-data" copytree(example_data_dir, vivado_stitch_proj_dir + "/data") ##### diff --git a/src/finn/transformation/fpgadataflow/make_pynq_driver.py b/src/finn/transformation/fpgadataflow/make_pynq_driver.py index 5a0e47c130..6d1fa290b4 100644 --- a/src/finn/transformation/fpgadataflow/make_pynq_driver.py +++ b/src/finn/transformation/fpgadataflow/make_pynq_driver.py @@ -26,9 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import pkg_resources as pk - import numpy as np import os import qonnx @@ -89,8 +86,8 @@ def apply(self, model): model.set_metadata_prop("pynq_driver_dir", pynq_driver_dir) # create the base FINN driver -- same for all accels - driver_base_template = pk.resource_filename( - "finn.qnn-data", "templates/driver/driver_base.py" + driver_base_template = ( + os.environ["FINN_ROOT"] + "/src/finn/qnn-data/templates/driver/driver_base.py" ) driver_base_py = pynq_driver_dir + "/driver_base.py" shutil.copy(driver_base_template, driver_base_py) @@ -268,7 +265,9 @@ def apply(self, model): # add validate.py to run full top-1 test (only for suitable networks) validate_py = pynq_driver_dir + "/validate.py" - validate_template = pk.resource_filename("finn.qnn-data", "templates/driver/validate.py") + validate_template = ( + os.environ["FINN_ROOT"] + "/src/finn/qnn-data/templates/driver/validate.py" + ) shutil.copy(validate_template, validate_py) # generate weight files for runtime-writable layers diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 73c8755bfb..318ba7045e 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import numpy as np import os import shutil @@ -94,7 +92,7 @@ def file_to_basename(x): # use custom version of axis infrastructure vh # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) - custom_vh = pk.resource_filename("finn.qnn-data", "verilog/custom_axis_infrastructure.vh") + custom_vh = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh" shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") for fn in all_verilog_srcs: if fn.endswith(".vh"): @@ -131,7 +129,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" build_dir = make_build_dir("verilator_fifosim_") - fifosim_cpp_fname = pk.resource_filename("finn.qnn-data", "cpp/verilator_fifosim.cpp") + fifosim_cpp_fname = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/cpp/verilator_fifosim.cpp" with open(fifosim_cpp_fname, "r") as f: fifosim_cpp_template = f.read() assert len(model.graph.input) == 1, "Only a single input stream is supported" diff --git a/src/finn/util/test.py b/src/finn/util/test.py index 1f36486048..5ff884f62d 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -137,8 +136,9 @@ def get_example_input(topology): onnx_tensor = onnx.load_tensor_from_string(raw_i) return nph.to_array(onnx_tensor) elif topology == "cnv": - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) return input_tensor else: raise Exception("Unknown topology, can't return example input") diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py index c8adafdce9..3950a5b6a7 100644 --- a/tests/brevitas/test_brevitas_cnv.py +++ b/tests/brevitas/test_brevitas_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import os import torch @@ -65,8 +64,9 @@ def test_brevitas_cnv_export_exec(wbits, abits): model = model.transform(RemoveStaticGraphInputs()) assert len(model.graph.input) == 1 assert len(model.graph.output) == 1 - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # run using FINN-based execution diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 7b73700909..12267aed47 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest import json @@ -83,7 +81,7 @@ def forward(self, x): @pytest.mark.end2end def test_end2end_cybsec_mlp_export(): - assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/") + assets_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/cybsec-mlp" # load up trained net in Brevitas input_size = 593 hidden1 = 64 diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index bef2e0ffa7..c91019ba99 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest import os @@ -84,9 +82,8 @@ def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) build_env = get_build_env(build_kind, target_clk_ns) - folding_config_file = pk.resource_filename( - "finn.qnn-data", "test_ext_weights/tfc-w1a1-extw.json" - ) + test_data = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/test_ext_weights" + folding_config_file = test_data + "/tfc-w1a1-extw.json" output_dir = make_build_dir("test_end2end_ext_weights_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index c4f3807aa0..c9cb4f0802 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import os import torch @@ -86,8 +85,9 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): model = model.transform(Streamline()) model = model.transform(InferDataLayouts()) # load one of the test vectors - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # generate expected value from streamlined net diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py index 86e4356ae4..8a91a49278 100644 --- a/tests/transformation/streamline/test_streamline_cnv.py +++ b/tests/transformation/streamline/test_streamline_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import torch from brevitas.export import export_qonnx @@ -78,8 +77,9 @@ def test_streamline_cnv(size, wbits, abits): model = model.transform(GiveReadableTensorNames()) model = model.transform(RemoveStaticGraphInputs()) # load one of the test vectors - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # run using FINN-based execution diff --git a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py index b95c26d25f..fd5033674b 100644 --- a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py +++ b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -59,8 +58,9 @@ def test_batchnorm_to_affine_cnv_w1a1(): model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) input_dict = {"0": input_tensor} diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 5bbcb1f9d4..939082b87b 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -27,10 +27,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -55,8 +54,9 @@ def get_brev_model_and_sample_inputs(model_name, wbits, abits): brev_model = get_test_model_trained(model_name, wbits, abits) elif model_name == "CNV": in_shape = (1, 3, 32, 32) - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 brev_model = get_test_model_trained(model_name, wbits, abits) elif model_name == "mobilenet": diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index 02136b31a2..3649d6709e 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest import numpy as np @@ -44,7 +42,7 @@ def test_end2end_build_dataflow_directory(): test_dir = make_build_dir("test_build_dataflow_directory_") target_dir = test_dir + "/build_dataflow" - example_data_dir = pk.resource_filename("finn.qnn-data", "build_dataflow/") + example_data_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/build_dataflow" copytree(example_data_dir, target_dir) build_dataflow_directory(target_dir) # check the generated files From aac2704561e21d857e2d9651c284bc324ab6dfbc Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 15:44:39 +0100 Subject: [PATCH 616/628] [Setup] Removing pyscaffold from requirements --- setup.cfg | 2 -- setup.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index fb070a436e..e69ce4f829 100644 --- a/setup.cfg +++ b/setup.cfg @@ -56,8 +56,6 @@ packages = find_namespace: include_package_data = True package_dir = =src -# DON'T CHANGE THE FOLLOWING LINE! IT WILL BE UPDATED BY PYSCAFFOLD! -setup_requires = pyscaffold>=3.2a0,<3.3a0 # The usage of test_requires is discouraged, see `Dependency Management` docs # tests_require = pytest; pytest-cov # Require a specific Python version, e.g. 
Python 2.7 or >= 3.4 diff --git a/setup.py b/setup.py index 7457bb9b38..9a06632af1 100644 --- a/setup.py +++ b/setup.py @@ -38,4 +38,4 @@ from setuptools import setup if __name__ == "__main__": - setup(use_pyscaffold=True) + setup() From d8a4048d731af9dbf424745d94122d96a2a675ed Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 15:51:47 +0100 Subject: [PATCH 617/628] [Setup] Removing direct calls of setup.py --- docker/quicktest.sh | 8 ++++---- run-docker.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index 466fcfb09d..a990246b49 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,16 +6,16 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL" + pytest -m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" - python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL" + pytest -k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "rtlsim" ]; then echo "Running rtlsim test suite with pytest-parallel" - python setup.py test --addopts "-k rtlsim --workers $PYTEST_PARALLEL" + pytest -k rtlsim --workers $PYTEST_PARALLEL elif [ $1 = "end2end" ]; then echo "Running end2end test suite with no parallelism" - python setup.py test --addopts "-k end2end" + pytest -k end2end elif [ $1 = "full" ]; then echo "Running full test suite, each step with appropriate parallelism" $0 main; diff --git a/run-docker.sh b/run-docker.sh index c24dcec724..cb23595365 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -102,7 +102,7 @@ DOCKER_INTERACTIVE="" if [ "$1" = "test" ]; then gecho "Running test suite (all tests)" - DOCKER_CMD="python setup.py test" + DOCKER_CMD="pytest" elif [ "$1" = "quicktest" ]; then gecho "Running test suite (non-Vivado, non-slow tests)" DOCKER_CMD="quicktest.sh" From 13313c293eb00d2eb88edcaf68386fdf52152aeb Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 17:21:45 +0100 Subject: [PATCH 618/628] [CI/docs] hotfix for Jenkins and docs to not use setup.py --- docker/jenkins/Jenkinsfile | 10 +++++----- docs/finn/developers.rst | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2954877c2a..6be8845ab7 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -14,31 +14,31 @@ node { parallel firstBranch: { stage('Brevitas export') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") + sh("bash run-docker.sh pytest -mbrevitas_export") } } }, secondBranch: { stage('Streamlining transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mstreamline") + sh("bash run-docker.sh pytest -mstreamline") } } }, thirdBranch: { stage('Util functions') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mutil") + sh("bash run-docker.sh pytest -mutil") } } }, fourthBranch: { stage('General transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mtransform") + sh("bash run-docker.sh pytest -mtransform") } } }, 
fifthBranch: { stage('Fpgadataflow transformations and simulations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") + sh("bash run-docker.sh pytest -mfpgadataflow") } } } diff --git a/docs/finn/developers.rst b/docs/finn/developers.rst index f9252f764c..1e1c48e2b5 100644 --- a/docs/finn/developers.rst +++ b/docs/finn/developers.rst @@ -159,8 +159,8 @@ from the FINN root directory as follows: If you want to run tests in parallel (e.g. to take advantage of a multi-core CPU) you can use: -* pytest-parallel for any rtlsim tests, e.g. `python setup.py test --addopts "-k rtlsim --workers auto"` -* pytest-xdist for anything else, make sure to add `--dist=loadfile` if you have tests in the same file that have dependencies on each other e.g. `python setup.py test --addopts "-k mytest -n auto --dist=loadfile"` +* pytest-parallel for any rtlsim tests, e.g. `pytest -k rtlsim --workers auto` +* pytest-xdist for anything else, make sure to add `--dist=loadfile` if you have tests in the same file that have dependencies on each other e.g. `pytest -k mytest -n auto --dist=loadfile` Finally, the full test suite with appropriate parallelization can be run inside the container by: From b2e04731c238056f49ff820f4ac26bfc99d4a609 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Tue, 10 Oct 2023 16:20:22 +0100 Subject: [PATCH 619/628] [CI] exclude bnn_pynq from quicktest --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index a990246b49..3684e3a0d4 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,7 +6,7 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - pytest -m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL + pytest -m 'not (vivado or slow or vitis or board or notebooks or bnn_pynq)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" pytest -k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL From 98e94f72e9c2b53dc63d30aa4d3bea466a178c19 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Tue, 10 Oct 2023 17:49:26 +0100 Subject: [PATCH 620/628] [CI] fixing linting, lingering line left behind after resolving merge conflict --- tests/end2end/test_ext_weights.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index 25fb5e91e9..2f5f136d3a 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -80,7 +80,6 @@ def test_end2end_ext_weights_download(): def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) test_data = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/test_ext_weights" folding_config_file = test_data + "/tfc-w1a1-extw.json" output_dir = make_build_dir("test_end2end_ext_weights_build") From 5eb535a7c86a84d7195b8059765ea33f075c761b Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 11 Oct 2023 14:34:10 +0100 Subject: [PATCH 621/628] [NB] make all output paths absolute in advanced notebook --- .../4_advanced_builder_settings.ipynb | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb 
b/notebooks/advanced/4_advanced_builder_settings.ipynb
index 4af48ac233..e748d85a1c 100644
--- a/notebooks/advanced/4_advanced_builder_settings.ipynb
+++ b/notebooks/advanced/4_advanced_builder_settings.ipynb
@@ -143,7 +143,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "estimates_output_dir = \"output_estimates_only\"\n",
+    "estimates_output_dir = build_dir + \"/output_estimates_only\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(estimates_output_dir):\n",
@@ -427,7 +427,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_pre_proc\"\n",
+    "output_dir = build_dir + \"/output_pre_proc\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -535,7 +535,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_pre_and_post_proc\"\n",
+    "output_dir = build_dir + \"/output_pre_and_post_proc\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -782,7 +782,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_all_lutram\"\n",
+    "output_dir = build_dir + \"/output_all_lutram\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -886,7 +886,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_all_bram\"\n",
+    "output_dir = build_dir + \"/output_all_bram\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -1090,7 +1090,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_with_verification\"\n",
+    "output_dir = build_dir + \"/output_with_verification\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -1191,7 +1191,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "verify_initial_python = np.load(\"output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n",
+    "verify_initial_python = np.load(build_dir + \"/output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n",
     "print(\"The output of the verification step after the step_tidy_up is: \" + str(verify_initial_python))"
    ]
   },
@@ -1352,7 +1352,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_standalone_thresholds\"\n",
+    "output_dir = build_dir + \"/output_standalone_thresholds\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -1449,7 +1449,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_rtl_swg\"\n",
+    "output_dir = build_dir + \"/output_rtl_swg\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",
@@ -1556,7 +1556,7 @@
     "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
     "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
     "\n",
-    "output_dir = \"output_bitfile\"\n",
+    "output_dir = build_dir + \"/output_bitfile\"\n",
     "\n",
     "#Delete previous run results if exist\n",
     "if os.path.exists(output_dir):\n",

From 99e9b7366a5ab0238319c314ef81b1bb9f2d988a Mon Sep 17 00:00:00 2001
From: johnnoel
Date: Thu, 12 Oct 2023 11:19:59 +0100
Subject: [PATCH 622/628] [CI] remove reference to unused hack script

---
 docker/jenkins/Jenkinsfile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index 2d7ea5e918..47f855f433 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -797,14 +797,12 @@ void createTestScript(String board, String marker, String testResultsFilename) {
     sh """echo "#!/bin/bash
 . /opt/xilinx/xrt/setup.sh
 . ${CONDA_ENV_ACTIVATE}
-python hack_driver_script.py
 python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh
 """
   else
     sh """echo "#!/bin/bash
 . /etc/profile.d/pynq_venv.sh
 . /etc/profile.d/xrt_setup.sh
-python hack_driver_script.py
 python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh
 """

From 07e3b39efc2dd9e82c3ffb239d1de934f564b84d Mon Sep 17 00:00:00 2001
From: johnnoel
Date: Mon, 16 Oct 2023 16:12:45 +0100
Subject: [PATCH 623/628] [Tests] fix end2end bnn_pynq cnv transpose

---
 tests/end2end/test_end2end_bnn_pynq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index d98c06f7d0..b296dad827 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -330,7 +330,7 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board):
     # The FC models contain a Reshape node, which FINN uses, so we therefore have to
     # reshape the input tensor data to match the reshaping in the model
     if topology == "cnv":
-        input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1)
+        input_tensor_npy = input_tensor_npy.transpose(0, 2, 3, 1)
     else:
         input_shape = input_tensor_npy.shape
         new_input_shape = (input_shape[0], np.prod(input_shape[1:]))

From 1bf20d50e156ea251f378caee128689346cab2b3 Mon Sep 17 00:00:00 2001
From: johnnoel
Date: Wed, 18 Oct 2023 14:38:38 +0100
Subject: [PATCH 624/628] [Tests] Disabling end2end_bnn_pynq U250 tests failing due to routing on 2022.2 tools

---
 tests/end2end/test_end2end_bnn_pynq.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index b296dad827..8ac2493d1e 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -728,6 +728,19 @@ def test_build(self, topology, wbits, abits, board):
         build_data = get_build_env(board, target_clk_ns)
         if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ):
             pytest.skip("VITIS_PATH not set")
+        if board == "U250" and wbits == 1 and abits == 1:
+            if topology == "lfc" or topology == "tfc":
+                pytest.xfail(
+                    "bnn_w"
+                    + str(wbits)
+                    + "_a"
+                    + str(abits)
+                    + "_"
+                    + topology
+                    + "_"
+                    + board
+                    + " test_build currently disabled, see CR-1171874"
+                )
         prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + board)
         model = load_test_checkpoint_or_skip(prev_chkpt_name)
         model = model.transform(build_data["build_fxn"])

From 4f51ed68a1dcd7dd44007c2cf0c6af05b21cd327 Mon Sep 17 00:00:00 2001
From: johnnoel
Date: Mon, 23 Oct 2023 11:00:49 +0100
Subject: [PATCH 625/628] [CI] Use virtual env instead of Conda for Jenkins testing

---
 docker/jenkins/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index 47f855f433..b19cbbccf1 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -796,7 +796,7 @@ void createTestScript(String board, String marker, String testResultsFilename) {
   if(board == "U250")
     sh """echo "#!/bin/bash
 . /opt/xilinx/xrt/setup.sh
-. ${CONDA_ENV_ACTIVATE}
+. ${VENV_ACTIVATE}
 python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh
 """
   else

From 1093276f33651324eb8e2ed0779a1e1915b7158f Mon Sep 17 00:00:00 2001
From: auphelia
Date: Tue, 24 Oct 2023 18:12:52 +0100
Subject: [PATCH 626/628] [Jenkinsfile] Update Jenkinsfile_CI with pytest command

---
 docker/jenkins/Jenkinsfile_CI | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI
index 2954877c2a..f04ea0a49d 100644
--- a/docker/jenkins/Jenkinsfile_CI
+++ b/docker/jenkins/Jenkinsfile_CI
@@ -14,31 +14,31 @@ node {
     parallel firstBranch: {
         stage('Brevitas export') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export")
+                sh("bash run-docker.sh pytest --addopts -mbrevitas_export")
             }
         }
     }, secondBranch: {
         stage('Streamlining transformations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh python setup.py test --addopts -mstreamline")
+                sh("bash run-docker.sh pytest --addopts -mstreamline")
             }
         }
     }, thirdBranch: {
         stage('Util functions') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh python setup.py test --addopts -mutil")
+                sh("bash run-docker.sh pytest --addopts -mutil")
             }
         }
     }, fourthBranch: {
         stage('General transformations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh python setup.py test --addopts -mtransform")
+                sh("bash run-docker.sh pytest --addopts -mtransform")
            }
         }
     }, fifthBranch: {
         stage('Fpgadataflow transformations and simulations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow")
+                sh("bash run-docker.sh pytest --addopts -mfpgadataflow")
             }
         }
     }

From 6e86f9c2a1acc465e803d9cd9ecc2ce80c184e70 Mon Sep 17 00:00:00 2001
From: auphelia
Date: Tue, 24 Oct 2023 18:15:58 +0100
Subject: [PATCH 627/628] [Jenkins] Delete obsolete option in pytest command

---
 docker/jenkins/Jenkinsfile_CI | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI
index f04ea0a49d..6be8845ab7 100644
--- a/docker/jenkins/Jenkinsfile_CI
+++ b/docker/jenkins/Jenkinsfile_CI
@@ -14,31 +14,31 @@ node {
     parallel firstBranch: {
         stage('Brevitas export') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh pytest --addopts -mbrevitas_export")
+                sh("bash run-docker.sh pytest -mbrevitas_export")
             }
         }
     }, secondBranch: {
         stage('Streamlining transformations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh pytest --addopts -mstreamline")
+                sh("bash run-docker.sh pytest -mstreamline")
             }
         }
     }, thirdBranch: {
         stage('Util functions') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh pytest --addopts -mutil")
+                sh("bash run-docker.sh pytest -mutil")
             }
         }
     }, fourthBranch: {
         stage('General transformations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh pytest --addopts -mtransform")
+                sh("bash run-docker.sh pytest -mtransform")
             }
         }
     }, fifthBranch: {
         stage('Fpgadataflow transformations and simulations') {
             dir("${env.WORKSPACE}") {
-                sh("bash run-docker.sh pytest --addopts -mfpgadataflow")
+                sh("bash run-docker.sh pytest -mfpgadataflow")
             }
         }
     }

From bd7f3b3b1a8fd29482caf3f027414ce8d1b2a619 Mon Sep 17 00:00:00 2001
From: johnnoel
Date: Wed, 25 Oct 2023 16:03:18 +0100
Subject: [PATCH 628/628] [Jenkins] Add node label to Jenkinsfile_CI to target specific machine if available

---
 docker/jenkins/Jenkinsfile_CI | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI
index 6be8845ab7..5e7d5f1475 100644
--- a/docker/jenkins/Jenkinsfile_CI
+++ b/docker/jenkins/Jenkinsfile_CI
@@ -1,4 +1,4 @@
-node {
+node('finn-build || built-in') {
     def app
     stage('Clone repository') {
         /* Let's make sure we have the repository cloned to our workspace */