Skip to content

Commit

Permalink
Adding pre-commit hooks
Browse files Browse the repository at this point in the history
  • Loading branch information
svkeerthy committed Jan 11, 2024
1 parent 8003488 commit 3fb3d3f
Show file tree
Hide file tree
Showing 65 changed files with 525 additions and 450 deletions.
13 changes: 13 additions & 0 deletions .github/workflows/formatting.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Runs the pre-commit hook suite (see .pre-commit-config.yaml) on PRs to main,
# plus on-demand via manual dispatch.
name: pre-commit checks
on:
  # NOTE(review): `branches` is not a valid filter for workflow_dispatch
  # (it is only meaningful for push/pull_request), so it was removed here.
  workflow_dispatch:
  pull_request:
    branches: [ main ]
jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      # v2 of checkout/setup-python runs on the deprecated Node 12 runtime;
      # v4/v5 are the current supported releases with the same inputs.
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - uses: pre-commit/[email protected]
10 changes: 5 additions & 5 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ on:
branches: [ main ]



# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
Expand All @@ -21,19 +21,19 @@ jobs:
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2

- name: Doxygen Action
uses: mattnotmitt/[email protected]
with:
# Path to Doxyfile
doxyfile-path: "./docs/Doxyfile" # default is ./Doxyfile
# Working directory
working-directory: "." # default is .

- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
# Default Doxyfile build documentation to html directory.
# Default Doxyfile build documentation to html directory.
# Change the directory if changes in Doxyfile
publish_dir: ./html
publish_dir: ./html
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
.vscode
build*
install*
install*
25 changes: 25 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# pre-commit hook configuration: generic hygiene checks, Python (black),
# C/C++ (clang-format), and shell (shfmt) formatters.
repos:
# Generic file hygiene hooks maintained by the pre-commit project.
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
# Oracle files in the test suite are byte-exact fixtures; do not rewrite them.
exclude: test-suite/oracle
# Python formatter, pinned so all contributors produce identical output.
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
# clang-format wrapper; -i rewrites files in place.
- repo: https://github.com/pocc/pre-commit-hooks
rev: v1.1.1
hooks:
- id: clang-format
args: [-i]
# Notebook formatting via black — currently disabled.
# - repo: https://github.com/dfm/black_nbconvert
# rev: v0.4.0
# hooks:
# - id: black_nbconvert
# Shell script formatter (shfmt packaged for pre-commit).
- repo: https://github.com/maxwinterstein/shfmt-py
rev: v3.4.3.1
hooks:
- id: shfmt
16 changes: 8 additions & 8 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ message("LLVM_MLBRIDGE: ${LLVM_MLBRIDGE}")

if(NOT LLVM_MLBRIDGE)
find_package(LLVM 10.0.0 REQUIRED CONFIG)
include_directories(${LLVM_INCLUDE_DIRS})
include_directories(${LLVM_INCLUDE_DIRS})
link_directories(${LLVM_LIBRARY_DIR})
add_compile_definitions(C_LIBRARY)
endif()
Expand All @@ -25,9 +25,9 @@ if (DEFINED LLVM_HAVE_TF_AOT OR LLVM_HAVE_TFLITE)
set(LLVM_INLINER_MODEL_PATH_DEFAULT "models/inliner-Oz")

set(LLVM_INLINER_MODEL_CURRENT_URL "<UNSPECIFIED>" CACHE STRING "URL to download the LLVM inliner model")

if (DEFINED LLVM_HAVE_TF_AOT)

tf_find_and_compile(
${LLVM_INLINER_MODEL_PATH}
${LLVM_INLINER_MODEL_CURRENT_URL}
Expand All @@ -41,23 +41,23 @@ if (DEFINED LLVM_HAVE_TF_AOT OR LLVM_HAVE_TFLITE)
)
endif()
endif()

add_subdirectory(MLModelRunner)
add_subdirectory(SerDes)

if(LLVM_MLBRIDGE)
include(AddLLVM)
include(HandleLLVMOptions)
include(LLVMDistributionSupport)

add_llvm_library(LLVMMLBridge
tools.cpp
${GeneratedMLSources}

ADDITIONAL_HEADER_DIRS
${CMAKE_CURRENT_SOURCE_DIR}/include
${TENSORFLOW_AOT_PATH}/include

LINK_LIBS
ModelRunnerLib
$<TARGET_OBJECTS:SerDesLib>
Expand All @@ -68,7 +68,7 @@ endif(LLVM_MLBRIDGE)

llvm_map_components_to_libnames(llvm_libs support core irreader analysis TransformUtils)

add_custom_target(protobuf_version ALL
add_custom_target(protobuf_version ALL
COMMAND ${CMAKE_COMMAND} -E echo "protoc path = $<TARGET_FILE:protobuf::protoc> Using Protobuf ${Protobuf_VERSION}")

add_library(MLCompilerBridgeC STATIC $<TARGET_OBJECTS:ModelRunnerCWrapper>)
Expand Down
4 changes: 1 addition & 3 deletions CompilerInterface/BaseCompilerInterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
class BaseCompilerInterface(ABC):
## Initializes correct SerDes object
def __init__(self, data_format=None):
self.serdes_obj = SerDes(data_format)
self.serdes_obj = SerDes(data_format)

## Places data for next request into a buffer after serialization.
# @param Unserialized data for next query to compiler
Expand All @@ -18,5 +18,3 @@ def populate_buffer(self, data):
## Sends query to compiler and returns deserialized result.
def evaluate(self):
pass


51 changes: 31 additions & 20 deletions CompilerInterface/GrpcCompilerInterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,22 +14,32 @@ class GrpcCompilerInterface(BaseCompilerInterface):
## Initializes GrpcCompilerInterface object.
# @param mode Can be 'client' or 'server'.
# @param stub_class gRPC stub class used in 'client' mode
# @param hostip
# @param hostport
# @param hostip
# @param hostport
# @param add_server_method used in 'server' mode
# @param grpc_service_obj used in 'server' mode
def __init__(self, mode, stub_class=None, hostip='127.0.0.1', hostport=50051, add_server_method=None,grpc_service_obj=None ):
super().__init__('protobuf')
def __init__(
self,
mode,
stub_class=None,
hostip="127.0.0.1",
hostport=50051,
add_server_method=None,
grpc_service_obj=None,
):
super().__init__("protobuf")
self.mode = mode
self.host=hostip
self.host = hostip
self.server_port = hostport

if self.mode == 'client':
self.channel = grpc.insecure_channel('{}:{}'.format(self.host,self.server_port))
if self.mode == "client":
self.channel = grpc.insecure_channel(
"{}:{}".format(self.host, self.server_port)
)
print("Setting stub", stub_class)
self.stub = stub_class(self.channel)
elif self.mode == 'server':

elif self.mode == "server":
self.grpc_service_obj = grpc_service_obj
self.add_server_method = add_server_method
self.start_server()
Expand All @@ -38,23 +48,24 @@ def __del__(self):
pass

## Sends query to compiler and returns deserialized result.
def evaluate(self, mode = None):
def evaluate(self, mode=None):
out = self.serdes_obj.getOutputBuffer()
return self.stub.queryCompiler(out)


## Starts gRPC server
def start_server(self):
if self.mode == 'server':
server=grpc.server(futures.ThreadPoolExecutor(max_workers=20),options = [
('grpc.max_send_message_length', 200*1024*1024), #50MB
('grpc.max_receive_message_length', 200*1024*1024) #50MB
])
if self.mode == "server":
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=20),
options=[
("grpc.max_send_message_length", 200 * 1024 * 1024), # 50MB
("grpc.max_receive_message_length", 200 * 1024 * 1024), # 50MB
],
)

self.add_server_method(self.grpc_service_obj,server)
server.add_insecure_port('{}:{}'.format(self.host,self.server_port))
self.add_server_method(self.grpc_service_obj, server)
server.add_insecure_port("{}:{}".format(self.host, self.server_port))

server.start()
print("Server Running")
print("Server Running")
server.wait_for_termination()

11 changes: 5 additions & 6 deletions CompilerInterface/PipeCompilerInterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
## This class implements methods for communication with compiler using pipes.
class PipeCompilerInterface(BaseCompilerInterface):
## Initializes PipeCompilerInterface object.
# @param data_format Data format for serialization
# @param data_format Data format for serialization
# @param pipe_name Name for pipe file
def __init__(self, data_format=None, pipe_name=None):
super().__init__(data_format)
Expand All @@ -23,29 +23,28 @@ def __del__(self):
self.remove_pipes()

## Sends query to compiler and returns deserialized result.
def evaluate(self, mode = None):
def evaluate(self, mode=None):
out = self.serdes_obj.getOutputBuffer()
if out is not None:
self.tc.write(out)
self.tc.flush()

if mode == 'exit':
if mode == "exit":
return None

result = self.serdes_obj.deserializeData(self.fc)

return result


## Creates pipe files for communication.
## Creates pipe files for communication.
def init_pipes(self):
self.to_compiler = self.pipe_name + ".in"
self.from_compiler = self.pipe_name + ".out"
if os.path.exists(self.to_compiler):
os.remove(self.to_compiler)
if os.path.exists(self.from_compiler):
os.remove(self.from_compiler)

os.mkfifo(self.to_compiler, 0o666)
os.mkfifo(self.from_compiler, 0o666)

Expand Down
12 changes: 7 additions & 5 deletions CompilerInterface/SerDes.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def __init__(self, data_format):
self.read_stream_iter = None

self.serMap = {
"json": self.serializeJson,
"json": self.serializeJson,
"bytes": self.serializeBytes,
"protobuf": self.serializeProtobuf,
}
Expand All @@ -39,11 +39,13 @@ def deserializeJson(self, datastream):
## Deserializes and returns bitstream data
def deserializeBytes(self, datastream):
if self.read_stream_iter is None:
self.read_stream_iter = log_reader.read_stream2(datastream) # try to make it indep
self.read_stream_iter = log_reader.read_stream2(
datastream
) # try to make it indep
hdr = datastream.read(8)
context, observation_id, features, score = next(self.read_stream_iter)
return features

# Not implemented
def deserializeProtobuf(self, datastream):
raise NotImplementedError
Expand All @@ -52,7 +54,7 @@ def deserializeProtobuf(self, datastream):
def serializeData(self, data):
self.serMap[self.data_format](data)

## Serializes data to JSON
## Serializes data to JSON
def serializeJson(self, data):
msg = json.dumps({"out": data}).encode("utf-8")
hdr = len(msg).to_bytes(8, "little")
Expand All @@ -72,7 +74,7 @@ def serializeProtobuf(self, data):
self.buffer = data

## Returns value in buffer and empties it
# @return Data from output buffer
# @return Data from output buffer
def getOutputBuffer(self):
out = self.buffer
self.buffer = None
Expand Down
2 changes: 1 addition & 1 deletion MLModelRunner/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
install(TARGETS tf_xla_runtime EXPORT LLVMExports
ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX} COMPONENT tf_xla_runtime)
set_property(GLOBAL APPEND PROPERTY LLVM_EXPORTS tf_xla_runtime)

# Once we add more modules, we should handle this more automatically.
if (DEFINED LLVM_OVERRIDE_MODEL_HEADER_INLINERSIZEMODEL)
set(LLVM_INLINER_MODEL_PATH "none")
Expand Down
2 changes: 1 addition & 1 deletion MLModelRunner/ONNXModelRunner/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -50,5 +50,5 @@ add_library(ONNXModelRunnerLib OBJECT onnx.cpp
)
endif(LLVM_MLBRIDGE)

target_link_libraries(ONNXModelRunnerLib PUBLIC onnxruntime)
target_link_libraries(ONNXModelRunnerLib PUBLIC onnxruntime)
target_include_directories(ONNXModelRunnerLib PUBLIC "${ONNXRUNTIME_ROOTDIR}/include" "${ONNXRUNTIME_ROOTDIR}/include/onnxruntime/core/session" ${TENSORFLOW_AOT_PATH}/include)
9 changes: 4 additions & 5 deletions MLModelRunner/ONNXModelRunner/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,14 @@ Integration of a trained model happens in two steps.

## Directory structure

The direcory and its subdirctories contain source code for OONX model integrations as well as base classes of vaious components used by and RL algorithm.
The directory and its subdirectories contain source code for ONNX model integrations as well as base classes of various components used by an RL algorithm.

.
├── onnx.cpp # Definition of ONNXModel class
├── agent.cpp # RL Agent class which will compute an action by querying the model
├── Include
├── Include
│ ├── agent.h # Declare RL agent class
│ ├── driver.h # A interface class to query RL baaed model
│ ├── driver.h # An interface class to query an RL-based model
│ ├── environment.h # Declare a base Environment class which can be extended further
│ ├── onnx.h # Declaration of ONNXModel class
│ └── utils.h # Utility definitions used by the Agent class
Expand Down Expand Up @@ -89,11 +89,10 @@ struct Hello : public FunctionPass, Environment {

bool runOnFunction(Function &F) override {
...
// Creates instance of DriverService with InferenceEngine as parent class
// Creates instance of DriverService with InferenceEngine as parent class
InferenceEngine* inference_driver = new DriverService(Environment* env);
inference_driver->getPredictions(PassData passData, OptInfo &predictions);
...
}

```
2 changes: 1 addition & 1 deletion MLModelRunner/ONNXModelRunner/agent.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,4 +37,4 @@ unsigned Agent::computeAction(Observation &input) {
LLVM_DEBUG(llvm::errs() << "\nmax value and index are " << *max << " "
<< argmaxVal << "\n");
return argmaxVal;
}
}
1 change: 0 additions & 1 deletion MLModelRunner/ONNXModelRunner/onnx.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
#include <assert.h>
#include <iostream>
#include <numeric>
#include <iostream>

ONNXModel::ONNXModel(const char *model_path) : model_path(model_path) {
env = new Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
Expand Down
2 changes: 1 addition & 1 deletion MLModelRunner/Utils/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@ if(LLVM_MLBRIDGE)
target_include_directories(ModelRunnerUtils PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../include)
endif(LLVM_MLBRIDGE)

add_library(ModelRunnerCUtils OBJECT MLConfig.cpp)
add_library(ModelRunnerCUtils OBJECT MLConfig.cpp)
Loading

0 comments on commit 3fb3d3f

Please sign in to comment.