migrate facto utils to OSS #7686

Merged
1 commit merged on Jan 17, 2025
91 changes: 91 additions & 0 deletions examples/cadence/operators/facto_util.py
@@ -0,0 +1,91 @@
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.

# pyre-strict

import copy
from typing import List, OrderedDict, Tuple

import torch
from inputgen.argtuple.gen import ArgumentTupleGenerator
from inputgen.specs.model import ConstraintProducer as cp
from inputgen.utils.random_manager import random_manager
from inputgen.variable.type import ScalarDtype
from specdb.db import SpecDictDB

# Seed the random manager so every run generates identical cases, keeping
# failures reproducible (e.g., when bisecting).
random_manager.seed(1729)


def apply_tensor_constraints(op_name: str, tensor_constraints: list[object]) -> None:
    # Restrict input dtypes per operator; value/rank/size bounds are added below.
match op_name:
case (
"sigmoid.default"
| "_softmax.default"
| "rsqrt.default"
| "exp.default"
| "mul.Tensor"
| "div.Tensor"
):
tensor_constraints.append(
cp.Dtype.In(lambda deps: [torch.float]),
)
case (
"add.Tensor"
| "sub.Tensor"
| "add.Scalar"
| "sub.Scalar"
| "mul.Scalar"
| "div.Scalar"
):
tensor_constraints.append(
cp.Dtype.In(lambda deps: [torch.float, torch.int]),
)
case _:
tensor_constraints.append(
cp.Dtype.In(lambda deps: [torch.float, torch.int]),
)
tensor_constraints.extend(
[
cp.Value.Ge(lambda deps, dtype, struct: -(2**8)),
cp.Value.Le(lambda deps, dtype, struct: 2**8),
cp.Rank.Ge(lambda deps: 1),
cp.Rank.Le(lambda deps: 2**2),
cp.Size.Ge(lambda deps, r, d: 1),
cp.Size.Le(lambda deps, r, d: 2**2),
]
)


def facto_testcase_gen(op_name: str) -> List[Tuple[List[str], OrderedDict[str, str]]]:
    # Constrain the operator's FACTO spec, then generate (posargs, kwargs)
    # tuples from it.
    spec = SpecDictDB[op_name]
spec = SpecDictDB[op_name]

for index, in_spec in enumerate(copy.deepcopy(spec.inspec)):
if in_spec.type.is_scalar():
if in_spec.name != "alpha":
spec.inspec[index].constraints.extend(
[
cp.Dtype.In(lambda deps: [ScalarDtype.float, ScalarDtype.int]),
cp.Value.Ge(lambda deps, dtype: -(2**8)),
cp.Value.Le(lambda deps, dtype: 2**2),
cp.Size.Ge(lambda deps, r, d: 1),
cp.Size.Le(lambda deps, r, d: 2**2),
]
)
else:
spec.inspec[index].constraints.extend(
[
cp.Value.Gt(lambda deps, dtype: 0),
cp.Value.Le(lambda deps, dtype: 2),
]
)
elif in_spec.type.is_tensor():
tensor_constraints = []
# common tensor constraints
            apply_tensor_constraints(op_name, tensor_constraints)
spec.inspec[index].constraints.extend(tensor_constraints)

return [
(posargs, inkwargs)
for posargs, inkwargs, _ in ArgumentTupleGenerator(spec).gen()
]
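
For context, a minimal sketch of how a test could consume the generated cases. The import path, the choice of add.Tensor, and the smoke-check assertion are illustrative assumptions, not part of this diff:

    import torch

    from facto_util import facto_testcase_gen  # assumed import path

    def run_facto_cases(op_name: str = "add.Tensor") -> None:
        op = torch.ops.aten.add.Tensor  # operator under test (assumption)
        for posargs, inkwargs in facto_testcase_gen(op_name):
            # Each generated case unpacks directly into the operator call.
            result = op(*posargs, **inkwargs)
            assert result is not None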
14 changes: 14 additions & 0 deletions examples/cadence/operators/targets.bzl
@@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.

load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest")
load("@fbcode_macros//build_defs:python_library.bzl", "python_library")

TESTS_LIST = [
"add_op",
@@ -16,6 +17,19 @@ def define_common_targets():
for op in TESTS_LIST:
_define_test_target(op)

python_library(
name = "facto_util",
srcs = [
"facto_util.py",
],
typing = True,
deps = [
"fbcode//caffe2:torch",
"fbcode//pytorch/facto:inputgen",
"fbcode//pytorch/facto:specdb",
],
)


def _define_test_target(test_name):
file_name = "test_{}".format(test_name)
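
For illustration, a hypothetical consumer (not part of this diff) showing how a FACTO-driven test target could depend on the new library:

    python_unittest(
        name = "test_facto_add_op",  # hypothetical target name
        srcs = ["test_facto_add_op.py"],
        typing = True,
        deps = [
            ":facto_util",
            "fbcode//caffe2:torch",
        ],
    )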