Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cherry-pick changes from the 6.5.0 branch #20

Open
wants to merge 6 commits into
base: databricks-bazel-7.4.1
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions src/main/cpp/blaze.cc
Original file line number Diff line number Diff line change
Expand Up @@ -506,6 +506,13 @@ static vector<string> GetServerExeArgs(const blaze_util::Path &jvm_path,

result.push_back("--product_name=" + startup_options.product_name);

#ifdef __linux__
if (!startup_options.cgroup_parent.empty()) {
result.push_back("--experimental_cgroup_parent=" +
startup_options.cgroup_parent);
}
#endif

startup_options.AddExtraOptions(&result);

// The option sources are transmitted in the following format:
Expand Down
6 changes: 6 additions & 0 deletions src/main/cpp/blaze_util_posix.cc
Original file line number Diff line number Diff line change
Expand Up @@ -409,6 +409,12 @@ int ExecuteDaemon(const blaze_util::Path& exe,
if (daemon_output_append) {
daemonize_args.push_back("-a");
}
#ifdef __linux__
if (!options.cgroup_parent.empty()) {
daemonize_args.push_back("-c");
daemonize_args.push_back(options.cgroup_parent);
}
#endif
daemonize_args.push_back("--");
daemonize_args.push_back(exe.AsNativePath());
std::copy(args_vector.begin(), args_vector.end(),
Expand Down
10 changes: 10 additions & 0 deletions src/main/cpp/startup_options.cc
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,9 @@ StartupOptions::StartupOptions(const string &product_name,
macos_qos_class(QOS_CLASS_UNSPECIFIED),
#endif
unlimit_coredumps(false),
#ifdef __linux__
cgroup_parent(),
#endif
windows_enable_symlinks(false) {
// To ensure predictable behavior from PathFragmentConverter in Java,
// output_root must be an absolute path. In particular, if we were to return a
Expand Down Expand Up @@ -169,6 +172,7 @@ StartupOptions::StartupOptions(const string &product_name,
RegisterUnaryStartupFlag("output_user_root");
RegisterUnaryStartupFlag("server_jvm_out");
RegisterUnaryStartupFlag("failure_detail_out");
RegisterUnaryStartupFlag("experimental_cgroup_parent");
}

StartupOptions::~StartupOptions() {}
Expand Down Expand Up @@ -390,6 +394,12 @@ blaze_exit_code::ExitCode StartupOptions::ProcessArg(
"multiple times.";
return blaze_exit_code::BAD_ARGV;
}
} else if ((value = GetUnaryOption(
arg, next_arg, "--experimental_cgroup_parent")) != nullptr) {
#ifdef __linux__
cgroup_parent = value;
option_sources["cgroup_parent"] = rcfile;
#endif
} else {
bool extra_argument_processed;
blaze_exit_code::ExitCode process_extra_arg_exit_code = ProcessArgExtra(
Expand Down
4 changes: 4 additions & 0 deletions src/main/cpp/startup_options.h
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,10 @@ class StartupOptions {
// Whether to raise the soft coredump limit to the hard one or not.
bool unlimit_coredumps;

#ifdef __linux__
std::string cgroup_parent;
#endif

// Whether to create symbolic links on Windows for files. Requires
// developer mode to be enabled.
bool windows_enable_symlinks;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,9 @@ public enum WorkerProtocolFormat {
public static final String DIFFERENTIATE_WORKSPACE_CACHE =
"internal-differentiate-workspace-cache";

/** Disables cgroups for a spawn */
public static final String NO_SUPPORTS_CGROUPS = "no-supports-cgroups";

/**
* Indicates that the action is compatible with path mapping, e.g., removing the configuration
* segment from the paths of all inputs and outputs.
Expand Down
1 change: 1 addition & 0 deletions src/main/java/com/google/devtools/build/lib/analysis/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -1691,6 +1691,7 @@ java_library(
":config/run_under",
":config/starlark_defined_config_transition",
":platform_options",
":test/test_configuration",
"//src/main/java/com/google/devtools/build/lib/actions:action_environment",
"//src/main/java/com/google/devtools/build/lib/actions:artifacts",
"//src/main/java/com/google/devtools/build/lib/actions:build_configuration_event",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
import com.google.devtools.build.lib.skyframe.serialization.autocodec.AutoCodec;
import com.google.devtools.build.lib.starlarkbuildapi.BuildConfigurationApi;
import com.google.devtools.build.lib.util.OS;
import com.google.devtools.build.lib.util.Pair;
import com.google.devtools.build.lib.util.RegexFilter;
import com.google.devtools.build.lib.vfs.PathFragment;
import com.google.devtools.build.skyframe.SkyValue;
Expand Down Expand Up @@ -988,4 +989,16 @@ public static BuildEvent buildEvent(@Nullable BuildConfigurationValue configurat
public ImmutableSet<String> getReservedActionMnemonics() {
return reservedActionMnemonics;
}

/**
 * Returns the default resource requests configured for tests of the given size
 * (via --default_test_resources), keyed by resource name. Returns an empty map
 * when TestOptions is not part of this configuration's build options.
 */
public Map<String, Double> getTestResources(com.google.devtools.build.lib.packages.TestSize size) {
  if (!buildOptions.contains(com.google.devtools.build.lib.analysis.test.TestConfiguration.TestOptions.class)) {
    return ImmutableMap.of();
  }
  com.google.devtools.build.lib.analysis.test.TestConfiguration.TestOptions testOptions =
      buildOptions.get(com.google.devtools.build.lib.analysis.test.TestConfiguration.TestOptions.class);
  return testOptions.testResources.stream()
      .collect(
          ImmutableMap.toImmutableMap(
              Pair::getFirst, resourceSpec -> resourceSpec.getSecond().get(size)));
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import com.google.devtools.build.lib.analysis.test.CoverageConfiguration.CoverageOptions;
import com.google.devtools.build.lib.analysis.test.TestShardingStrategy.ShardingStrategyConverter;
import com.google.devtools.build.lib.cmdline.Label;
import com.google.devtools.build.lib.packages.DatabricksEngflowTestPool;
import com.google.devtools.build.lib.packages.TestSize;
import com.google.devtools.build.lib.packages.TestTimeout;
import com.google.devtools.build.lib.util.Pair;
Expand Down Expand Up @@ -86,6 +87,19 @@ public static class TestOptions extends FragmentOptions {
+ "-1 tells blaze to use its default timeouts for that category.")
public Map<TestTimeout, Duration> testTimeout;

@Option(
    name = "databricks_engflow_test_pools",
    defaultValue = "null",
    converter = DatabricksEngflowTestPool.DatabricksEngflowTestPoolConverter.class,
    documentationCategory = OptionDocumentationCategory.TESTING,
    effectTags = {OptionEffectTag.EXECUTION},
    help =
        "DATABRICKS ONLY: Override the default Engflow Remote Execution worker pool for test sizes. "
            + "This is done by setting the Pool execution property of TestRunner actions. This has LESS precedence "
            + "than targets and platforms that set the Pool property. "
            + "If a single string is specified it will override all sizes. If 4 comma-separated strings "
            + "are specified, they will override the pools for small, medium, large and enormous (in that order).")
// Maps each test size to its overriding worker pool; null when the flag is unset.
public Map<TestSize, DatabricksEngflowTestPool> databricksEngflowTestPools;

@Option(
name = "default_test_resources",
defaultValue = "null",
Expand Down Expand Up @@ -451,6 +465,10 @@ public boolean checkShardingSupport() {
return options.checkShardingSupport;
}

/**
 * Returns the per-test-size Engflow worker pool overrides configured via
 * --databricks_engflow_test_pools, or null when the flag is unset.
 */
public Map<TestSize, DatabricksEngflowTestPool> getDatabricksEngflowTestPools() {
  return options.databricksEngflowTestPools;
}

/**
* Option converter that han handle two styles of value for "--runs_per_test":
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@
import com.google.devtools.build.lib.collect.nestedset.NestedSet;
import com.google.devtools.build.lib.collect.nestedset.NestedSetBuilder;
import com.google.devtools.build.lib.events.Event;
import com.google.devtools.build.lib.packages.DatabricksEngflowTestPool;
import com.google.devtools.build.lib.packages.TestSize;
import com.google.devtools.build.lib.server.FailureDetails.Execution.Code;
import com.google.devtools.build.lib.server.FailureDetails.FailureDetail;
import com.google.devtools.build.lib.server.FailureDetails.TestAction;
Expand Down Expand Up @@ -1066,6 +1068,27 @@ public NestedSet<Artifact> getPossibleInputsForTesting() {
return getInputs();
}

// Execution property key Engflow uses to route an action to a worker pool.
private static final String ENGFLOW_POOL_EXEC_PROPERTY = "Pool";

/**
 * Databricks override: make sure to send actions to the correct Bazel Remote execution pools
 * based on test sizes.
 */
@Override
public ImmutableMap<String, String> getExecProperties() {
  ImmutableMap<String, String> baseProperties = super.getExecProperties();
  Map<TestSize, DatabricksEngflowTestPool> poolOverrides =
      testConfiguration.getDatabricksEngflowTestPools();
  // An explicitly configured Pool (set by a target or platform) takes precedence, and the
  // override flag may be unset entirely — return the base properties untouched in either case.
  if (baseProperties.containsKey(ENGFLOW_POOL_EXEC_PROPERTY) || poolOverrides == null) {
    return baseProperties;
  }
  ImmutableMap.Builder<String, String> merged = ImmutableMap.builder();
  merged.putAll(baseProperties);
  merged.put(ENGFLOW_POOL_EXEC_PROPERTY, poolOverrides.get(testProperties.getSize()).getPool());
  return merged.build();
}

/** The same set of paths as the parent test action, resolved against a given exec root. */
public final class ResolvedPaths {
private final Path execRoot;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,21 @@ private static ResourceSet getResourceSetFromSize(TestSize size) {
Map<String, String> executionInfo = Maps.newLinkedHashMap();
executionInfo.putAll(TargetUtils.getExecutionInfo(rule));

Map<String, Double> requestedResources;
try {
requestedResources = parseTags(ruleContext.getLabel(), executionInfo);
} catch (UserExecException e) {
requestedResources = new HashMap<>();
}

Map<String, Double> testResources = ruleContext.getConfiguration().getTestResources(size);
for (Map.Entry<String, Double> request: testResources.entrySet()) {
if (requestedResources.containsKey(request.getKey())) {
continue;
}
executionInfo.put(String.format("resources:%s:%f", request.getKey(), request.getValue()), "");
}

boolean incompatibleExclusiveTestSandboxed = false;

testConfiguration = ruleContext.getFragment(TestConfiguration.class);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,18 @@ public class BuildRequestOptions extends OptionsBase {
+ " under the threshold.")
public int maxResultTargets;

@Option(
name = "hide_aspect_results",
converter = Converters.CommaSeparatedOptionListConverter.class,
defaultValue = "",
documentationCategory = OptionDocumentationCategory.UNDOCUMENTED,
effectTags = {OptionEffectTag.TERMINAL_OUTPUT},
help =
"Comma-separated list of aspect names to not display in results (see --show_result). "
+ "Useful for keeping aspects added by wrappers which are typically not interesting "
+ "to end users out of console output.")
public List<String> hideAspectResults;

@Option(
name = "symlink_prefix",
defaultValue = "null",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,10 +102,15 @@ private boolean outputTargets(
// Splits aspects based on whether they are validation aspects.
final ImmutableSet<AspectKey> aspectsToPrint;
final ImmutableList<AspectKey> validationAspects;
final ImmutableSet<String> aspectsToIgnore =
ImmutableSet.copyOf(request.getBuildOptions().hideAspectResults);
final ImmutableSet<AspectKey> filteredAspects = aspects.keySet().stream()
.filter(k -> !aspectsToIgnore.contains(k.getAspectClass().getName()))
.collect(ImmutableSet.toImmutableSet());
if (request.useValidationAspect()) {
var aspectsToPrintBuilder = ImmutableSet.<AspectKey>builder();
var validationAspectsBuilder = ImmutableList.<AspectKey>builder();
for (AspectKey key : aspects.keySet()) {
for (AspectKey key : filteredAspects) {
if (Objects.equals(
key.getAspectClass().getName(), AspectCollection.VALIDATION_ASPECT_NAME)) {
validationAspectsBuilder.add(key);
Expand All @@ -116,7 +121,7 @@ private boolean outputTargets(
aspectsToPrint = aspectsToPrintBuilder.build();
validationAspects = validationAspectsBuilder.build();
} else {
aspectsToPrint = aspects.keySet();
aspectsToPrint = filteredAspects;
validationAspects = ImmutableList.of();
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
package com.google.devtools.build.lib.packages;

import com.google.common.base.Splitter;
import com.google.devtools.common.options.Converter;
import com.google.devtools.common.options.OptionsParsingException;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * DATABRICKS ONLY: Represents an Engflow Remote Execution worker pool assignment for tests.
 * The option converter borrows heavily from {@link TestTimeout}.
 */
public class DatabricksEngflowTestPool {
  // Name of the Engflow worker pool (value of the "Pool" execution property).
  private final String pool;

  public DatabricksEngflowTestPool(String pool) {
    this.pool = pool;
  }

  /** Returns the Engflow worker pool name. */
  public String getPool() {
    return pool;
  }

  /**
   * Converts the value of --databricks_engflow_test_pools into a map from test size to pool.
   * Accepts either a single pool name (applied to all four sizes) or exactly four
   * comma-separated pool names for small, medium, large and enormous, in that order.
   */
  public static class DatabricksEngflowTestPoolConverter
      extends Converter.Contextless<Map<TestSize, DatabricksEngflowTestPool>> {

    @Override
    public String getTypeDescription() {
      return "a single string or comma-separated list of 4 strings";
    }

    @Override
    public Map<TestSize, DatabricksEngflowTestPool> convert(String input)
        throws OptionsParsingException {
      List<DatabricksEngflowTestPool> values = new ArrayList<>();
      for (String token : Splitter.on(",").limit(6).split(input)) {
        // Inherited from TestTimeout's converter: an empty token is only kept once more than
        // one value has been seen, so a trailing comma after a single value is tolerated.
        if (!token.isEmpty() || values.size() > 1) {
          values.add(new DatabricksEngflowTestPool(token));
        }
      }
      EnumMap<TestSize, DatabricksEngflowTestPool> pools = new EnumMap<>(TestSize.class);
      if (values.size() == 1) {
        // A single entry overrides every size.
        pools.put(TestSize.SMALL, values.get(0));
        pools.put(TestSize.MEDIUM, values.get(0));
        pools.put(TestSize.LARGE, values.get(0));
        pools.put(TestSize.ENORMOUS, values.get(0));
      } else if (values.size() == 4) {
        // Four entries map positionally: small, medium, large, enormous.
        pools.put(TestSize.SMALL, values.get(0));
        pools.put(TestSize.MEDIUM, values.get(1));
        pools.put(TestSize.LARGE, values.get(2));
        pools.put(TestSize.ENORMOUS, values.get(3));
      } else {
        throw new OptionsParsingException(
            "Invalid number of comma-separated entries: expected 1 or 4, got " + values.size());
      }
      return pools;
    }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    DatabricksEngflowTestPool that = (DatabricksEngflowTestPool) o;
    return Objects.equals(pool, that.pool);
  }

  @Override
  public int hashCode() {
    return Objects.hash(pool);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
// limitations under the License.
package com.google.devtools.build.lib.profiler;

import static java.util.Map.entry;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;

Expand Down Expand Up @@ -714,6 +715,14 @@ public void logEventAtTime(long atTimeNanos, ProfilerTask type, String descripti
logTask(atTimeNanos, 0, type, description);
}

/**
 * Logs arbitrary trace data by handing it to the active trace writer. Does nothing when no
 * writer is installed (i.e. profiling is not currently active).
 */
public void logData(TraceData data) {
  JsonTraceFileWriter activeWriter = writerRef.get();
  if (activeWriter == null) {
    return;
  }
  activeWriter.enqueue(data);
}

/** Used to log "events" - tasks with zero duration. */
@VisibleForTesting
void logEvent(ProfilerTask type, String description) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -499,4 +499,30 @@ public String getTypeDescription() {
"When --noautodetect_server_javabase is passed, Bazel does not fall back to the local "
+ "JDK for running the bazel server and instead exits.")
public boolean autodetectServerJavabase;

/**
 * Note: This option is only used by the C++ client, never by the Java server. It is included here
 * to make sure that the option is documented in the help output, which is auto-generated by Java
 * code. This also helps ensure that the server is killed if the value of this option changes.
 */
@Option(
    name = "experimental_cgroup_parent",
    defaultValue = "null",
    documentationCategory = OptionDocumentationCategory.BAZEL_CLIENT_OPTIONS,
    effectTags = {
      OptionEffectTag.BAZEL_MONITORING,
      OptionEffectTag.EXECUTION,
    },
    valueHelp = "<path>",
    help =
        "The cgroup where to start the bazel server as an absolute path. The server "
            + "process will be started in the specified cgroup for each supported controller. "
            + "For example, if the value of this flag is /build/bazel and the cpu and memory "
            + "controllers are mounted respectively on /sys/fs/cgroup/cpu and "
            + "/sys/fs/cgroup/memory, the server will be started in the cgroups "
            + "/sys/fs/cgroup/cpu/build/bazel and /sys/fs/cgroup/memory/build/bazel. "
            + "It is not an error if the specified cgroup is not writable for one or more "
            + "of the controllers. This option does not have any effect on "
            + "platforms that do not support cgroups.")
public String cgroupParent;
}
Loading
Loading