pr feedback

Frostebite
2025-12-06 23:00:43 +00:00
parent c61c9f8373
commit a99defafbc
18 changed files with 190 additions and 152 deletions

dist/index.js generated vendored

@@ -3378,7 +3378,7 @@ class TaskService {
Bucket: cloud_runner_1.default.buildParameters.awsStackName,
};
const results = await s3.send(new client_s3_1.ListObjectsV2Command(listRequest));
-return (results.Contents || []).map((obj) => ({ Key: obj.Key || '' }));
+return (results.Contents || []).map((object) => ({ Key: object.Key || '' }));
}
}
exports.TaskService = TaskService;
@@ -3825,7 +3825,7 @@ const command_hook_service_1 = __nccwpck_require__(96159);
const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
class KubernetesJobSpecFactory {
static getJobSpec(command, image, mountdir, workingDirectory, environment, secrets, buildGuid, buildParameters, secretName, pvcName, jobName, k8s, containerName, ip = '') {
-const endpointEnvNames = new Set([
+const endpointEnvironmentNames = new Set([
'AWS_S3_ENDPOINT',
'AWS_ENDPOINT',
'AWS_CLOUD_FORMATION_ENDPOINT',
@@ -3838,7 +3838,7 @@ class KubernetesJobSpecFactory {
const adjustedEnvironment = environment.map((x) => {
let value = x.value;
if (typeof value === 'string' &&
-endpointEnvNames.has(x.name) &&
+endpointEnvironmentNames.has(x.name) &&
(value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))) {
// Replace localhost with host.k3d.internal so pods can access host services
// This simulates accessing external services (like real AWS S3)
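
The replacement line itself falls outside this hunk; a minimal sketch of what the adjusted mapping evidently does, with a hypothetical rewriteForK3d helper (the host.k3d.internal alias comes from the comment above):

const rewriteForK3d = (name, value, endpointEnvironmentNames) => {
  // Only rewrite known endpoint variables that point at the local host.
  if (
    typeof value === 'string' &&
    endpointEnvironmentNames.has(name) &&
    (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))
  ) {
    // host.k3d.internal resolves to the host machine from inside k3d pods.
    return value.replace('localhost', 'host.k3d.internal').replace('127.0.0.1', 'host.k3d.internal');
  }
  return value;
};
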
@@ -3979,17 +3979,16 @@ class KubernetesPods {
type: x.type || '',
}));
const errorDetails = [];
-errorDetails.push(`Pod: ${podName}`);
-errorDetails.push(`Phase: ${phase}`);
+errorDetails.push(`Pod: ${podName}`, `Phase: ${phase}`);
if (conditions.length > 0) {
errorDetails.push(`Conditions: ${JSON.stringify(conditions.map((c) => ({ type: c.type, status: c.status, reason: c.reason, message: c.message })), undefined, 2)}`);
}
let containerExitCode;
let containerSucceeded = false;
if (containerStatuses.length > 0) {
-containerStatuses.forEach((cs, idx) => {
+for (const [index, cs] of containerStatuses.entries()) {
if (cs.state?.waiting) {
-errorDetails.push(`Container ${idx} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
+errorDetails.push(`Container ${index} (${cs.name}) waiting: ${cs.state.waiting.reason} - ${cs.state.waiting.message || ''}`);
}
if (cs.state?.terminated) {
const exitCode = cs.state.terminated.exitCode;
@@ -3997,9 +3996,9 @@ class KubernetesPods {
if (exitCode === 0) {
containerSucceeded = true;
}
-errorDetails.push(`Container ${idx} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${exitCode})`);
+errorDetails.push(`Container ${index} (${cs.name}) terminated: ${cs.state.terminated.reason} - ${cs.state.terminated.message || ''} (exit code: ${exitCode})`);
}
-});
+}
}
if (events.length > 0) {
errorDetails.push(`Recent events: ${JSON.stringify(events.slice(-5), undefined, 2)}`);
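
The forEach((cs, idx) => ...) to for..of conversion above relies on Array.prototype.entries(), which yields [index, element] pairs; a standalone illustration:

const statuses = [{ name: 'main' }, { name: 'sidecar' }];
for (const [index, cs] of statuses.entries()) {
  // Same index/element pairing forEach provided, but an await here would
  // suspend the loop, which forEach callbacks cannot do.
  console.log(`Container ${index} (${cs.name})`);
}
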
@@ -4027,7 +4026,7 @@ class KubernetesPods {
if (wasKilled && hasPreStopHookFailure && (containerExitCode === undefined || !containerSucceeded)) {
cloud_runner_logger_1.default.log(`Pod ${podName} was killed with PreStopHook failure. Waiting for container status to determine if container succeeded...`);
// Wait a bit for container status to become available (up to 30 seconds)
-for (let i = 0; i < 6; i++) {
+for (let index = 0; index < 6; index++) {
await new Promise((resolve) => setTimeout(resolve, 5000));
try {
const updatedPod = (await kubeClient.listNamespacedPod(namespace)).body.items.find((x) => podName === x.metadata?.name);
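
A minimal sketch of the bounded wait above: poll every 5 seconds, up to 6 attempts (roughly 30 seconds), until a status check succeeds. checkStatus is a hypothetical stand-in for the listNamespacedPod lookup:

async function waitForStatus(checkStatus, attempts = 6, delayMs = 5000) {
  for (let index = 0; index < attempts; index++) {
    // Sleep between polls; setTimeout wrapped in a promise keeps this async.
    await new Promise((resolve) => setTimeout(resolve, delayMs));
    if (await checkStatus()) return true; // hypothetical predicate
  }
  return false; // status never became available
}
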
@@ -5306,10 +5305,10 @@ class Caching {
// Parse disk usage percentage (e.g., "72G 72G 196M 100%")
const usageMatch = diskCheckOutput.match(/(\d+)%/);
if (usageMatch) {
-diskUsagePercent = parseInt(usageMatch[1], 10);
+diskUsagePercent = Number.parseInt(usageMatch[1], 10);
}
}
-catch (error) {
+catch {
// Ignore disk check errors
}
// If disk usage is high (>90%), proactively clean up old cache files
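
The parse above grabs the first "NN%" token from the df output; a self-contained version with an illustrative sample line:

const diskCheckOutput = '/dev/sda1 72G 72G 196M 100% /'; // sample df line
const usageMatch = diskCheckOutput.match(/(\d+)%/);
// Number.parseInt is the same function as the global, just scoped explicitly.
const diskUsagePercent = usageMatch ? Number.parseInt(usageMatch[1], 10) : undefined;
console.log(diskUsagePercent); // 100
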
@@ -5335,7 +5334,7 @@ class Caching {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`rm -f ${cacheArtifactName}.tar${compressionSuffix} 2>/dev/null || true`);
}
-catch (error) {
+catch {
// Ignore cleanup errors
}
try {
@@ -5529,6 +5528,10 @@ class RemoteClient {
const logFile = cli_1.Cli.options['logFile'];
process.stdin.resume();
process.stdin.setEncoding('utf8');
+// For K8s, ensure stdout is unbuffered so messages are captured immediately
+if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
+process.stdout.setDefaultEncoding('utf8');
+}
let lingeringLine = '';
process.stdin.on('data', (chunk) => {
const lines = chunk.toString().split('\n');
@@ -5538,7 +5541,8 @@ class RemoteClient {
// For K8s, write to both log file and stdout so kubectl logs can capture it
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
node_fs_1.default.appendFileSync(logFile, element);
-// Write to stdout so kubectl logs can capture it
+// Write to stdout so kubectl logs can capture it - ensure newline is included
+// Stdout flushes automatically on newline, so no explicit flush needed
process.stdout.write(`${element}\n`);
cloud_runner_logger_1.default.log(element);
}
@@ -5551,6 +5555,7 @@ class RemoteClient {
if (cloud_runner_options_1.default.providerStrategy === 'k8s') {
if (lingeringLine) {
node_fs_1.default.appendFileSync(logFile, lingeringLine);
+// Stdout flushes automatically on newline
process.stdout.write(`${lingeringLine}\n`);
}
cloud_runner_logger_1.default.log(lingeringLine);
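
The two hunks above implement one line-relay pattern: split each stdin chunk on newlines, carry the trailing partial line into the next chunk, and flush whatever remains at end of stream. A minimal standalone sketch:

let lingeringLine = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', (chunk) => {
  const lines = chunk.toString().split('\n');
  lines[0] = lingeringLine + lines[0]; // finish the previous partial line
  lingeringLine = lines.pop() || ''; // hold back the new partial line
  for (const line of lines) process.stdout.write(`${line}\n`);
});
process.stdin.on('end', () => {
  if (lingeringLine) process.stdout.write(`${lingeringLine}\n`);
});
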
@@ -5569,7 +5574,13 @@ class RemoteClient {
try {
const libraryFolderHost = cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute;
if (node_fs_1.default.existsSync(libraryFolderHost)) {
-const libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost).catch(() => []);
+let libraryEntries = [];
+try {
+libraryEntries = await node_fs_1.default.promises.readdir(libraryFolderHost);
+}
+catch {
+libraryEntries = [];
+}
if (libraryEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/Library`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.libraryFolderAbsolute), `lib-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
@@ -5588,7 +5599,13 @@ class RemoteClient {
try {
const buildFolderHost = cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute;
if (node_fs_1.default.existsSync(buildFolderHost)) {
-const buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost).catch(() => []);
+let buildEntries = [];
+try {
+buildEntries = await node_fs_1.default.promises.readdir(buildFolderHost);
+}
+catch {
+buildEntries = [];
+}
if (buildEntries.length > 0) {
await caching_1.Caching.PushToCache(cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(`${cloud_runner_folders_1.CloudRunnerFolders.cacheFolderForCacheKeyFull}/build`), cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.projectBuildFolderAbsolute), `build-${cloud_runner_1.default.buildParameters.buildGuid}`);
}
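
Both hunks above replace a .catch(() => []) chain with an explicit try/catch; a hypothetical helper expressing the shared intent (any readdir failure, such as a missing directory, is treated as an empty listing):

const fs = require('node:fs');

async function readdirOrEmpty(folder) {
  try {
    return await fs.promises.readdir(folder);
  } catch {
    return []; // missing directory or permission error: treat as empty
  }
}
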
@@ -5619,6 +5636,7 @@ class RemoteClient {
// For K8s, kubectl logs reads from stdout/stderr, so we must write to stdout
const successMessage = `Activation successful`;
// Write to stdout first so kubectl logs can capture it
+// Stdout flushes automatically on newline
process.stdout.write(`${successMessage}\n`);
// Also log via CloudRunnerLogger for GitHub Actions
cloud_runner_logger_1.default.log(successMessage);
@@ -5706,22 +5724,22 @@ class RemoteClient {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetSha}`);
}
-catch (_error) {
+catch {
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git fetch origin ${targetSha} || true`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetSha}`);
}
-catch (_error2) {
+catch (error) {
remote_client_logger_1.RemoteClientLogger.logWarning(`Falling back to branch checkout; SHA not found: ${targetSha}`);
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${targetBranch}`);
}
-catch (_error3) {
+catch {
if ((targetBranch || '').startsWith('pull/')) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout origin/${targetBranch}`);
}
else {
-throw _error2;
+throw error;
}
}
}
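
The renames above preserve a four-step fallback; restated as a sketch with a hypothetical run shell helper: check out the SHA, then fetch and retry the SHA, then the branch, and for pull/* refs the remote-tracking branch, rethrowing the fetch-stage error otherwise:

async function checkoutWithFallbacks(run, targetSha, targetBranch) {
  try {
    await run(`git checkout ${targetSha}`);
  } catch {
    try {
      await run(`git fetch origin ${targetSha} || true`);
      await run(`git checkout ${targetSha}`);
    } catch (error) {
      try {
        await run(`git checkout ${targetBranch}`);
      } catch {
        if ((targetBranch || '').startsWith('pull/')) {
          await run(`git checkout origin/${targetBranch}`);
        } else {
          throw error; // surface the SHA-checkout failure, not the branch one
        }
      }
    }
  }
}
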
@@ -5771,7 +5789,7 @@ class RemoteClient {
remote_client_logger_1.RemoteClientLogger.log(`Pulled LFS files without explicit token configuration`);
return;
}
-catch (_error) {
+catch {
/* no-op: best-effort git lfs pull without tokens may fail */
void 0;
}
@@ -5835,17 +5853,17 @@ class RemoteClient {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git reset --hard "${sha}"`);
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${sha}`);
}
-catch (_error) {
+catch {
remote_client_logger_1.RemoteClientLogger.logWarning(`Retained workspace: SHA not found, falling back to branch ${branch}`);
try {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout ${branch}`);
}
-catch (_error2) {
+catch (error) {
if ((branch || '').startsWith('pull/')) {
await cloud_runner_system_1.CloudRunnerSystem.Run(`git checkout origin/${branch}`);
}
else {
-throw _error2;
+throw error;
}
}
}
@@ -6278,11 +6296,11 @@ class SharedWorkspaceLocking {
}
catch {
const region = input_1.default.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION || 'us-east-1';
-const createParams = { Bucket: bucket };
+const createParameters = { Bucket: bucket };
if (region && region !== 'us-east-1') {
-createParams.CreateBucketConfiguration = { LocationConstraint: region };
+createParameters.CreateBucketConfiguration = { LocationConstraint: region };
}
-await SharedWorkspaceLocking.s3.send(new client_s3_1.CreateBucketCommand(createParams));
+await SharedWorkspaceLocking.s3.send(new client_s3_1.CreateBucketCommand(createParameters));
}
}
static async listObjects(prefix, bucket = SharedWorkspaceLocking.bucket) {
@@ -6453,12 +6471,9 @@ class SharedWorkspaceLocking {
const timestamp = Date.now();
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${timestamp}_${workspace}_workspace`;
await SharedWorkspaceLocking.ensureBucketExists();
-if (SharedWorkspaceLocking.useRclone) {
-await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
-}
-else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }));
-}
+await (SharedWorkspaceLocking.useRclone
+? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
+: SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) })));
const workspaces = await SharedWorkspaceLocking.GetAllWorkspaces(buildParametersContext);
cloud_runner_logger_1.default.log(`All workspaces ${workspaces}`);
if (!(await SharedWorkspaceLocking.IsWorkspaceBelowMax(workspace, buildParametersContext))) {
@@ -6473,23 +6488,17 @@ class SharedWorkspaceLocking {
const ending = existingWorkspace ? workspace : `${workspace}_workspace`;
const key = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${Date.now()}_${runId}_${ending}_lock`;
await SharedWorkspaceLocking.ensureBucketExists();
-if (SharedWorkspaceLocking.useRclone) {
-await SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`);
-}
-else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) }));
-}
+await (SharedWorkspaceLocking.useRclone
+? SharedWorkspaceLocking.rclone(`touch ${SharedWorkspaceLocking.bucket}/${key}`)
+: SharedWorkspaceLocking.s3.send(new client_s3_1.PutObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key, Body: new Uint8Array(0) })));
const hasLock = await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext);
if (hasLock) {
cloud_runner_1.default.lockedWorkspace = workspace;
}
else {
-if (SharedWorkspaceLocking.useRclone) {
-await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`);
-}
-else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key }));
-}
+await (SharedWorkspaceLocking.useRclone
+? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${key}`)
+: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: key })));
}
return hasLock;
}
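
The rewrites above all share one shape: a single awaited ternary choosing between the rclone CLI and the S3 client. A hypothetical helper for the "create empty marker object" case (rclone and s3 are injected; PutObjectCommand comes from @aws-sdk/client-s3, which the bundle already uses):

const { PutObjectCommand } = require('@aws-sdk/client-s3');

async function putEmptyObject({ useRclone, rclone, s3, bucket, key }) {
  // Both branches resolve to a promise, so one await covers either backend.
  await (useRclone
    ? rclone(`touch ${bucket}/${key}`)
    : s3.send(new PutObjectCommand({ Bucket: bucket, Key: key, Body: new Uint8Array(0) })));
}
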
@@ -6501,15 +6510,12 @@ class SharedWorkspaceLocking {
cloud_runner_logger_1.default.log(`Deleting lock ${workspace}/${file}`);
cloud_runner_logger_1.default.log(`rm ${SharedWorkspaceLocking.workspaceRoot}${buildParametersContext.cacheKey}/${file}`);
if (file) {
-if (SharedWorkspaceLocking.useRclone) {
-await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`);
-}
-else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
+await (SharedWorkspaceLocking.useRclone
+? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`)
+: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({
Bucket: SharedWorkspaceLocking.bucket,
Key: `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/${file}`,
-}));
-}
+})));
}
return !(await SharedWorkspaceLocking.HasWorkspaceLock(workspace, runId, buildParametersContext));
}
@@ -6517,12 +6523,9 @@ class SharedWorkspaceLocking {
const prefix = `${SharedWorkspaceLocking.workspacePrefix}${buildParametersContext.cacheKey}/`;
const files = await SharedWorkspaceLocking.listObjects(prefix);
for (const file of files.filter((x) => x.includes(`_${workspace}_`))) {
-if (SharedWorkspaceLocking.useRclone) {
-await SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`);
-}
-else {
-await SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` }));
-}
+await (SharedWorkspaceLocking.useRclone
+? SharedWorkspaceLocking.rclone(`delete ${SharedWorkspaceLocking.bucket}/${prefix}${file}`)
+: SharedWorkspaceLocking.s3.send(new client_s3_1.DeleteObjectCommand({ Bucket: SharedWorkspaceLocking.bucket, Key: `${prefix}${file}` })));
}
}
static async ReadLines(command) {
@@ -6610,7 +6613,7 @@ class TaskParameterSerializer {
return TaskParameterSerializer.serializeFromType(cloud_runner_options_1.default);
}
static serializeAwsEnvironmentVariables() {
-const awsEnvVars = [
+const awsEnvironmentVariables = [
'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY',
'AWS_DEFAULT_REGION',
@@ -6622,7 +6625,7 @@ class TaskParameterSerializer {
'AWS_KINESIS_ENDPOINT',
'AWS_CLOUD_WATCH_LOGS_ENDPOINT',
];
-return awsEnvVars
+return awsEnvironmentVariables
.filter((key) => process.env[key] !== undefined)
.map((key) => ({
name: key,
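
The rename above does not change behavior: the method keeps only the AWS variables actually set in the environment and maps them to { name, value } pairs. A runnable sketch with a shortened list:

const awsEnvironmentVariables = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_DEFAULT_REGION'];
const serialized = awsEnvironmentVariables
  .filter((key) => process.env[key] !== undefined) // skip unset variables
  .map((key) => ({ name: key, value: process.env[key] }));
console.log(serialized);
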
@@ -7398,7 +7401,7 @@ class BuildAutomationWorkflow {
BRANCH="${cloud_runner_1.default.buildParameters.cloudRunnerBranch}"
REPO="${cloud_runner_folders_1.CloudRunnerFolders.unityBuilderRepoUrl}"
DEST="${cloud_runner_folders_1.CloudRunnerFolders.ToLinuxFolder(cloud_runner_folders_1.CloudRunnerFolders.builderPathAbsolute)}"
if [ -n "$(git ls-remote --heads \"$REPO\" \"$BRANCH\" 2>/dev/null)" ]; then
if [ -n "$(git ls-remote --heads "$REPO" "$BRANCH" 2>/dev/null)" ]; then
git clone -q -b "$BRANCH" "$REPO" "$DEST"
else
echo "Remote branch $BRANCH not found in $REPO; falling back to a known branch"

dist/index.js.map generated vendored

File diff suppressed because one or more lines are too long