Mirror of https://github.com/game-ci/unity-builder.git (synced 2026-02-03 15:39:07 +08:00)
fix: k3d/LocalStack networking - use shared Docker network and container name
.github/workflows/cloud-runner-integrity.yml (vendored, 41 lines changed)
@@ -34,6 +34,8 @@ jobs:
       K3D_NODE_CONTAINERS: 'k3d-unity-builder-agent-0'
       AWS_FORCE_PROVIDER: aws
       RESOURCE_TRACKING: 'true'
+      # LocalStack container name on shared Docker network (for K8s pods to access)
+      LOCALSTACK_HOST: localstack-main
     steps:
       # ==========================================
       # SETUP SECTION
@@ -67,19 +69,27 @@ jobs:
           docker system prune -af --volumes || true
           docker image prune -af || true
           docker volume prune -f || true
           # Create a shared network for k3d and LocalStack
           docker network rm cloud-runner-net 2>/dev/null || true
           docker network create cloud-runner-net || true
           echo "Disk usage after cleanup:"
           df -h
       - name: Start LocalStack (S3) as managed Docker container
         run: |
           echo "Starting LocalStack as managed Docker container..."
           # Start LocalStack with specific name and resource limits
           # Note: Using default DATA_DIR to avoid tmpfs mount conflicts
           # Get host IP for container networking (host.docker.internal equivalent)
           HOST_IP=$(ip route | grep default | awk '{print $3}')
           echo "Host gateway IP: $HOST_IP"
           # Start LocalStack with specific name on the shared network
           # Use host networking alias so k3d pods can reach it
           docker run -d \
             --name localstack-main \
-            --network bridge \
+            --network cloud-runner-net \
             --add-host=host.docker.internal:host-gateway \
             -p 4566:4566 \
             -e SERVICES=s3,cloudformation,ecs,kinesis,cloudwatch,logs \
             -e DEBUG=0 \
             -e HOSTNAME_EXTERNAL=localstack-main \
             localstack/localstack:latest || true
           # Wait for LocalStack to be ready - check both health endpoint and S3 service
           echo "Waiting for LocalStack to be ready..."
@@ -197,15 +207,22 @@ jobs:
       - name: Create k3s cluster (k3d)
         timeout-minutes: 5
         run: |
           # Create cluster - host.k3d.internal will allow pods to access host services (LocalStack)
           # Get LocalStack container IP on the shared network
           LOCALSTACK_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' localstack-main 2>/dev/null || echo "")
           echo "LocalStack container IP: $LOCALSTACK_IP"
           # Create cluster on the same network as LocalStack
           # This allows pods to access LocalStack directly by container name or IP
           k3d cluster create unity-builder \
             --agents 1 \
             --network cloud-runner-net \
             --wait
           kubectl config current-context | cat
           # Store LocalStack IP for later use in tests
           echo "LOCALSTACK_IP=$LOCALSTACK_IP" >> $GITHUB_ENV
       - name: Verify cluster readiness and LocalStack connectivity
         timeout-minutes: 2
         run: |
           for i in {1..60}; do
             if kubectl get nodes 2>/dev/null | grep -q Ready; then
               echo "Cluster is ready"
               break
@@ -217,13 +234,19 @@ jobs:
           kubectl get storageclass
           # Show node resources
           kubectl describe nodes | grep -A 5 "Allocated resources" || true
+          # Get LocalStack IP for connectivity test
+          LOCALSTACK_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' localstack-main 2>/dev/null || echo "")
+          echo "LocalStack container IP: $LOCALSTACK_IP"
           # Test LocalStack connectivity from k3d cluster
           echo "Testing LocalStack connectivity from k3d cluster..."
-          echo "From host (should work):"
+          echo "From host via localhost (should work):"
           curl -s --max-time 5 http://localhost:4566/_localstack/health | head -5 || echo "Host connectivity failed"
-          echo "From k3d cluster via host.k3d.internal:"
-          kubectl run test-localstack --image=curlimages/curl --rm -i --restart=Never --timeout=10s -- \
-            curl -v --max-time 5 http://host.k3d.internal:4566/_localstack/health 2>&1 | head -20 || \
+          echo "From host via container name (should work on shared network):"
+          docker run --rm --network cloud-runner-net curlimages/curl \
+            curl -s --max-time 5 http://localstack-main:4566/_localstack/health 2>&1 | head -5 || echo "Container network test failed"
+          echo "From k3d cluster via LocalStack container IP ($LOCALSTACK_IP):"
+          kubectl run test-localstack --image=curlimages/curl --rm -i --restart=Never --timeout=30s -- \
+            curl -v --max-time 10 http://${LOCALSTACK_IP}:4566/_localstack/health 2>&1 | head -30 || \
           echo "Cluster connectivity test - if this fails, LocalStack may not be accessible from k3d"
       - name: Clean up K8s test resources before tests
         run: |
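The "Wait for LocalStack to be ready" step above polls the health endpoint with curl. For reference only, here is a minimal TypeScript sketch of the same readiness check; it is not part of this commit, the waitForLocalStack name, 60-attempt budget, and 2-second delay are illustrative, and global fetch assumes Node 18+.

```typescript
// Sketch (not from the commit): poll LocalStack's health endpoint until S3 is usable,
// mirroring the workflow's curl loop against http://localhost:4566/_localstack/health.
async function waitForLocalStack(baseUrl = 'http://localhost:4566', attempts = 60): Promise<void> {
  for (let i = 0; i < attempts; i++) {
    try {
      const response = await fetch(`${baseUrl}/_localstack/health`);
      if (response.ok) {
        const health = (await response.json()) as { services?: Record<string, string> };
        const s3 = health.services?.s3;
        // LocalStack reports a service as "available" or "running" once it is usable.
        if (s3 === 'available' || s3 === 'running') {
          return;
        }
      }
    } catch {
      // Endpoint not reachable yet; fall through to the retry delay.
    }
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
  throw new Error(`LocalStack at ${baseUrl} did not become ready in time`);
}

// Usage (host side, where port 4566 is published): await waitForLocalStack();
```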
dist/index.js (generated, vendored, 48 lines changed)
@@ -2136,6 +2136,36 @@ const cloud_runner_options_1 = __importDefault(__nccwpck_require__(66965));
 const github_1 = __importDefault(__nccwpck_require__(83654));
 const aws_client_factory_1 = __nccwpck_require__(30161);
 class AWSTaskRunner {
+    /**
+     * Transform localhost endpoints to host.docker.internal for container environments.
+     * When LocalStack is used, ECS tasks run in Docker containers that need to reach
+     * LocalStack on the host machine via host.docker.internal.
+     */
+    static transformEndpointsForContainer(environment) {
+        const endpointEnvironmentNames = new Set([
+            'AWS_S3_ENDPOINT',
+            'AWS_ENDPOINT',
+            'AWS_CLOUD_FORMATION_ENDPOINT',
+            'AWS_ECS_ENDPOINT',
+            'AWS_KINESIS_ENDPOINT',
+            'AWS_CLOUD_WATCH_LOGS_ENDPOINT',
+            'INPUT_AWSS3ENDPOINT',
+            'INPUT_AWSENDPOINT',
+        ]);
+        return environment.map((x) => {
+            let value = x.value;
+            if (typeof value === 'string' &&
+                endpointEnvironmentNames.has(x.name) &&
+                (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))) {
+                // Replace localhost with host.docker.internal so ECS containers can access host services
+                value = value
+                    .replace('http://localhost', 'http://host.docker.internal')
+                    .replace('http://127.0.0.1', 'http://host.docker.internal');
+                cloud_runner_logger_1.default.log(`AWS TaskRunner: Replaced localhost with host.docker.internal for ${x.name}: ${value}`);
+            }
+            return { name: x.name, value };
+        });
+    }
     static async runTask(taskDef, environment, commands) {
         const cluster = taskDef.baseResources?.find((x) => x.LogicalResourceId === 'ECSCluster')?.PhysicalResourceId || '';
         const taskDefinition = taskDef.taskDefResources?.find((x) => x.LogicalResourceId === 'TaskDefinition')?.PhysicalResourceId || '';
@@ -2143,6 +2173,8 @@ class AWSTaskRunner {
         const SubnetTwo = taskDef.baseResources?.find((x) => x.LogicalResourceId === 'PublicSubnetTwo')?.PhysicalResourceId || '';
         const ContainerSecurityGroup = taskDef.baseResources?.find((x) => x.LogicalResourceId === 'ContainerSecurityGroup')?.PhysicalResourceId || '';
         const streamName = taskDef.taskDefResources?.find((x) => x.LogicalResourceId === 'KinesisStream')?.PhysicalResourceId || '';
+        // Transform localhost endpoints for container environment
+        const transformedEnvironment = AWSTaskRunner.transformEndpointsForContainer(environment);
         const runParameters = {
             cluster,
             taskDefinition,
@@ -2151,7 +2183,7 @@ class AWSTaskRunner {
             containerOverrides: [
                 {
                     name: taskDef.taskDefStackName,
-                    environment,
+                    environment: transformedEnvironment,
                     command: ['-c', command_hook_service_1.CommandHookService.ApplyHooksToCommands(commands, cloud_runner_1.default.buildParameters)],
                 },
             ],
@@ -4041,6 +4073,7 @@ Object.defineProperty(exports, "__esModule", ({ value: true }));
 const client_node_1 = __nccwpck_require__(89679);
 const command_hook_service_1 = __nccwpck_require__(96159);
 const cloud_runner_1 = __importDefault(__nccwpck_require__(79144));
+const cloud_runner_logger_1 = __importDefault(__nccwpck_require__(42864));
 class KubernetesJobSpecFactory {
     static getJobSpec(command, image, mountdir, workingDirectory, environment, secrets, buildGuid, buildParameters, secretName, pvcName, jobName, k8s, containerName, ip = '') {
         const endpointEnvironmentNames = new Set([
@@ -4053,16 +4086,21 @@ class KubernetesJobSpecFactory {
             'INPUT_AWSS3ENDPOINT',
             'INPUT_AWSENDPOINT',
         ]);
+        // Determine the LocalStack hostname to use for K8s pods
+        // Priority: LOCALSTACK_HOST env var > localstack-main (container name on shared network)
+        const localstackHost = process.env['LOCALSTACK_HOST'] || 'localstack-main';
+        cloud_runner_logger_1.default.log(`K8s pods will use LocalStack host: ${localstackHost}`);
         const adjustedEnvironment = environment.map((x) => {
             let value = x.value;
             if (typeof value === 'string' &&
                 endpointEnvironmentNames.has(x.name) &&
                 (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))) {
-                // Replace localhost with host.k3d.internal so pods can access host services
-                // This simulates accessing external services (like real AWS S3)
+                // Replace localhost with the LocalStack container hostname
+                // When k3d and LocalStack are on the same Docker network, pods can reach LocalStack by container name
                 value = value
-                    .replace('http://localhost', 'http://host.k3d.internal')
-                    .replace('http://127.0.0.1', 'http://host.k3d.internal');
+                    .replace('http://localhost', `http://${localstackHost}`)
+                    .replace('http://127.0.0.1', `http://${localstackHost}`);
+                cloud_runner_logger_1.default.log(`Replaced localhost with ${localstackHost} for ${x.name}: ${value}`);
             }
             return { name: x.name, value };
         });
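To show what the bundled transformEndpointsForContainer above does at runtime, here is a self-contained TypeScript sketch with a sample environment. It mirrors the rewrite but is not the project's code: the EnvVar shape, the trimmed endpoint-name set, and the sample values are illustrative.

```typescript
// Standalone sketch of the rewrite applied for ECS containers; sample values are made up.
interface EnvVar {
  name: string;
  value: string;
}

// Subset of the endpoint variable names checked by the real code.
const endpointNames = new Set(['AWS_S3_ENDPOINT', 'AWS_ENDPOINT', 'AWS_CLOUD_FORMATION_ENDPOINT']);

function toHostDockerInternal(environment: EnvVar[]): EnvVar[] {
  return environment.map((x) => {
    let value = x.value;
    if (endpointNames.has(x.name) && (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))) {
      // ECS task containers cannot reach the host's localhost, so point them at host.docker.internal instead.
      value = value
        .replace('http://localhost', 'http://host.docker.internal')
        .replace('http://127.0.0.1', 'http://host.docker.internal');
    }
    return { name: x.name, value };
  });
}

// Sample input/output:
console.log(
  toHostDockerInternal([
    { name: 'AWS_S3_ENDPOINT', value: 'http://localhost:4566' }, // -> http://host.docker.internal:4566
    { name: 'BUILD_TARGET', value: 'StandaloneLinux64' }, // unrelated variables pass through unchanged
  ]),
);
```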
dist/index.js.map (generated, vendored, 2 lines changed)
File diff suppressed because one or more lines are too long
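The TypeScript source changes below rewrite the endpoint variables that the AWS SDK clients ultimately consume. For context, a hedged sketch of how such an endpoint is typically applied to an S3 client: it uses @aws-sdk/client-s3 directly rather than the project's aws-client-factory, and the region default and dummy credentials are assumptions.

```typescript
import { S3Client, ListBucketsCommand } from '@aws-sdk/client-s3';

// Sketch: inside a container, AWS_S3_ENDPOINT has already been rewritten to a reachable host,
// e.g. http://host.docker.internal:4566 (ECS) or http://localstack-main:4566 (K8s pods).
const s3 = new S3Client({
  region: process.env.AWS_REGION || 'eu-west-2', // assumed default region for the sketch
  endpoint: process.env.AWS_S3_ENDPOINT, // undefined means the real AWS endpoint is used
  forcePathStyle: true, // LocalStack is usually addressed path-style rather than via virtual-hosted buckets
  credentials: { accessKeyId: 'test', secretAccessKey: 'test' }, // LocalStack accepts dummy credentials
});

export async function listBuckets(): Promise<string[]> {
  const result = await s3.send(new ListBucketsCommand({}));
  return (result.Buckets ?? []).map((bucket) => bucket.Name ?? '');
}
```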
@@ -15,6 +15,44 @@ import { AwsClientFactory } from './aws-client-factory';
 class AWSTaskRunner {
   private static readonly encodedUnderscore = `$252F`;

+  /**
+   * Transform localhost endpoints to host.docker.internal for container environments.
+   * When LocalStack is used, ECS tasks run in Docker containers that need to reach
+   * LocalStack on the host machine via host.docker.internal.
+   */
+  private static transformEndpointsForContainer(
+    environment: CloudRunnerEnvironmentVariable[],
+  ): CloudRunnerEnvironmentVariable[] {
+    const endpointEnvironmentNames = new Set([
+      'AWS_S3_ENDPOINT',
+      'AWS_ENDPOINT',
+      'AWS_CLOUD_FORMATION_ENDPOINT',
+      'AWS_ECS_ENDPOINT',
+      'AWS_KINESIS_ENDPOINT',
+      'AWS_CLOUD_WATCH_LOGS_ENDPOINT',
+      'INPUT_AWSS3ENDPOINT',
+      'INPUT_AWSENDPOINT',
+    ]);
+
+    return environment.map((x) => {
+      let value = x.value;
+      if (
+        typeof value === 'string' &&
+        endpointEnvironmentNames.has(x.name) &&
+        (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))
+      ) {
+        // Replace localhost with host.docker.internal so ECS containers can access host services
+        value = value
+          .replace('http://localhost', 'http://host.docker.internal')
+          .replace('http://127.0.0.1', 'http://host.docker.internal');
+        CloudRunnerLogger.log(`AWS TaskRunner: Replaced localhost with host.docker.internal for ${x.name}: ${value}`);
+      }
+
+      return { name: x.name, value };
+    });
+  }
+
   static async runTask(
     taskDef: CloudRunnerAWSTaskDef,
     environment: CloudRunnerEnvironmentVariable[],
@@ -32,6 +70,9 @@ class AWSTaskRunner {
     const streamName =
       taskDef.taskDefResources?.find((x) => x.LogicalResourceId === 'KinesisStream')?.PhysicalResourceId || '';

+    // Transform localhost endpoints for container environment
+    const transformedEnvironment = AWSTaskRunner.transformEndpointsForContainer(environment);
+
     const runParameters = {
       cluster,
       taskDefinition,
@@ -40,7 +81,7 @@ class AWSTaskRunner {
       containerOverrides: [
         {
           name: taskDef.taskDefStackName,
-          environment,
+          environment: transformedEnvironment,
           command: ['-c', CommandHookService.ApplyHooksToCommands(commands, CloudRunner.buildParameters)],
         },
       ],

@@ -4,6 +4,7 @@ import { CommandHookService } from '../../services/hooks/command-hook-service';
 import CloudRunnerEnvironmentVariable from '../../options/cloud-runner-environment-variable';
 import CloudRunnerSecret from '../../options/cloud-runner-secret';
 import CloudRunner from '../../cloud-runner';
+import CloudRunnerLogger from '../../services/core/cloud-runner-logger';

 class KubernetesJobSpecFactory {
   static getJobSpec(
@@ -32,6 +33,12 @@ class KubernetesJobSpecFactory {
       'INPUT_AWSS3ENDPOINT',
       'INPUT_AWSENDPOINT',
     ]);
+
+    // Determine the LocalStack hostname to use for K8s pods
+    // Priority: LOCALSTACK_HOST env var > localstack-main (container name on shared network)
+    const localstackHost = process.env['LOCALSTACK_HOST'] || 'localstack-main';
+    CloudRunnerLogger.log(`K8s pods will use LocalStack host: ${localstackHost}`);
+
     const adjustedEnvironment = environment.map((x) => {
       let value = x.value;
       if (
@@ -39,11 +46,12 @@ class KubernetesJobSpecFactory {
         endpointEnvironmentNames.has(x.name) &&
         (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1'))
       ) {
-        // Replace localhost with host.k3d.internal so pods can access host services
-        // This simulates accessing external services (like real AWS S3)
+        // Replace localhost with the LocalStack container hostname
+        // When k3d and LocalStack are on the same Docker network, pods can reach LocalStack by container name
         value = value
-          .replace('http://localhost', 'http://host.k3d.internal')
-          .replace('http://127.0.0.1', 'http://host.k3d.internal');
+          .replace('http://localhost', `http://${localstackHost}`)
+          .replace('http://127.0.0.1', `http://${localstackHost}`);
+        CloudRunnerLogger.log(`Replaced localhost with ${localstackHost} for ${x.name}: ${value}`);
       }

       return { name: x.name, value } as CloudRunnerEnvironmentVariable;
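The Kubernetes job spec factory above resolves the LocalStack hostname from LOCALSTACK_HOST, defaulting to the localstack-main container name, before rewriting loopback endpoints. A standalone TypeScript sketch of that resolution and rewrite follows; the helper names are illustrative and not the project's API.

```typescript
// Sketch: pick the LocalStack host for pods, preferring the LOCALSTACK_HOST env var
// set by the workflow and falling back to the shared-network container name.
function resolveLocalstackHost(): string {
  return process.env['LOCALSTACK_HOST'] || 'localstack-main';
}

function rewriteForPods(value: string, localstackHost: string = resolveLocalstackHost()): string {
  // Only loopback endpoints need rewriting; pods on the shared Docker network
  // can reach LocalStack directly by container name, e.g. http://localstack-main:4566.
  if (value.startsWith('http://localhost') || value.startsWith('http://127.0.0.1')) {
    return value
      .replace('http://localhost', `http://${localstackHost}`)
      .replace('http://127.0.0.1', `http://${localstackHost}`);
  }
  return value;
}

// Example: with LOCALSTACK_HOST unset, 'http://localhost:4566' becomes 'http://localstack-main:4566'.
console.log(rewriteForPods('http://localhost:4566'));
```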