
Terraform AWS: routing between two or more nodes in a private subnet

I have a Terraform plan (below) that creates a pair of nodes in a private VPC on AWS. Everything seems to work, but I can't ssh or ping between the nodes in the VPC.

What am I missing in the following configuration that would allow the two nodes on the private network to talk to each other?

provider "aws" { 
    region = "${var.aws_region}" 
    access_key = "${var.aws_access_key}" 
    secret_key = "${var.aws_secret_key}" 
} 


# Create a VPC to launch our instances into 
resource "aws_vpc" "default" { 
    cidr_block = "10.0.0.0/16" 


    tags {
        Name = "SolrCluster1"
    }
} 


# Create an internet gateway to give our subnet access to the outside world 
resource "aws_internet_gateway" "default" { 
    vpc_id = "${aws_vpc.default.id}" 


    tags {
        Name = "SolrCluster1"
    }
} 


# Grant the VPC internet access on its main route table 
resource "aws_route" "internet_access" { 
    route_table_id = "${aws_vpc.default.main_route_table_id}" 
    destination_cidr_block = "0.0.0.0/0" 
    gateway_id = "${aws_internet_gateway.default.id}" 
} 




# Create a subnet to launch our instances into 
resource "aws_subnet" "private" { 
    vpc_id = "${aws_vpc.default.id}" 
    cidr_block = "10.0.1.0/24" 


    # if true, instances launched into this subnet should be assigned a public IP 
    map_public_ip_on_launch = true 


    # availability_zone = 


    tags {
        Name = "SolrCluster1"
    }
} 




# Security Group to Access the instances over SSH, and 8983 
resource "aws_security_group" "main_security_group" { 
    name = "SolrCluster1" 
    description = "Allow access to the servers via port 22" 


    vpc_id = "${aws_vpc.default.id}" 


    // allow traffic from the SG itself for tcp
    ingress {
        from_port = 1
        to_port = 65535
        protocol = "tcp"
        self = true
    }

    // allow traffic from the SG itself for udp
    ingress {
        from_port = 1
        to_port = 65535
        protocol = "udp"
        self = true
    }

    // allow SSH traffic from anywhere TODO: Button this up a bit?
    ingress {
        from_port = 22
        to_port = 22
        protocol = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
    }

    // allow ICMP
    ingress {
        from_port = -1
        to_port = -1
        protocol = "icmp"
        cidr_blocks = ["0.0.0.0/0"]
    }


} 


resource "aws_instance" "solr" { 
    ami = "ami-408c7f28" 
    instance_type = "t1.micro" 


    # The name of our SSH keypair (passed in as a variable)
    # key_name = "${aws_key_pair.auth.id}"
    key_name = "${var.key_name}"


    vpc_security_group_ids = ["${aws_security_group.main_security_group.id}"] 


    # Launch the instances into our subnet 
    subnet_id = "${aws_subnet.private.id}" 


    # The connection block tells our provisioner how to communicate with the
    # resource (instance)
    connection {
        # The default username for our AMI
        user = "ubuntu"
        # Authenticate with the private key at the given path
        private_key = "${file(var.private_key_path)}"
    }


    /* provisioner "remote-exec" { */
    /*     inline = [ */
    /*         "sudo apt-get -y update", */
    /*         "sudo apt-get -y --force-yes install nginx", */
    /*         "sudo service nginx start" */
    /*     ] */
    /* } */


    tags {
        Name = "SolrDev${count.index}"
    }


    count = 2 
} 
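
For reference, I also surface each node's private IP with an output so I can try ssh/ping between them (a small convenience of my own; solr_private_ips is just a name I picked, using the usual splat syntax):

output "solr_private_ips" {
    # Comma-separated private IPs of both Solr instances
    value = "${join(",", aws_instance.solr.*.private_ip)}"
}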

Answer

It turned out I had omitted the egress rules for my security group (unlike the AWS console, Terraform strips the default allow-all egress rule when it manages a security group):

    // allow traffic to other members of the SG for tcp
    egress {
        from_port = 1
        to_port = 65535
        protocol = "tcp"
        self = true
    }

    // allow traffic to other members of the SG for udp
    egress {
        from_port = 1
        to_port = 65535
        protocol = "udp"
        self = true
    }
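
Note that with only these self-referencing egress rules, the instances can reach each other but nothing outside the security group. If they also need outbound internet access (for example, the apt-get commands in the commented-out provisioner), something like this allow-all egress sketch would be needed as well:

    // allow all outbound traffic ("-1" means all protocols)
    egress {
        from_port = 0
        to_port = 0
        protocol = "-1"
        cidr_blocks = ["0.0.0.0/0"]
    }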