The requirements.
Provide more secure access to the RDS instance for end users, compared to placing the database in a public subnet with a public IP.
The solution below uses the AWS EC2 cloud-init feature to install Ansible with the required Galaxy collections and update the /home/ec2-user/.ssh/authorized_keys file whenever a new SSH public key is placed in the public_keys directory.
This way the EC2 instance is recreated every time an SSH key is added or removed, because the cloud-init definition changes.
As this solution depends on Ansible, we need to ensure that each new key file name is also added to ansible/bastion/roles/ssh/tasks/main.yaml.
Below is an example of task main.yaml file:
# roles/ssh/tasks/main.yaml — gathers EC2 metadata, then installs every listed
# public key into ec2-user's authorized_keys. Indentation restored: the flat
# paste was not valid YAML for Ansible (mapping keys must be nested under the task).
- name: Gather EC2 metadata facts
  amazon.aws.ec2_metadata_facts:

- name: Set up multiple authorized key taken from files
  ansible.posix.authorized_key:
    user: ec2-user
    state: present
    key: '{{ item }}'
  # Add one entry per key file shipped in roles/ssh/files/public_keys
  with_file:
    - public_keys/tom_id_rsa.pub
and below is the playbook definition:
---
# Bastion playbook — runs the ssh role locally on the instance itself
# (invoked by cloud-init as root, hence become/sudo). Indentation restored:
# the flat paste was not valid YAML.
- hosts: localhost
  connection: local
  remote_user: ec2-user
  become: yes
  become_method: sudo
  gather_facts: yes
  roles:
    - {role: 'ssh'}
The assumptions.
- VPC and subnets (public and private)
- Route53 DNS public zone
- S3 Bucket which hosts ansible playbook
- KMS Key (customer managed)
There is an Ansible playbook in the root directory named /ansible, with the below directory structure:
├── ansible
│ └── bastion
│ ├── ansible.cfg
│ ├── main.yml
│ └── roles
│ └── ssh
│ ├── files
│ │ └── public_keys
│ │ └── tom_id_rsa.pub
│ └── tasks
│ └── main.yaml
Architecture with data flow
The architecture below comes from the article named aws-bastion-host-jump-box.
CDK code
Let's import required components from the assumptions section
# Look up the pre-existing VPC; its id is read from SSM Parameter Store
# (context lookup, resolved at synth time).
imported_vpc = ec2.Vpc.from_lookup(
self,
id="imported_vpc",
vpc_id=ssm.StringParameter.value_from_lookup(
self, parameter_name=self.object_names["vpc_id_ssm_param_name"]
),
)
# Import the customer-managed KMS key by its alias (used later for EBS encryption).
imported_kms_key = kms.Key.from_lookup(
self, id="imported_kms_key", alias_name=f'alias/{self.object_names["shared_kms_key_alias"]}'
)
# Import the public Route53 zone; the zone id comes from SSM, the zone name
# from the stack configuration. The bastion DNS record will be created here.
imported_route53_public_zone = route53.PublicHostedZone.from_public_hosted_zone_attributes(
self,
id="imported_public_hosted_zone_id",
hosted_zone_id=ssm.StringParameter.value_for_string_parameter(
self, parameter_name=self.object_names["ssm_public_dns_zone_id"]
),
zone_name=self.object_names["ssm_public_dns_zone_name"],
)
Let's create bastion and grant necessary permissions
# Create the bastion host inside the imported VPC.
bastion = self.bastion_host(
props=props,
ansible_bucket=shared_ansible_s3_bucket,
route53_public_zone=imported_route53_public_zone,
shared_kms_key=imported_kms_key,
vpc=imported_vpc,
)
# The instance role must be able to read the playbook bucket and decrypt
# its objects (cloud-init runs `aws s3 cp` against this bucket).
shared_ansible_s3_bucket.grant_read(bastion)
imported_kms_key.grant_decrypt(bastion)
# Upload the local ansible/bastion directory to S3 under the "bastion" prefix.
self.bucket_deployment(
destination_key_prefix="bastion", object_path="../../ansible/bastion", bucket=shared_ansible_s3_bucket
)
Let's have methods for bastion_host and bucket_deployment
def bucket_deployment(self, destination_key_prefix: str, object_path: str, bucket: s3.IBucket) -> None:
    """Upload a local directory or zip archive into an S3 bucket.

    :param destination_key_prefix: Key prefix under which the objects are placed;
        also used to derive the construct id
    :param object_path: Filesystem path of the directory or archive, relative to this file
    :param bucket: Target AWS S3 bucket CDK construct
    :return:
    """
    # Resolve the asset location relative to this source file, not the cwd.
    asset_location = path.join(path.dirname(__file__), object_path)
    s3_deployment.BucketDeployment(
        self,
        id=f"{destination_key_prefix}_deployment",
        sources=[s3_deployment.Source.asset(asset_location)],
        destination_bucket=bucket,
        destination_key_prefix=destination_key_prefix,
    )
def bastion_host(
    self,
    props: Dict,
    ansible_bucket: s3.IBucket,
    route53_public_zone: route53.IPublicHostedZone,
    shared_kms_key: kms.IKey,
    vpc: ec2.IVpc,
) -> ec2.BastionHostLinux:
    """Create bastion host to route network traffic (grant access) to the
    resources placed inside private subnets.

    :param props: The dictionary which contain configuration values loaded initially from /config/config-env.yaml
    :param ansible_bucket: The CDK instance of existing s3 bucket that host ansible playbooks
    :param route53_public_zone: The CDK object for Route53 zone.
        In this zone the DNS entry for bastion host will be created
    :param shared_kms_key: The AWS KMS key shared for this project
    :param vpc: The EC2 VPC object, this vpc will be used to place bastion host in it
    :return: The created BastionHostLinux construct
    """
    ansible_copy_keys_init_list: List[ec2.InitCommand] = []
    # Fix: resolve the public-keys directory relative to this source file — the
    # same convention bucket_deployment uses — so the key enumeration does not
    # silently yield nothing when `cdk synth` is run from another directory.
    ssh_pub_keys_path = path.join(
        path.dirname(__file__), "../../ansible/bastion/roles/ssh/files/public_keys"
    )
    # pylint: disable=W0612
    for dir_path, dirs, files in walk(ssh_pub_keys_path):
        # One InitCommand per key file: the commands themselves are a no-op for
        # provisioning, but they change the cloud-init fingerprint whenever a
        # key file is added/removed, which forces instance replacement.
        ansible_copy_keys_init_list.extend(
            ec2.InitCommand.shell_command(
                shell_command=f"aws s3 cp s3://{ansible_bucket.bucket_name}/bastion/roles/ssh/files/public_keys/{file_name} /tmp/"
            )
            for file_name in files
        )
    init = ec2.CloudFormationInit.from_config_sets(
        config_sets={
            # Applies the configs below in this order
            "default": [
                "config",
                "yum_packages",
                "ansible_galaxy_modules_installation",
                "ansible_playbook_from_s3",
                "copy_ssh_pub_keys_from_s3",
                "enable_fail2ban",
            ]
        },
        configs={
            # Base OS update plus the ansible/epel amazon-linux-extras repos.
            "config": ec2.InitConfig(
                [
                    ec2.InitCommand.shell_command(shell_command="yum update -y"),
                    ec2.InitCommand.shell_command(shell_command="amazon-linux-extras install ansible2 -y"),
                    ec2.InitCommand.shell_command(shell_command="amazon-linux-extras install epel -y"),
                ]
            ),
            "yum_packages": ec2.InitConfig(
                [
                    ec2.InitPackage.yum("htop"),
                    ec2.InitPackage.yum("fail2ban"),
                ]
            ),
            # Collections required by the ssh role (authorized_key, ec2_metadata_facts).
            "ansible_galaxy_modules_installation": ec2.InitConfig(
                [
                    ec2.InitCommand.shell_command(
                        cwd="/root", shell_command="ansible-galaxy collection install ansible.posix"
                    ),
                    ec2.InitCommand.shell_command(
                        cwd="/root", shell_command="ansible-galaxy collection install amazon.aws"
                    ),
                ]
            ),
            "ansible_playbook_from_s3": ec2.InitConfig(
                [
                    # Fix: -p makes the command idempotent (plain mkdir fails if
                    # the directory already exists, aborting cfn-init).
                    ec2.InitCommand.shell_command(shell_command="mkdir -p /root/ansible"),
                    ec2.InitCommand.shell_command(
                        shell_command=f"aws s3 cp s3://{ansible_bucket.bucket_name}/bastion /root/ansible/bastion --recursive"
                    ),
                    ec2.InitCommand.shell_command(
                        cwd="/root/ansible/bastion", shell_command="ansible-playbook main.yml"
                    ),
                ]
            ),
            # This section does not implement any real change on Bastion host. It exists only for
            # changing the EC2 cloud-init definition that will force EC2 replacement whenever
            # a new ssh public key file will be added to the repository
            "copy_ssh_pub_keys_from_s3": ec2.InitConfig(ansible_copy_keys_init_list),
            "enable_fail2ban": ec2.InitConfig(
                [
                    ec2.InitCommand.shell_command(shell_command="systemctl enable fail2ban"),
                    ec2.InitCommand.shell_command(shell_command="systemctl start fail2ban"),
                ]
            ),
        },
    )
    init_options = ec2.ApplyCloudFormationInitOptions(
        config_sets=["default"], timeout=cdk.Duration.minutes(30), include_url=True, include_role=True
    )
    security_group = ec2.SecurityGroup(
        self,
        id="bastion_security_group",
        vpc=vpc,
        security_group_name=f'{self.object_names["standard_prefix"]}-bastion',
    )
    # SECURITY NOTE(review): SSH is open to 0.0.0.0/0 (fail2ban mitigates brute
    # force). Consider restricting the peer to known office/VPN CIDRs.
    security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(port=22))
    # Publish the security group id so dependent stacks (e.g. RDS) can allow
    # ingress from the bastion.
    ssm.StringParameter(
        self,
        id="bastion_security_group_id_ssm_param",
        string_value=security_group.security_group_id,
        parameter_name=self.object_names["bastion_security_group_id_ssm_param"],
    )
    bastion = ec2.BastionHostLinux(
        self,
        id="bastion_host",
        block_devices=[
            ec2.BlockDevice(
                device_name="/dev/xvda",
                # Root volume encrypted with the shared customer-managed key.
                volume=ec2.BlockDeviceVolume.ebs(
                    volume_size=10,
                    encrypted=True,
                    volume_type=ec2.EbsDeviceVolumeType.GP3,
                    kms_key=shared_kms_key,
                    delete_on_termination=True,
                ),
            )
        ],
        init=init,
        init_options=init_options,
        instance_name=f'{props["stage"]}-{props["project"]}-bastion',
        instance_type=ec2.InstanceType.of(
            instance_class=ec2.InstanceClass.BURSTABLE4_GRAVITON, instance_size=ec2.InstanceSize.MICRO
        ),
        security_group=security_group,
        subnet_selection=ec2.SubnetSelection(
            subnet_group_name="public",
        ),
        vpc=vpc,
    )
    # NOTE(review): relies on the "public" subnet group auto-assigning a public
    # IP so instance_public_dns_name resolves — confirm subnet configuration.
    route53.CnameRecord(
        self,
        id="bastion_dns_record",
        domain_name=bastion.instance_public_dns_name,
        record_name=f"bastion.{route53_public_zone.zone_name}",
        zone=route53_public_zone,
        comment="bastion host",
        ttl=cdk.Duration.minutes(1),
    )
    return bastion
Top comments (0)