This is a rather naive approach that doesn't utilize many of Pulumi's strengths, such as letting us apply design patterns when building out infrastructure. It should, hopefully, still illustrate the benefits of having your infrastructure defined in code you actually read and write every day.
1package main
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
8 "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs"
9 "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
10 "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb"
11 "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/rds"
12 "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
13)
14
15func main() {
16 pulumi.Run(func(ctx *pulumi.Context) error {
17 availabilityZones := []string{"us-east-1a", "us-east-1b"}
18
19 // VPC
20 vpc, err := ec2.NewVpc(ctx, "grafto-vpc", &ec2.VpcArgs{
21 CidrBlock: pulumi.String("10.0.0.0/16"),
22 EnableDnsHostnames: pulumi.Bool(true),
23 EnableDnsSupport: pulumi.Bool(true),
24 })
25 if err != nil {
26 return err
27 }
28
29 startingSubnetCidrRange := "10.0.0.0/20"
30
31 // SUBNETS
32 subnets := make(map[string][]*ec2.Subnet, len(availabilityZones))
33 for i, az := range availabilityZones {
34 var cidrRangePublic string
35 var cidrRangePrivate string
36 if i == 0 {
37 cidrRangePublic = startingSubnetCidrRange
38 cidrRangePrivate = fmt.Sprintf("10.0.%v.0/20", 16)
39 } else {
40 cidrRangePublic = fmt.Sprintf("10.0.%v.0/20", 16*(i+1))
41 cidrRangePrivate = fmt.Sprintf("10.0.%v.0/20", 16*(i+2))
42 }
43
44 publicSubnet, err := ec2.NewSubnet(
45 ctx,
46 fmt.Sprintf("grafto-%s-subnet-%v", "public", i+1),
47 &ec2.SubnetArgs{
48 VpcId: vpc.ID(),
49 CidrBlock: pulumi.String(cidrRangePublic),
50 AvailabilityZone: pulumi.String(az),
51 },
52 )
53 if err != nil {
54 return err
55 }
56
57 subnets["public"] = append(subnets["public"], publicSubnet)
58
59 privateSubnet, err := ec2.NewSubnet(
60 ctx,
61 fmt.Sprintf("grafto-%s-subnet-%v", "private", i+1),
62 &ec2.SubnetArgs{
63 VpcId: vpc.ID(),
64 CidrBlock: pulumi.String(cidrRangePrivate),
65 AvailabilityZone: pulumi.String(az),
66 },
67 )
68 if err != nil {
69 return err
70 }
71
72 subnets["private"] = append(subnets["private"], privateSubnet)
73 }
74
75 // INTERNET GATEWAY
76 internetGateway, err := ec2.NewInternetGateway(
77 ctx,
78 "grafto-internet-gateway",
79 &ec2.InternetGatewayArgs{
80 VpcId: vpc.ID(),
81 },
82 )
83 if err != nil {
84 return err
85 }
86
87 publicRouteTable, err := ec2.NewRouteTable(
88 ctx,
89 "grafto-public-route-table",
90 &ec2.RouteTableArgs{
91 VpcId: vpc.ID(),
92 },
93 )
94 if err != nil {
95 return err
96 }
97
98 _, err = ec2.NewRoute(ctx, "grafto-public-route", &ec2.RouteArgs{
99 DestinationCidrBlock: pulumi.String("0.0.0.0/0"),
100 GatewayId: internetGateway.ID(),
101 RouteTableId: publicRouteTable.ID(),
102 })
103 if err != nil {
104 return err
105 }
106
107 _, err = ec2.NewRouteTableAssociation(
108 ctx,
109 "grafto-public-route-ass-1",
110 &ec2.RouteTableAssociationArgs{
111 RouteTableId: publicRouteTable.ID(),
112 SubnetId: subnets["public"][0].ID(),
113 },
114 )
115 if err != nil {
116 return err
117 }
118
119 _, err = ec2.NewRouteTableAssociation(
120 ctx,
121 "grafto-public-route-ass-2",
122 &ec2.RouteTableAssociationArgs{
123 RouteTableId: publicRouteTable.ID(),
124 SubnetId: subnets["public"][1].ID(),
125 },
126 )
127 if err != nil {
128 return err
129 }
130
131 // NATGATEWAY
132 elasticIP, err := ec2.NewEip(ctx, "grafto-elastic-ip", &ec2.EipArgs{})
133 if err != nil {
134 return err
135 }
136
137 natGateway, err := ec2.NewNatGateway(ctx, "grafto-nat-gateway", &ec2.NatGatewayArgs{
138 AllocationId: elasticIP.ID(),
139 SubnetId: subnets["public"][0].ID(),
140 })
141 if err != nil {
142 return err
143 }
144
145 privateRouteTable, err := ec2.NewRouteTable(
146 ctx,
147 "grafto-private-route-table",
148 &ec2.RouteTableArgs{
149 VpcId: vpc.ID(),
150 },
151 )
152 if err != nil {
153 return err
154 }
155
156 _, err = ec2.NewRoute(ctx, "grafto-private-route", &ec2.RouteArgs{
157 DestinationCidrBlock: pulumi.String("0.0.0.0/0"),
158 NatGatewayId: natGateway.ID(),
159 RouteTableId: privateRouteTable.ID(),
160 })
161 if err != nil {
162 return err
163 }
164
165 _, err = ec2.NewRouteTableAssociation(
166 ctx,
167 "grafto-private-route-ass-1",
168 &ec2.RouteTableAssociationArgs{
169 RouteTableId: privateRouteTable.ID(),
170 SubnetId: subnets["private"][0].ID(),
171 },
172 )
173 if err != nil {
174 return err
175 }
176
177 _, err = ec2.NewRouteTableAssociation(
178 ctx,
179 "grafto-private-route-ass-2",
180 &ec2.RouteTableAssociationArgs{
181 RouteTableId: privateRouteTable.ID(),
182 SubnetId: subnets["private"][1].ID(),
183 },
184 )
185 if err != nil {
186 return err
187 }
188
189 // SECURITY GROUP
190 applicationLoadBalancer, err := ec2.NewSecurityGroup(
191 ctx,
192 "grafto-alb-sg",
193 &ec2.SecurityGroupArgs{
194 VpcId: vpc.ID(),
195 Ingress: ec2.SecurityGroupIngressArray{
196 &ec2.SecurityGroupIngressArgs{
197 CidrBlocks: pulumi.StringArray{
198 pulumi.String("0.0.0.0/0"),
199 },
200 FromPort: pulumi.Int(80),
201 ToPort: pulumi.Int(80),
202 Protocol: pulumi.String("tcp"),
203 },
204 },
205 Egress: ec2.SecurityGroupEgressArray{
206 &ec2.SecurityGroupEgressArgs{
207 CidrBlocks: pulumi.StringArray{
208 pulumi.String("0.0.0.0/0"),
209 },
210 FromPort: pulumi.Int(0),
211 ToPort: pulumi.Int(0),
212 Protocol: pulumi.String("-1"),
213 },
214 },
215 },
216 )
217 if err != nil {
218 return err
219 }
220
221 ecsSG, err := ec2.NewSecurityGroup(
222 ctx,
223 "grafto-ecs-sg",
224 &ec2.SecurityGroupArgs{
225 VpcId: vpc.ID(),
226 Ingress: ec2.SecurityGroupIngressArray{
227 &ec2.SecurityGroupIngressArgs{
228 CidrBlocks: pulumi.StringArray{
229 pulumi.String("0.0.0.0/0"),
230 },
231 FromPort: pulumi.Int(0),
232 ToPort: pulumi.Int(0),
233 Protocol: pulumi.String("-1"),
234 },
235 },
236 Egress: ec2.SecurityGroupEgressArray{
237 &ec2.SecurityGroupEgressArgs{
238 CidrBlocks: pulumi.StringArray{
239 pulumi.String("0.0.0.0/0"),
240 },
241 FromPort: pulumi.Int(0),
242 ToPort: pulumi.Int(0),
243 Protocol: pulumi.String("-1"),
244 },
245 },
246 },
247 )
248 if err != nil {
249 return err
250 }
251
252 rdsSGG, err := ec2.NewSecurityGroup(
253 ctx,
254 "grafto-rds-sgg",
255 &ec2.SecurityGroupArgs{
256 VpcId: vpc.ID(),
257 Ingress: ec2.SecurityGroupIngressArray{
258 &ec2.SecurityGroupIngressArgs{
259 CidrBlocks: pulumi.StringArray{
260 pulumi.String("0.0.0.0/0"),
261 },
262 FromPort: pulumi.Int(0),
263 ToPort: pulumi.Int(0),
264 Protocol: pulumi.String("-1"),
265 },
266 },
267 Egress: ec2.SecurityGroupEgressArray{
268 &ec2.SecurityGroupEgressArgs{
269 CidrBlocks: pulumi.StringArray{
270 pulumi.String("0.0.0.0/0"),
271 },
272 FromPort: pulumi.Int(0),
273 ToPort: pulumi.Int(0),
274 Protocol: pulumi.String("-1"),
275 },
276 },
277 },
278 )
279 if err != nil {
280 return err
281 }
282
283 rdsSg, err := rds.NewSubnetGroup(ctx, "grafto-rds-sg", &rds.SubnetGroupArgs{
284 SubnetIds: pulumi.StringArray{
285 subnets["private"][0].ID(),
286 subnets["private"][1].ID(),
287 },
288 })
289 if err != nil {
290 return err
291 }
292
293 database, err := rds.NewInstance(ctx, "grafto-rds-psql", &rds.InstanceArgs{
294 AllocatedStorage: pulumi.Int(10),
295 DbName: pulumi.String("grafto"),
296 Password: pulumi.String("password"),
297 Username: pulumi.String("grafto"),
298 Engine: pulumi.String("postgres"),
299 EngineVersion: pulumi.String("16.3"),
300 InstanceClass: pulumi.String("db.t3.micro"),
301 ParameterGroupName: pulumi.String("default.postgres16"),
302 DbSubnetGroupName: rdsSg.Name,
303 VpcSecurityGroupIds: pulumi.StringArray{
304 rdsSGG.ID(),
305 },
306 SkipFinalSnapshot: pulumi.Bool(true),
307 PubliclyAccessible: pulumi.Bool(false),
308 })
309 if err != nil {
310 return err
311 }
312
313 loadBalancer, err := lb.NewLoadBalancer(ctx, "grafto-load-balancer", &lb.LoadBalancerArgs{
314 Internal: pulumi.Bool(false),
315 LoadBalancerType: pulumi.String("application"),
316 SecurityGroups: pulumi.StringArray{
317 applicationLoadBalancer.ID(),
318 },
319 Subnets: pulumi.StringArray{
320 subnets["public"][0].ID(),
321 subnets["public"][1].ID(),
322 },
323 EnableDeletionProtection: pulumi.Bool(false),
324 })
325 if err != nil {
326 return err
327 }
328 ctx.Export("url", pulumi.Sprintf("http://%s", loadBalancer.DnsName))
329
330 targetGroup, err := lb.NewTargetGroup(ctx, "grafto-alb-target-group", &lb.TargetGroupArgs{
331 HealthCheck: &lb.TargetGroupHealthCheckArgs{
332 Path: pulumi.String("/api/health"),
333 Protocol: pulumi.String("HTTP"),
334 },
335 Name: pulumi.String("grafto-app-tg"),
336 Port: pulumi.Int(80),
337 Protocol: pulumi.String("HTTP"),
338 TargetType: pulumi.String("ip"),
339 VpcId: vpc.ID(),
340 })
341 if err != nil {
342 return err
343 }
344
345 _, err = lb.NewListener(ctx, "grafto-alb-listener", &lb.ListenerArgs{
346 DefaultActions: lb.ListenerDefaultActionArray{
347 lb.ListenerDefaultActionArgs{
348 TargetGroupArn: targetGroup.Arn,
349 Type: pulumi.String("forward"),
350 },
351 },
352 LoadBalancerArn: loadBalancer.Arn,
353 Port: pulumi.Int(80),
354 Protocol: pulumi.String("HTTP"),
355 })
356 if err != nil {
357 return err
358 }
359
360 // IAM RELATED STUFF
361 _, err = iam.NewServiceLinkedRole(
362 ctx,
363 "elastic-container-service",
364 &iam.ServiceLinkedRoleArgs{
365 AwsServiceName: pulumi.String("ecs.amazonaws.com"),
366 Description: pulumi.String("Role to enable Amazon ECS to manage your cluster."),
367 },
368 )
369 if err != nil {
370 return err
371 }
372
373 _, err = iam.NewServiceLinkedRole(ctx, "rds", &iam.ServiceLinkedRoleArgs{
374 AwsServiceName: pulumi.String("rds.amazonaws.com"),
375 Description: pulumi.String("Role to enable Amazon RDS to manage your cluster."),
376 })
377 if err != nil {
378 return err
379 }
380
381 _, err = iam.NewServiceLinkedRole(ctx, "elastic-load-balancer", &iam.ServiceLinkedRoleArgs{
382 AwsServiceName: pulumi.String("elasticloadbalancing.amazonaws.com"),
383 Description: pulumi.String("Allows ELB to call AWS services on your behalf"),
384 })
385 if err != nil {
386 return err
387 }
388
389 _, err = iam.NewServiceLinkedRole(
390 ctx,
391 "application-autoscaling",
392 &iam.ServiceLinkedRoleArgs{
393 AwsServiceName: pulumi.String("ecs.application-autoscaling.amazonaws.com"),
394 Description: pulumi.String(
395 "Allows application autoscaling to call AWS services on your behalf",
396 ),
397 },
398 )
399 if err != nil {
400 return err
401 }
402
403 roleJson, err := json.Marshal(map[string]interface{}{
404 "Version": "2012-10-17",
405 "Statement": []map[string]interface{}{
406 {
407 "Action": []string{
408 "sts:AssumeRole",
409 },
410 "Principal": map[string]string{"Service": "ecs-tasks.amazonaws.com"},
411 "Effect": "Allow",
412 },
413 },
414 })
415 if err != nil {
416 return err
417 }
418 role, err := iam.NewRole(ctx, "grafto-iam-role", &iam.RoleArgs{
419 Name: pulumi.String("grafto-iam-role"),
420 AssumeRolePolicy: pulumi.String(string(roleJson)),
421 })
422 if err != nil {
423 return err
424 }
425
426 rolePolicyJson, err := json.Marshal(map[string]interface{}{
427 "Version": "2012-10-17",
428 "Statement": []map[string]interface{}{
429 {
430 "Action": []string{
431 "ecr:*",
432 },
433 "Effect": "Allow",
434 "Resource": "*",
435 },
436 },
437 })
438 if err != nil {
439 return err
440 }
441 _, err = iam.NewRolePolicy(ctx, "grafto-iam-role-policy", &iam.RolePolicyArgs{
442 Name: pulumi.String("grafto-iam-role"),
443 Role: role.Name,
444 Policy: pulumi.String(string(rolePolicyJson)),
445 })
446 if err != nil {
447 return err
448 }
449
450 // ELASTIC CONTAINER SERVICE
451 cluster, err := ecs.NewCluster(ctx, "grafto-ecs-cluster", &ecs.ClusterArgs{
452 Name: pulumi.String("grafto"),
453 })
454 if err != nil {
455 return err
456 }
457
458 taskContainerDefinition := pulumi.JSONMarshal([]map[string]interface{}{
459 {
460 "name": "grafto-task",
461 "image": "docker.io/mbvofdocker/grafto:pulumi-blog",
462 "portMappings": []map[string]interface{}{
463 {
464 "containerPort": 8080,
465 "hostPort": 8080,
466 "protocol": "HTTP",
467 },
468 },
469 "essential": true,
470 "command": []string{"./app"},
471 "environment": []map[string]interface{}{
472 {
473 "name": "ENVIRONMENT",
474 "value": "production",
475 },
476 {
477 "name": "SERVER_HOST",
478 "value": "0.0.0.0",
479 },
480 {
481 "name": "SERVER_PORT",
482 "value": "8080",
483 },
484 {
485 "name": "DEFAULT_SENDER_SIGNATURE",
486 "value": "[email protected]",
487 },
488 {
489 "name": "POSTMARK_API_TOKEN",
490 "value": "insert-valid-token-here",
491 },
492 {
493 "name": "DB_KIND",
494 "value": "postgres",
495 },
496 {
497 "name": "DB_PORT",
498 "value": "5432",
499 },
500 {
501 "name": "DB_HOST",
502 "value": database.Address.ApplyT(
503 func(addr string) string {
504 return addr
505 },
506 ).(pulumi.StringOutput),
507 },
508 {
509 "name": "DB_NAME",
510 "value": database.DbName.ApplyT(
511 func(name string) string {
512 return name
513 },
514 ).(pulumi.StringOutput),
515 },
516 {
517 "name": "DB_USER",
518 "value": database.Username.ApplyT(
519 func(name string) string {
520 return name
521 },
522 ).(pulumi.StringOutput),
523 },
524 {
525 "name": "DB_PASSWORD",
526 "value": database.Password.ApplyT(
527 func(pass *string) string {
528 return *pass
529 },
530 ).(pulumi.StringOutput),
531 },
532 {
533 "name": "DB_SSL_MODE",
534 "value": "require",
535 },
536 {
537 "name": "PASSWORD_PEPPER",
538 "value": "lotsandlotsofrandomcharshere",
539 },
540 {
541 "name": "PROJECT_NAME",
542 "value": "Pulumi Grafto BLog Post",
543 },
544 {
545 "name": "APP_HOST",
546 "value": loadBalancer.DnsName.ApplyT(func(url string) string {
547 return url
548 }),
549 },
550 {
551 "name": "APP_SCHEME",
552 "value": "http",
553 },
554 {
555 "name": "CSRF_TOKEN",
556 "value": "lotsandlotsofrandomcharshere",
557 },
558 {
559 "name": "SESSION_KEY",
560 "value": "lotsandlotsofrandomcharshere",
561 },
562 {
563 "name": "SESSION_ENCRYPTION_KEY",
564 "value": "lotsandlotsofrandomcharshere",
565 },
566 {
567 "name": "TOKEN_SIGNING_KEY",
568 "value": "lotsandlotsofrandomcharshere",
569 },
570 },
571 },
572 })
573 taskDefinition, err := ecs.NewTaskDefinition(ctx, "grafto-task", &ecs.TaskDefinitionArgs{
574 ContainerDefinitions: taskContainerDefinition,
575 Cpu: pulumi.String("256"),
576 ExecutionRoleArn: role.Arn,
577 Family: pulumi.String("grafto"),
578 Memory: pulumi.String("512"),
579 NetworkMode: pulumi.String("awsvpc"),
580 TaskRoleArn: role.Arn,
581 })
582 if err != nil {
583 return err
584 }
585
586 _, err = ecs.NewService(ctx, "grafto-service", &ecs.ServiceArgs{
587 Cluster: cluster.Arn,
588 DeploymentMaximumPercent: pulumi.IntPtr(200),
589 DeploymentMinimumHealthyPercent: pulumi.IntPtr(50),
590 DesiredCount: pulumi.IntPtr(1),
591 ForceNewDeployment: pulumi.Bool(true),
592 LoadBalancers: ecs.ServiceLoadBalancerArray{
593 &ecs.ServiceLoadBalancerArgs{
594 TargetGroupArn: targetGroup.Arn,
595 ContainerName: pulumi.String("grafto-task"),
596 ContainerPort: pulumi.Int(8080),
597 },
598 },
599 NetworkConfiguration: ecs.ServiceNetworkConfigurationArgs{
600 Subnets: pulumi.StringArray{
601 subnets["private"][0].ID(),
602 subnets["private"][1].ID(),
603 },
604 SecurityGroups: pulumi.StringArray{
605 ecsSG.ID(),
606 },
607 },
608 Name: pulumi.String("grafto-ecs-service"),
609 LaunchType: pulumi.String("FARGATE"),
610 PlatformVersion: pulumi.String("1.4.0"),
611 TaskDefinition: taskDefinition.Arn,
612 })
613 if err != nil {
614 return err
615 }
616
617 return nil
618 })
619}
An obvious improvement to the above would be to enable HTTPS; if you check the load balancer's security group you can see that we allow ingress traffic on port 80. This is the only entrypoint since our Fargate tasks are all in private networks, so adding a certificate and limiting traffic to port 443 could go a long way.
Take a look at the calculations of the CIDR ranges. If we add too many availability zones this will fail, which is something to handle as well. It could be a simple check on how many AZs are requested, limiting them to a certain level, but it should still be fixed.
It would also be beneficial to store the environmental variables somewhere like AWS's parameter store, and not directly in the code.
You'll probably also have noticed multiple opportunities for re-using code, through setup functions or, my personal favorite in this case, builders. Builders can simplify the code a lot, especially if the number of tasks in your ECS service increases. In a future article we'll improve upon this so we can easily expand our infrastructure.
An interesting comparison would be to do the same with Terraform and see how much they differ, and whether the effort of making the infrastructure code reusable with different design patterns makes sense in the end.
But for now, that's all. Happy hacking!
Thanks for reading.
If you enjoyed this one you can check out some of these: