13 Commits
master ... ecs

Author SHA1 Message Date
Cedric Ziel
093b9b27a6 Use service registry for discovery 2020-05-12 09:51:23 +02:00
Cedric Ziel
8c6ee60e36 Add dispatch service 2020-05-11 16:30:58 +02:00
Cedric Ziel
f97fa7a455 Add payment service 2020-05-11 15:51:35 +02:00
Cedric Ziel
a9206d7479 Add ratings service 2020-05-11 15:40:22 +02:00
Cedric Ziel
4422db5072 Add shipping service 2020-05-11 15:32:16 +02:00
Cedric Ziel
5967945d58 Add mysql service 2020-05-11 15:19:32 +02:00
Cedric Ziel
c58432972c Add cart service 2020-05-11 15:05:55 +02:00
Cedric Ziel
6a638b4d6a Add user service 2020-05-11 14:56:09 +02:00
Cedric Ziel
96f8ef8be8 Add catalogue service 2020-05-11 14:16:44 +02:00
Cedric Ziel
e95b14892f Add RabbitMQ service 2020-05-11 13:57:47 +02:00
Cedric Ziel
c7e6b9d252 Add redis service 2020-05-11 13:51:10 +02:00
Cedric Ziel
f3f350409c Add web & mongodb service 2020-05-11 13:42:42 +02:00
Cedric Ziel
b3ee2880d9 Add cluster descriptor
* add template for instana-agent
2020-05-11 13:42:08 +02:00
124 changed files with 2558 additions and 2234 deletions

2
.env
View File

@@ -1,3 +1,3 @@
# environment file for docker-compose
REPO=robotshop
TAG=2.1.0
TAG=0.4.17

View File

@@ -1,19 +0,0 @@
name: docker-compose-actions-workflow
on:
push:
branches:
- 'master'
paths-ignore:
- 'DCOS/**'
- 'K8s/**'
- 'load-gen/**'
- 'OpenShift/**'
- 'Swarm/**'
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Build the stack
run: docker-compose build

View File

@@ -49,9 +49,9 @@ By default the `payment` service uses https://www.paypal.com as the pseudo payme
$ helm install --set payment.gateway=https://foobar.com ...
```
## Website Monitoring / End-User Monitoring
## End User Monitoring
Optionally Website Monitoring / End-User Monitoring can be enabled for the web pages. Take a look at the [documentation](https://docs.instana.io/website_monitoring/) to see how to get a key and an endpoint url.
Optionally End User Monitoring can be enabled for the web pages. Take a look at the [documentation](https://docs.instana.io/products/website_monitoring/) to see how to get a key and an endpoint url.
```shell
$ helm install \
@@ -60,38 +60,3 @@ $ helm install \
...
```
## Use with Minis
When running on `minishift` or `minikube`, set `nodeport` to true. The store will then be available on the IP address of your mini and the node port of the web service.
```shell
$ mini[kube|shift] ip
192.168.66.101
$ kubectl get svc web
```
Combine the IP and port number to make the URL `http://192.168.66.101:32145`
### MiniShift
Openshift is like K8s but not K8s. Set `openshift` to true or things will break. See the notes and scripts in the OpenShift directory of this repo.
```shell
$ helm install robot-shop --set openshift=true helm
```
### Deployment Parameters
| Key | Default | Type | Description |
| ---------------- | ------- | ------ | ----------- |
| eum.key | null | string | EUM Access Key |
| eum.url | https://eum-eu-west-1.instana.io | url | EUM endpoint URL |
| image.pullPolicy | IfNotPresent | string | Kubernetes pull policy. One of Always, IfNotPresent, or Never. |
| image.repo | robotshop | string | Base docker repository to pull the images from. |
| image.version | latest | string | Docker tag to pull. |
| nodeport | false | boolean | Whether to expose the services via node port. |
| openshift | false | boolean | If OpenShift additional configuration is applied. |
| payment.gateway | null | string | External URL end-point to simulate partial/3rd party traces. |
| psp.enabled | false | boolean | Enable Pod Security Policy for clusters with a PSP Admission controller |
| redis.storageClassName | standard | string | Storage class to use with Redis's StatefulSet. The default for EKS is gp2. |
| ocCreateRoute | false | boolean | If you are running on OpenShift and need a Route to the web service, set this to `true` |
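Any of these can be overridden with `--set` at install time; a quick sketch with illustrative values only:
```shell
$ helm install robot-shop \
    --set image.version=latest \
    --set nodeport=true \
    --set eum.key=<your eum key> \
    helm
```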

View File

@@ -20,8 +20,7 @@ spec:
containers:
- name: cart
image: {{ .Values.image.repo }}/rs-cart:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
# agent networking access
# agent networking access
env:
- name: INSTANA_AGENT_HOST
valueFrom:

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: catalogue
image: {{ .Values.image.repo }}/rs-catalogue:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: INSTANA_AGENT_HOST
valueFrom:

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: dispatch
image: {{ .Values.image.repo }}/rs-dispatch:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# agent networking access
- name: INSTANA_AGENT_HOST

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: mongodb
image: {{ .Values.image.repo }}/rs-mongodb:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 27017
resources:

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: mysql
image: {{ .Values.image.repo }}/rs-mysql-db:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
# added for Istio
securityContext:
capabilities:

View File

@@ -23,7 +23,6 @@ spec:
containers:
- name: payment
image: {{ .Values.image.repo }}/rs-payment:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
# agent networking access
env:
- name: INSTANA_AGENT_HOST

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: rabbitmq
image: rabbitmq:3.7-management-alpine
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 5672
- containerPort: 15672

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: ratings
image: {{ .Values.image.repo }}/rs-ratings:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 80
resources:
@@ -30,12 +29,4 @@ spec:
requests:
cpu: 100m
memory: 50Mi
readinessProbe:
httpGet:
path: /_health
port: 80
initialDelaySeconds: 5
periodSeconds: 5
failureThreshold: 30
successThreshold: 1
restartPolicy: Always

View File

@@ -1,5 +1,5 @@
apiVersion: apps/v1
kind: StatefulSet
kind: Deployment
metadata:
labels:
service: redis
@@ -9,7 +9,6 @@ spec:
selector:
matchLabels:
service: redis
serviceName: redis
template:
metadata:
labels:
@@ -21,12 +20,8 @@ spec:
containers:
- name: redis
image: redis:4.0.6
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 6379
volumeMounts:
- name: data
mountPath: /mnt/redis
resources:
limits:
cpu: 200m
@@ -35,16 +30,3 @@ spec:
cpu: 100m
memory: 50Mi
restartPolicy: Always
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
{{ if not .Values.openshift }}
storageClassName: {{ .Values.redis.storageClassName }}
volumeMode: Filesystem
{{ end }}
resources:
requests:
storage: 1Gi

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: shipping
image: {{ .Values.image.repo }}/rs-shipping:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 8080
# it's Java it needs lots of memory
@@ -31,12 +30,4 @@ spec:
requests:
cpu: 100m
memory: 500Mi
readinessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
failureThreshold: 30
successThreshold: 1
restartPolicy: Always

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: user
image: {{ .Values.image.repo }}/rs-user:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
# agent networking access
- name: INSTANA_AGENT_HOST

View File

@@ -20,7 +20,6 @@ spec:
containers:
- name: web
image: {{ .Values.image.repo }}/rs-web:{{ .Values.image.version }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.eum.key }}
env:
- name: INSTANA_EUM_KEY

View File

@@ -11,19 +11,4 @@ spec:
targetPort: 8080
selector:
service: web
{{ if .Values.nodeport }}
type: NodePort
{{ else }}
type: LoadBalancer
{{ end }}
---
{{if .Values.ocCreateRoute}}
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: web
spec:
to:
kind: Service
name: web
{{end}}

View File

@@ -3,7 +3,6 @@
image:
repo: robotshop
version: latest
pullPolicy: IfNotPresent
# Alternative payment gateway URL
# Default is https://www.paypal.com
@@ -22,14 +21,3 @@ eum:
psp:
enabled: false
# For the mini ones minikube, minishift set to true
nodeport: false
# "special" Openshift. Set to true when deploying to any openshift flavour
openshift: false
# Storage class to use with redis statefulset.
redis:
storageClassName: standard
ocCreateRoute: false

View File

@@ -4,45 +4,17 @@ See the official [documentation](https://docs.instana.io/quick_start/agent_setup
# Robot Shop Deployment
## OCP 3.x
Have a look at the contents of the *setup.sh* and *deploy.sh* scripts; you may want to tweak some settings to suit your environment.
For OpenShift run the `setup.sh` script to create the project and set the extra permissions.
Run the *setup.sh* script first; you will need the passwords for the developer and system:admin users.
Use the Helm chart for Kubernetes to install Stan's Robot Shop. To install on Minishift:
Once the setup is complete, run the *deploy.sh* script. This script imports the application images from Docker Hub into OpenShift, then it creates applications from those images.
### Helm 3
When the deployment has completed, the web service needs to be updated to make Stan's Robot Shop accessible.
```shell
$ cd K8s
$ oc login -u developer
$ oc project robot-shop
$ helm install robot-shop --set openshift=true --set nodeport=true helm
```

```bash
oc edit svc web
```
To connect to the shop:
```shell
$ minishift ip
192.168.99.106
$ oc get svc web
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
web NodePort 172.30.180.253 <none> 8080:31147/TCP 4m
```
Use the IP and the node port to form the URL `http://192.168.99.106:31147/`
## OCP 4.x
For Openshift cluster in version 4.x follow these steps:
```
export KUBECONFIG=/path/to/oc/cluster/dir/auth/kubeconfig
oc adm new-project robot-shop
oc adm policy add-scc-to-user anyuid -z default -n robot-shop
oc adm policy add-scc-to-user privileged -z default -n robot-shop
cd robot-shop/K8s
helm install robot-shop --set openshift=true -n robot-shop helm
```
Change *type* to **NodePort** when running on Minishift or **LoadBalancer** for regular OpenShift.

48
OpenShift/deploy.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/sh
# set -x
set -e
# Put your EUM key here
EUM_KEY=""
echo "logging in as developer"
oc login -u developer
oc project robot-shop
# set the environment from the .env file
for VAR in $(egrep '^[A-Z]+=' ../.env)
do
export $VAR
done
# import all the images from docker hub into OpenShift
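# each item is '<service>:<image>': awk joins the two-space indented service name line with the value of its image: line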
for LINE in $(awk '/^ {2}[a-z]+:$/ {printf "%s", $0} /image: / {print $2}' ../docker-compose.yaml)
do
NAME=$(echo "$LINE" | cut -d: -f1)
IMAGE=$(echo "$LINE" | cut -d: -f2-)
FULL_IMAGE=$(eval "echo $IMAGE")
echo "NAME $NAME"
echo "importing $FULL_IMAGE"
oc import-image $FULL_IMAGE --from $FULL_IMAGE --confirm
# a bit of a hack but appears to work
BASE=$(basename $FULL_IMAGE)
oc new-app -i $BASE --name $NAME
done
# Set EUM environment if required
if [ -n "$EUM_KEY" ]
then
oc set env dc/web INSTANA_EUM_KEY=$EUM_KEY
fi
echo " "
echo "Deployment complete"
echo "To make Robot Shop accessible, please run <oc edit svc web>"
echo "Change type from ClusterIP to NodePort on minishift or LoadBalancer on OpenShift"
echo " "

View File

@@ -1,12 +1,28 @@
#!/bin/sh
# Put your EUM key here
EUM_KEY=""
# set -x
# This only works for default local install of minishift
# Need to tweak some settings in OpenShift
echo "logging in as system:admin"
oc login -u system:admin
oc adm new-project robot-shop
# Optionally label the nodes with role infra
for NODE in $(oc get node | awk '{if ($3 == "infra" || $3 == "<none>") print $1}' -)
do
oc label node $NODE 'type=infra'
done
oc adm new-project robot-shop --node-selector='type=infra'
oc adm policy add-role-to-user admin developer -n robot-shop
oc adm policy add-scc-to-user anyuid -z default
oc adm policy add-scc-to-user privileged -z default
oc login -u developer
oc logout
echo " "
echo "OpenShift set up complete, ready to deploy Robot Shop now."
echo " "

View File

@@ -6,7 +6,7 @@ You can get more detailed information from my [blog post](https://www.instana.co
This sample microservice application has been built using these technologies:
- NodeJS ([Express](http://expressjs.com/))
- Java ([Spring Boot](https://spring.io/))
- Java ([Spark Java](http://sparkjava.com/))
- Python ([Flask](http://flask.pocoo.org))
- Golang
- PHP (Apache)
@@ -24,14 +24,6 @@ To see the application performance results in the Instana dashboard, you will fi
## Build from Source
To optionally build from source (you will need a newish version of Docker to do this) use Docker Compose. Optionally edit the `.env` file to specify an alternative image registry and version tag; see the official [documentation](https://docs.docker.com/compose/env-file/) for more information.
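The `.env` file is just simple variable assignments; its contents look something like this (the tag value varies between branches):

```shell
# .env - image registry and tag used by docker-compose
REPO=robotshop
TAG=2.1.0
```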
Downloading the tracing module for Nginx requires a valid Instana agent key. Set this in the environment before starting the build.
```shell
$ export INSTANA_AGENT_KEY="<your agent key>"
```
Now build all the images.
```shell
$ docker-compose build
```
@@ -78,18 +70,57 @@ You can run Kubernetes locally using [minikube](https://github.com/kubernetes/mi
The Docker container images are all available on [Docker Hub](https://hub.docker.com/u/robotshop/).
Install Stan's Robot Shop to your Kubernetes cluster using the [Helm](K8s/helm/README.md) chart.
Install Stan's Robot Shop to your Kubernetes cluster using the helm chart.
To deploy the Instana agent to Kubernetes, just use the [helm](https://github.com/instana/helm-charts) chart.
```shell
$ cd K8s/helm
$ helm install --name robot-shop --namespace robot-shop .
```
There are some customisations that can be made; see the [README](K8s/helm/README.md).
To deploy the Instana agent to Kubernetes, just use the [helm](https://hub.helm.sh/charts/stable/instana-agent) chart.
```shell
$ helm install --name instana-agent --namespace instana-agent \
--set agent.key=INSTANA_AGENT_KEY \
--set agent.endpointHost=HOST \
--set agent.endpointPort=PORT \
--set zone.name=CLUSTER_NAME \
stable/instana-agent
```
If you are having difficulties getting helm running with your K8s install, it is most likely due to RBAC; most K8s installs now have RBAC enabled by default. Therefore helm requires a [service account](https://github.com/helm/helm/blob/master/docs/rbac.md) with permission to make changes to the cluster.
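Because the chart commands above use Helm 2 syntax (`helm install --name`), one common fix is to give Tiller a service account. This is only a sketch and assumes a cluster-admin binding is acceptable for a test cluster:

```shell
# create a service account for Tiller and (re)initialise helm with it
$ kubectl -n kube-system create serviceaccount tiller
$ kubectl create clusterrolebinding tiller \
    --clusterrole=cluster-admin \
    --serviceaccount=kube-system:tiller
$ helm init --service-account tiller --upgrade
```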
## Accessing the Store
If you are running the store locally via *docker-compose up*, the store front is available on localhost port 8080: [http://localhost:8080](http://localhost:8080/)
If you are running the store on Kubernetes via minikube, find the IP address of minikube and the node port of the web service.
If you are running the store on Kubernetes via minikube, make the store front accessible by editing the *web* service definition: change the type to *NodePort* and add a port entry *nodePort: 30080*.
```shell
$ kubectl -n robot-shop edit service web
```
Snippet
```yaml
spec:
ports:
- name: "8080"
port: 8080
protocol: TCP
targetPort: 8080
nodePort: 30080
selector:
service: web
sessionAffinity: None
type: NodePort
```
The store front is then available on the minikube IP address at port 30080. To find the IP address of your minikube instance:
```shell
$ minikube ip
$ kubectl get svc web
```
If you are using a cloud Kubernetes / OpenShift / Mesosphere service, it will be available on the load balancer of that system.
@@ -97,15 +128,16 @@ If you are using a cloud Kubernetes / Openshift / Mesosphere then it will be ava
## Load Generation
A separate load generation utility is provided in the `load-gen` directory. This is not automatically run when the application is started. The load generator is built with Python and [Locust](https://locust.io). The `build.sh` script builds the Docker image, optionally taking *push* as the first argument to also push the image to the registry. The registry and tag settings are loaded from the `.env` file in the parent directory. The script `load-gen.sh` runs the image; it takes a number of command line arguments. You could run the container inside an orchestration system (K8s) as well if you want to; an example descriptor is provided in the K8s directory. For more details see the [README](load-gen/README.md) in the load-gen directory.
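For example, to build the load generator image and push it to the registry configured in `.env` (other `load-gen.sh` options are covered in that README):

```shell
$ cd load-gen
$ ./build.sh push
```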
## Website Monitoring / End-User Monitoring
## End User Monitoring
### Docker Compose
To enable Website Monitoring / End-User Monitoring (EUM) see the official [documentation](https://docs.instana.io/website_monitoring/) for how to create a configuration. There is no need to inject the JavaScript fragment into the page; this will be handled automatically. Just make a note of the unique key and set the environment variable `INSTANA_EUM_KEY` and `INSTANA_EUM_REPORTING_URL` for the web image within `docker-compose.yaml`.
To enable End User Monitoring (EUM) see the official [documentation](https://docs.instana.io/products/website_monitoring/) for how to create a configuration. There is no need to inject the JavaScript fragment into the page; this will be handled automatically. Just make a note of the unique key and set the environment variable INSTANA_EUM_KEY for the web image; see `docker-compose.yaml` for an example.
If you are running the Instana backend on premise, you will also need to set the Reporting URL to your local instance. Set the environment variable INSTANA_EUM_REPORTING_URL as above. See the Instana EUM API [reference](https://docs.instana.io/products/website_monitoring/api/#api-structure).
### Kubernetes
The Helm chart for installing Stan's Robot Shop supports setting the key and endpoint url required for website monitoring, see the [README](K8s/helm/README.md).
The Helm chart for installing Stan's Robot Shop supports setting the key and endpoint url for EUM, see the [README](K8s/helm/README.md).
## Prometheus

571
aws-ecs-ec2/cluster.yaml Normal file
View File

@@ -0,0 +1,571 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: A stack for deploying containerized applications onto a cluster of EC2
hosts using Elastic Container Service. This stack runs containers on
hosts that are in a private VPC subnet. Outbound network traffic from the
hosts must go out through a NAT gateway. There are two load balancers, one
inside the public subnet, which can be used to send traffic to the
containers in the private subnet, and one in the private subnet, which can
be used for private internal traffic between internal services.
Parameters:
DesiredCapacity:
Type: Number
Default: '3'
Description: Number of EC2 instances to launch in your ECS cluster.
MaxSize:
Type: Number
Default: '6'
Description: Maximum number of EC2 instances that can be launched in your ECS cluster.
ECSAMI:
Description: AMI ID
Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id
InstanceType:
Description: EC2 instance type
Type: String
Default: c4.xlarge
AllowedValues: [t2.micro, t2.small, t2.medium, t2.large, m3.medium, m3.large,
m3.xlarge, m3.2xlarge, m4.large, m4.xlarge, m4.2xlarge, m4.4xlarge, m4.10xlarge,
c4.large, c4.xlarge, c4.2xlarge, c4.4xlarge, c4.8xlarge, c3.large, c3.xlarge,
c3.2xlarge, c3.4xlarge, c3.8xlarge, r3.large, r3.xlarge, r3.2xlarge, r3.4xlarge,
r3.8xlarge, i2.xlarge, i2.2xlarge, i2.4xlarge, i2.8xlarge]
ConstraintDescription: Please choose a valid instance type.
Mappings:
# Hard values for the subnet masks. These masks define
# the range of internal IP addresses that can be assigned.
# The VPC can have all IP's from 10.0.0.0 to 10.0.255.255
# There are four subnets which cover the ranges:
#
# 10.0.0.0 - 10.0.0.255
# 10.0.1.0 - 10.0.1.255
# 10.0.2.0 - 10.0.2.255
# 10.0.3.0 - 10.0.3.255
#
# If you need more IP addresses (perhaps you have so many
# instances that you run out) then you can customize these
# ranges to add more
SubnetConfig:
VPC:
CIDR: '10.0.0.0/16'
PublicOne:
CIDR: '10.0.0.0/24'
PublicTwo:
CIDR: '10.0.1.0/24'
PrivateOne:
CIDR: '10.0.2.0/24'
PrivateTwo:
CIDR: '10.0.3.0/24'
Resources:
# VPC in which containers will be networked.
# It has two public subnets, and two private subnets.
# We distribute the subnets across the first two Availability Zones
# in the region, for high availability.
VPC:
Type: AWS::EC2::VPC
Properties:
EnableDnsSupport: true
EnableDnsHostnames: true
CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR']
# Two public subnets, where containers can have public IP addresses
PublicSubnetOne:
Type: AWS::EC2::Subnet
Properties:
AvailabilityZone:
Fn::Select:
- 0
- Fn::GetAZs: {Ref: 'AWS::Region'}
VpcId: !Ref 'VPC'
CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR']
MapPublicIpOnLaunch: true
PublicSubnetTwo:
Type: AWS::EC2::Subnet
Properties:
AvailabilityZone:
Fn::Select:
- 1
- Fn::GetAZs: {Ref: 'AWS::Region'}
VpcId: !Ref 'VPC'
CidrBlock: !FindInMap ['SubnetConfig', 'PublicTwo', 'CIDR']
MapPublicIpOnLaunch: true
# Two private subnets where containers will only have private
# IP addresses, and will only be reachable by other members of the
# VPC
PrivateSubnetOne:
Type: AWS::EC2::Subnet
Properties:
AvailabilityZone:
Fn::Select:
- 0
- Fn::GetAZs: {Ref: 'AWS::Region'}
VpcId: !Ref 'VPC'
CidrBlock: !FindInMap ['SubnetConfig', 'PrivateOne', 'CIDR']
PrivateSubnetTwo:
Type: AWS::EC2::Subnet
Properties:
AvailabilityZone:
Fn::Select:
- 1
- Fn::GetAZs: {Ref: 'AWS::Region'}
VpcId: !Ref 'VPC'
CidrBlock: !FindInMap ['SubnetConfig', 'PrivateTwo', 'CIDR']
# Setup networking resources for the public subnets. Containers
# in the public subnets have public IP addresses and the routing table
# sends network traffic via the internet gateway.
InternetGateway:
Type: AWS::EC2::InternetGateway
GatewayAttachement:
Type: AWS::EC2::VPCGatewayAttachment
Properties:
VpcId: !Ref 'VPC'
InternetGatewayId: !Ref 'InternetGateway'
PublicRouteTable:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref 'VPC'
PublicRoute:
Type: AWS::EC2::Route
DependsOn: GatewayAttachement
Properties:
RouteTableId: !Ref 'PublicRouteTable'
DestinationCidrBlock: '0.0.0.0/0'
GatewayId: !Ref 'InternetGateway'
PublicSubnetOneRouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
SubnetId: !Ref PublicSubnetOne
RouteTableId: !Ref PublicRouteTable
PublicSubnetTwoRouteTableAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
SubnetId: !Ref PublicSubnetTwo
RouteTableId: !Ref PublicRouteTable
# Setup networking resources for the private subnets. Containers
# in these subnets have only private IP addresses, and must use a NAT
# gateway to talk to the internet. We launch two NAT gateways, one for
# each private subnet.
NatGatewayOneAttachment:
Type: AWS::EC2::EIP
DependsOn: GatewayAttachement
Properties:
Domain: vpc
NatGatewayTwoAttachment:
Type: AWS::EC2::EIP
DependsOn: GatewayAttachement
Properties:
Domain: vpc
NatGatewayOne:
Type: AWS::EC2::NatGateway
Properties:
AllocationId: !GetAtt NatGatewayOneAttachment.AllocationId
SubnetId: !Ref PublicSubnetOne
NatGatewayTwo:
Type: AWS::EC2::NatGateway
Properties:
AllocationId: !GetAtt NatGatewayTwoAttachment.AllocationId
SubnetId: !Ref PublicSubnetTwo
PrivateRouteTableOne:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref 'VPC'
PrivateRouteOne:
Type: AWS::EC2::Route
Properties:
RouteTableId: !Ref PrivateRouteTableOne
DestinationCidrBlock: 0.0.0.0/0
NatGatewayId: !Ref NatGatewayOne
PrivateRouteTableOneAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PrivateRouteTableOne
SubnetId: !Ref PrivateSubnetOne
PrivateRouteTableTwo:
Type: AWS::EC2::RouteTable
Properties:
VpcId: !Ref 'VPC'
PrivateRouteTwo:
Type: AWS::EC2::Route
Properties:
RouteTableId: !Ref PrivateRouteTableTwo
DestinationCidrBlock: 0.0.0.0/0
NatGatewayId: !Ref NatGatewayTwo
PrivateRouteTableTwoAssociation:
Type: AWS::EC2::SubnetRouteTableAssociation
Properties:
RouteTableId: !Ref PrivateRouteTableTwo
SubnetId: !Ref PrivateSubnetTwo
# OPTIONAL: VPC Endpoint for DynamoDB
# If a container needs to access DynamoDB this allows a container in the private subnet
# to talk to DynamoDB directly without needing to go via the NAT gateway. This reduces
# the amount of bandwidth through the gateway, meaning that the gateway is free to serve
# your other traffic.
DynamoDBEndpoint:
Type: AWS::EC2::VPCEndpoint
Properties:
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Action: "*"
Principal: "*"
Resource: "*"
RouteTableIds:
- !Ref 'PrivateRouteTableOne'
- !Ref 'PrivateRouteTableTwo'
ServiceName: !Join [ "", [ "com.amazonaws.", { "Ref": "AWS::Region" }, ".dynamodb" ] ]
VpcId: !Ref 'VPC'
# ECS Resources
ECSCluster:
Type: AWS::ECS::Cluster
# A security group for the EC2 hosts that will run the containers.
# Two rules, allowing network traffic from a public facing load
# balancer and from other hosts in the security group.
#
# Remove any of the following ingress rules that are not needed.
# If you want to make direct requests to a container using its
# public IP address you'll need to add a security group rule
# to allow traffic from all IP addresses.
EcsHostSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Access to the ECS hosts that run containers
VpcId: !Ref 'VPC'
EcsSecurityGroupIngressFromPublicALB:
Type: AWS::EC2::SecurityGroupIngress
Properties:
Description: Ingress from the public ALB
GroupId: !Ref 'EcsHostSecurityGroup'
IpProtocol: -1
SourceSecurityGroupId: !Ref 'PublicLoadBalancerSG'
EcsSecurityGroupIngressFromPrivateALB:
Type: AWS::EC2::SecurityGroupIngress
Properties:
Description: Ingress from the private ALB
GroupId: !Ref 'EcsHostSecurityGroup'
IpProtocol: -1
SourceSecurityGroupId: !Ref 'PrivateLoadBalancerSG'
EcsSecurityGroupIngressFromSelf:
Type: AWS::EC2::SecurityGroupIngress
Properties:
Description: Ingress from other containers in the same security group
GroupId: !Ref 'EcsHostSecurityGroup'
IpProtocol: -1
SourceSecurityGroupId: !Ref 'EcsHostSecurityGroup'
# Autoscaling group. This launches the actual EC2 instances that will register
# themselves as members of the cluster, and run the docker containers.
ECSAutoScalingGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
VPCZoneIdentifier:
- !Ref PrivateSubnetOne
- !Ref PrivateSubnetTwo
LaunchConfigurationName: !Ref 'ContainerInstances'
MinSize: '1'
MaxSize: !Ref 'MaxSize'
DesiredCapacity: !Ref 'DesiredCapacity'
CreationPolicy:
ResourceSignal:
Timeout: PT15M
UpdatePolicy:
AutoScalingReplacingUpdate:
WillReplace: 'true'
ContainerInstances:
Type: AWS::AutoScaling::LaunchConfiguration
Properties:
ImageId: !Ref 'ECSAMI'
SecurityGroups: [!Ref 'EcsHostSecurityGroup']
InstanceType: !Ref 'InstanceType'
IamInstanceProfile: !Ref 'EC2InstanceProfile'
UserData:
Fn::Base64: !Sub |
#!/bin/bash -xe
echo ECS_CLUSTER=${ECSCluster} >> /etc/ecs/ecs.config
yum install -y aws-cfn-bootstrap curl
/opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource ECSAutoScalingGroup --region ${AWS::Region}
AutoscalingRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: [application-autoscaling.amazonaws.com]
Action: ['sts:AssumeRole']
Path: /
Policies:
- PolicyName: service-autoscaling
PolicyDocument:
Statement:
- Effect: Allow
Action:
- 'application-autoscaling:*'
- 'cloudwatch:DescribeAlarms'
- 'cloudwatch:PutMetricAlarm'
- 'ecs:DescribeServices'
- 'ecs:UpdateService'
Resource: '*'
EC2InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: /
Roles: [!Ref 'EC2Role']
# Role for the EC2 hosts. This allows the ECS agent on the EC2 hosts
# to communicate with the ECS control plane, as well as download the docker
# images from ECR to run on your host.
EC2Role:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: [ec2.amazonaws.com]
Action: ['sts:AssumeRole']
Path: /
Policies:
- PolicyName: ecs-service
PolicyDocument:
Statement:
- Effect: Allow
Action:
- 'ecs:CreateCluster'
- 'ecs:DeregisterContainerInstance'
- 'ecs:DiscoverPollEndpoint'
- 'ecs:Poll'
- 'ecs:RegisterContainerInstance'
- 'ecs:StartTelemetrySession'
- 'ecs:Submit*'
- 'logs:CreateLogStream'
- 'logs:PutLogEvents'
- 'ecr:GetAuthorizationToken'
- 'ecr:BatchGetImage'
- 'ecr:GetDownloadUrlForLayer'
Resource: '*'
# Load balancers for getting traffic to containers.
# This sample template creates two load balancers:
#
# - One public load balancer, hosted in public subnets that is accessible
# to the public, and is intended to route traffic to one or more public
# facing services.
# - One private load balancer, hosted in private subnets, that only
# accepts traffic from other containers in the cluster, and is
# intended for private services that should not be accessed directly
# by the public.
# A public facing load balancer, this is used for accepting traffic from the public
# internet and directing it to public facing microservices
PublicLoadBalancerSG:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Access to the public facing load balancer
VpcId: !Ref 'VPC'
SecurityGroupIngress:
# Allow access to ALB from anywhere on the internet
- CidrIp: 0.0.0.0/0
IpProtocol: -1
PublicLoadBalancer:
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
Properties:
Scheme: internet-facing
LoadBalancerAttributes:
- Key: idle_timeout.timeout_seconds
Value: '30'
Subnets:
# The load balancer is placed into the public subnets, so that traffic
# from the internet can reach the load balancer directly via the internet gateway
- !Ref PublicSubnetOne
- !Ref PublicSubnetTwo
SecurityGroups: [!Ref 'PublicLoadBalancerSG']
# A dummy target group is used to setup the ALB to just drop traffic
# initially, before any real service target groups have been added.
DummyTargetGroupPublic:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
HealthCheckIntervalSeconds: 6
HealthCheckPath: /
HealthCheckProtocol: HTTP
HealthCheckTimeoutSeconds: 5
HealthyThresholdCount: 2
Name: !Join ['-', [!Ref 'AWS::StackName', 'drop-1']]
Port: 80
Protocol: HTTP
UnhealthyThresholdCount: 2
VpcId: !Ref 'VPC'
PublicLoadBalancerListener:
Type: AWS::ElasticLoadBalancingV2::Listener
DependsOn:
- PublicLoadBalancer
Properties:
DefaultActions:
- TargetGroupArn: !Ref 'DummyTargetGroupPublic'
Type: 'forward'
LoadBalancerArn: !Ref 'PublicLoadBalancer'
Port: 80
Protocol: HTTP
# An internal load balancer, this would be used for a service that is not
# directly accessible to the public, but instead should only receive traffic
# from your other services.
PrivateLoadBalancerSG:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Access to the internal load balancer
VpcId: !Ref 'VPC'
PrivateLoadBalancerIngressFromECS:
Type: AWS::EC2::SecurityGroupIngress
Properties:
Description: Only accept traffic from a container in the container host security group
GroupId: !Ref 'PrivateLoadBalancerSG'
IpProtocol: -1
SourceSecurityGroupId: !Ref 'EcsHostSecurityGroup'
PrivateLoadBalancer:
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
Properties:
Scheme: internal
LoadBalancerAttributes:
- Key: idle_timeout.timeout_seconds
Value: '30'
Subnets:
# This load balancer is put into the private subnet, so that there is no
# route for the public to even be able to access the private load balancer.
- !Ref PrivateSubnetOne
- !Ref PrivateSubnetTwo
SecurityGroups: [!Ref 'PrivateLoadBalancerSG']
# This dummy target group is used to setup the ALB to just drop traffic
# initially, before any real service target groups have been added.
DummyTargetGroupPrivate:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
HealthCheckIntervalSeconds: 6
HealthCheckPath: /
HealthCheckProtocol: HTTP
HealthCheckTimeoutSeconds: 5
HealthyThresholdCount: 2
Name: !Join ['-', [!Ref 'AWS::StackName', 'drop-2']]
Port: 80
Protocol: HTTP
UnhealthyThresholdCount: 2
VpcId: !Ref 'VPC'
PrivateLoadBalancerListener:
Type: AWS::ElasticLoadBalancingV2::Listener
DependsOn:
- PrivateLoadBalancer
Properties:
DefaultActions:
- TargetGroupArn: !Ref 'DummyTargetGroupPrivate'
Type: 'forward'
LoadBalancerArn: !Ref 'PrivateLoadBalancer'
Port: 80
Protocol: HTTP
# This is an IAM role which authorizes ECS to manage resources on your
# account on your behalf, such as updating your load balancer with the
# details of where your containers are, so that traffic can reach your
# containers.
ECSRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: [ecs.amazonaws.com]
Action: ['sts:AssumeRole']
Path: /
Policies:
- PolicyName: ecs-service
PolicyDocument:
Statement:
- Effect: Allow
Action:
# Rules which allow ECS to attach network interfaces to instances
# on your behalf in order for awsvpc networking mode to work right
- 'ec2:AttachNetworkInterface'
- 'ec2:CreateNetworkInterface'
- 'ec2:CreateNetworkInterfacePermission'
- 'ec2:DeleteNetworkInterface'
- 'ec2:DeleteNetworkInterfacePermission'
- 'ec2:Describe*'
- 'ec2:DetachNetworkInterface'
# Rules which allow ECS to update load balancers on your behalf
# with the information about how to send traffic to your containers
- 'elasticloadbalancing:DeregisterInstancesFromLoadBalancer'
- 'elasticloadbalancing:DeregisterTargets'
- 'elasticloadbalancing:Describe*'
- 'elasticloadbalancing:RegisterInstancesWithLoadBalancer'
- 'elasticloadbalancing:RegisterTargets'
Resource: '*'
# These are the values output by the CloudFormation template. Be careful
# about changing any of them, because they are exported with specific
# names so that the other task related CF templates can use them.
Outputs:
ClusterName:
Description: The name of the ECS cluster
Value: !Ref 'ECSCluster'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'ClusterName' ] ]
InternalUrl:
Description: The url of the internal load balancer
Value: !Join ['', ['http://', !GetAtt 'PrivateLoadBalancer.DNSName']]
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'InternalUrl' ] ]
ExternalUrl:
Description: The url of the external load balancer
Value: !Join ['', ['http://', !GetAtt 'PublicLoadBalancer.DNSName']]
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'ExternalUrl' ] ]
ECSRole:
Description: The ARN of the ECS role
Value: !GetAtt 'ECSRole.Arn'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'ECSRole' ] ]
PublicListener:
Description: The ARN of the public load balancer's Listener
Value: !Ref PublicLoadBalancerListener
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PublicListener' ] ]
PrivateListener:
Description: The ARN of the private load balancer's Listener
Value: !Ref PrivateLoadBalancerListener
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PrivateListener' ] ]
VPCId:
Description: The ID of the VPC that this stack is deployed in
Value: !Ref 'VPC'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'VPCId' ] ]
PublicSubnetOne:
Description: Public subnet one
Value: !Ref 'PublicSubnetOne'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PublicSubnetOne' ] ]
PublicSubnetTwo:
Description: Public subnet two
Value: !Ref 'PublicSubnetTwo'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PublicSubnetTwo' ] ]
PrivateSubnetOne:
Description: Private subnet one
Value: !Ref 'PrivateSubnetOne'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PrivateSubnetOne' ] ]
PrivateSubnetTwo:
Description: Private subnet two
Value: !Ref 'PrivateSubnetTwo'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'PrivateSubnetTwo' ] ]
EcsHostSecurityGroup:
Description: A security group used to allow containers to receive traffic
Value: !Ref 'EcsHostSecurityGroup'
Export:
Name: !Join [ ':', [ !Ref 'AWS::StackName', 'EcsHostSecurityGroup' ] ]
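One way to create this stack is with the AWS CLI. This is a sketch only: the stack name is an assumption, chosen to match the default `StackName` parameter of the services template below, and `CAPABILITY_IAM` is needed because the template creates IAM roles.

```shell
$ aws cloudformation deploy \
    --template-file aws-ecs-ec2/cluster.yaml \
    --stack-name ecs-ec2-robotshop \
    --capabilities CAPABILITY_IAM
```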

View File

@@ -0,0 +1,120 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: Deploy the instana agent on ECS as a daemon service
Parameters:
StackName:
Type: String
Default: production
Description: The name of the parent cluster stack that you created. Necessary
to locate and reference resources created by that stack.
ServiceName:
Type: String
Default: instana-agent
Description: A name for the service
ImageUrl:
Type: String
Default: instana/agent
Description: The url of a docker image that contains the application process that
will handle the traffic for this service
ContainerCpu:
Type: Number
Default: 1024
Description: How much CPU to give the container. 1024 is 1 CPU
ContainerMemory:
Type: Number
Default: 2048
Description: How much memory in megabytes to give the container
Role:
Type: String
Default: ""
Description: (Optional) An IAM role to give the service's containers if the code within needs to
access other AWS resources like S3 buckets, DynamoDB tables, etc
InstanaAgentKey:
Type: String
InstanaAgentEndpoint:
Type: String
Default: "ingress-red-saas.instana.io"
InstanaAgentEndpointPort:
Type: Number
Default: '443'
Conditions:
HasCustomRole: !Not [ !Equals [!Ref 'Role', ''] ]
Resources:
# The task definition. This is a simple metadata description of what
# container to run, and what resource requirements it has.
TaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'ServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
NetworkMode: host
IpcMode: host
PidMode: host
Volumes:
- Host:
SourcePath: "/var/run"
Name: "host-var-run"
- Host:
SourcePath: "/run"
Name: "host-run"
- Host:
SourcePath: "/dev"
Name: "host-dev"
- Host:
SourcePath: "/sys"
Name: "host-sys"
- Host:
SourcePath: "/var/log"
Name: "host-var-log"
ContainerDefinitions:
- Name: !Ref 'ServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: !Ref 'ImageUrl'
Privileged: true
MountPoints:
- ContainerPath: /var/run
SourceVolume: "host-var-run"
ReadOnly: false
- ContainerPath: /run
SourceVolume: "host-run"
ReadOnly: false
- ContainerPath: /dev
SourceVolume: "host-dev"
ReadOnly: false
- ContainerPath: /sys
SourceVolume: "host-sys"
ReadOnly: false
- ContainerPath: /var/log
SourceVolume: "host-var-log"
ReadOnly: false
Environment:
- Name: INSTANA_ZONE
Value: !Ref 'StackName'
- Name: INSTANA_AGENT_ENDPOINT
Value: !Ref 'InstanaAgentEndpoint'
- Name: INSTANA_AGENT_ENDPOINT_PORT
Value: !Ref 'InstanaAgentEndpointPort'
- Name: INSTANA_AGENT_KEY
Value: !Ref 'InstanaAgentKey'
# The service. The service is a resource which allows you to run multiple
# copies of a type of task, and gather up their logs and metrics, as well
# as monitor the number of running tasks and replace any that have crashed
Service:
Type: AWS::ECS::Service
Properties:
ServiceName: !Ref 'ServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
TaskDefinition: !Ref 'TaskDefinition'
SchedulingStrategy: DAEMON
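The agent daemon could then be deployed against that cluster. Again a sketch: the template file name and agent stack name are assumptions, and the agent key is a placeholder you must supply; `StackName` must match the cluster stack created above.

```shell
# template file name and stack name below are assumed; adjust to your checkout
$ aws cloudformation deploy \
    --template-file aws-ecs-ec2/instana-agent.yaml \
    --stack-name instana-agent \
    --parameter-overrides \
        StackName=ecs-ec2-robotshop \
        InstanaAgentKey=<your agent key>
```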

752
aws-ecs-ec2/services.yaml Normal file
View File

@@ -0,0 +1,752 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: Deploy robot shop services to a given ECS cluster
Parameters:
StackName:
Type: String
Default: ecs-ec2-robotshop
Description: The name of the parent cluster stack that you created. Necessary
to locate and reference resources created by that stack.
WebServiceName:
Type: String
Default: web
Description: The web service name
MongoDbServiceName:
Type: String
Default: mongodb
Description: The mongodb service name
RedisServiceName:
Type: String
Default: redis
Description: The redis service name
RabbitMqServiceName:
Type: String
Default: rabbitmq
Description: The rabbitmq service name
CatalogueServiceName:
Type: String
Default: catalogue
Description: The catalogue service name
UserServiceName:
Type: String
Default: user
Description: The user service name
CartServiceName:
Type: String
Default: cart
Description: The cart service name
MySqlServiceName:
Type: String
Default: mysql
Description: The mysql service name
ShippingServiceName:
Type: String
Default: shipping
Description: The shipping service name
RatingsServiceName:
Type: String
Default: ratings
Description: The ratings service name
PaymentServiceName:
Type: String
Default: payment
Description: The payment service name
DispatchServiceName:
Type: String
Default: dispatch
Description: The dispatch service name
ImageUrl:
Type: String
Default: nginx
Description: The url of a docker image that contains the application process that
will handle the traffic for this service
WebContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
MongoDbContainerPort:
Type: Number
Default: 27017
Description: What port number the application inside the docker container is binding to
RedisContainerPort:
Type: Number
Default: 6379
Description: What port number the application inside the docker container is binding to
RabbitMqContainerPort:
Type: Number
Default: 5672
Description: What port number the application inside the docker container is binding to
CatalogueContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
UserContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
CartContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
MySqlContainerPort:
Type: Number
Default: 3306
Description: What port number the application inside the docker container is binding to
ShippingContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
RatingsContainerPort:
Type: Number
Default: 80
Description: What port number the application inside the docker container is binding to
PaymentContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
DispatchContainerPort:
Type: Number
Default: 8080
Description: What port number the application inside the docker container is binding to
ContainerCpu:
Type: Number
Default: 256
Description: How much CPU to give the container. 1024 is 1 CPU
ContainerMemory:
Type: Number
Default: 512
Description: How much memory in megabytes to give the container
Path:
Type: String
Default: "*"
Description: A path on the public load balancer that this service
should be connected to. Use * to send all load balancer
traffic to this service.
Priority:
Type: Number
Default: 1
Description: The priority for the routing rule added to the load balancer.
This only applies if you have multiple services which have been
assigned to different paths on the load balancer.
DesiredCount:
Type: Number
Default: 2
Description: How many copies of the service task to run
Role:
Type: String
Default: ""
Description: (Optional) An IAM role to give the service's containers if the code within needs to
access other AWS resources like S3 buckets, DynamoDB tables, etc
WebLogGroup:
Type: String
Default: "rs-web"
Conditions:
HasCustomRole: !Not [ !Equals [!Ref 'Role', ''] ]
Resources:
Registry:
Type: AWS::ServiceDiscovery::PrivateDnsNamespace
Properties:
Description: RobotShop registry for ecs
Name: robot-shop
Vpc:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'VPCId']]
# MongoDB service
MongoDbTaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'MongoDbServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'MongoDbServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-mongodb:0.4.17
PortMappings:
- ContainerPort: !Ref 'MongoDbContainerPort'
LogConfiguration:
LogDriver: json-file
MongoDbService:
Type: AWS::ECS::Service
DependsOn: LoadBalancerRule
Properties:
ServiceName: !Ref 'MongoDbServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'MongoDbTaskDefinition'
# Redis service
RedisTaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'RedisServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'RedisServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: redis:4.0.6
PortMappings:
- ContainerPort: !Ref 'RedisContainerPort'
LogConfiguration:
LogDriver: json-file
RedisService:
Type: AWS::ECS::Service
DependsOn: LoadBalancerRule
Properties:
ServiceName: !Ref 'RedisServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'RedisTaskDefinition'
# rabbitmq service
RabbitMqTaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'RabbitMqServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'RabbitMqServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: rabbitmq:3.7-management-alpine
PortMappings:
- ContainerPort: !Ref 'RabbitMqContainerPort'
LogConfiguration:
LogDriver: json-file
RabbitMqService:
Type: AWS::ECS::Service
DependsOn: LoadBalancerRule
Properties:
ServiceName: !Ref 'RabbitMqServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'RabbitMqTaskDefinition'
# catalogue service
CatalogueTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['MongoDbService']
Properties:
Family: !Ref 'CatalogueServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'CatalogueServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-catalogue:0.4.17
PortMappings:
- ContainerPort: !Ref 'CatalogueContainerPort'
LogConfiguration:
LogDriver: json-file
CatalogueService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, CatalogueRegistryService]
Properties:
ServiceName: !Ref 'CatalogueServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'CatalogueServiceName'
ContainerPort: !Ref 'CatalogueContainerPort'
RegistryArn: !GetAtt CatalogueRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'CatalogueTaskDefinition'
CatalogueRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'CatalogueServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# user service
UserTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['MongoDbService', 'RedisService']
Properties:
Family: !Ref 'UserServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'UserServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-user:0.4.17
PortMappings:
- ContainerPort: !Ref 'UserContainerPort'
LogConfiguration:
LogDriver: json-file
UserService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, UserRegistryService]
Properties:
ServiceName: !Ref 'UserServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'UserServiceName'
ContainerPort: !Ref 'UserContainerPort'
RegistryArn: !GetAtt UserRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'UserTaskDefinition'
UserRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'UserServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# cart service
CartTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['RedisService']
Properties:
Family: !Ref 'CartServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'CartServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-cart:0.4.17
PortMappings:
- ContainerPort: !Ref 'CartContainerPort'
LogConfiguration:
LogDriver: json-file
CartService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, CartRegistryService]
Properties:
ServiceName: !Ref 'CartServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'CartServiceName'
ContainerPort: !Ref 'CartContainerPort'
RegistryArn: !GetAtt CartRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'CartTaskDefinition'
CartRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'CartServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# mysql service
MySqlTaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'MySqlServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'MySqlServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
LinuxParameters:
Capabilities:
Add:
- NET_ADMIN
Image: robotshop/rs-mysql-db:0.4.17
PortMappings:
- ContainerPort: !Ref 'MySqlContainerPort'
LogConfiguration:
LogDriver: json-file
MySqlService:
Type: AWS::ECS::Service
DependsOn: LoadBalancerRule
Properties:
ServiceName: !Ref 'MySqlServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'MySqlTaskDefinition'
# shipping service
ShippingTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['MySqlService']
Properties:
Family: !Ref 'ShippingServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'ShippingServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-shipping:0.4.17
PortMappings:
- ContainerPort: !Ref 'ShippingContainerPort'
LogConfiguration:
LogDriver: json-file
ShippingService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, ShippingRegistryService]
Properties:
ServiceName: !Ref 'ShippingServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'ShippingServiceName'
ContainerPort: !Ref 'ShippingContainerPort'
RegistryArn: !GetAtt ShippingRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'ShippingTaskDefinition'
ShippingRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'ShippingServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# ratings service
RatingsTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['MySqlService']
Properties:
Family: !Ref 'RatingsServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'RatingsServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-ratings:0.4.17
PortMappings:
- ContainerPort: !Ref 'RatingsContainerPort'
LogConfiguration:
LogDriver: json-file
RatingsService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, RatingsRegistryService]
Properties:
ServiceName: !Ref 'RatingsServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'RatingsServiceName'
ContainerPort: !Ref 'RatingsContainerPort'
RegistryArn: !GetAtt RatingsRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'RatingsTaskDefinition'
RatingsRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'RatingsServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# payment service
PaymentTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['RabbitMqService']
Properties:
Family: !Ref 'PaymentServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
NetworkMode: awsvpc
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'PaymentServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-payment:0.4.17
PortMappings:
- ContainerPort: !Ref 'PaymentContainerPort'
LogConfiguration:
LogDriver: json-file
PaymentService:
Type: AWS::ECS::Service
DependsOn: [LoadBalancerRule, PaymentRegistryService]
Properties:
ServiceName: !Ref 'PaymentServiceName'
NetworkConfiguration:
AwsvpcConfiguration:
Subnets:
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetOne']]
- Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PrivateSubnetTwo']]
ServiceRegistries:
- ContainerName: !Ref 'PaymentServiceName'
ContainerPort: !Ref 'PaymentContainerPort'
RegistryArn: !GetAtt PaymentRegistryService.Arn
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'PaymentTaskDefinition'
PaymentRegistryService:
Type: AWS::ServiceDiscovery::Service
DependsOn: Registry
Properties:
Name: !Ref 'PaymentServiceName'
DnsConfig:
NamespaceId: !GetAtt Registry.Id
DnsRecords:
- TTL: 10
Type: SRV
- TTL: 10
Type: A
RoutingPolicy: WEIGHTED
# dispatch service
DispatchTaskDefinition:
Type: AWS::ECS::TaskDefinition
DependsOn: ['RabbitMqService']
Properties:
Family: !Ref 'DispatchServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'DispatchServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-dispatch:0.4.17
PortMappings:
- ContainerPort: !Ref 'DispatchContainerPort'
LogConfiguration:
LogDriver: json-file
DispatchService:
Type: AWS::ECS::Service
DependsOn: LoadBalancerRule
Properties:
ServiceName: !Ref 'DispatchServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DesiredCount: 1
TaskDefinition: !Ref 'DispatchTaskDefinition'
WebTaskDefinition:
Type: AWS::ECS::TaskDefinition
Properties:
Family: !Ref 'WebServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
TaskRoleArn:
Fn::If:
- 'HasCustomRole'
- !Ref 'Role'
- !Ref "AWS::NoValue"
ContainerDefinitions:
- Name: !Ref 'WebServiceName'
Cpu: !Ref 'ContainerCpu'
Memory: !Ref 'ContainerMemory'
Image: robotshop/rs-web:0.4.17
Environment:
- Name: CATALOGUE_HOST
Value: catalogue.robot-shop
- Name: USER_HOST
Value: user.robot-shop
- Name: CART_HOST
Value: cart.robot-shop
- Name: SHIPPING_HOST
Value: shipping.robot-shop
- Name: PAYMENT_HOST
Value: payment.robot-shop
- Name: RATINGS_HOST
Value: ratings.robot-shop
PortMappings:
- ContainerPort: !Ref 'WebContainerPort'
LogConfiguration:
LogDriver: awslogs
Options:
awslogs-create-group: true
awslogs-region: !Ref AWS::Region
awslogs-group: !Ref WebLogGroup
awslogs-stream-prefix: ecs
WebService:
Type: AWS::ECS::Service
DependsOn: ['LoadBalancerRule', 'CatalogueService']
Properties:
ServiceName: !Ref 'WebServiceName'
Cluster:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'ClusterName']]
DeploymentConfiguration:
MaximumPercent: 200
MinimumHealthyPercent: 75
DesiredCount: !Ref 'DesiredCount'
TaskDefinition: !Ref 'WebTaskDefinition'
LoadBalancers:
- ContainerName: !Ref 'WebServiceName'
ContainerPort: !Ref 'WebContainerPort'
TargetGroupArn: !Ref 'WebTargetGroup'
WebTargetGroup:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
HealthCheckIntervalSeconds: 6
HealthCheckPath: /
HealthCheckProtocol: HTTP
HealthCheckTimeoutSeconds: 5
HealthyThresholdCount: 2
Name: !Ref 'WebServiceName'
Port: !Ref 'WebContainerPort'
Protocol: HTTP
UnhealthyThresholdCount: 2
VpcId:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'VPCId']]
LoadBalancerRule:
Type: AWS::ElasticLoadBalancingV2::ListenerRule
Properties:
Actions:
- TargetGroupArn: !Ref 'WebTargetGroup'
Type: 'forward'
Conditions:
- Field: path-pattern
Values: [!Ref 'Path']
ListenerArn:
Fn::ImportValue:
!Join [':', [!Ref 'StackName', 'PublicListener']]
Priority: !Ref 'Priority'

View File

@@ -0,0 +1,8 @@
# Robot-Shop on AWS ECS with Fargate
## Prerequisites
The `ecs-cli` tool has to be on your `$PATH`.
[Read more on installing it](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_CLI_installation.html)
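For example, on Linux the binary can be downloaded straight onto the `$PATH` (a minimal sketch following the linked AWS docs; the download URL is the one those docs give and may change, so adjust it for your platform):

```shell
# download the ecs-cli binary to a directory that is on $PATH (Linux; see the AWS docs for macOS/Windows)
sudo curl -Lo /usr/local/bin/ecs-cli https://amazon-ecs-cli.s3.amazonaws.com/ecs-cli-linux-amd64-latest
sudo chmod +x /usr/local/bin/ecs-cli
# confirm the tool is picked up from $PATH
ecs-cli --version
```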

View File

@@ -1,6 +1,4 @@
FROM node:14
ENV INSTANA_AUTO_PROFILE true
FROM node:10
EXPOSE 8080

View File

@@ -12,11 +12,11 @@
"body-parser": "^1.18.1",
"express": "^4.15.4",
"redis": "^2.8.0",
"request": "^2.88.2",
"request": "^2.83.0",
"pino": "^5.10.8",
"express-pino-logger": "^4.0.0",
"pino-pretty": "^2.5.0",
"@instana/collector": "^1.132.2",
"@instana/collector": "^1.65.0",
"prom-client": "^11.5.3"
}
}

View File

@@ -48,20 +48,6 @@ app.use((req, res, next) => {
next();
});
app.use((req, res, next) => {
let dcs = [
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1"
];
let span = instana.currentSpan();
span.annotate('custom.sdk.tags.datacenter', dcs[Math.floor(Math.random() * dcs.length)]);
next();
});
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());

View File

@@ -1,6 +1,4 @@
FROM node:14
ENV INSTANA_AUTO_PROFILE true
FROM node:10
EXPOSE 8080

View File

@@ -15,6 +15,6 @@
"pino": "^5.10.8",
"express-pino-logger": "^4.0.0",
"pino-pretty": "^2.5.0",
"@instana/collector": "^1.132.2"
"@instana/collector": "^1.90.0"
}
}

View File

@@ -38,20 +38,6 @@ app.use((req, res, next) => {
next();
});
app.use((req, res, next) => {
let dcs = [
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1"
];
let span = instana.currentSpan();
span.annotate('custom.sdk.tags.datacenter', dcs[Math.floor(Math.random() * dcs.length)]);
next();
});
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());

View File

@@ -1,10 +1,16 @@
FROM golang:1.17
FROM golang:1.12.7
WORKDIR /go/src/app
ENV GOPATH=/go
COPY *.go .
RUN apt-get update && apt-get install -y go-dep
RUN go mod init dispatch && go get
RUN go install
WORKDIR /go/src/github.com/instana/dispatch
CMD dispatch
COPY src/ /go/src/github.com/instana/dispatch
RUN dep init && dep ensure
RUN go build -o bin/gorcv
# TODO stage this build
CMD bin/gorcv

View File

@@ -1,233 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"time"
"github.com/instana/go-sensor"
ot "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/streadway/amqp"
)
const (
Service = "dispatch"
)
var (
amqpUri string
rabbitChan *amqp.Channel
rabbitCloseError chan *amqp.Error
rabbitReady chan bool
errorPercent int
dataCenters = []string{
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1",
}
)
func connectToRabbitMQ(uri string) *amqp.Connection {
for {
conn, err := amqp.Dial(uri)
if err == nil {
return conn
}
log.Println(err)
log.Printf("Reconnecting to %s\n", uri)
time.Sleep(1 * time.Second)
}
}
func rabbitConnector(uri string) {
var rabbitErr *amqp.Error
for {
rabbitErr = <-rabbitCloseError
if rabbitErr == nil {
return
}
log.Printf("Connecting to %s\n", amqpUri)
rabbitConn := connectToRabbitMQ(uri)
rabbitConn.NotifyClose(rabbitCloseError)
var err error
// create mappings here
rabbitChan, err = rabbitConn.Channel()
failOnError(err, "Failed to create channel")
// create exchange
err = rabbitChan.ExchangeDeclare("robot-shop", "direct", true, false, false, false, nil)
failOnError(err, "Failed to create exchange")
// create queue
queue, err := rabbitChan.QueueDeclare("orders", true, false, false, false, nil)
failOnError(err, "Failed to create queue")
// bind queue to exchange
err = rabbitChan.QueueBind(queue.Name, "orders", "robot-shop", false, nil)
failOnError(err, "Failed to bind queue")
// signal ready
rabbitReady <- true
}
}
func failOnError(err error, msg string) {
if err != nil {
log.Fatalf("%s : %s", msg, err)
}
}
func getOrderId(order []byte) string {
id := "unknown"
var f interface{}
err := json.Unmarshal(order, &f)
if err == nil {
m := f.(map[string]interface{})
id = m["orderid"].(string)
}
return id
}
func createSpan(headers map[string]interface{}, order string) {
// headers is map[string]interface{}
// carrier is map[string]string
carrier := make(ot.TextMapCarrier)
// convert by copying k, v
for k, v := range headers {
carrier[k] = v.(string)
}
// get the order id
log.Printf("order %s\n", order)
// opentracing
var span ot.Span
tracer := ot.GlobalTracer()
spanContext, err := tracer.Extract(ot.HTTPHeaders, carrier)
if err == nil {
log.Println("Creating child span")
// create child span
span = tracer.StartSpan("getOrder", ot.ChildOf(spanContext))
fakeDataCenter := dataCenters[rand.Intn(len(dataCenters))]
span.SetTag("datacenter", fakeDataCenter)
} else {
log.Println(err)
log.Println("Failed to get context from headers")
log.Println("Creating root span")
// create root span
span = tracer.StartSpan("getOrder")
}
span.SetTag(string(ext.SpanKind), ext.SpanKindConsumerEnum)
span.SetTag(string(ext.MessageBusDestination), "robot-shop")
span.SetTag("exchange", "robot-shop")
span.SetTag("sort", "consume")
span.SetTag("address", "rabbitmq")
span.SetTag("key", "orders")
span.LogFields(otlog.String("orderid", order))
defer span.Finish()
time.Sleep(time.Duration(42+rand.Int63n(42)) * time.Millisecond)
if rand.Intn(100) < errorPercent {
span.SetTag("error", true)
span.LogFields(
otlog.String("error.kind", "Exception"),
otlog.String("message", "Failed to dispatch to SOP"))
log.Println("Span tagged with error")
}
processSale(span)
}
func processSale(parentSpan ot.Span) {
tracer := ot.GlobalTracer()
span := tracer.StartSpan("processSale", ot.ChildOf(parentSpan.Context()))
defer span.Finish()
span.SetTag(string(ext.SpanKind), "intermediate")
span.LogFields(otlog.String("info", "Order sent for processing"))
time.Sleep(time.Duration(42+rand.Int63n(42)) * time.Millisecond)
}
func main() {
rand.Seed(time.Now().Unix())
// Instana tracing
ot.InitGlobalTracer(instana.NewTracerWithOptions(&instana.Options{
Service: Service,
LogLevel: instana.Info,
EnableAutoProfile: true,
}))
// Init amqpUri
// get host from environment
amqpHost, ok := os.LookupEnv("AMQP_HOST")
if !ok {
amqpHost = "rabbitmq"
}
amqpUri = fmt.Sprintf("amqp://guest:guest@%s:5672/", amqpHost)
// get error threshold from environment
errorPercent = 0
epct, ok := os.LookupEnv("DISPATCH_ERROR_PERCENT")
if ok {
epcti, err := strconv.Atoi(epct)
if err == nil {
if epcti > 100 {
epcti = 100
}
if epcti < 0 {
epcti = 0
}
errorPercent = epcti
}
}
log.Printf("Error Percent is %d\n", errorPercent)
// MQ error channel
rabbitCloseError = make(chan *amqp.Error)
// MQ ready channel
rabbitReady = make(chan bool)
go rabbitConnector(amqpUri)
rabbitCloseError <- amqp.ErrClosed
go func() {
for {
// wait for rabbit to be ready
ready := <-rabbitReady
log.Printf("Rabbit MQ ready %v\n", ready)
// subscribe to bound queue
msgs, err := rabbitChan.Consume("orders", "", true, false, false, false, nil)
failOnError(err, "Failed to consume")
for d := range msgs {
log.Printf("Order %s\n", d.Body)
log.Printf("Headers %v\n", d.Headers)
id := getOrderId(d.Body)
go createSpan(d.Headers, id)
}
}
}()
log.Println("Waiting for messages")
select {}
}

219
dispatch/src/main.go Normal file
View File

@@ -0,0 +1,219 @@
package main
import (
"fmt"
"log"
"time"
"os"
"math/rand"
"strconv"
"encoding/json"
"github.com/streadway/amqp"
"github.com/instana/go-sensor"
ot "github.com/opentracing/opentracing-go"
ext "github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
)
const (
Service = "dispatch"
)
var (
amqpUri string
rabbitChan *amqp.Channel
rabbitCloseError chan *amqp.Error
rabbitReady chan bool
errorPercent int
)
func connectToRabbitMQ(uri string) *amqp.Connection {
for {
conn, err := amqp.Dial(uri)
if err == nil {
return conn
}
log.Println(err)
log.Printf("Reconnecting to %s\n", uri)
time.Sleep(1 * time.Second)
}
}
func rabbitConnector(uri string) {
var rabbitErr *amqp.Error
for {
rabbitErr = <-rabbitCloseError
if rabbitErr != nil {
log.Printf("Connecting to %s\n", amqpUri)
rabbitConn := connectToRabbitMQ(uri)
rabbitConn.NotifyClose(rabbitCloseError)
var err error
// create mappings here
rabbitChan, err = rabbitConn.Channel()
failOnError(err, "Failed to create channel")
// create exchange
err = rabbitChan.ExchangeDeclare("robot-shop", "direct", true, false, false, false, nil)
failOnError(err, "Failed to create exchange")
// create queue
queue, err := rabbitChan.QueueDeclare("orders", true, false, false, false, nil)
failOnError(err, "Failed to create queue")
// bind queue to exchange
err = rabbitChan.QueueBind(queue.Name, "orders", "robot-shop", false, nil)
failOnError(err, "Failed to bind queue")
// signal ready
rabbitReady <- true
}
}
}
func failOnError(err error, msg string) {
if err != nil {
log.Fatalf("$s : %s", msg, err)
panic(fmt.Sprintf("%s : %s", msg, err))
}
}
func getOrderId(order []byte) string {
id := "unknown"
var f interface{}
err := json.Unmarshal(order, &f)
if err == nil {
m := f.(map[string]interface{})
id = m["orderid"].(string)
}
return id
}
func createSpan(headers map[string]interface{}, order string) {
// headers is map[string]interface{}
// carrier is map[string]string
carrier := make(ot.TextMapCarrier)
// convert by copying k, v
for k, v := range headers {
carrier[k] = v.(string)
}
// get the order id
log.Printf("order %s\n", order)
// opentracing
var span ot.Span
tracer := ot.GlobalTracer()
spanContext, err := tracer.Extract(ot.HTTPHeaders, carrier)
if err == nil {
log.Println("Creating child span")
// create child span
span = tracer.StartSpan("getOrder", ot.ChildOf(spanContext))
} else {
log.Println(err)
log.Println("Failed to get context from headers")
log.Println("Creating root span")
// create root span
span = tracer.StartSpan("getOrder")
}
span.SetTag(string(ext.SpanKind), ext.SpanKindConsumerEnum)
span.SetTag(string(ext.MessageBusDestination), "robot-shop")
span.SetTag("exchange", "robot-shop")
span.SetTag("sort", "consume")
span.SetTag("address", "rabbitmq")
span.SetTag("key", "orders")
span.LogFields(otlog.String("orderid", order))
defer span.Finish()
time.Sleep(time.Duration(42 + rand.Int63n(42)) * time.Millisecond)
if rand.Intn(100) < errorPercent {
span.SetTag("error", true)
span.LogFields(
otlog.String("error.kind", "Exception"),
otlog.String("message", "Failed to dispatch to SOP"))
log.Println("Span tagged with error")
}
processSale(span)
}
func processSale(parentSpan ot.Span) {
tracer := ot.GlobalTracer()
span := tracer.StartSpan("processSale", ot.ChildOf(parentSpan.Context()))
defer span.Finish()
span.SetTag(string(ext.SpanKind), "intermediate")
span.LogFields(otlog.String("info", "Order sent for processing"))
time.Sleep(time.Duration(42 + rand.Int63n(42)) * time.Millisecond)
}
func main() {
// Instana tracing
ot.InitGlobalTracer(instana.NewTracerWithOptions(&instana.Options{
Service: Service,
LogLevel: instana.Info}))
// Init amqpUri
// get host from environment
amqpHost, ok := os.LookupEnv("AMQP_HOST")
if !ok {
amqpHost = "rabbitmq"
}
amqpUri = fmt.Sprintf("amqp://guest:guest@%s:5672/", amqpHost)
// get error threshold from environment
errorPercent = 0
epct, ok := os.LookupEnv("DISPATCH_ERROR_PERCENT")
if ok {
epcti, err := strconv.Atoi(epct)
if err == nil {
if epcti > 100 {
epcti = 100
}
if epcti < 0 {
epcti = 0
}
errorPercent = epcti
}
}
log.Printf("Error Percent is %d\n", errorPercent)
// MQ error channel
rabbitCloseError = make(chan *amqp.Error)
// MQ ready channel
rabbitReady = make(chan bool)
go rabbitConnector(amqpUri)
rabbitCloseError <- amqp.ErrClosed
go func() {
for {
// wait for rabbit to be ready
ready := <-rabbitReady
log.Printf("Rabbit MQ ready %v\n", ready)
// subscribe to bound queue
msgs, err := rabbitChan.Consume("orders", "", true, false, false, false, nil)
failOnError(err, "Failed to consume")
for d := range msgs {
log.Printf("Order %s\n", d.Body)
log.Printf("Headers %v\n", d.Headers)
id := getOrderId(d.Body)
go createSpan(d.Headers, id)
}
}
}()
log.Println("Waiting for messages")
forever := make(chan bool)
<-forever
}

View File

@@ -10,8 +10,3 @@ services:
- robot-shop
depends_on:
- web
logging: &logging
driver: "json-file"
options:
max-size: "25m"
max-file: "2"

View File

@@ -6,23 +6,14 @@ services:
image: ${REPO}/rs-mongodb:${TAG}
networks:
- robot-shop
logging: &logging
driver: "json-file"
options:
max-size: "25m"
max-file: "2"
redis:
image: redis:6.2-alpine
image: redis:4.0.6
networks:
- robot-shop
logging:
<<: *logging
rabbitmq:
image: rabbitmq:3.8-management-alpine
image: rabbitmq:3.7-management-alpine
networks:
- robot-shop
logging:
<<: *logging
catalogue:
build:
context: catalogue
@@ -31,13 +22,6 @@ services:
- mongodb
networks:
- robot-shop
healthcheck:
test: [ "CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/health" ]
interval: 10s
timeout: 10s
retries: 3
logging:
<<: *logging
user:
build:
context: user
@@ -47,13 +31,6 @@ services:
- redis
networks:
- robot-shop
healthcheck:
test: [ "CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/health" ]
interval: 10s
timeout: 10s
retries: 3
logging:
<<: *logging
cart:
build:
context: cart
@@ -62,13 +39,6 @@ services:
- redis
networks:
- robot-shop
healthcheck:
test: [ "CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/health" ]
interval: 10s
timeout: 10s
retries: 3
logging:
<<: *logging
mysql:
build:
context: mysql
@@ -77,8 +47,6 @@ services:
- NET_ADMIN
networks:
- robot-shop
logging:
<<: *logging
shipping:
build:
context: shipping
@@ -87,30 +55,14 @@ services:
- mysql
networks:
- robot-shop
healthcheck:
test: ["CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/health"]
interval: 10s
timeout: 10s
retries: 3
logging:
<<: *logging
ratings:
build:
context: ratings
image: ${REPO}/rs-ratings:${TAG}
environment:
APP_ENV: prod
networks:
- robot-shop
depends_on:
- mysql
healthcheck:
test: ["CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost/_health"]
interval: 10s
timeout: 10s
retries: 3
logging:
<<: *logging
payment:
build:
context: payment
@@ -119,16 +71,9 @@ services:
- rabbitmq
networks:
- robot-shop
healthcheck:
test: ["CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/health"]
interval: 10s
timeout: 10s
retries: 3
# Uncomment to change payment gateway
#environment:
#PAYMENT_GATEWAY: "https://www.worldpay.com"
logging:
<<: *logging
dispatch:
build:
context: dispatch
@@ -137,14 +82,9 @@ services:
- rabbitmq
networks:
- robot-shop
logging:
<<: *logging
web:
build:
context: web
args:
# agent key to download tracing libs
KEY: ${INSTANA_AGENT_KEY}
image: ${REPO}/rs-web:${TAG}
depends_on:
- catalogue
@@ -155,17 +95,11 @@ services:
- "8080:8080"
networks:
- robot-shop
healthcheck:
test: [ "CMD", "curl", "-H", "X-INSTANA-SYNTHETIC: 1", "-f", "http://localhost:8080/" ]
interval: 10s
timeout: 10s
retries: 3
# Uncomment to enable Instana EUM
# environment:
# INSTANA_EUM_KEY: <your eum key>
# INSTANA_EUM_REPORTING_URL: <your reporting url>
logging:
<<: *logging
# INSTANA_EUM_REPORTING_URL: https://eum-us-west-2.instana.io
# INSTANA_EUM_REPORTING_URL: https://eum-eu-west-1.instana.io
networks:
robot-shop:

View File

@@ -1,32 +0,0 @@
# Configuration
Edit `fluent.conf`, setting the parameters to match either your Humio account or your Elasticsearch instance. See the [fluentd documentation](https://docs.fluentd.org/output/elasticsearch) and/or [Humio documentation](https://docs.humio.com/docs/ingesting-data/data-shippers/fluentd/) for details.
Start `fluentd` in a Docker container using the `run.sh` script.
## Docker Compose
To have all the containers in Stan's Robot Shop use fluentd for logging, the `docker-compose.yaml` needs to be edited. Change the logging section at the top of the file.
```yaml
services:
mongodb:
build:
context: mongo
image: ${REPO}/rs-mongodb:${TAG}
networks:
- robot-shop
logging: &logging
driver: "fluentd"
options:
fluentd-address: localhost:24224
tag: "{{.ImageName}}"
redis:
```
If Robot Shop is already running, shut it down with `docker-compose down`.
Start Robot Shop again with `docker-compose up -d`. It takes a few minutes to start; after that, check Humio or ELK for log entries.
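Putting the steps together, the restart sequence looks roughly like this (a sketch; it assumes the edited `docker-compose.yaml` and `fluent.conf` are already in place):

```shell
# start the fluentd log shipper first so the containers have somewhere to send their logs
./run.sh
# restart Robot Shop so the fluentd logging driver takes effect
docker-compose down
docker-compose up -d
```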
Set up [logging integration](https://www.instana.com/docs/logging/) in Instana.

View File

@@ -1,24 +0,0 @@
<source>
@type forward
</source>
<filter **>
@type record_transformer
enable_ruby
<record>
docker.container_id ${record["container_id"]}
docker.image_name ${tag}
</record>
</filter>
<match **>
@type elasticsearch
host cloud.humio.com
port 9200
scheme https
ssl_version TLSv1_2
user <Humio index or Elasticsearch user>
password <Humio API key or Elasticsearch password>
logstash_format true
</match>

View File

@@ -1,12 +0,0 @@
#!/bin/sh
IMAGE_NAME="robotshop/fluentd:elastic"
docker run \
-d \
--rm \
--name fluentd \
-p 24224:24224 \
-v $(pwd)/fluent.conf:/fluentd/etc/fluent.conf \
$IMAGE_NAME

View File

@@ -1,9 +0,0 @@
FROM fluentd
USER root
RUN apk update && \
apk add --virtual .build-dependencies build-base ruby-dev
RUN fluent-gem install fluent-plugin-elasticsearch && \
fluent-gem install fluent-plugin-kubernetes_metadata_filter && \
fluent-gem install fluent-plugin-multi-format-parser

View File

@@ -1,11 +0,0 @@
# Kubernetes
Edit the `fluentd.yaml` file, inserting your Humio or Elasticsearch instance details.
Apply the configuration:
```shell
$ kubectl apply -f fluentd.yaml
```
Set up [logging integration](https://www.instana.com/docs/logging/) in Instana.
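To confirm the shipper is running before checking the backend, inspect the DaemonSet with standard kubectl commands (a sketch; the `logging` namespace is the one defined in `fluentd.yaml`):

```shell
# the DaemonSet and its pods live in the logging namespace created by fluentd.yaml
kubectl -n logging get daemonset fluentd
kubectl -n logging get pods
# tail one of the fluentd pods to confirm records are being picked up and shipped
kubectl -n logging logs daemonset/fluentd --tail=20
```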

View File

@@ -1,148 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: logging
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd
namespace: logging
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentd
namespace: logging
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fluentd
namespace: logging
roleRef:
kind: ClusterRole
name: fluentd
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: fluentd
namespace: logging
#
# CONFIGURATION
#
---
apiVersion: v1
kind: ConfigMap
metadata:
name: fluentd-config
namespace: logging
data:
fluent.conf: |
<source>
@type tail
path /var/log/containers/*.log
pos_file /var/log/fluentd-containers.log.pos
tag kubernetes.*
read_from_head false
<parse>
@type json
</parse>
</source>
<filter kubernetes.**>
@type kubernetes_metadata
@id filter_kube_metadata
</filter>
# Throw away what is not needed first
#<match fluent.**>
#@type null
#</match>
<match kubernetes.var.log.containers.**kube-system**.log>
@type null
</match>
# Capture what is left
<match **>
@type elasticsearch
host cloud.humio.com
port 9200
scheme https
ssl_version TLSv1_2
logstash_format true
user <Humio index or Elasticsearch user>
password <Humio API key or Elasticsearch password>
</match>
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: logging
labels:
k8s-app: fluentd
#https://github.com/kubernetes/kubernetes/issues/51376
#kubernetes.io/cluster-service: "true"
spec:
selector:
matchLabels:
name: fluentd
template:
metadata:
labels:
name: fluentd
#kubernetes.io/cluster-service: "true"
spec:
serviceAccount: fluentd
serviceAccountName: fluentd
terminationGracePeriodSeconds: 30
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: fluentd
image: robotshop/fluentd:elastic
#args:
# - "-v"
resources:
limits:
cpu: 500m
memory: 500Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: fluentd-config
mountPath: /fluentd/etc
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
imagePullPolicy: Always
volumes:
- name: fluentd-config
configMap:
name: fluentd-config
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers

View File

@@ -1,10 +0,0 @@
# Logging with Fluentd
This example works with [Humio](https://humio.com/) and [ELK](https://elastic.co/). Fluentd is used to ship the logs from the containers to the logging backend.
## Build Fluentd Container
The default `fluentd` Docker image does not include the output plugin for Elasticsearch, so a new image based on the default one, with the Elasticsearch output plugin installed, has to be built; see the `Dockerfile` and `build.sh` script for examples. This image has already been built and pushed to Docker Hub.
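If you want to rebuild the image yourself, the `build.sh` script next to this README builds it and optionally pushes it:

```shell
# build robotshop/fluentd:elastic locally
./build.sh
# build and push to Docker Hub (requires push access to the robotshop organisation)
./build.sh push
```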
Deployment is slightly different depending on which platform Robot Shop is run on. See the appropriate subdirectories for the required files and further instructions.

View File

@@ -1,12 +0,0 @@
#!/bin/sh
IMAGE_NAME="robotshop/fluentd:elastic"
docker build -t "$IMAGE_NAME" .
if [ "$1" = "push" ]
then
docker push "$IMAGE_NAME"
fi

View File

@@ -1,4 +1,4 @@
FROM python:3.9
FROM python:3.6
# Some default values
ENV HOST="http://localhost:8080/" \

View File

@@ -38,8 +38,8 @@ fi
echo "Starting $CLIENTS clients for ${RUN_TIME:-ever}"
if [ "$SILENT" -eq 1 ]
then
locust -f robot-shop.py --host "$HOST" --headless -r 1 -u $NUM_CLIENTS $TIME > /dev/null 2>&1
locust -f robot-shop.py --host "$HOST" --no-web -r 1 -c $NUM_CLIENTS $TIME > /dev/null 2>&1
else
locust -f robot-shop.py --host "$HOST" --headless -r 1 -u $NUM_CLIENTS $TIME
locust -f robot-shop.py --host "$HOST" --no-web -r 1 -c $NUM_CLIENTS $TIME
fi

View File

@@ -1 +1 @@
locust
locustio

View File

@@ -1,59 +1,33 @@
import os
import random
from locust import HttpUser, task, between
from locust import HttpLocust, TaskSet, task
from random import choice
from random import randint
class UserBehavior(HttpUser):
wait_time = between(2, 10)
# source: https://tools.tracemyip.org/search--ip/list
fake_ip_addresses = [
# white house
"156.33.241.5",
# Hollywood
"34.196.93.245",
# Chicago
"98.142.103.241",
# Los Angeles
"192.241.230.151",
# Berlin
"46.114.35.116",
# Singapore
"52.77.99.130",
# Sydney
"60.242.161.215"
]
class UserBehavior(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
print('Starting')
@task
def login(self):
fake_ip = random.choice(self.fake_ip_addresses)
credentials = {
'name': 'user',
'password': 'password'
}
res = self.client.post('/api/user/login', json=credentials, headers={'x-forwarded-for': fake_ip})
res = self.client.post('/api/user/login', json=credentials)
print('login {}'.format(res.status_code))
@task
def load(self):
fake_ip = random.choice(self.fake_ip_addresses)
self.client.get('/', headers={'x-forwarded-for': fake_ip})
user = self.client.get('/api/user/uniqueid', headers={'x-forwarded-for': fake_ip}).json()
self.client.get('/')
user = self.client.get('/api/user/uniqueid').json()
uniqueid = user['uuid']
print('User {}'.format(uniqueid))
self.client.get('/api/catalogue/categories', headers={'x-forwarded-for': fake_ip})
self.client.get('/api/catalogue/categories')
# all products in catalogue
products = self.client.get('/api/catalogue/products', headers={'x-forwarded-for': fake_ip}).json()
products = self.client.get('/api/catalogue/products').json()
for i in range(2):
item = None
while True:
@@ -63,36 +37,39 @@ class UserBehavior(HttpUser):
# vote for item
if randint(1, 10) <= 3:
self.client.put('/api/ratings/api/rate/{}/{}'.format(item['sku'], randint(1, 5)), headers={'x-forwarded-for': fake_ip})
self.client.put('/api/ratings/api/rate/{}/{}'.format(item['sku'], randint(1, 5)))
self.client.get('/api/catalogue/product/{}'.format(item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/ratings/api/fetch/{}'.format(item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/cart/add/{}/{}/1'.format(uniqueid, item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/catalogue/product/{}'.format(item['sku']))
self.client.get('/api/ratings/api/fetch/{}'.format(item['sku']))
self.client.get('/api/cart/add/{}/{}/1'.format(uniqueid, item['sku']))
cart = self.client.get('/api/cart/cart/{}'.format(uniqueid), headers={'x-forwarded-for': fake_ip}).json()
cart = self.client.get('/api/cart/cart/{}'.format(uniqueid)).json()
item = choice(cart['items'])
self.client.get('/api/cart/update/{}/{}/2'.format(uniqueid, item['sku']), headers={'x-forwarded-for': fake_ip})
self.client.get('/api/cart/update/{}/{}/2'.format(uniqueid, item['sku']))
# country codes
code = choice(self.client.get('/api/shipping/codes', headers={'x-forwarded-for': fake_ip}).json())
city = choice(self.client.get('/api/shipping/cities/{}'.format(code['code']), headers={'x-forwarded-for': fake_ip}).json())
code = choice(self.client.get('/api/shipping/codes').json())
city = choice(self.client.get('/api/shipping/cities/{}'.format(code['code'])).json())
print('code {} city {}'.format(code, city))
shipping = self.client.get('/api/shipping/calc/{}'.format(city['uuid']), headers={'x-forwarded-for': fake_ip}).json()
shipping = self.client.get('/api/shipping/calc/{}'.format(city['uuid'])).json()
shipping['location'] = '{} {}'.format(code['name'], city['name'])
print('Shipping {}'.format(shipping))
# POST
cart = self.client.post('/api/shipping/confirm/{}'.format(uniqueid), json=shipping, headers={'x-forwarded-for': fake_ip}).json()
cart = self.client.post('/api/shipping/confirm/{}'.format(uniqueid), json=shipping).json()
print('Final cart {}'.format(cart))
order = self.client.post('/api/payment/pay/{}'.format(uniqueid), json=cart, headers={'x-forwarded-for': fake_ip}).json()
order = self.client.post('/api/payment/pay/{}'.format(uniqueid), json=cart).json()
print('Order {}'.format(order))
@task
def error(self):
fake_ip = random.choice(self.fake_ip_addresses)
if os.environ.get('ERROR') == '1':
print('Error request')
cart = {'total': 0, 'tax': 0}
self.client.post('/api/payment/pay/partner-57', json=cart, headers={'x-forwarded-for': fake_ip})
self.client.post('/api/payment/pay/partner-57', json=cart)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 1000
max_wait = 5000

View File

@@ -1,4 +1,4 @@
FROM mongo:5
FROM mongo:3.6.1
COPY *.js /docker-entrypoint-initdb.d/

View File

@@ -3,17 +3,17 @@
//
db = db.getSiblingDB('catalogue');
db.products.insertMany([
{sku: 'Watson', name: 'Watson', description: 'Probably the smartest AI on the planet', price: 2001, instock: 2, categories: ['Artificial Intelligence']},
{sku: 'Ewooid', name: 'Ewooid', description: 'Fully sentient assistant', price: 200, instock: 0, categories: ['Artificial Intelligence']},
{sku: 'HPTD', name: 'High-Powered Travel Droid', description: 'Traveling to the far reaches of the Galaxy? You need this for protection. Comes in handy when you are lost in space', price: 1200, instock: 12, categories: ['Robot']},
{sku: 'UHJ', name: 'Ultimate Harvesting Juggernaut', description: 'Extraterrestrial vegetation harvester', price: 5000, instock: 10, categories: ['Robot']},
{sku: 'EPE', name: 'Extreme Probe Emulator', description: 'Versatile interface adapter for hacking into systems', price: 953, instock: 1, categories: ['Robot']},
{sku: 'EMM', name: 'Exceptional Medical Machine', description: 'Fully automatic surgery droid with exceptional bedside manner', price: 1024, instock: 1, categories: ['Robot']},
{sku: 'SHCE', name: 'Strategic Human Control Emulator', description: 'Diplomatic protocol assistant', price: 300, instock: 12, categories: ['Robot']},
{sku: 'RED', name: 'Responsive Enforcer Droid', description: 'Security detail, will guard anything', price: 700, instock: 5, categories: ['Robot']},
{sku: 'RMC', name: 'Robotic Mining Cyborg', description: 'Excellent tunneling capability to get those rare minerals', price: 42, instock: 48, categories: ['Robot']},
{sku: 'STAN-1', name: 'Stan', description: 'Observability guru', price: 67, instock: 1000, categories: ['Robot', 'Artificial Intelligence']},
{sku: 'CNA', name: 'Cybernated Neutralization Android', description: 'Is your spaceship a bit whiffy? This little fellow will bring a breath of fresh air', price: 1000, instock: 0, categories: ['Robot']}
{sku: 'HAL-1', name: 'HAL', description: 'Sorry Dave, I cant do that', price: 2001, instock: 2, categories: ['Artificial Intelligence']},
{sku: 'PB-1', name: 'Positronic Brain', description: 'Highly advanced sentient processing unit with the laws of robotics burned in', price: 200, instock: 0, categories: ['Artificial Intelligence']},
{sku: 'ROB-1', name: 'Robbie', description: 'Large mechanical workhorse, crude but effective. Comes in handy when you are lost in space', price: 1200, instock: 12, categories: ['Robot']},
{sku: 'EVE-1', name: 'Eve', description: 'Extraterrestrial Vegetation Evaluator', price: 5000, instock: 10, categories: ['Robot']},
{sku: 'C3P0', name: 'C3P0', description: 'Protocol android', price: 953, instock: 1, categories: ['Robot']},
{sku: 'R2D2', name: 'R2D2', description: 'R2 maintenance robot and secret messenger. Help me Obi Wan', price: 1024, instock: 1, categories: ['Robot']},
{sku: 'K9', name: 'K9', description: 'Time travelling companion at heel', price: 300, instock: 12, categories: ['Robot']},
{sku: 'RD-10', name: 'Kryten', description: 'Red Dwarf crew member', price: 700, instock: 5, categories: ['Robot']},
{sku: 'HHGTTG', name: 'Marvin', description: 'Marvin, your paranoid android. Brain the size of a planet', price: 42, instock: 48, categories: ['Robot']},
{sku: 'STAN-1', name: 'Stan', description: 'APM guru', price: 67, instock: 1000, categories: ['Robot', 'Artificial Intelligence']},
{sku: 'STNG', name: 'Mr Data', description: 'Could be R. Daneel Olivaw? Prototype positronic brain android', price: 1000, instock: 0, categories: ['Robot']}
]);
// full text index for searching

View File

@@ -1,6 +1,4 @@
FROM mysql:5.7
VOLUME /data
FROM mysql:5.7.20
ENV MYSQL_ALLOW_EMPTY_PASSWORD=yes \
MYSQL_DATABASE=cities \
@@ -13,6 +11,6 @@ RUN /root/config.sh
COPY scripts/* /docker-entrypoint-initdb.d/
#RUN /entrypoint.sh mysqld & while [ ! -f /tmp/finished ]; do sleep 10; done
#RUN rm /docker-entrypoint-initdb.d/*
RUN /entrypoint.sh mysqld & while [ ! -f /tmp/finished ]; do sleep 10; done
RUN rm /docker-entrypoint-initdb.d/*

View File

@@ -1,4 +1,4 @@
FROM python:3.9
FROM python:3.6
EXPOSE 8080
USER root

View File

@@ -1,5 +1,3 @@
import random
import instana
import os
import sys
@@ -9,6 +7,8 @@ import uuid
import json
import requests
import traceback
import opentracing as ot
import opentracing.ext.tags as tags
from flask import Flask
from flask import Response
from flask import request
@@ -59,6 +59,11 @@ def pay(id):
anonymous_user = True
# add some log info to the active trace
span = ot.tracer.active_span
span.log_kv({'id': id})
span.log_kv({'cart': cart})
# check user exists
try:
req = requests.get('http://{user}:8080/check/{id}'.format(user=USER, id=id))
@@ -126,13 +131,36 @@ def pay(id):
def queueOrder(order):
app.logger.info('queue order')
# RabbitMQ pika is not currently traced automatically
# opentracing tracer is automatically set to Instana tracer
# start a span
# For screenshot demo requirements optionally add in a bit of delay
delay = int(os.getenv('PAYMENT_DELAY_MS', 0))
time.sleep(delay / 1000)
parent_span = ot.tracer.active_span
with ot.tracer.start_active_span('queueOrder', child_of=parent_span,
tags={
'exchange': Publisher.EXCHANGE,
'key': Publisher.ROUTING_KEY
}) as tscope:
tscope.span.set_tag('span.kind', 'intermediate')
tscope.span.log_kv({'orderid': order.get('orderid')})
with ot.tracer.start_active_span('rabbitmq', child_of=tscope.span,
tags={
'exchange': Publisher.EXCHANGE,
'sort': 'publish',
'address': Publisher.HOST,
'key': Publisher.ROUTING_KEY
}
) as scope:
headers = {}
publisher.publish(order, headers)
# For screenshot demo requirements optionally add in a bit of delay
delay = int(os.getenv('PAYMENT_DELAY_MS', 0))
time.sleep(delay / 1000)
headers = {}
ot.tracer.inject(scope.span.context, ot.Format.HTTP_HEADERS, headers)
app.logger.info('msg headers {}'.format(headers))
publisher.publish(order, headers)
def countItems(items):

View File

@@ -1,11 +0,0 @@
#!/bin/sh
for DFILE in $(find . -name Dockerfile -print)
do
# multiple images
for IMAGE in $(awk '/^FROM/ { print $2 }' $DFILE)
do
echo "Pulling $IMAGE"
docker pull $IMAGE
done
done

View File

@@ -1,13 +1,16 @@
# Use composer to install dependencies
FROM composer AS build
COPY composer.json /app/
RUN composer install
#
# Build the app
#
FROM php:7.4-apache
FROM php:7.3-apache
RUN apt-get update && apt-get install -yqq unzip libzip-dev \
&& docker-php-ext-install pdo_mysql opcache zip
# Enable AutoProfile for PHP which is currently opt-in beta
RUN echo "instana.enable_auto_profile=1" > "/usr/local/etc/php/conf.d/zzz-instana-extras.ini"
RUN docker-php-ext-install pdo_mysql
# relax permissions on status
COPY status.conf /etc/apache2/mods-available/status.conf
@@ -16,16 +19,8 @@ RUN a2enmod rewrite && a2enmod status
WORKDIR /var/www/html
# copy dependencies from previous step
COPY --from=build /app/vendor/ /var/www/html/vendor/
COPY html/ /var/www/html
COPY --from=composer /usr/bin/composer /usr/bin/composer
RUN composer install
# This is important. Symfony needs write permissions and we
# dont know the context in which the container will run, i.e.
# which user will be forced from the outside so better play
# safe for this simple demo.
RUN rm -Rf /var/www/var/*
RUN chown -R www-data /var/www
RUN chmod -R 777 /var/www

5
ratings/composer.json Normal file
View File

@@ -0,0 +1,5 @@
{
"require": {
"monolog/monolog": "^1.24.0"
}
}

View File

@@ -1,10 +1,6 @@
DirectoryIndex index.php
<IfModule mod_rewrite.c>
RewriteEngine On
RewriteCond %{ENV:REDIRECT_STATUS} =""
RewriteRule ^index\.php(?:/(.*)|$) %{ENV:BASE}/$1 [R=301,L]
RewriteCond %{REQUEST_URI} !=/server-status
RewriteCond %{REQUEST_FILENAME} !-f
RewriteRule ^ %{ENV:BASE}/index.php [L]
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule api/(.*)$ api.php?request=$1 [QSA,NC,L]
</IfModule>

106
ratings/html/API.class.php Normal file
View File

@@ -0,0 +1,106 @@
<?php
// load composer installed files
require_once(__DIR__.'/vendor/autoload.php');
use Monolog\Logger;
use Monolog\Handler\StreamHandler;
abstract class API {
protected $method = '';
protected $endpoint = '';
protected $verb = '';
protected $args = array();
protected $file = Null;
protected $logger = Null;
protected $logHandler = Null;
public function __construct($request) {
// Logging
$this->logHandler = new StreamHandler('php://stdout', Logger::INFO);
// CORS
header('Access-Control-Allow-Origin: *');
header('Access-Control-Allow-Methods: *');
header('Content-Type: application/json');
$this->args = explode('/', rtrim($request, '/'));
$this->endpoint = array_shift($this->args);
if(array_key_exists(0, $this->args) && !is_numeric($this->args[0])) {
$this->verb = array_shift($this->args);
}
$this->method = $_SERVER['REQUEST_METHOD'];
if($this->method == 'POST' && array_key_exists('HTTP_X_METHOD', $_SERVER)) {
if($_SERVER['HTTP_X_HTTP_METHOD'] == 'DELETE') {
$this->method = 'DELETE';
} else if($_SERVER['HTTP_X_HTTP_METHOD'] == 'PUT') {
$this->method = 'PUT';
} else {
throw new Exception('Unexpected header');
}
}
switch($this->method) {
case 'DELETE':
case 'POST':
$this->request = $this->_cleanInputs($_POST);
break;
case 'GET':
$this->request = $this->_cleanInputs($_GET);
break;
case 'PUT':
$this->request = $this->_cleanInputs($_GET);
$this->file = file_get_contents('php://input');
break;
}
}
public function processAPI() {
if(method_exists($this, $this->endpoint)) {
try {
$result = $this->{$this->endpoint}();
return $this->_response($result, 200);
} catch (Exception $e) {
return $this->_response($e->getMessage(), $e->getCode());
}
}
return $this->_response("No endpoint: $this->endpoint", 404);
}
private function _response($data, $status = 200) {
header('HTTP/1.1 ' . $status . ' ' . $this->_requestStatus($status));
return json_encode($data);
}
private function _cleanInputs($data) {
$clean_input = array();
if(is_array($data)) {
foreach($data as $k => $v) {
$clean_input[$k] = $this->_cleanInputs($v);
}
} else {
$clean_input = trim(strip_tags($data));
}
return $clean_input;
}
private function _requestStatus($code) {
$status = array(
200 => 'OK',
400 => 'Bad Request',
404 => 'Not Found',
405 => 'Method Not Allowed',
500 => 'Internal Server Error');
return (array_key_exists("$code", $status) ? $status["$code"] : $status['500']);
}
}
?>

179
ratings/html/api.php Normal file
View File

@@ -0,0 +1,179 @@
<?php
require_once 'API.class.php';
use Monolog\Logger;
class RatingsAPI extends API {
public function __construct($request, $origin) {
parent::__construct($request);
// Logging
$this->logger = new Logger('RatingsAPI');
$this->logger->pushHandler($this->logHandler);
}
protected function health() {
$this->logger->info('health OK');
return 'OK';
}
protected function dump() {
$data = array();
$data['method'] = $this->method;
$data['verb'] = $this->verb;
$data = array_merge($data, array('args' => $this->args));
return $data;
}
// ratings/fetch/sku
protected function fetch() {
if($this->method == 'GET' && isset($this->verb) && count($this->args) == 0) {
$sku = $this->verb;
if(! $this->_checkSku($sku)) {
throw new Exception("$sku not found", 404);
}
$data = $this->_getRating($sku);
return $data;
} else {
$this->logger->warn('fetch rating - bad request');
throw new Exception('Bad request', 400);
}
}
// ratings/rate/sku/score
protected function rate() {
if($this->method == 'PUT' && isset($this->verb) && count($this->args) == 1) {
$sku = $this->verb;
$score = intval($this->args[0]);
$score = min(max(1, $score), 5);
if(! $this->_checkSku($sku)) {
throw new Exception("$sku not found", 404);
}
$rating = $this->_getRating($sku);
if($rating['avg_rating'] == 0) {
// not rated yet
$this->_insertRating($sku, $score);
} else {
// iffy maths
$newAvg = (($rating['avg_rating'] * $rating['rating_count']) + $score) / ($rating['rating_count'] + 1);
$this->_updateRating($sku, $newAvg, $rating['rating_count'] + 1);
}
} else {
$this->logger->warn('set rating - bad request');
throw new Exception('Bad request', 400);
}
return 'OK';
}
private function _getRating($sku) {
$db = $this->_dbConnect();
if($db) {
$stmt = $db->prepare('select avg_rating, rating_count from ratings where sku = ?');
if($stmt->execute(array($sku))) {
$data = $stmt->fetch();
if($data) {
// for some reason avg_rating is returned as a string
$data['avg_rating'] = floatval($data['avg_rating']);
return $data;
} else {
// nicer to return an empty record than throw 404
return array('avg_rating' => 0, 'rating_count' => 0);
}
} else {
$this->logger->error('failed to query data');
throw new Exception('Failed to query data', 500);
}
} else {
$this->logger->error('database connection error');
throw new Exception('Database connection error', 500);
}
}
private function _updateRating($sku, $score, $count) {
$db = $this->_dbConnect();
if($db) {
$stmt = $db->prepare('update ratings set avg_rating = ?, rating_count = ? where sku = ?');
if(! $stmt->execute(array($score, $count, $sku))) {
$this->logger->error('failed to update rating');
throw new Exception('Failed to update data', 500);
}
} else {
$this->logger->error('database connection error');
throw new Exception('Database connection error', 500);
}
}
private function _insertRating($sku, $score) {
$db = $this->_dbConnect();
if($db) {
$stmt = $db->prepare('insert into ratings(sku, avg_rating, rating_count) values(?, ?, ?)');
if(! $stmt->execute(array($sku, $score, 1))) {
$this->logger->error('failed to insert data');
throw new Exception('Failed to insert data', 500);
}
} else {
$this->logger->error('database connection error');
throw new Exception('Database connection error', 500);
}
}
private function _dbConnect() {
$dsn = getenv('PDO_URL') ? getenv('PDO_URL') : 'mysql:host=mysql;dbname=ratings;charset=utf8mb4';
$opt = array(
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
PDO::ATTR_EMULATE_PREPARES => false
);
$db = false;
try {
$db = new PDO($dsn, 'ratings', 'iloveit', $opt);
} catch (PDOException $e) {
$msg = $e->getMessage();
$this->logger->error("Database error $msg");
$db = false;
}
return $db;
}
// check sku exists in product catalogue
private function _checkSku($sku) {
$url = getenv('CATALOGUE_URL') ? getenv('CATALOGUE_URL') : 'http://catalogue:8080/';
$url = $url . 'product/' . $sku;
$opt = array(
CURLOPT_RETURNTRANSFER => true,
);
$curl = curl_init($url);
curl_setopt_array($curl, $opt);
$data = curl_exec($curl);
if(! $data) {
$this->logger->error('failed to connect to catalogue');
throw new Exception('Failed to connect to catalogue', 500);
}
$status = curl_getinfo($curl, CURLINFO_RESPONSE_CODE);
$this->logger->info("catalogue status $status");
curl_close($curl);
return $status == 200;
}
}
if(!array_key_exists('HTTP_ORIGIN', $_SERVER)) {
$_SERVER['HTTP_ORIGIN'] = $_SERVER['SERVER_NAME'];
}
try {
$API = new RatingsAPI($_REQUEST['request'], $_SERVER['HTTP_ORIGIN']);
echo $API->processAPI();
} catch(Exception $e) {
echo json_encode(Array('error' => $e->getMessage()));
}
?>

View File

@@ -1,24 +0,0 @@
{
"require": {
"php": "^7.4",
"ext-curl": "*",
"ext-json": "*",
"ext-pdo": "*",
"psr/log": "*",
"monolog/monolog": "^1.24.0",
"symfony/config": "^5.2",
"symfony/http-kernel": "^5.2",
"symfony/http-foundation": "^5.2",
"symfony/routing": "^5.2",
"symfony/dependency-injection": "^5.2",
"symfony/framework-bundle": "^5.2",
"doctrine/annotations": "^1.10",
"symfony/monolog-bundle": "^3.5",
"instana/instana-php-sdk": "^1.10"
},
"autoload": {
"psr-4": {
"Instana\\RobotShop\\Ratings\\": "src/"
}
}
}

View File

@@ -1,15 +0,0 @@
<?php
declare(strict_types=1);
require __DIR__.'/vendor/autoload.php';
use Instana\RobotShop\Ratings\Kernel;
use Symfony\Component\HttpFoundation\Request;
$env = getenv('APP_ENV') ?: 'dev';
$kernel = new Kernel($env, true);
$request = Request::createFromGlobals();
$response = $kernel->handle($request);
$response->send();
$kernel->terminate($request, $response);

View File

@@ -1,3 +1 @@
<?php
phpinfo();
<?php phpinfo(); ?>

View File

@@ -1,46 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Controller;
use Instana\RobotShop\Ratings\Service\HealthCheckService;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\Routing\Annotation\Route;
/**
* @Route("/_health")
*/
class HealthController implements LoggerAwareInterface
{
use LoggerAwareTrait;
/**
* @var HealthCheckService
*/
private $healthCheckService;
public function __construct(HealthCheckService $healthCheckService)
{
$this->healthCheckService = $healthCheckService;
}
public function __invoke(Request $request)
{
$checks = [];
try {
$this->healthCheckService->checkConnectivity();
$checks['pdo_connectivity'] = true;
} catch (\PDOException $e) {
$checks['pdo_connectivity'] = false;
}
$this->logger->info('Health-Check', $checks);
return new JsonResponse($checks, $checks['pdo_connectivity'] ? Response::HTTP_OK : Response::HTTP_BAD_REQUEST);
}
}

View File

@@ -1,90 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Controller;
use Instana\RobotShop\Ratings\Service\CatalogueService;
use Instana\RobotShop\Ratings\Service\RatingsService;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Component\HttpKernel\Exception\HttpException;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
use Symfony\Component\Routing\Annotation\Route;
/**
* @Route("/api")
*/
class RatingsApiController implements LoggerAwareInterface
{
use LoggerAwareTrait;
/**
* @var RatingsService
*/
private $ratingsService;
/**
* @var CatalogueService
*/
private $catalogueService;
public function __construct(CatalogueService $catalogueService, RatingsService $ratingsService)
{
$this->ratingsService = $ratingsService;
$this->catalogueService = $catalogueService;
}
/**
* @Route(path="/rate/{sku}/{score}", methods={"PUT"})
*/
public function put(Request $request, string $sku, int $score): Response
{
$score = min(max(1, $score), 5);
try {
if (false === $this->catalogueService->checkSKU($sku)) {
throw new NotFoundHttpException("$sku not found");
}
} catch (\Exception $e) {
throw new HttpException(500, $e->getMessage(), $e);
}
try {
$rating = $this->ratingsService->ratingBySku($sku);
if (0 === $rating['avg_rating']) {
// not rated yet
$this->ratingsService->addRatingForSKU($sku, $score);
} else {
// iffy maths
$newAvg = (($rating['avg_rating'] * $rating['rating_count']) + $score) / ($rating['rating_count'] + 1);
$this->ratingsService->updateRatingForSKU($sku, $newAvg, $rating['rating_count'] + 1);
}
return new JsonResponse([
'success' => true,
]);
} catch (\Exception $e) {
throw new HttpException(500, 'Unable to update rating', $e);
}
}
/**
* @Route("/fetch/{sku}", methods={"GET"})
*/
public function get(Request $request, string $sku): Response
{
try {
if (!$this->ratingsService->ratingBySku($sku)) {
throw new NotFoundHttpException("$sku not found");
}
} catch (\Exception $e) {
throw new HttpException(500, $e->getMessage(), $e);
}
return new JsonResponse($this->ratingsService->ratingBySku($sku));
}
}

View File

@@ -1,55 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings;
use PDO;
use PDOException;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
class Database implements LoggerAwareInterface
{
use LoggerAwareTrait;
/**
* @var string
*/
private $dsn;
/**
* @var string
*/
private $user;
/**
* @var string
*/
private $password;
public function __construct(string $dsn, string $user, string $password)
{
$this->dsn = $dsn;
$this->user = $user;
$this->password = $password;
}
public function getConnection(): PDO
{
$opt = [
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
PDO::ATTR_DEFAULT_FETCH_MODE => PDO::FETCH_ASSOC,
PDO::ATTR_EMULATE_PREPARES => false,
];
try {
return new PDO($this->dsn, $this->user, $this->password, $opt);
} catch (PDOException $e) {
$msg = $e->getMessage();
$this->logger->error("Database error $msg");
return null;
}
}
}

View File

@@ -1,42 +0,0 @@
<?php
namespace Instana\RobotShop\Ratings\EventListener;
use Instana\InstanaRuntimeException;
use Instana\Tracer;
use Psr\Log\LoggerInterface;
class InstanaDataCenterListener
{
private static $dataCenters = [
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1"
];
/**
* @var LoggerInterface
*/
private $logger;
public function __construct(LoggerInterface $logger)
{
$this->logger = $logger;
}
public function __invoke()
{
try {
$entry = Tracer::getEntrySpan();
$dataCenter = self::$dataCenters[array_rand(self::$dataCenters)];
$entry->annotate('datacenter', $dataCenter);
$this->logger->info(sprintf('Annotated DataCenter %s', $dataCenter));
} catch (InstanaRuntimeException $exception) {
$this->logger->error('Unable to annotate entry span: %s', $exception->getMessage());
}
}
}

View File

@@ -1,68 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Integration;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Symfony\Component\HttpKernel\Event\FinishRequestEvent;
use Symfony\Component\HttpKernel\Event\RequestEvent;
use Symfony\Component\HttpKernel\KernelEvents;
use Symfony\Contracts\Service\ResetInterface;
class InstanaHeadersLoggingProcessor implements EventSubscriberInterface, ResetInterface
{
private $routeData;
public static function getSubscribedEvents(): array
{
return [
KernelEvents::REQUEST => ['addHeaderData', 1],
KernelEvents::FINISH_REQUEST => ['removeHeaderData', 1],
];
}
public function __invoke(array $records): array
{
if ($this->routeData && !isset($records['extra']['requests'])) {
$records['extra']['instana'] = array_values($this->routeData);
}
return $records;
}
public function addHeaderData(RequestEvent $event): void
{
if ($event->isMasterRequest()) {
$this->reset();
}
$request = $event->getRequest();
if (null === $request->headers->get('X-INSTANA-L')) {
return;
}
$currentTraceHeaders = [
'l' => $request->headers->get('X-INSTANA-L', 'n/a'),
's' => $request->headers->get('X-INSTANA-S', 'n/a'),
't' => $request->headers->get('X-INSTANA-T', 'n/a'),
];
if (null !== $request->headers->get('X-INSTANA-SYNTHETIC')) {
$currentTraceHeaders['sy'] = $request->headers->get('X-INSTANA-SYNTHETIC');
}
$this->routeData[spl_object_id($request)] = $currentTraceHeaders;
}
public function reset(): void
{
$this->routeData = [];
}
public function removeHeaderData(FinishRequestEvent $event): void
{
$requestId = spl_object_id($event->getRequest());
unset($this->routeData[$requestId]);
}
}

View File

@@ -1,136 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings;
use Instana\RobotShop\Ratings\Controller\HealthController;
use Instana\RobotShop\Ratings\Controller\RatingsApiController;
use Instana\RobotShop\Ratings\EventListener\InstanaDataCenterListener;
use Instana\RobotShop\Ratings\Integration\InstanaHeadersLoggingProcessor;
use Instana\RobotShop\Ratings\Service\CatalogueService;
use Instana\RobotShop\Ratings\Service\HealthCheckService;
use Instana\RobotShop\Ratings\Service\RatingsService;
use Monolog\Formatter\LineFormatter;
use Symfony\Bundle\FrameworkBundle\FrameworkBundle;
use Symfony\Bundle\FrameworkBundle\Kernel\MicroKernelTrait;
use Symfony\Bundle\MonologBundle\MonologBundle;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Symfony\Component\HttpKernel\Event\ResponseEvent;
use Symfony\Component\HttpKernel\Kernel as BaseKernel;
use Symfony\Component\HttpKernel\KernelEvents;
use Symfony\Component\Routing\RouteCollectionBuilder;
class Kernel extends BaseKernel implements EventSubscriberInterface
{
use MicroKernelTrait;
public function registerBundles()
{
return [
new FrameworkBundle(),
new MonologBundle(),
];
}
/**
* {@inheritdoc}
*/
public static function getSubscribedEvents()
{
return [
KernelEvents::RESPONSE => 'corsResponseFilter',
];
}
public function corsResponseFilter(ResponseEvent $event)
{
$response = $event->getResponse();
$response->headers->add([
'Access-Control-Allow-Origin' => '*',
'Access-Control-Allow-Methods' => '*',
]);
}
protected function configureContainer(ContainerBuilder $c, LoaderInterface $loader): void
{
$c->loadFromExtension('framework', [
'secret' => 'S0ME_SECRET',
]);
$c->loadFromExtension('monolog', [
'handlers' => [
'stdout' => [
'type' => 'stream',
'level' => 'info',
'path' => 'php://stdout',
'channels' => ['!request'],
],
],
]);
$c->setParameter('catalogueUrl', getenv('CATALOGUE_URL') ?: 'http://catalogue:8080');
$c->setParameter('pdo_dsn', getenv('PDO_URL') ?: 'mysql:host=mysql;dbname=ratings;charset=utf8mb4');
$c->setParameter('pdo_user', 'ratings');
$c->setParameter('pdo_password', 'iloveit');
$c->setParameter('logger.name', 'RatingsAPI');
$c->register(InstanaHeadersLoggingProcessor::class)
->addTag('kernel.event_subscriber')
->addTag('monolog.processor');
$c->register('monolog.formatter.instana_headers', LineFormatter::class)
->addArgument('[%%datetime%%] [%%extra.token%%] %%channel%%.%%level_name%%: %%message%% %%context%% %%extra%%\n');
$c->register(Database::class)
->addArgument($c->getParameter('pdo_dsn'))
->addArgument($c->getParameter('pdo_user'))
->addArgument($c->getParameter('pdo_password'))
->addMethodCall('setLogger', [new Reference('logger')])
->setAutowired(true);
$c->register(CatalogueService::class)
->addArgument($c->getParameter('catalogueUrl'))
->addMethodCall('setLogger', [new Reference('logger')])
->setAutowired(true);
$c->register(HealthCheckService::class)
->addArgument(new Reference('database.connection'))
->addMethodCall('setLogger', [new Reference('logger')])
->setAutowired(true);
$c->register('database.connection', \PDO::class)
->setFactory([new Reference(Database::class), 'getConnection']);
$c->setAlias(\PDO::class, 'database.connection');
$c->register(RatingsService::class)
->addMethodCall('setLogger', [new Reference('logger')])
->setAutowired(true);
$c->register(HealthController::class)
->addMethodCall('setLogger', [new Reference('logger')])
->addTag('controller.service_arguments')
->setAutowired(true);
$c->register(RatingsApiController::class)
->addMethodCall('setLogger', [new Reference('logger')])
->addTag('controller.service_arguments')
->setAutowired(true);
$c->register(InstanaDataCenterListener::class)
->addTag('kernel.event_listener', [
'event' => 'kernel.request'
])
->setAutowired(true);
}
protected function configureRoutes(RouteCollectionBuilder $routes)
{
$routes->import(__DIR__.'/Controller/', '/', 'annotation');
}
}

View File

@@ -1,48 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Service;
use Exception;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
class CatalogueService implements LoggerAwareInterface
{
use LoggerAwareTrait;
/**
* @var string
*/
private $catalogueUrl;
public function __construct(string $catalogueUrl)
{
$this->catalogueUrl = $catalogueUrl;
}
public function checkSKU(string $sku): bool
{
$url = sprintf('%s/product/%s', $this->catalogueUrl, $sku);
$opt = [
CURLOPT_RETURNTRANSFER => true,
];
$curl = curl_init($url);
curl_setopt_array($curl, $opt);
$data = curl_exec($curl);
if (!$data) {
$this->logger->error('failed to connect to catalogue');
throw new Exception('Failed to connect to catalogue');
}
$status = curl_getinfo($curl, CURLINFO_RESPONSE_CODE);
$this->logger->info("catalogue status $status");
curl_close($curl);
return 200 === $status;
}
}

View File

@@ -1,29 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Service;
use PDO;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
class HealthCheckService implements LoggerAwareInterface
{
use LoggerAwareTrait;
/**
* @var PDO
*/
private $pdo;
public function __construct(PDO $pdo)
{
$this->pdo = $pdo;
}
public function checkConnectivity(): bool
{
return $this->pdo->prepare('SELECT 1 + 1 FROM DUAL;')->execute();
}
}

View File

@@ -1,67 +0,0 @@
<?php
declare(strict_types=1);
namespace Instana\RobotShop\Ratings\Service;
use PDO;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerAwareTrait;
class RatingsService implements LoggerAwareInterface
{
private const QUERY_RATINGS_BY_SKU = 'select avg_rating, rating_count from ratings where sku = ?';
private const QUERY_UPDATE_RATINGS_BY_SKU = 'update ratings set avg_rating = ?, rating_count = ? where sku = ?';
private const QUERY_INSERT_RATING = 'insert into ratings(sku, avg_rating, rating_count) values(?, ?, ?)';
use LoggerAwareTrait;
/**
* @var PDO
*/
private $connection;
public function __construct(PDO $connection)
{
$this->connection = $connection;
}
public function ratingBySku(string $sku): array
{
$stmt = $this->connection->prepare(self::QUERY_RATINGS_BY_SKU);
if (false === $stmt->execute([$sku])) {
$this->logger->error('failed to query data');
throw new \Exception('Failed to query data', 500);
}
$data = $stmt->fetch();
if ($data) {
// for some reason avg_rating is returned as a string
$data['avg_rating'] = (float) $data['avg_rating'];
return $data;
}
// nicer to return an empty record than throw 404
return ['avg_rating' => 0, 'rating_count' => 0];
}
public function updateRatingForSKU(string $sku, $score, int $count): void
{
$stmt = $this->connection->prepare(self::QUERY_UPDATE_RATINGS_BY_SKU);
if (!$stmt->execute([$score, $count, $sku])) {
$this->logger->error('failed to update rating');
throw new \Exception('Failed to update data', 500);
}
}
public function addRatingForSKU($sku, $rating): void
{
$stmt = $this->connection->prepare(self::QUERY_INSERT_RATING);
if (!$stmt->execute([$sku, $rating, 1])) {
$this->logger->error('failed to insert data');
throw new \Exception('Failed to insert data', 500);
}
}
}

View File

4
shipping/.gitignore vendored
View File

@@ -1,4 +0,0 @@
/target
/.classpath
/.project
/.settings

View File

@@ -1,14 +1,15 @@
#
# Build
#
FROM debian:10 AS build
FROM openjdk:8-jdk AS build
RUN apt-get update && apt-get -y install maven
WORKDIR /opt/shipping
COPY pom.xml /opt/shipping/
RUN mvn dependency:resolve
RUN mvn install
COPY src /opt/shipping/src/
RUN mvn package
@@ -24,7 +25,7 @@ WORKDIR /opt/shipping
ENV CART_ENDPOINT=cart:8080
ENV DB_HOST=mysql
COPY --from=build /opt/shipping/target/shipping-1.0.jar shipping.jar
COPY --from=build /opt/shipping/target/shipping-1.0-jar-with-dependencies.jar shipping.jar
CMD [ "java", "-Xmn256m", "-Xmx768m", "-jar", "shipping.jar" ]

View File

@@ -1,78 +1,78 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.3.3.RELEASE</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.instana</groupId>
<artifactId>shipping</artifactId>
<version>1.0</version>
<name>shipping service</name>
<description>Shipping calculations</description>
<project>
<modelVersion>4.0.0</modelVersion>
<groupId>steveww</groupId>
<artifactId>shipping</artifactId>
<version>1.0</version>
<packaging>jar</packaging>
<name>Spark Java Sample</name>
<properties>
<java.version>1.8</java.version>
</properties>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependencies>
<dependency>
<groupId>org.springframework.retry</groupId>
<artifactId>spring-retry</artifactId>
<groupId>com.sparkjava</groupId>
<artifactId>spark-core</artifactId>
<version>2.7.2</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>c3p0</groupId>
<artifactId>c3p0</artifactId>
<version>0.9.1.2</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.45</version>
</dependency>
<dependency>
<groupId>commons-dbutils</groupId>
<artifactId>commons-dbutils</artifactId>
<version>1.7</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>com.instana</groupId>
<artifactId>instana-java-sdk</artifactId>
<version>1.2.0</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.12</version>
<version>4.5.5</version>
</dependency>
</dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<archive>
<manifest>
<mainClass>org.steveww.spark.Main</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@@ -1,75 +0,0 @@
package com.instana.robotshop.shipping;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
public class CartHelper {
private static final Logger logger = LoggerFactory.getLogger(CartHelper.class);
private String baseUrl;
public CartHelper(String baseUrl) {
this.baseUrl = baseUrl;
}
// TODO - Remove deprecated calls
public String addToCart(String id, String data) {
logger.info("add shipping to cart {}", id);
StringBuilder buffer = new StringBuilder();
CloseableHttpClient httpClient = null;
try {
// set timeout to 5 secs
HttpParams httpParams = new BasicHttpParams();
HttpConnectionParams.setConnectionTimeout(httpParams, 5000);
httpClient = HttpClients.createDefault();
HttpPost postRequest = new HttpPost(baseUrl + id);
StringEntity payload = new StringEntity(data);
payload.setContentType("application/json");
postRequest.setEntity(payload);
CloseableHttpResponse res = httpClient.execute(postRequest);
if (res.getStatusLine().getStatusCode() == 200) {
BufferedReader in = new BufferedReader(new InputStreamReader(res.getEntity().getContent()));
String line;
while ((line = in.readLine()) != null) {
buffer.append(line);
}
} else {
logger.warn("Failed with code {}", res.getStatusLine().getStatusCode());
}
try {
res.close();
} catch(IOException e) {
logger.warn("httpresponse", e);
}
} catch(Exception e) {
logger.warn("http client exception", e);
} finally {
if (httpClient != null) {
try {
httpClient.close();
} catch(IOException e) {
logger.warn("httpclient", e);
}
}
}
// this will be empty on error
return buffer.toString();
}
}
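The TODO above refers to the BasicHttpParams/HttpConnectionParams calls: they are deprecated in HttpClient 4.x and, as written, are never handed to HttpClients.createDefault(), so the intended 5 second timeout is not actually applied. A minimal sketch of how the timeout could be set with the non-deprecated RequestConfig API (illustrative only, not code from this repository):

import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

class TimeoutClientFactory {
    // Build a client whose connect and read timeouts are actually enforced (5 s each).
    static CloseableHttpClient create() {
        RequestConfig config = RequestConfig.custom()
                .setConnectTimeout(5000)   // connect timeout in milliseconds
                .setSocketTimeout(5000)    // socket (read) timeout in milliseconds
                .build();
        return HttpClients.custom()
                .setDefaultRequestConfig(config)
                .build();
    }
}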

View File

@@ -1,85 +0,0 @@
package com.instana.robotshop.shipping;
import javax.persistence.Table;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Column;
/*
* Bean for City
*/
@Entity
@Table(name = "cities")
public class City {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private long uuid;
@Column(name = "country_code")
private String code;
private String city;
private String name;
private String region;
private double latitude;
private double longitude;
public long getUuid() {
return this.uuid;
}
public String getCode() {
return this.code;
}
public void setCode(String code) {
this.code = code;
}
public String getCity() {
return this.city;
}
public void setCity(String city) {
this.city = city;
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
public String getRegion() {
return this.region;
}
public void setRegion(String region) {
this.region = region;
}
public double getLatitude() {
return this.latitude;
}
public void setLatitude(double latitude) {
this.latitude = latitude;
}
public double getLongitude() {
return this.longitude;
}
public void setLongitude(double longitude) {
this.longitude = longitude;
}
@Override
public String toString() {
return String.format("Country: %s City: %s Region: %s Coords: %f %f", this.code, this.city, this.region, this.latitude, this.longitude);
}
}

View File

@@ -1,17 +0,0 @@
package com.instana.robotshop.shipping;
import java.util.List;
import org.springframework.data.repository.CrudRepository;
import org.springframework.data.jpa.repository.Query;
public interface CityRepository extends CrudRepository<City, Long> {
List<City> findByCode(String code);
@Query(
value = "select c from City c where c.code = ?1 and c.city like ?2%"
)
List<City> match(String code, String text);
City findById(long id);
}

View File

@@ -1,47 +0,0 @@
package com.instana.robotshop.shipping;
import javax.persistence.Table;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
/*
* Bean for Code
*/
@Entity
@Table(name = "codes")
public class Code {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private long uuid;
private String code;
private String name;
public long getUuid() {
return this.uuid;
}
public String getCode() {
return this.code;
}
public void setCode(String code) {
this.code = code;
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return String.format("Code: %s Name: %s", this.code, this.name);
}
}

View File

@@ -1,12 +0,0 @@
package com.instana.robotshop.shipping;
import java.util.List;
import org.springframework.data.repository.PagingAndSortingRepository;
public interface CodeRepository extends PagingAndSortingRepository<Code, Long> {
Iterable<Code> findAll();
Code findById(long id);
}

View File

@@ -1,145 +0,0 @@
package com.instana.robotshop.shipping;
import java.util.List;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.data.domain.Sort;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ResponseStatusException;
import org.springframework.http.HttpStatus;
@RestController
public class Controller {
private static final Logger logger = LoggerFactory.getLogger(Controller.class);
private String CART_URL = String.format("http://%s/shipping/", getenv("CART_ENDPOINT", "cart"));
public static List bytesGlobal = Collections.synchronizedList(new ArrayList<byte[]>());
@Autowired
private CityRepository cityrepo;
@Autowired
private CodeRepository coderepo;
private String getenv(String key, String def) {
String val = System.getenv(key);
val = val == null ? def : val;
return val;
}
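// Note: each call to /memory below allocates a 25 MB byte array and keeps it referenced in bytesGlobal, so heap usage grows until /free clears the list.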
@GetMapping(path = "/memory")
public int memory() {
byte[] bytes = new byte[1024 * 1024 * 25];
Arrays.fill(bytes,(byte)8);
bytesGlobal.add(bytes);
return bytesGlobal.size();
}
@GetMapping(path = "/free")
public int free() {
bytesGlobal.clear();
return bytesGlobal.size();
}
@GetMapping("/health")
public String health() {
return "OK";
}
@GetMapping("/count")
public String count() {
long count = cityrepo.count();
return String.valueOf(count);
}
@GetMapping("/codes")
public Iterable<Code> codes() {
logger.info("all codes");
Iterable<Code> codes = coderepo.findAll(Sort.by(Sort.Direction.ASC, "name"));
return codes;
}
@GetMapping("/cities/{code}")
public List<City> cities(@PathVariable String code) {
logger.info("cities by code {}", code);
List<City> cities = cityrepo.findByCode(code);
return cities;
}
@GetMapping("/match/{code}/{text}")
public List<City> match(@PathVariable String code, @PathVariable String text) {
logger.info("match code {} text {}", code, text);
if (text.length() < 3) {
throw new ResponseStatusException(HttpStatus.BAD_REQUEST);
}
List<City> cities = cityrepo.match(code, text);
/*
* This is a dirty hack to limit the result size
* I'm sure there is a more spring boot way to do this
* TODO - neater
*/
if (cities.size() > 10) {
cities = cities.subList(0, 9);
}
return cities;
}
@GetMapping("/calc/{id}")
public Ship calc(@PathVariable long id) {
double homeLatitude = 51.164896;
double homeLongitude = 7.068792;
logger.info("Calculation for {}", id);
City city = cityrepo.findById(id);
if (city == null) {
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "city not found");
}
Calculator calc = new Calculator(city);
long distance = calc.getDistance(homeLatitude, homeLongitude);
// avoid rounding
double cost = Math.rint(distance * 5) / 100.0;
Ship ship = new Ship(distance, cost);
logger.info("shipping {}", ship);
return ship;
}
// enforce content type
@PostMapping(path = "/confirm/{id}", consumes = "application/json", produces = "application/json")
public String confirm(@PathVariable String id, @RequestBody String body) {
logger.info("confirm id: {}", id);
logger.info("body {}", body);
CartHelper helper = new CartHelper(CART_URL);
String cart = helper.addToCart(id, body);
if (cart.equals("")) {
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "cart not found");
}
return cart;
}
}
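As a worked example of the /calc/{id} endpoint above: for a city whose great-circle distance from the home coordinates (51.164896, 7.068792) comes out at 1234 km (an illustrative value), the charge is Math.rint(1234 * 5) / 100.0 = 61.70, i.e. 0.05 per kilometre expressed in whole cents.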

View File

@@ -1,29 +0,0 @@
package com.instana.robotshop.shipping;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.sql.DataSource;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Bean;
@Configuration
public class JpaConfig {
private static final Logger logger = LoggerFactory.getLogger(JpaConfig.class);
@Bean
public DataSource getDataSource() {
String JDBC_URL = String.format("jdbc:mysql://%s/cities?useSSL=false&autoReconnect=true", System.getenv("DB_HOST") == null ? "mysql" : System.getenv("DB_HOST"));
logger.info("jdbc url {}", JDBC_URL);
DataSourceBuilder bob = DataSourceBuilder.create();
bob.driverClassName("com.mysql.jdbc.Driver");
bob.url(JDBC_URL);
bob.username("shipping");
bob.password("secret");
return bob.build();
}
}
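With DB_HOST unset, the URL built above resolves to jdbc:mysql://mysql/cities?useSSL=false&autoReconnect=true with user shipping, i.e. the default assumes a database reachable under the host name mysql.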

View File

@@ -1,30 +0,0 @@
package com.instana.robotshop.shipping;
import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.springframework.jdbc.datasource.AbstractDataSource;
import org.springframework.retry.annotation.Retryable;
import org.springframework.retry.annotation.Backoff;
class RetryableDataSource extends AbstractDataSource {
private DataSource delegate;
public RetryableDataSource(DataSource delegate) {
this.delegate = delegate;
}
@Override
@Retryable(maxAttempts = 10, backoff = @Backoff(multiplier = 2.3, maxDelay = 30000))
public Connection getConnection() throws SQLException {
return delegate.getConnection();
}
@Override
@Retryable(maxAttempts = 10, backoff = @Backoff(multiplier = 2.3, maxDelay = 30000))
public Connection getConnection(String username, String password) throws SQLException {
return delegate.getConnection(username, password);
}
}

View File

@@ -1,76 +0,0 @@
package com.instana.robotshop.shipping;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.sql.DataSource;
import com.instana.sdk.support.SpanSupport;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.context.annotation.Bean;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.retry.annotation.EnableRetry;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
import java.util.Random;
@SpringBootApplication
@EnableRetry
@EnableWebMvc
public class ShippingServiceApplication implements WebMvcConfigurer {
private static final String[] DATA_CENTERS = {
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1"
};
public static void main(String[] args) {
SpringApplication.run(ShippingServiceApplication.class, args);
}
@Bean
public BeanPostProcessor dataSourceWrapper() {
return new DataSourcePostProcessor();
}
@Order(Ordered.HIGHEST_PRECEDENCE)
private static class DataSourcePostProcessor implements BeanPostProcessor {
@Override
public Object postProcessBeforeInitialization(Object bean, String name) throws BeansException {
if (bean instanceof DataSource) {
bean = new RetryableDataSource((DataSource)bean);
}
return bean;
}
@Override
public Object postProcessAfterInitialization(Object bean, String name) throws BeansException {
return bean;
}
}
@Override
public void addInterceptors(InterceptorRegistry registry) {
registry.addInterceptor(new InstanaDatacenterTagInterceptor());
}
private static class InstanaDatacenterTagInterceptor extends HandlerInterceptorAdapter {
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
SpanSupport.annotate("datacenter", DATA_CENTERS[new Random().nextInt(DATA_CENTERS.length)]);
return super.preHandle(request, response, handler);
}
}
}

View File

@@ -1,20 +1,23 @@
package com.instana.robotshop.shipping;
package org.steveww.spark;
public class Location {
private double latitude;
private double longitude;
public class Calculator {
private double latitude = 0;
private double longitude = 0;
Calculator(double latitude, double longitude) {
public Location(double latitude, double longitude) {
this.latitude = latitude;
this.longitude = longitude;
}
Calculator(City city) {
this.latitude = city.getLatitude();
this.longitude = city.getLongitude();
public double getLatitude() {
return this.latitude;
}
public double getLongitude() {
return this.longitude;
}
/**
* Calculate the distance between this location and the target location.
* Use decimal lat/long degrees
@@ -31,13 +34,10 @@ public class Calculator {
double diffLatR = Math.toRadians(targetLatitude - this.latitude);
double diffLongR = Math.toRadians(targetLongitude - this.longitude);
double a = Math.sin(diffLatR / 2.0) * Math.sin(diffLatR / 2.0)
+ Math.cos(latitudeR) * Math.cos(targetLatitudeR)
* Math.sin(diffLongR / 2.0) * Math.sin(diffLongR);
double a = Math.sin(diffLatR / 2.0) * Math.sin(diffLatR / 2.0) + Math.cos(latitudeR) * Math.cos(targetLatitudeR) * Math.sin(diffLongR / 2.0) * Math.sin(diffLongR);
double c = 2.0 * Math.atan2(Math.sqrt(a), Math.sqrt(1.0 - a));
return (long)Math.rint(earthRadius * c / 1000.0);
}
}
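The calculation above is the haversine great-circle distance. Note that in the textbook form both half-angle sine factors are squared, whereas the last factor in both versions shown here appears as Math.sin(diffLongR); a self-contained sketch of the standard formula, assuming the same metres-to-kilometres conversion, is:

// Haversine great-circle distance in whole kilometres between two points
// given as decimal latitude/longitude degrees (illustrative sketch only).
class Haversine {
    static long distanceKm(double lat1, double lon1, double lat2, double lon2) {
        final double earthRadiusMetres = 6371000.0;
        double dLat = Math.toRadians(lat2 - lat1);
        double dLon = Math.toRadians(lon2 - lon1);
        double a = Math.sin(dLat / 2.0) * Math.sin(dLat / 2.0)
                 + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
                 * Math.sin(dLon / 2.0) * Math.sin(dLon / 2.0);
        double c = 2.0 * Math.atan2(Math.sqrt(a), Math.sqrt(1.0 - a));
        return (long) Math.rint(earthRadiusMetres * c / 1000.0);
    }
}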

View File

@@ -0,0 +1,252 @@
package org.steveww.spark;
import com.mchange.v2.c3p0.ComboPooledDataSource;
import spark.Spark;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.MapListHandler;
import org.apache.commons.dbutils.DbUtils;
import com.google.gson.Gson;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Types;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
public class Main {
private static String CART_URL = null;
private static String JDBC_URL = null;
private static Logger logger = LoggerFactory.getLogger(Main.class);
private static ComboPooledDataSource cpds = null;
public static void main(String[] args) {
// Get ENV configuration values
CART_URL = String.format("http://%s/shipping/", System.getenv("CART_ENDPOINT") != null ? System.getenv("CART_ENDPOINT") : "cart");
JDBC_URL = String.format("jdbc:mysql://%s/cities?useSSL=false&autoReconnect=true", System.getenv("DB_HOST") != null ? System.getenv("DB_HOST") : "mysql");
//
// Create database connector
// TODO - might need a retry loop here
//
try {
cpds = new ComboPooledDataSource();
cpds.setDriverClass( "com.mysql.jdbc.Driver" ); //loads the jdbc driver
cpds.setJdbcUrl( JDBC_URL );
cpds.setUser("shipping");
cpds.setPassword("secret");
// some config
cpds.setMinPoolSize(5);
cpds.setAcquireIncrement(5);
cpds.setMaxPoolSize(20);
cpds.setMaxStatements(180);
}
catch(Exception e) {
logger.error("Database Exception", e);
}
// Spark
Spark.port(8080);
Spark.get("/health", (req, res) -> "OK");
Spark.get("/count", (req, res) -> {
String data;
try {
data = queryToJson("select count(*) as count from cities");
res.header("Content-Type", "application/json");
} catch(Exception e) {
logger.error("count", e);
res.status(500);
data = "ERROR";
}
return data;
});
Spark.get("/codes", (req, res) -> {
String data;
try {
String query = "select code, name from codes order by name asc";
data = queryToJson(query);
res.header("Content-Type", "application/json");
} catch(Exception e) {
logger.error("codes", e);
res.status(500);
data = "ERROR";
}
return data;
});
// needed for load gen script
Spark.get("/cities/:code", (req, res) -> {
String data;
try {
String query = "select uuid, name from cities where country_code = ?";
logger.info("Query " + query);
data = queryToJson(query, req.params(":code"));
res.header("Content-Type", "application/json");
} catch(Exception e) {
logger.error("cities", e);
res.status(500);
data = "ERROR";
}
return data;
});
Spark.get("/match/:code/:text", (req, res) -> {
String data;
try {
String query = "select uuid, name from cities where country_code = ? and city like ? order by name asc limit 10";
logger.info("Query " + query);
data = queryToJson(query, req.params(":code"), req.params(":text") + "%");
res.header("Content-Type", "application/json");
} catch(Exception e) {
logger.error("match", e);
res.status(500);
data = "ERROR";
}
return data;
});
Spark.get("/calc/:uuid", (req, res) -> {
double homeLat = 51.164896;
double homeLong = 7.068792;
String data;
Location location = getLocation(req.params(":uuid"));
Ship ship = new Ship();
if(location != null) {
long distance = location.getDistance(homeLat, homeLong);
// charge 0.05 Euro per km
// try to avoid rounding errors
double cost = Math.rint(distance * 5) / 100.0;
ship.setDistance(distance);
ship.setCost(cost);
res.header("Content-Type", "application/json");
data = new Gson().toJson(ship);
} else {
data = "no location";
logger.warn(data);
res.status(400);
}
return data;
});
Spark.post("/confirm/:id", (req, res) -> {
logger.info("confirm " + req.params(":id") + " - " + req.body());
String cart = addToCart(req.params(":id"), req.body());
logger.info("new cart " + cart);
if(cart.equals("")) {
res.status(404);
} else {
res.header("Content-Type", "application/json");
}
return cart;
});
logger.info("Ready");
}
/**
* Query to Json - QED
**/
private static String queryToJson(String query, Object ... args) {
List<Map<String, Object>> listOfMaps = null;
try {
QueryRunner queryRunner = new QueryRunner(cpds);
listOfMaps = queryRunner.query(query, new MapListHandler(), args);
} catch (SQLException se) {
throw new RuntimeException("Couldn't query the database.", se);
}
return new Gson().toJson(listOfMaps);
}
/**
* Special case for location, dont want Json
**/
private static Location getLocation(String uuid) {
Location location = null;
Connection conn = null;
PreparedStatement stmt = null;
ResultSet rs = null;
String query = "select latitude, longitude from cities where uuid = ?";
try {
conn = cpds.getConnection();
stmt = conn.prepareStatement(query);
stmt.setInt(1, Integer.parseInt(uuid));
rs = stmt.executeQuery();
while(rs.next()) {
location = new Location(rs.getDouble(1), rs.getDouble(2));
break;
}
} catch(Exception e) {
logger.error("Location exception", e);
} finally {
DbUtils.closeQuietly(conn, stmt, rs);
}
return location;
}
private static String addToCart(String id, String data) {
StringBuilder buffer = new StringBuilder();
DefaultHttpClient httpClient = null;
try {
// set timeout to 5 secs
HttpParams httpParams = new BasicHttpParams();
HttpConnectionParams.setConnectionTimeout(httpParams, 5000);
httpClient = new DefaultHttpClient(httpParams);
HttpPost postRequest = new HttpPost(CART_URL + id);
StringEntity payload = new StringEntity(data);
payload.setContentType("application/json");
postRequest.setEntity(payload);
HttpResponse res = httpClient.execute(postRequest);
if(res.getStatusLine().getStatusCode() == 200) {
BufferedReader in = new BufferedReader(new InputStreamReader(res.getEntity().getContent()));
String line;
while((line = in.readLine()) != null) {
buffer.append(line);
}
} else {
logger.warn("Failed with code: " + res.getStatusLine().getStatusCode());
}
} catch(Exception e) {
logger.error("http client exception", e);
} finally {
if(httpClient != null) {
httpClient.getConnectionManager().shutdown();
}
}
return buffer.toString();
}
}
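For illustration, queryToJson("select count(*) as count from cities") serialises the MapListHandler rows with Gson, so the /count response body looks like [{"count":12345}] (the number is made up), returned with Content-Type application/json.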

View File

@@ -1,4 +1,4 @@
package com.instana.robotshop.shipping;
package org.steveww.spark;
/**
* Bean to hold shipping information
@@ -12,7 +12,7 @@ public class Ship {
this.cost = 0.0;
}
public Ship(long distance, double cost) {
public Ship(long distnace, double cost) {
this.distance = distance;
this.cost = cost;
}
@@ -32,10 +32,5 @@ public class Ship {
public double getCost() {
return this.cost;
}
@Override
public String toString() {
return String.format("Distance: %d Cost: %f", distance, cost);
}
}

View File

@@ -1,6 +0,0 @@
spring.jmx.enabled=true
management.endpoint.info.enabled=true
management.endpoint.health.enabled=true
management.endpoint.metrics.enabled=true
management.endpoint.env.enabled=true

View File

@@ -1,6 +1,4 @@
FROM node:14
ENV INSTANA_AUTO_PROFILE true
FROM node:10
EXPOSE 8080

View File

@@ -16,6 +16,6 @@
"pino": "^5.10.8",
"express-pino-logger": "^4.0.0",
"pino-pretty": "^2.5.0",
"@instana/collector": "^1.132.2"
"@instana/collector": "^1.90.0"
}
}

View File

@@ -41,20 +41,6 @@ app.use((req, res, next) => {
next();
});
app.use((req, res, next) => {
let dcs = [
"asia-northeast2",
"asia-south1",
"europe-west3",
"us-east1",
"us-west1"
];
let span = instana.currentSpan();
span.annotate('custom.sdk.tags.datacenter', dcs[Math.floor(Math.random() * dcs.length)]);
next();
});
app.use(bodyParser.urlencoded({ extended: true }));
app.use(bodyParser.json());
@@ -68,6 +54,7 @@ app.get('/health', (req, res) => {
// use REDIS INCR to track anonymous users
app.get('/uniqueid', (req, res) => {
req.log.error('Unique ID test');
// get number from Redis
redisClient.incr('anonymous-counter', (err, r) => {
if(!err) {

View File

@@ -1,28 +1,4 @@
FROM alpine AS build
ARG KEY
WORKDIR /instana
RUN apk add --update --no-cache curl
ENV ARTI_PATH='https://artifact-public.instana.io/artifactory/shared/com/instana/nginx_tracing/'
RUN if [ -n "$KEY" ]; then \
sensor_version=$(curl --user "_:$KEY" ${ARTI_PATH} | grep -o '>[0-9]\+\.[0-9]\+\.[0-9]\+'| cut -f 2 -d '>'|sort -V|tail -1 ); \
echo "Downloading sensor version ${sensor_version} for Nginx version 1.21.6" ; \
curl \
--output instana.zip \
--user "_:$KEY" \
${ARTI_PATH}/${sensor_version}/linux-amd64-glibc-nginx-1.21.6.zip && \
unzip instana.zip && \
mv glibc-libinstana_sensor.so libinstana_sensor.so && \
mv glibc-nginx-1.21.6-ngx_http_ot_module.so ngx_http_opentracing_module.so; \
else echo "KEY not provided. Not adding tracing"; \
touch dummy.so; \
fi
FROM nginx:1.21.6
FROM nginx:1.16
EXPOSE 8080
@@ -31,14 +7,11 @@ ENV CATALOGUE_HOST=catalogue \
CART_HOST=cart \
SHIPPING_HOST=shipping \
PAYMENT_HOST=payment \
RATINGS_HOST=ratings \
INSTANA_SERVICE_NAME=nginx-web
# Instana tracing
COPY --from=build /instana/*.so /tmp/
RATINGS_HOST=ratings
COPY entrypoint.sh /root/
ENTRYPOINT ["/root/entrypoint.sh"]
COPY default.conf.template /etc/nginx/conf.d/default.conf.template
COPY static /usr/share/nginx/html
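For the tracing download in the build stage above to run, the image needs the agent key passed as a build argument, presumably something like docker build --build-arg KEY=<your-agent-key> . (command illustrative); if KEY is empty the stage only creates dummy.so and the final image runs without tracing.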

View File

@@ -1,7 +1,3 @@
# Instana tracing
opentracing_load_tracer /usr/local/lib/libinstana_sensor.so /etc/instana-config.json;
opentracing_propagate_context;
server {
listen 8080;
server_name localhost;
@@ -22,7 +18,7 @@ server {
location /images/ {
expires 5s;
root /usr/share/nginx/html;
try_files $uri /images/placeholder.png;
try_files $uri /images/placeholder.jpg;
}
#error_page 404 /404.html;
@@ -86,3 +82,4 @@ server {
access_log off;
}
}

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# set -x
#set -x
# echo "arg 1 $1"
@@ -15,10 +15,8 @@ if [ -n "$INSTANA_EUM_KEY" -a -n "$INSTANA_EUM_REPORTING_URL" ]
then
echo "Enabling Instana EUM"
# use | instead of / as command delimiter to avoid escaping the url
# strip off any trailing /
SAFE_URL=$(echo "$INSTANA_EUM_REPORTING_URL" | sed 's|/*$||')
sed -i "s|INSTANA_EUM_KEY|$INSTANA_EUM_KEY|" $BASE_DIR/eum-tmpl.html
sed -i "s|INSTANA_EUM_REPORTING_URL|$SAFE_URL|" $BASE_DIR/eum-tmpl.html
sed -i "s|INSTANA_EUM_REPORTING_URL|$INSTANA_EUM_REPORTING_URL|" $BASE_DIR/eum-tmpl.html
cp $BASE_DIR/eum-tmpl.html $BASE_DIR/eum.html
else
echo "EUM not enabled"
@@ -31,30 +29,5 @@ chmod 644 $BASE_DIR/eum.html
# apply environment variables to default.conf
envsubst '${CATALOGUE_HOST} ${USER_HOST} ${CART_HOST} ${SHIPPING_HOST} ${PAYMENT_HOST} ${RATINGS_HOST}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
if [ -f /tmp/ngx_http_opentracing_module.so -a -f /tmp/libinstana_sensor.so ]
then
echo "Patching for Instana tracing"
mv /tmp/ngx_http_opentracing_module.so /usr/lib/nginx/modules
mv /tmp/libinstana_sensor.so /usr/local/lib
cat - /etc/nginx/nginx.conf << !EOF! > /tmp/nginx.conf
# Extra configuration for Instana tracing
load_module modules/ngx_http_opentracing_module.so;
# Pass through these env vars
env INSTANA_SERVICE_NAME;
env INSTANA_AGENT_HOST;
env INSTANA_AGENT_PORT;
env INSTANA_MAX_BUFFERED_SPANS;
env INSTANA_DEV;
!EOF!
mv /tmp/nginx.conf /etc/nginx/nginx.conf
echo "{}" > /etc/instana-config.json
else
echo "Tracing not enabled"
# remove tracing config
sed -i '1,3d' /etc/nginx/conf.d/default.conf
fi
exec nginx-debug -g "daemon off;"
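In the variant that computes SAFE_URL, the sed expression 's|/*$||' only strips trailing slashes, so an endpoint given as https://eum.example.com/ (illustrative) is substituted into eum-tmpl.html as https://eum.example.com, avoiding a double slash when the /eum.min.js path is appended.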

View File

@@ -1,13 +1,12 @@
<!-- EUM include -->
<script>
(function(s,t,a,n){s[t]||(s[t]=a,n=s[a]=function(){n.q.push(arguments)},
n.q=[],n.v=2,n.l=1*new Date)})(window,"InstanaEumObject","ineum");
ineum('reportingUrl', 'INSTANA_EUM_REPORTING_URL');
(function(i,s,o,g,r,a,m){i['InstanaEumObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//eum.instana.io/eum.min.js','ineum');
ineum('key', 'INSTANA_EUM_KEY');
ineum('trackSessions');
ineum('page', 'splash');
ineum('reportingUrl', 'INSTANA_EUM_REPORTING_URL');
ineum('page', 'splash.html');
</script>
<script defer crossorigin="anonymous" src="INSTANA_EUM_REPORTING_URL/eum.min.js"></script>
<!-- EUM include end -->

Binary image files changed (previews not shown): three existing images no longer present (20 KiB, 25 KiB, 23 KiB); web/static/images/C3P0.jpg added (15 KiB).

Some files were not shown because too many files have changed in this diff.