#1510 Improvements done in Circuit Breaker

This commit is contained in:
swarajsaaj
2020-10-01 21:09:39 +05:30
parent 9088ac51f6
commit b29bd66369
16 changed files with 754 additions and 252 deletions


@@ -52,40 +52,105 @@ In terms of code, the end user application is:
```java
public class App {
private static final Logger LOGGER = LoggerFactory.getLogger(App.class);
/**
* Program entry point.
*
* @param args command line args
*/
public static void main(String[] args) {
var serverStartTime = System.nanoTime();
var delayedService = new DelayedRemoteService(serverStartTime, 5);
//Set the circuit breaker parameters
var delayedServiceCircuitBreaker = new DefaultCircuitBreaker(delayedService, 3000, 2,
2000 * 1000 * 1000);
var quickService = new QuickRemoteService();
//Set the circuit breaker parameters
var quickServiceCircuitBreaker = new DefaultCircuitBreaker(quickService, 3000, 2,
2000 * 1000 * 1000);
//Create an object of monitoring service which makes both local and remote calls
var monitoringService = new MonitoringService(delayedServiceCircuitBreaker,
quickServiceCircuitBreaker);
//Fetch response from local resource
LOGGER.info(monitoringService.localResourceResponse());
//Fetch response from delayed service 2 times, to meet the failure threshold
LOGGER.info(monitoringService.delayedServiceResponse());
LOGGER.info(monitoringService.delayedServiceResponse());
//Fetch current state of delayed service circuit breaker after crossing failure threshold limit
//which is OPEN now
LOGGER.info(delayedServiceCircuitBreaker.getState());
//Meanwhile, the delayed service is down, fetch response from the healthy quick service
LOGGER.info(monitoringService.quickServiceResponse());
LOGGER.info(quickServiceCircuitBreaker.getState());
//Wait for the delayed service to become responsive
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
//Check the state of delayed circuit breaker, should be HALF_OPEN
LOGGER.info(delayedServiceCircuitBreaker.getState());
//Fetch response from delayed service, which should be healthy by now
LOGGER.info(monitoringService.delayedServiceResponse());
//As successful response is fetched, it should be CLOSED again.
LOGGER.info(delayedServiceCircuitBreaker.getState());
}
}
```
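The example above depends on `RemoteService` implementations (`DelayedRemoteService`, `QuickRemoteService`) and a `RemoteServiceException` that belong to this commit but are not shown in this hunk. Below is a minimal sketch of what they might look like, inferred from how `App` and `DefaultCircuitBreaker` use them; the method bodies and messages are assumptions, not the committed code.
```java
// Sketch only: in the real project each type would be public and live in its own file.
class RemoteServiceException extends Exception {
  RemoteServiceException(String message) {
    super(message);
  }
}

// Abstraction over the remote resource that the circuit breaker wraps.
interface RemoteService {
  String call() throws RemoteServiceException;
}

// A service that always responds successfully.
class QuickRemoteService implements RemoteService {
  @Override
  public String call() {
    return "Quick Service is working";
  }
}

// A service that stays down for a configured number of seconds after server start,
// simulating a slow startup.
class DelayedRemoteService implements RemoteService {
  private final long serverStartTime;
  private final int delay;

  DelayedRemoteService(long serverStartTime, int delay) {
    this.serverStartTime = serverStartTime;
    this.delay = delay;
  }

  @Override
  public String call() throws RemoteServiceException {
    // Fail until 'delay' seconds have passed since the simulated server start time.
    var elapsedSeconds = (System.nanoTime() - serverStartTime) / 1_000_000_000.0;
    if (elapsedSeconds < delay) {
      throw new RemoteServiceException("Delayed service is down");
    }
    return "Delayed service is working";
  }
}
```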
The monitoring service:
```java
public class MonitoringService {
private final CircuitBreaker delayedService;
private final CircuitBreaker quickService;
public MonitoringService(CircuitBreaker delayedService, CircuitBreaker quickService) {
this.delayedService = delayedService;
this.quickService = quickService;
}
//Assumption: Local service won't fail, no need to wrap it in circuit breaker logic
public String localResourceResponse() {
return "Local Service is working";
}
/**
* Fetch response from the delayed service (with some simulated startup time).
*
* @return response string
*/
public String delayedServiceResponse() {
try {
return this.delayedService.attemptRequest();
} catch (RemoteServiceException e) {
return e.getMessage();
}
}
/**
* Fetches response from a healthy service without any failure.
*
* @return response string
*/
public String quickServiceResponse() {
try {
return this.quickService.attemptRequest();
} catch (RemoteServiceException e) {
return e.getMessage();
}
}
@@ -95,76 +160,129 @@ As can be seen, it makes the call to get local resources directly, but it wraps the
remote (costly) service in a circuit breaker object, which prevents faults as follows:
```java
public class DefaultCircuitBreaker implements CircuitBreaker {
private final long timeout;
private final long retryTimePeriod;
private final RemoteService service;
long lastFailureTime;
int failureCount;
private final int failureThreshold;
private State state;
private final long futureTime = 1000L * 1000 * 1000 * 1000;
/**
* Constructor to create an instance of Circuit Breaker.
*
* @param timeout Timeout for the API request. Not necessary for this simple example
* @param failureThreshold Number of failures received from the remote service before changing
* state to 'OPEN'
* @param retryTimePeriod Time period after which a new request is made to remote service for
* status check.
*/
DefaultCircuitBreaker(RemoteService serviceToCall, long timeout, int failureThreshold,
long retryTimePeriod) {
this.service = serviceToCall;
// We start in a closed state hoping that everything is fine
this.state = State.CLOSED;
this.failureThreshold = failureThreshold;
// Timeout for the API request.
// Used to break the calls made to remote resource if it exceeds the limit
this.timeout = timeout;
this.retryTimePeriod = retryTimePeriod;
//An absurd amount of time in the future which basically indicates that the last failure never happened
this.lastFailureTime = System.nanoTime() + futureTime;
this.failureCount = 0;
}
@Override
public void recordSuccess() {
this.failureCount = 0;
this.lastFailureTime = System.nanoTime() + futureTime;
this.state = State.CLOSED;
}
@Override
public void recordFailure() {
failureCount = failureCount + 1;
this.lastFailureTime = System.nanoTime();
}
//Evaluate the current state based on failureThreshold, failureCount and lastFailureTime.
protected void evaluateState() {
if (failureCount >= failureThreshold) { //Then something is wrong with remote service
if ((System.nanoTime() - lastFailureTime) > retryTimePeriod) {
//We have waited long enough and should try checking if service is up
state = State.HALF_OPEN;
} else {
//Service would still probably be down
state = State.OPEN;
}
} else {
//Everything is working fine
state = State.CLOSED;
}
}
@Override
public String getState() {
evaluateState();
return state.name();
}
/**
* Break the circuit beforehand if it is known that the service is down, or connect the circuit
* manually if the service comes online before expected.
*
* @param state State to which the circuit should be set
*/
@Override
public void setState(State state) {
this.state = state;
switch (state) {
case OPEN:
this.failureCount = failureThreshold;
this.lastFailureTime = System.nanoTime();
break;
case HALF_OPEN:
this.failureCount = failureThreshold;
this.lastFailureTime = System.nanoTime() - retryTimePeriod;
break;
default:
this.failureCount = 0;
}
}
/**
* Executes service call.
*
* @return Value from the remote resource, stale response or a custom exception
*/
@Override
public String attemptRequest() throws RemoteServiceException {
evaluateState();
if (state == State.OPEN) {
// return a cached response if the circuit is in OPEN state
return "This is stale response from API";
} else {
// Make the API request if the circuit is not OPEN
try {
//In a real application, this would be run in a thread and the timeout
//parameter of the circuit breaker would be utilized to know if service
//is working. Here, we simulate that based on server response itself
var response = service.call();
// Yay!! the API responded fine. Let's reset everything.
recordSuccess();
return response;
} catch (RemoteServiceException ex) {
recordFailure();
throw ex;
}
}
}
}
```
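`DefaultCircuitBreaker` implements a `CircuitBreaker` interface that the monitoring service depends on but that is not included in this hunk. The following is a plausible sketch of that interface, inferred from the methods the implementation overrides above; the names come from the code, the documentation wording is an assumption.
```java
// Sketch inferred from the @Override methods in DefaultCircuitBreaker above;
// the committed interface may differ in details.
public interface CircuitBreaker {

  // Called after a successful remote call; resets the breaker to CLOSED.
  void recordSuccess();

  // Called after a failed remote call; increments the failure count.
  void recordFailure();

  // Current state (CLOSED, OPEN or HALF_OPEN), re-evaluated on each call.
  String getState();

  // Manually force a state, e.g. when an outage or recovery is already known.
  void setState(State state);

  // Invoke the wrapped remote service, or return a stale response while OPEN.
  String attemptRequest() throws RemoteServiceException;
}
```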
How does the above pattern prevent failures? Let's understand via this finite state machine
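The three states referenced throughout the code would typically be a small enum; here is a sketch (the enum itself is not shown in this hunk), with the transitions as implemented by `evaluateState()`, `recordSuccess()` and `recordFailure()` above.
```java
// Sketch of the State enum assumed by DefaultCircuitBreaker; the transition notes
// summarize the logic shown in evaluateState() above.
public enum State {
  // Normal operation: requests go through to the remote service.
  // Moves to OPEN once failureCount reaches failureThreshold.
  CLOSED,

  // Remote service is considered down: calls are short-circuited and a stale
  // response is returned. Moves to HALF_OPEN once retryTimePeriod has elapsed.
  OPEN,

  // Probation: the next request is allowed through to probe the service.
  // A success moves the breaker back to CLOSED, a failure back to OPEN.
  HALF_OPEN
}
```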