Spring Cloud: testing S3 client with Testcontainers - amazon-s3

I use Spring Cloud's ResourceLoader to access S3, e.g.:
public class S3DownUpLoader {

    private final String bucket = "my-bucket"; // injected from configuration in the real code
    private final ResourceLoader resourceLoader;

    @Autowired
    public S3DownUpLoader(ResourceLoader resourceLoader) {
        this.resourceLoader = resourceLoader;
    }

    public String storeOnS3(String filename, byte[] data) throws IOException {
        String location = "s3://" + bucket + "/" + filename;
        WritableResource writableResource = (WritableResource) this.resourceLoader.getResource(location);
        FileCopyUtils.copy(data, writableResource.getOutputStream());
        return filename;
    }
}
It works okay, and I need help testing the code with LocalStack/Testcontainers. I've tried the following test, but it does not work - my production profile gets picked up (the S3 client with the LocalStack config is not injected):
@RunWith(SpringRunner.class)
@SpringBootTest
public class S3DownUpLoaderTest {

    @ClassRule
    public static LocalStackContainer localstack = new LocalStackContainer().withServices(S3);

    @Autowired
    S3DownUpLoader s3DownUpLoader;

    @Test
    public void testA() {
        s3DownUpLoader.storeOnS3(...);
    }

    @TestConfiguration
    @EnableContextResourceLoader
    public static class S3Configuration {

        @Primary
        @Bean(destroyMethod = "shutdown")
        public AmazonS3 amazonS3() {
            return AmazonS3ClientBuilder
                    .standard()
                    .withEndpointConfiguration(localstack.getEndpointConfiguration(S3))
                    .withCredentials(localstack.getDefaultCredentialsProvider())
                    .build();
        }
    }
}

As we discussed on GitHub, we solve this problem in a slightly different way. I've actually never seen the WritableResource used the way you use it, which looks very interesting. Nonetheless, this is how we solve this issue:
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.profiles.active=test")
@ContextConfiguration(classes = AbstractAmazonS3Test.S3Configuration.class)
public abstract class AbstractAmazonS3Test {

    private static final String REGION = Regions.EU_WEST_1.getName();

    /**
     * Configure S3.
     */
    @TestConfiguration
    public static class S3Configuration {

        @Bean
        public AmazonS3 amazonS3() {
            // The LocalStack Docker image is running locally on port 4572 for S3
            final String serviceEndpoint = String.format("http://%s:%s", "127.0.0.1", "4572");
            return AmazonS3Client.builder()
                    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, REGION))
                    .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("dummyKey", "dummySecret")))
                    .build();
        }
    }
}
And a sample test:
public class CsvS3UploadServiceIntegrationTest extends AbstractAmazonS3Test {

    private static final String SUCCESS_CSV = "a,b";
    private static final String STANDARD_STORAGE = "STANDARD";

    @Autowired
    private AmazonS3 s3;

    @Autowired
    private S3ConfigurationProperties properties;

    @Autowired
    private CsvS3UploadService service;

    @Before
    public void setUp() {
        s3.createBucket(properties.getBucketName());
    }

    @After
    public void tearDown() {
        final String bucketName = properties.getBucketName();
        s3.listObjects(bucketName).getObjectSummaries().stream()
                .map(S3ObjectSummary::getKey)
                .forEach(key -> s3.deleteObject(bucketName, key));
        s3.deleteBucket(bucketName);
    }

    @Test
    public void uploadSuccessfulCsv() {
        service.uploadSuccessfulCsv(SUCCESS_CSV);
        final S3ObjectSummary s3ObjectSummary = getOnlyFileFromS3();
        assertThat(s3ObjectSummary.getKey(), containsString("-success.csv"));
        assertThat(s3ObjectSummary.getETag(), is("b345e1dc09f20fdefdea469f09167892"));
        assertThat(s3ObjectSummary.getStorageClass(), is(STANDARD_STORAGE));
        assertThat(s3ObjectSummary.getSize(), is(3L));
    }

    private S3ObjectSummary getOnlyFileFromS3() {
        final ObjectListing listing = s3.listObjects(properties.getBucketName());
        final List<S3ObjectSummary> objects = listing.getObjectSummaries();
        assertThat(objects, iterableWithSize(1));
        return Iterables.getOnlyElement(objects);
    }
}
And the code under test:
@Service
@RequiredArgsConstructor
@EnableConfigurationProperties(S3ConfigurationProperties.class)
public class CsvS3UploadServiceImpl implements CsvS3UploadService {

    private static final String CSV_MIME_TYPE = CSV_UTF_8.toString();

    private final AmazonS3 amazonS3;
    private final S3ConfigurationProperties properties;
    private final S3ObjectKeyService s3ObjectKeyService;

    @Override
    public void uploadSuccessfulCsv(final String source) {
        final String key = s3ObjectKeyService.getSuccessKey();
        doUpload(source, key, getObjectMetadata(source));
    }

    private void doUpload(final String source, final String key, final ObjectMetadata metadata) {
        try (ReaderInputStream in = new ReaderInputStream(new StringReader(source), UTF_8)) {
            final PutObjectRequest request = new PutObjectRequest(properties.getBucketName(), key, in, metadata);
            amazonS3.putObject(request);
        } catch (final IOException ioe) {
            throw new CsvUploadException("Unable to upload " + key, ioe);
        }
    }

    private ObjectMetadata getObjectMetadata(final String source) {
        final ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(CSV_MIME_TYPE);
        metadata.setContentLength(source.getBytes(UTF_8).length);
        metadata.setContentMD5(getMD5ChecksumAsBase64(source));
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        return metadata;
    }

    private String getMD5ChecksumAsBase64(final String source) {
        final HashCode md5 = Hashing.md5().hashString(source, UTF_8);
        return Base64.getEncoder().encodeToString(md5.asBytes());
    }
}

It seems the only way to provide a custom amazonS3 bean for the ResourceLoader is to inject it manually. The test looks like this:
@RunWith(SpringRunner.class)
@SpringBootTest
@ContextConfiguration(classes = S3DownUpLoaderTest.S3Configuration.class)
public class S3DownUpLoaderTest implements ApplicationContextAware {

    private static final String BUCKET_NAME = "bucket";

    @ClassRule
    public static LocalStackContainer localstack = new LocalStackContainer().withServices(S3);

    @Autowired
    S3DownUpLoader s3DownUpLoader;

    @Autowired
    SimpleStorageProtocolResolver resourceLoader;

    @Autowired
    AmazonS3 amazonS3;

    @Before
    public void setUp() {
        amazonS3.createBucket(BUCKET_NAME);
    }

    @Test
    public void someTestA() throws IOException {
        ....
    }

    @After
    public void tearDown() {
        ObjectListing objectListing = amazonS3.listObjects(BUCKET_NAME);
        while (true) {
            for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
                amazonS3.deleteObject(BUCKET_NAME, summary.getKey());
            }
            // more objects to retrieve?
            if (objectListing.isTruncated()) {
                objectListing = amazonS3.listNextBatchOfObjects(objectListing);
            } else {
                break;
            }
        }
        amazonS3.deleteBucket(BUCKET_NAME);
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        if (applicationContext instanceof ConfigurableApplicationContext) {
            ConfigurableApplicationContext configurableApplicationContext = (ConfigurableApplicationContext) applicationContext;
            configurableApplicationContext.addProtocolResolver(this.resourceLoader);
        }
    }

    @TestConfiguration
    public static class S3Configuration {

        @Bean
        public S3DownUpLoader s3DownUpLoader(ResourceLoader resourceLoader) {
            return new S3DownUpLoader(resourceLoader);
        }

        @Bean(destroyMethod = "shutdown")
        public AmazonS3 amazonS3() {
            return AmazonS3ClientBuilder
                    .standard()
                    .withEndpointConfiguration(localstack.getEndpointConfiguration(S3))
                    .withCredentials(localstack.getDefaultCredentialsProvider())
                    .build();
        }

        @Bean
        public SimpleStorageProtocolResolver resourceLoader() {
            return new SimpleStorageProtocolResolver(amazonS3());
        }
    }
}
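For reference, the elided someTestA could exercise the round trip along these lines. This is only a sketch: the payload, the key, the use of com.amazonaws.util.IOUtils, and the assumption that S3DownUpLoader's bucket matches BUCKET_NAME are mine, not from the original code:

@Test
public void someTestA() throws IOException {
    // Store a payload through the resource loader, then read it back directly
    // with the S3 client (assumes the loader's bucket is BUCKET_NAME).
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
    s3DownUpLoader.storeOnS3("test.txt", payload);

    S3Object stored = amazonS3.getObject(BUCKET_NAME, "test.txt");
    assertArrayEquals(payload, IOUtils.toByteArray(stored.getObjectContent()));
}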


How to achieve failure tolerance for a Flink sink writing gzip-compressed data to HDFS?

We want to write compressed data to HDFS with Flink's BucketingSink or StreamingFileSink. I have written my own Writer, which works fine if no failure occurs. However, when it encounters a failure and restarts from a checkpoint, it will generate a valid-length file (Hadoop < 2.7) or truncate the file. Unfortunately, gzip files are binary files with a trailer at the end, so simple truncation does not work in my case. Any ideas for enabling exactly-once semantics for a compressing HDFS sink?
That's my writer's code:
public class HdfsCompressStringWriter extends StreamWriterBaseV2<JSONObject> {

    private static final long serialVersionUID = 2L;

    /**
     * The {@code CompressFSDataOutputStream} for the current part file.
     */
    private transient GZIPOutputStream compressionOutputStream;

    public HdfsCompressStringWriter() {}

    @Override
    public void open(FileSystem fs, Path path) throws IOException {
        super.open(fs, path);
        this.setSyncOnFlush(true);
        compressionOutputStream = new GZIPOutputStream(this.getStream(), true);
    }

    @Override
    public void close() throws IOException {
        if (compressionOutputStream != null) {
            compressionOutputStream.close();
            compressionOutputStream = null;
        }
        resetStream();
    }

    @Override
    public void write(JSONObject element) throws IOException {
        if (element == null || !element.containsKey("body")) {
            return;
        }
        String content = element.getString("body") + "\n";
        compressionOutputStream.write(content.getBytes());
        compressionOutputStream.flush();
    }

    @Override
    public Writer<JSONObject> duplicate() {
        return new HdfsCompressStringWriter();
    }
}
I would recommend implementing a BulkWriter for the StreamingFileSink which compresses the elements via a GZIPOutputStream. The code could look like the following:
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);

    final DataStream<Integer> input = env.addSource(new InfinitySource());

    final StreamingFileSink<Integer> streamingFileSink = StreamingFileSink
            .<Integer>forBulkFormat(new Path("output"), new GzipBulkWriterFactory<>())
            .build();
    input.addSink(streamingFileSink);

    env.execute();
}

private static class GzipBulkWriterFactory<T> implements BulkWriter.Factory<T> {

    @Override
    public BulkWriter<T> create(FSDataOutputStream fsDataOutputStream) throws IOException {
        final GZIPOutputStream gzipOutputStream = new GZIPOutputStream(fsDataOutputStream, true);
        return new GzipBulkWriter<>(new ObjectOutputStream(gzipOutputStream), gzipOutputStream);
    }
}

private static class GzipBulkWriter<T> implements BulkWriter<T> {

    private final GZIPOutputStream gzipOutputStream;
    private final ObjectOutputStream objectOutputStream;

    public GzipBulkWriter(ObjectOutputStream objectOutputStream, GZIPOutputStream gzipOutputStream) {
        this.gzipOutputStream = gzipOutputStream;
        this.objectOutputStream = objectOutputStream;
    }

    @Override
    public void addElement(T t) throws IOException {
        objectOutputStream.writeObject(t);
    }

    @Override
    public void flush() throws IOException {
        objectOutputStream.flush();
    }

    @Override
    public void finish() throws IOException {
        objectOutputStream.flush();
        gzipOutputStream.finish();
    }
}
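A short note on why this sidesteps the truncation problem: with bulk formats, the StreamingFileSink only rolls part files on checkpoints, and BulkWriter.finish() writes the gzip trailer when a part file is closed. After a failure, recovery therefore works with finalized gzip files rather than having to truncate a half-written stream.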

Extent report is not giving a proper report on parallel execution

ReporterClass.java:
package POM_Classes;

import com.aventstack.extentreports.AnalysisStrategy;
import com.aventstack.extentreports.ExtentReports;
import com.aventstack.extentreports.ExtentTest;
import com.aventstack.extentreports.reporter.ExtentHtmlReporter;

public class ReporterClass {

    public static ExtentHtmlReporter html;
    public ExtentReports extent;
    public ExtentTest test, suiteTest;
    public String testCaseName, testNodes, testDescription, category, authors;

    public void startResult() {
        html = new ExtentHtmlReporter("./reports/result.html");
        html.setAppendExisting(true);
        extent = new ExtentReports();
        extent.attachReporter(html);
    }

    /*public ExtentTest startTestModule(String testCaseName, String testDescription) {
        suiteTest = extent.createTest(testCaseName, testDescription);
        return suiteTest;
    }*/

    public ExtentTest startTestCase(String testName) {
        System.out.println(testName);
        test = extent.createTest(testName);
        return test;
    }

    public void reportStep(String desc, String status) {
        if (status.equalsIgnoreCase("PASS")) {
            test.pass(desc);
        } else if (status.equalsIgnoreCase("FAIL")) {
            test.fail(desc);
        } else if (status.equalsIgnoreCase("WARNING")) {
            test.warning(desc);
        } else if (status.equalsIgnoreCase("INFO")) {
            test.info(desc);
        }
    }

    public void endTestcase() {
        extent.setAnalysisStrategy(AnalysisStrategy.CLASS);
    }

    public void endResult() {
        extent.flush();
    }
}
Usage:
@Test
public void test961_NavigateToMyAlertsAndAddNewAlerts() // renamed: Java method names cannot start with a digit
        throws IOException, InterruptedException, ATUTestRecorderException, APIException {
    driver = launchTargetUrl(this.getClass().getSimpleName().toString());
    startResult();
    test = startTestCase(Thread.currentThread().getStackTrace()[1].getMethodName().toString());
    LoginApplication(driver, transactionusername, transactionuserpassword, test);
    HomePage homePage = new HomePage(driver, test);
    homePage.MyAlerts.click();
    MyAlerts myalerts = new MyAlerts(driver, test);
    String selectedcardName = driver.findElement(myalerts.cardName).getText().trim();
    System.out.println(selectedcardName);
}

@AfterMethod
public void afterMethod(ITestResult result) throws ATUTestRecorderException {
    resultOfTest(result);
    endTestcase();
    endResult();
    closeBrowsers(driver);
}
The test case that completes first has its report, and when another test case completes, its result overrides the old one. If I change to public static ExtentReports extent; then it maintains only one thread, so it logs only one test case and the other parallel execution is not even recorded. How do I resolve this?
OK, here you go. I haven't tested this yet, but it should allow you to use Extent with parallel execution.
Reporter:
public abstract class ReporterClass {

    private static final ExtentReports EXTENT = ExtentManager.getInstance();

    public ExtentTest test, suiteTest;
    public String testCaseName, testNodes, testDescription, category, authors;

    public synchronized ExtentTest startTestCase(String testName) {
        System.out.println(testName);
        return ExtentTestManager.createTest(testName);
    }

    public synchronized void reportStep(String desc, String status) {
        if (status.equalsIgnoreCase("PASS")) {
            ExtentTestManager.getTest().pass(desc);
        } else if (status.equalsIgnoreCase("FAIL")) {
            ExtentTestManager.getTest().fail(desc);
        } else if (status.equalsIgnoreCase("WARNING")) {
            ExtentTestManager.getTest().warning(desc);
        } else if (status.equalsIgnoreCase("INFO")) {
            ExtentTestManager.getTest().info(desc);
        }
    }

    public synchronized void endResult() {
        EXTENT.flush();
    }

    @BeforeMethod
    public synchronized void beforeMethod(Method method) {
        startTestCase(method.getName());
    }

    @AfterMethod
    public synchronized void afterMethod(ITestResult result) throws ATUTestRecorderException {
        // Map the TestNG result onto reportStep's (String, String) signature
        reportStep(String.valueOf(result.getThrowable()),
                result.getStatus() == ITestResult.SUCCESS ? "PASS" : "FAIL");
        endResult();
        closeBrowsers(driver); // driver and closeBrowsers(..) are assumed to come from your test infrastructure
    }
}
Base:
public abstract class BaseClass extends ReporterClass {
    // .. abstractions
}
Extent utils:
public class ExtentTestManager {

    static Map<Integer, ExtentTest> extentTestMap = new HashMap<Integer, ExtentTest>();
    private static final ExtentReports EXTENT = ExtentManager.getInstance();

    public static synchronized ExtentTest getTest() {
        return extentTestMap.get((int) (long) (Thread.currentThread().getId()));
    }

    public static synchronized ExtentTest createTest(String testName) {
        return createTest(testName, "");
    }

    public static synchronized ExtentTest createTest(String testName, String desc) {
        ExtentTest test = EXTENT.createTest(testName, desc);
        extentTestMap.put((int) (long) (Thread.currentThread().getId()), test);
        return test;
    }
}

public class ExtentManager {

    private static ExtentReports extent;

    public synchronized static ExtentReports getInstance() {
        if (extent == null) {
            createInstance("reports/extent.html");
        }
        return extent;
    }

    public synchronized static ExtentReports createInstance(String fileName) {
        ExtentHtmlReporter htmlReporter = new ExtentHtmlReporter(fileName);
        htmlReporter.config().setTestViewChartLocation(ChartLocation.BOTTOM);
        htmlReporter.config().setChartVisibilityOnOpen(true);
        htmlReporter.config().setTheme(Theme.STANDARD);
        htmlReporter.config().setDocumentTitle(fileName);
        htmlReporter.config().setEncoding("utf-8");
        htmlReporter.config().setReportName(fileName);
        htmlReporter.setAppendExisting(true);
        extent = new ExtentReports();
        extent.setAnalysisStrategy(AnalysisStrategy.CLASS);
        extent.attachReporter(htmlReporter);
        return extent;
    }
}
Finally, your slim tests. Notice there are zero lines of reporter code here - see ReporterClass.
public class MyTestsClass extends BaseClass {

    @Test
    public void test961_NavigateToMyAlertsAndAddNewAlerts() // renamed: method names cannot start with a digit
            throws IOException, InterruptedException, ATUTestRecorderException, APIException {
        driver = launchTargetUrl(this.getClass().getSimpleName().toString());
        LoginApplication(driver, transactionusername, transactionuserpassword, test);
        HomePage homePage = new HomePage(driver, test);
        homePage.MyAlerts.click();
        MyAlerts myalerts = new MyAlerts(driver, test);
        String selectedcardName = driver.findElement(myalerts.cardName).getText().trim();
        System.out.println(selectedcardName);
    }
}
Add the class below to your project. First you need to add the respective objects and then retrieve them accordingly. Declaring the Extent report and the driver as static is a big problem in parallel execution; avoid static state as far as possible by using the class below.
import java.util.ArrayList;
import java.util.List;
import org.openqa.selenium.WebDriver;
import com.aventstack.extentreports.ExtentReports;
import com.aventstack.extentreports.ExtentTest;

public class WebDriverFactory {

    private static ThreadLocal<WebDriver> drivers = new ThreadLocal<>();
    private static List<WebDriver> storeDrivers = new ArrayList<WebDriver>();
    private static List<ExtentTest> extent = new ArrayList<ExtentTest>();
    private static ThreadLocal<ExtentTest> reports = new ThreadLocal<ExtentTest>();

    static {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                storeDrivers.stream().forEach(WebDriver::quit);
            }
        });
    }

    public static WebDriver getDriver() {
        return drivers.get();
    }

    public static ExtentTest getextentReportObject() {
        return reports.get();
    }

    public static void addDriver(WebDriver driver) {
        storeDrivers.add(driver);
        drivers.set(driver);
    }

    public static void addExtentReportObject(ExtentTest report) {
        extent.add(report);
        reports.set(report);
    }

    public static void removeDriver() {
        storeDrivers.remove(drivers.get());
        drivers.remove();
    }
}
Add and invoke the objects in the following way:

WebDriverFactory.addExtentReportObject(extent.createTest("Monitor Scenario").createNode("Monitor Page Validation"));
WebDriverFactory.getextentReportObject().assignCategory("@SmokeTest");
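For completeness, a per-test lifecycle using the factory could look roughly like this. This is only a sketch: ChromeDriver, the test method, and reuse of the ExtentManager singleton from the earlier answer are assumptions, not from the original post:

public class SampleParallelTest {

    private static final ExtentReports extent = ExtentManager.getInstance();

    @BeforeMethod
    public void setUp(Method method) {
        // Each TestNG thread registers its own driver and report node.
        WebDriverFactory.addDriver(new ChromeDriver());
        WebDriverFactory.addExtentReportObject(extent.createTest(method.getName()));
    }

    @Test
    public void openHomePage() {
        WebDriverFactory.getDriver().get("https://example.com");
        WebDriverFactory.getextentReportObject().pass("Opened home page");
    }

    @AfterMethod
    public void tearDown() {
        // Unregister this thread's driver; the shutdown hook quits any leftovers.
        WebDriverFactory.removeDriver();
        extent.flush();
    }
}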

RabbitMQ not deserializing message: conversion error

I've seen some related questions here, but none worked for me - RabbitMQ will not convert my message coming from another application:
Caused by: org.springframework.amqp.AmqpException: No method found for class [B
Below is my configuration class for receiving the messages.
@Configuration
public class RabbitConfiguration implements RabbitListenerConfigurer {

    public final static String EXCHANGE_NAME = "wallet-accounts";
    public final static String QUEUE_PAYMENT = "wallet-accounts.payment";
    public final static String QUEUE_RECHARGE = "wallet-accounts.recharge";

    @Bean
    public List<Declarable> ds() {
        return queues(QUEUE_PAYMENT, QUEUE_RECHARGE);
    }

    @Autowired
    private ConnectionFactory rabbitConnectionFactory;

    @Bean
    public AmqpAdmin amqpAdmin() {
        return new RabbitAdmin(rabbitConnectionFactory);
    }

    @Bean
    public TopicExchange exchange() {
        return new TopicExchange(EXCHANGE_NAME);
    }

    private List<Declarable> queues(String... names) {
        List<Declarable> result = new ArrayList<>();
        for (int i = 0; i < names.length; i++) {
            result.add(makeQueue(names[i]));
            result.add(makeBinding(names[i]));
        }
        return result;
    }

    private static Binding makeBinding(String queueName) {
        return new Binding(queueName, DestinationType.QUEUE, EXCHANGE_NAME, queueName, null);
    }

    private static Queue makeQueue(String name) {
        return new Queue(name);
    }

    @Bean
    public MappingJackson2MessageConverter jackson2Converter() {
        MappingJackson2MessageConverter converter = new MappingJackson2MessageConverter();
        return converter;
    }

    @Bean
    public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() {
        DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory();
        factory.setMessageConverter(jackson2Converter());
        return factory;
    }

    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory());
    }
}
Using this other configuration, the error is almost the same:
Caused by: org.springframework.amqp.support.converter.MessageConversionException: failed to resolve class name. Class not found [br.com.beblue.wallet.payment.application.accounts.PaymentEntryCommand]
Configuration:
@Configuration
public class RabbitConfiguration {

    public final static String EXCHANGE_NAME = "wallet-accounts";
    public final static String QUEUE_PAYMENT = "wallet-accounts.payment";
    public final static String QUEUE_RECHARGE = "wallet-accounts.recharge";

    @Bean
    public List<Declarable> ds() {
        return queues(QUEUE_PAYMENT, QUEUE_RECHARGE);
    }

    @Autowired
    private ConnectionFactory rabbitConnectionFactory;

    @Bean
    public AmqpAdmin amqpAdmin() {
        return new RabbitAdmin(rabbitConnectionFactory);
    }

    @Bean
    public TopicExchange exchange() {
        return new TopicExchange(EXCHANGE_NAME);
    }

    @Bean
    public MessageConverter jsonMessageConverter() {
        return new Jackson2JsonMessageConverter();
    }

    private List<Declarable> queues(String... names) {
        List<Declarable> result = new ArrayList<>();
        for (int i = 0; i < names.length; i++) {
            result.add(makeQueue(names[i]));
            result.add(makeBinding(names[i]));
        }
        return result;
    }

    private static Binding makeBinding(String queueName) {
        return new Binding(queueName, DestinationType.QUEUE, EXCHANGE_NAME, queueName, null);
    }

    private static Queue makeQueue(String name) {
        return new Queue(name);
    }
}
Can anyone tell me what's wrong with these settings, or what's missing?
No method found for class [B

This means the default SimpleMessageConverter is in play; it is not aware of the application/json content type and simply falls back to returning the raw byte[].

Class not found [br.com.beblue.wallet.payment.application.accounts.PaymentEntryCommand]

This means the Jackson2JsonMessageConverter cannot convert your application/json payload: the incoming __TypeId__ header, which names the class of the content, refers to a class that cannot be found on the local classpath.
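If the producer's class does not exist on the consumer's classpath, one common remedy is to map the incoming __TypeId__ value onto a local class via a DefaultClassMapper. A sketch, assuming you have a local PaymentEntryCommand DTO with the same JSON shape:

@Bean
public MessageConverter jsonMessageConverter() {
    Jackson2JsonMessageConverter converter = new Jackson2JsonMessageConverter();
    DefaultClassMapper classMapper = new DefaultClassMapper();
    // Map the producer-side __TypeId__ header value to a class that exists locally.
    Map<String, Class<?>> idClassMapping = new HashMap<>();
    idClassMapping.put(
            "br.com.beblue.wallet.payment.application.accounts.PaymentEntryCommand",
            PaymentEntryCommand.class); // local DTO (assumed)
    classMapper.setIdClassMapping(idClassMapping);
    converter.setClassMapper(classMapper);
    return converter;
}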
Your DefaultMessageHandlerMethodFactory configuration definitely does not apply to the AMQP conversion, which happens earlier, in the listener container. You should instead define a SimpleRabbitListenerContainerFactory bean and use its setMessageConverter, injecting the proper org.springframework.amqp.support.converter.MessageConverter implementation.
https://docs.spring.io/spring-amqp/docs/1.7.3.RELEASE/reference/html/_reference.html#async-annotation-conversion
From the Spring Boot perspective, there is a SimpleRabbitListenerContainerFactoryConfigurer for this purpose:
https://docs.spring.io/spring-boot/docs/1.5.6.RELEASE/reference/htmlsingle/#boot-features-using-amqp-receiving
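For illustration, a container factory wired with the JSON converter could look like this (a sketch; the bean and variable names are arbitrary, not prescribed by the library):

@Bean
public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory) {
    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
    factory.setConnectionFactory(connectionFactory);
    // The converter used for @RabbitListener method payloads.
    factory.setMessageConverter(new Jackson2JsonMessageConverter());
    return factory;
}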

Unit testing a JEE filter

I am trying to test this filter:
public class HttpMethodOverrideHeaderFilter extends OncePerRequestFilter {

    private static final String X_HTTP_METHOD_OVERRIDE_HEADER = "X-HTTP-Method-Override";

    @Override
    protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
            throws ServletException, IOException {
        if (isMethodOverriden(request)) {
            HttpServletRequest wrapper = new HttpMethodRequestWrapper(request,
                    request.getHeader(X_HTTP_METHOD_OVERRIDE_HEADER).toUpperCase(Locale.ENGLISH));
            filterChain.doFilter(wrapper, response);
        } else {
            filterChain.doFilter(request, response);
        }
    }

    private boolean isMethodOverriden(HttpServletRequest request) {
        String methodOverride = request.getHeader(X_HTTP_METHOD_OVERRIDE_HEADER);
        return RequestMethod.POST.name().equalsIgnoreCase(request.getMethod()) &&
                (RequestMethod.PUT.name().equalsIgnoreCase(methodOverride) ||
                 RequestMethod.DELETE.name().equalsIgnoreCase(methodOverride));
    }

    protected static class HttpMethodRequestWrapper extends HttpServletRequestWrapper {

        private final String method;

        public HttpMethodRequestWrapper(HttpServletRequest request, String method) {
            super(request);
            this.method = method;
        }

        @Override
        public String getMethod() {
            return this.method;
        }
    }
}
And this is the unit test:
@RunWith(MockitoJUnitRunner.class)
public class HttpMethodOverrideHeaderFilterTest {

    private static final String X_HTTP_METHOD_OVERRIDE_HEADER = "X-HTTP-Method-Override";

    private HttpMethodOverrideHeaderFilter httpMethodOverrideHeaderFilter;

    @Mock
    private HttpServletRequest httpServletRequest;

    @Mock
    private HttpServletResponse httpServletResponse;

    @Mock
    private FilterChain filterChain;

    @Before
    public void setUp() {
        httpMethodOverrideHeaderFilter = new HttpMethodOverrideHeaderFilter();
    }

    @Test
    public void testDoFilterInternalWithPUTMethodAsOverrideHeader() throws Exception {
        when(httpServletRequest.getHeader(X_HTTP_METHOD_OVERRIDE_HEADER)).thenReturn("PUT");
        when(httpServletRequest.getMethod()).thenReturn("POST");
        HttpServletRequest wrapper = new HttpMethodOverrideHeaderFilter.HttpMethodRequestWrapper(httpServletRequest, "PUT");
        httpMethodOverrideHeaderFilter.doFilterInternal(httpServletRequest, httpServletResponse, filterChain);
        verify(filterChain).doFilter(wrapper, httpServletResponse);
    }
}
The test does not pass because the wrapper the filter creates is not the same instance as the one in my test. Basically, what I need to verify is that the wrapper's method was set to PUT. Any ideas?
I found a way to do it:
@Test
public void testDoFilterInternalWithPUTMethodAsOverrideHeader() throws Exception {
    when(httpServletRequest.getHeader(X_HTTP_METHOD_OVERRIDE_HEADER)).thenReturn("PUT");
    when(httpServletRequest.getMethod()).thenReturn("POST");

    httpMethodOverrideHeaderFilter.doFilterInternal(httpServletRequest, httpServletResponse, filterChain);

    ArgumentCaptor<ServletRequest> requestCaptor = ArgumentCaptor.forClass(ServletRequest.class);
    ArgumentCaptor<ServletResponse> responseCaptor = ArgumentCaptor.forClass(ServletResponse.class);
    verify(filterChain).doFilter(requestCaptor.capture(), responseCaptor.capture());

    HttpMethodOverrideHeaderFilter.HttpMethodRequestWrapper wrapper =
            (HttpMethodOverrideHeaderFilter.HttpMethodRequestWrapper) requestCaptor.getValue();
    assertEquals("PUT", wrapper.getMethod()); // expected value first
}
If anyone knows a better way, let me know!
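One alternative, since the filter extends Spring's OncePerRequestFilter: use the mock web objects from spring-test instead of Mockito mocks. MockFilterChain keeps a reference to the request it was invoked with, so no captor is needed. A sketch (the URL is a placeholder):

@Test
public void putOverrideHeaderChangesMethod() throws Exception {
    MockHttpServletRequest request = new MockHttpServletRequest("POST", "/anything");
    request.addHeader("X-HTTP-Method-Override", "PUT");
    MockHttpServletResponse response = new MockHttpServletResponse();
    MockFilterChain chain = new MockFilterChain();

    new HttpMethodOverrideHeaderFilter().doFilter(request, response, chain);

    // MockFilterChain records the (possibly wrapped) request passed down the chain.
    HttpServletRequest forwarded = (HttpServletRequest) chain.getRequest();
    assertEquals("PUT", forwarded.getMethod());
}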

How to test a PUT RestController in Spring Boot

How can I test a PUT request with Spring Boot?
I have this method:
@RequestMapping(method = RequestMethod.PUT, value = "/")
public NaturezaTitulo save(@RequestBody NaturezaTitulo naturezaTitulo) {
    return naturezaTituloService.save(naturezaTitulo);
}
and this test class:
@RunWith(SpringJUnit4ClassRunner.class)
@SpringApplicationConfiguration(classes = Application.class)
@WebAppConfiguration
public class NaturezaTituloControllerTest {

    private MediaType contentType = new MediaType(MediaType.APPLICATION_JSON.getType(),
            MediaType.APPLICATION_JSON.getSubtype(),
            Charset.forName("utf8"));

    private MockMvc mockMvc;

    private HttpMessageConverter mappingJackson2HttpMessageConverter;

    private List<NaturezaTitulo> naturezaTituloList = new ArrayList<>();

    @Autowired
    private WebApplicationContext webApplicationContext;

    @Autowired
    void setConverters(HttpMessageConverter<?>[] converters) {
        this.mappingJackson2HttpMessageConverter = Arrays.asList(converters).stream().filter(
                hmc -> hmc instanceof MappingJackson2HttpMessageConverter).findAny().get();
        Assert.assertNotNull("the JSON message converter must not be null",
                this.mappingJackson2HttpMessageConverter);
    }

    @Before
    public void setup() throws Exception {
        this.mockMvc = webAppContextSetup(webApplicationContext).build();
    }

    @Test
    public void naturezaTituloNotFound() throws Exception {
        mockMvc.perform(get("/naturezatitulo/55ce2dd6222e629f4b8d6fe0"))
                .andExpect(status().is4xxClientError());
    }

    @Test
    public void naturezaTituloSave() throws Exception {
        NaturezaTitulo naturezaTitulo = new NaturezaTitulo();
        naturezaTitulo.setNatureza("Testando");
        mockMvc.perform(put("/naturezatitulo/").content(this.json(naturezaTitulo))
                .contentType(contentType))
                .andExpect(jsonPath("$.id", notNullValue()));
    }

    protected String json(Object o) throws IOException {
        MockHttpOutputMessage mockHttpOutputMessage = new MockHttpOutputMessage();
        this.mappingJackson2HttpMessageConverter.write(
                o, MediaType.APPLICATION_JSON, mockHttpOutputMessage);
        return mockHttpOutputMessage.getBodyAsString();
    }
}
but I got this error:

java.lang.IllegalArgumentException: json can not be null or empty at
com.jayway.jsonpath.internal.Utils.notEmpty(Utils.java:259)

How can I pass an object in the body of a PUT test? Thanks.
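Not an authoritative fix, but the error itself points at the response body: jsonPath("$.id", ...) is being evaluated against an empty body, which usually means the request never reached the handler (for example, a 404/405 on the PUT URL) or the handler returned null. Asserting the status first and printing the exchange narrows it down:

@Test
public void naturezaTituloSave() throws Exception {
    NaturezaTitulo naturezaTitulo = new NaturezaTitulo();
    naturezaTitulo.setNatureza("Testando");
    mockMvc.perform(put("/naturezatitulo/").content(this.json(naturezaTitulo))
            .contentType(contentType))
            .andDo(print())                  // dumps the request/response to the console
            .andExpect(status().isOk())      // fails fast on a 404/405 before jsonPath runs
            .andExpect(jsonPath("$.id", notNullValue()));
}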