// BatchConfig.java
package com.example.demo.batch;

import com.example.demo.model.User;
import com.example.demo.model.UserDTO;
import org.springframework.batch.core.Job;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.job.builder.JobBuilder;
import org.springframework.batch.core.repository.JobRepository;
import org.springframework.batch.core.step.builder.StepBuilder;
import org.springframework.batch.item.ItemProcessor;
import org.springframework.batch.item.database.JdbcBatchItemWriter;
import org.springframework.batch.item.database.builder.JdbcBatchItemWriterBuilder;
import org.springframework.batch.item.file.FlatFileItemReader;
import org.springframework.batch.item.file.builder.FlatFileItemReaderBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.ClassPathResource;
import org.springframework.transaction.PlatformTransactionManager;

import javax.sql.DataSource;

@Configuration
public class BatchConfig {

    // A job is a container for steps; this one runs a single import step.
    @Bean
    public Job userImportJob(JobRepository jobRepository, Step importStep) {
        return new JobBuilder("userImportJob", jobRepository)
                .start(importStep)
                .build();
    }

    // Chunk-oriented step: items are read and processed one at a time, then
    // written 100 at a time, one transaction per chunk. faultTolerant() turns
    // on skip semantics: up to 10 failing items are skipped before the step
    // itself fails (it also unlocks retry(...)/retryLimit(...) for transient
    // errors).
    @Bean
    public Step importStep(JobRepository jobRepository,
                           PlatformTransactionManager transactionManager,
                           FlatFileItemReader<UserDTO> reader,
                           UserProcessor processor,
                           JdbcBatchItemWriter<User> writer) {
        return new StepBuilder("importStep", jobRepository)
                .<UserDTO, User>chunk(100, transactionManager)
                .reader(reader)
                .processor(processor)
                .writer(writer)
                .faultTolerant()
                .skip(Exception.class)
                .skipLimit(10)
                .build();
    }

    // Reads users.csv from the classpath, skips the header row, and maps each
    // delimited line onto a UserDTO by column name.
    @Bean
    public FlatFileItemReader<UserDTO> csvReader() {
        return new FlatFileItemReaderBuilder<UserDTO>()
                .name("userCsvReader")
                .resource(new ClassPathResource("users.csv"))
                .delimited()
                .names("name", "email", "age")
                .targetType(UserDTO.class)
                .linesToSkip(1)
                .build();
    }

    @Bean
    public UserProcessor processor() {
        return new UserProcessor();
    }

    // Batches the INSERTs over JDBC; beanMapped() binds the named parameters
    // (:name, :email, :age) to the corresponding User properties.
    @Bean
    public JdbcBatchItemWriter<User> databaseWriter(DataSource dataSource) {
        return new JdbcBatchItemWriterBuilder<User>()
                .dataSource(dataSource)
                .sql("INSERT INTO users (name, email, age) VALUES (:name, :email, :age)")
                .beanMapped()
                .build();
    }

    public static class UserProcessor implements ItemProcessor<UserDTO, User> {
        @Override
        public User process(UserDTO dto) throws Exception {
            if (dto.email() == null || !dto.email().contains("@")) {
                // Returning null filters the item out; unlike a skip, this
                // does not count against the skip limit.
                return null;
            }
            User user = new User();
            user.setName(dto.name().toUpperCase());
            user.setEmail(dto.email().toLowerCase());
            user.setAge(dto.age());
            return user;
        }
    }
}
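The model classes aren't shown above, but the accessor-style calls (dto.name(), dto.email()) imply that UserDTO is a record, while User is a plain bean with setters. A minimal sketch of what the configuration assumes (field names inferred from the CSV columns and the INSERT statement; your actual classes may differ):

// UserDTO.java (hypothetical sketch)
package com.example.demo.model;

public record UserDTO(String name, String email, int age) {}

// User.java (hypothetical sketch)
package com.example.demo.model;

public class User {
    private String name;
    private String email;
    private int age;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getEmail() { return email; }
    public void setEmail(String email) { this.email = email; }
    public int getAge() { return age; }
    public void setAge(int age) { this.age = age; }
}

With targetType(UserDTO.class), the reader sees that the target is a record and maps the CSV columns onto its components by name, converting "age" to int along the way.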
// BatchController.java
package com.example.demo.controller;

import org.springframework.batch.core.Job;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;
import org.springframework.batch.core.launch.JobLauncher;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class BatchController {

    private final JobLauncher jobLauncher;
    private final Job userImportJob;

    public BatchController(JobLauncher jobLauncher, Job userImportJob) {
        this.jobLauncher = jobLauncher;
        this.userImportJob = userImportJob;
    }

    @PostMapping("/batch/import")
    public String runBatch() throws Exception {
        // The timestamp makes each launch a distinct job instance; relaunching
        // with identical parameters would be rejected as already complete.
        JobParameters params = new JobParametersBuilder()
                .addLong("time", System.currentTimeMillis())
                .toJobParameters();
        jobLauncher.run(userImportJob, params);
        return "Batch job started";
    }
}
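One caveat: the JobLauncher that Spring Boot wires up by default is synchronous, so the POST request above blocks until the job finishes, and "started" really means "completed". To have the endpoint return immediately, you can define an asynchronous launcher and inject that instead. A sketch using Spring Batch 5's TaskExecutorJobLauncher (the configuration class and bean name are illustrative):

// AsyncLauncherConfig.java (hypothetical sketch)
package com.example.demo.batch;

import org.springframework.batch.core.launch.JobLauncher;
import org.springframework.batch.core.launch.support.TaskExecutorJobLauncher;
import org.springframework.batch.core.repository.JobRepository;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.SimpleAsyncTaskExecutor;

@Configuration
public class AsyncLauncherConfig {

    // Runs jobs on a separate thread, so jobLauncher.run(...) returns as soon
    // as the job is launched rather than when it finishes.
    @Bean
    public JobLauncher asyncJobLauncher(JobRepository jobRepository) throws Exception {
        TaskExecutorJobLauncher launcher = new TaskExecutorJobLauncher();
        launcher.setJobRepository(jobRepository);
        launcher.setTaskExecutor(new SimpleAsyncTaskExecutor());
        launcher.afterPropertiesSet();
        return launcher;
    }
}

Depending on how the rest of the context is configured, this bean may need @Primary (or injection by name) to take precedence over the default launcher.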
Spring Batch handles large-scale batch processing: ETL, data migration, report generation. Jobs contain steps; each step wires together a reader, an optional processor, and a writer. Chunk-oriented processing reads and processes items one at a time, then writes them in configurable batches, one transaction per chunk. ItemReader fetches data from databases, files, or APIs; ItemProcessor transforms or filters it; ItemWriter persists the results. Skip and retry logic absorbs bad records and transient failures without aborting the whole run, and unique job parameters distinguish one run from the next.

The JobRepository persists execution metadata: which steps ran, how many items were read, written, and skipped, and where each step left off. That metadata is what makes a failed job restartable from the last committed chunk rather than from scratch. Partitioning parallelizes processing across threads or nodes, and listeners provide hooks for monitoring and logging, as in the sketch below. Spring Batch is a natural fit for scheduled bulk operations, data synchronization, and business-critical batch workflows; proper configuration is a balancing act between memory usage (chunk size), throughput, and reliability.
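For instance, a StepExecutionListener can report per-step counts once a step completes. A minimal sketch (the class name and plain println logging are illustrative); it would be registered on the step via .listener(new ImportStepListener()) before .build():

// ImportStepListener.java (hypothetical sketch)
package com.example.demo.batch;

import org.springframework.batch.core.ExitStatus;
import org.springframework.batch.core.StepExecution;
import org.springframework.batch.core.StepExecutionListener;

public class ImportStepListener implements StepExecutionListener {

    @Override
    public void beforeStep(StepExecution stepExecution) {
        System.out.println("Starting step: " + stepExecution.getStepName());
    }

    @Override
    public ExitStatus afterStep(StepExecution stepExecution) {
        // The framework tracks these counts per step execution.
        System.out.printf("read=%d written=%d skipped=%d%n",
                stepExecution.getReadCount(),
                stepExecution.getWriteCount(),
                stepExecution.getSkipCount());
        return stepExecution.getExitStatus();
    }
}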