java

关注公众号 jb51net

关闭
首页 > 软件编程 > java > MyBatis二级缓存

MyBatis之二级缓存用法及说明

作者:Lisonseekpan

本文介绍了MyBatis二级缓存的设计与实现,涵盖其在MyBatis中的位置、源码解析、详细使用案例、最佳实践总结及源码调试技巧等内容,强调了设计模式的应用,如装饰器模式、模板方法模式等,并提供了缓存穿透、击穿、雪崩等问题的解决方案

1. 架构总览

1.1 二级缓存在MyBatis中的位置

MyBatis Runtime
├── Configuration (全局配置)
│├── MapperRegistry (Mapper注册中心)
│└── Cache (缓存管理器)
├── Executor (执行器)
│├── BaseExecutor (基础执行器,含一级缓存)
│└── CachingExecutor (二级缓存装饰器) ★
└── SqlSession (会话)

2. 源码深度解析

2.1 核心类图解析

// Core interface: every second-level cache implementation and decorator implements this SPI.
public interface Cache {
String getId(); // cache namespace id, typically the mapper's fully-qualified name
void putObject(Object key, Object value); // store one query result under its CacheKey
Object getObject(Object key); // fetch a cached result, or null on a miss
Object removeObject(Object key); // evict a single entry
void clear(); // drop all entries (triggered by flushCache / write statements)
int getSize(); // number of cached entries (informational)
ReadWriteLock getReadWriteLock(); // NOTE(review): modern MyBatis core ignores this lock — verify against the target version
}

// Default implementation: PerpetualCache
public class PerpetualCache implements Cache {
private final String id;
private final Map<Object, Object> cache = new HashMap<>();
// Plain HashMap-backed store: no eviction and no TTL — hence "perpetual"
}

// Decorator pattern: the various Cache decorators
public class LruCache implements Cache {
private final Cache delegate; // the wrapped cache that actually stores the entries
private final Map<Object, Object> keyMap; // tracks key access order for LRU (an access-ordered LinkedHashMap in the real implementation — verify)
private Object eldestKey; // the key selected for eviction when capacity is exceeded
// LRU eviction algorithm
}

2.2 CachingExecutor:二级缓存的核心

// Decorator over the real executor: consults the second-level cache before delegating.
public class CachingExecutor implements Executor {
private final Executor delegate; // the wrapped executor (usually SimpleExecutor)
private final TransactionalCacheManager tcm = new TransactionalCacheManager();

@Override
public <E> List<E> query(MappedStatement ms,
Object parameter,
RowBounds rowBounds,
ResultHandler resultHandler,
CacheKey key,
BoundSql boundSql) throws SQLException {

// 1. Get the cache configured for this MappedStatement's namespace
Cache cache = ms.getCache();

if (cache != null) {
// 2. Clear the cache first if the statement is marked flushCache="true"
flushCacheIfRequired(ms);

// 3. Only use the cache when useCache="true" and no custom ResultHandler is set
if (ms.isUseCache() && resultHandler == null) {
// 4. Stored procedures with OUT parameters cannot be cached
ensureNoOutParams(ms, boundSql);

// 5. Try the second-level cache (via the transactional wrapper)
@SuppressWarnings("unchecked")
List<E> list = (List<E>) tcm.getObject(cache, key);

if (list == null) {
// 6. Cache miss: delegate to the real executor
list = delegate.query(ms, parameter, rowBounds,
resultHandler, key, boundSql);

// 7. Stage the result; it reaches the shared cache only on commit
tcm.putObject(cache, key, list);
}
return list;
}
}
// 8. No cache configured for this namespace: query directly
return delegate.query(ms, parameter, rowBounds,
resultHandler, key, boundSql);
}
}

2.3 CacheKey的生成机制

/**
 * Cache key: an order-sensitive composite hash over every component that
 * determines a query's identity (statement id, paging bounds, SQL text,
 * parameter values, environment id).
 */
public class CacheKey implements Cloneable, Serializable {
    // Historical MyBatis spelling ("MULTIPLYER") kept for fidelity with the source.
    private static final int DEFAULT_MULTIPLYER = 37;
    private static final int DEFAULT_HASHCODE = 17;

    private final int multiplier;    // hash mixing factor (37)
    private int hashcode;            // running hash over all update() calls; order-sensitive
    private long checksum;           // extra checksum to reduce collision risk
    private int count;               // number of components mixed in so far
    private List<Object> updateList; // raw components, used by equals() for exact comparison

    /**
     * Fix: the original snippet never initialized {@code updateList}, so the
     * first {@link #update(Object)} would throw a NullPointerException.
     * Real MyBatis initializes all state in this no-arg constructor.
     */
    public CacheKey() {
        this.multiplier = DEFAULT_MULTIPLYER;
        this.hashcode = DEFAULT_HASHCODE;
        this.count = 0;
        this.updateList = new ArrayList<>();
    }

    public CacheKey(Object[] objects) {
        this(); // initialize state first, then mix in all components in order
        updateAll(objects);
    }

    /** Mixes one component into the key; the order of calls matters. */
    public void update(Object object) {
        int baseHashCode = object == null ? 1 :
        ArrayUtil.hashCode(object); // null-safe, array-aware hash (MyBatis helper)

        count++;
        checksum += baseHashCode;
        baseHashCode *= count;

        hashcode = multiplier * hashcode + baseHashCode;

        updateList.add(object);
    }

    /** Mixes every element in order (this helper was missing from the original snippet). */
    private void updateAll(Object[] objects) {
        for (Object o : objects) {
            update(o);
        }
    }

    // NOTE(review): in real MyBatis this method lives in BaseExecutor, not in
    // CacheKey; it is reproduced here to show what goes into a key.
    public CacheKey createCacheKey(MappedStatement ms,
    Object parameterObject,
    RowBounds rowBounds,
    BoundSql boundSql) {
        CacheKey cacheKey = new CacheKey();
        cacheKey.update(ms.getId());            // mapper statement id
        cacheKey.update(rowBounds.getOffset()); // paging offset
        cacheKey.update(rowBounds.getLimit());  // paging limit
        cacheKey.update(boundSql.getSql());     // SQL text

        // Every bound parameter value is part of the key.
        List<ParameterMapping> parameterMappings =
        boundSql.getParameterMappings();
        TypeHandlerRegistry typeHandlerRegistry =
        ms.getConfiguration().getTypeHandlerRegistry(); // used by the elided value-resolution logic

        for (ParameterMapping parameterMapping : parameterMappings) {
            Object value = null; // fix: must be initialized; resolution logic elided below
            String propertyName = parameterMapping.getProperty();
            // ... parameter value resolution elided ...
            cacheKey.update(value);
        }

        // Environment id, so identical SQL run against different environments
        // does not share cache entries.
        if (ms.getConfiguration().getEnvironment() != null) {
            cacheKey.update(ms.getConfiguration()
            .getEnvironment().getId());
        }

        return cacheKey;
    }
}

2.4 缓存装饰器链的实现

/**
 * Decorator pattern in action. For
 *   {@code <cache eviction="LRU" flushInterval="60000" size="512" readOnly="true"/>}
 * the cache object chain built here is:
 *   SynchronizedCache -> LoggingCache -> [SerializedCache] -> LruCache -> PerpetualCache
 * (SerializedCache is present only when readOnly="false", i.e. readWrite == true.)
 */
public class CacheBuilder {
    private String id; // namespace of the owning mapper (field was missing from the original snippet)
    private Class<? extends Cache> implementation = PerpetualCache.class;
    private final List<Class<? extends Cache>> decorators = new ArrayList<>();
    private Class<? extends Cache> evictionClass;
    private Long flushInterval;
    private Integer size;
    private boolean readWrite;
    private Properties properties;

    public Cache build() {
        // 1. Base cache (PerpetualCache unless a custom type was configured).
        Cache cache = newBaseCacheInstance(implementation, id);
        setCacheProperties(cache);

        // 2. Standard decorators only wrap the built-in base cache; a custom
        //    cache implementation is expected to handle these concerns itself.
        if (PerpetualCache.class.equals(cache.getClass())) {
            for (Class<? extends Cache> decorator : decorators) {
                cache = newCacheDecoratorInstance(decorator, cache); // e.g. the LRU/FIFO eviction decorator
                setCacheProperties(cache);
            }
            cache = setStandardDecorators(cache);
        }

        return cache;
    }

    private Cache setStandardDecorators(Cache cache) {
        try {
            MetaObject metaCache = SystemMetaObject.forObject(cache);

            // size is applied to the eviction decorator already in the chain
            // (e.g. LruCache.setSize) — not by wrapping a second LruCache here,
            // as the original snippet incorrectly did.
            if (size != null && metaCache.hasSetter("size")) {
                metaCache.setValue("size", size);
            }

            // Periodic clear decorator.
            if (flushInterval != null) {
                cache = new ScheduledCache(cache);
                ((ScheduledCache) cache).setClearInterval(flushInterval);
            }

            // Fix: serialization applies to READ-WRITE caches (readOnly="false"),
            // so every hit returns an independent deserialized copy.
            // The original tested !readWrite, which inverted the semantics.
            if (readWrite) {
                cache = new SerializedCache(cache);
            }

            // Hit/miss logging decorator.
            cache = new LoggingCache(cache);

            // Outermost decorator: coarse-grained synchronization.
            cache = new SynchronizedCache(cache);

            return cache;
        } catch (Exception e) {
            throw new CacheException("Error building standard cache decorators.", e);
        }
    }
}

2.5 TransactionalCacheManager:事务性缓存管理

/**
 * Session-scoped registry of transactional cache wrappers.
 * Every underlying Cache gets exactly one TransactionalCache for the current
 * session; staged entries only reach the shared cache when commit() is called.
 */
public class TransactionalCacheManager {
    // One transactional wrapper per underlying second-level cache.
    private final Map<Cache, TransactionalCache> transactionalCaches =
            new HashMap<>();

    /** Reads through the transactional wrapper for the given cache. */
    public Object getObject(Cache cache, CacheKey key) {
        return getTransactionalCache(cache).getObject(key);
    }

    /** Stages a write; it is not visible to other sessions until commit(). */
    public void putObject(Cache cache, CacheKey key, Object value) {
        getTransactionalCache(cache).putObject(key, value);
    }

    /** Publishes all staged entries of every wrapper to the shared caches. */
    public void commit() {
        transactionalCaches.values().forEach(TransactionalCache::commit);
    }

    /** Discards all staged entries of every wrapper. */
    public void rollback() {
        transactionalCaches.values().forEach(TransactionalCache::rollback);
    }

    // Lazily creates the wrapper on first use of a given cache.
    private TransactionalCache getTransactionalCache(Cache cache) {
        return transactionalCaches.computeIfAbsent(cache, TransactionalCache::new);
    }
}

// Buffers writes per transaction so uncommitted results never leak into the shared cache.
public class TransactionalCache implements Cache {
private final Cache delegate; // the real (shared) second-level cache
private boolean clearOnCommit; // whether to clear the shared cache at commit time
private final Map<Object, Object> entriesToAddOnCommit = new HashMap<>(); // writes staged until commit
private final Set<Object> entriesMissedInCache = new HashSet<>(); // keys that missed during this transaction

@Override
public void putObject(Object key, Object value) {
// Not written through to the delegate: other sessions must not see uncommitted data.
entriesToAddOnCommit.put(key, value);
}

public void commit() {
if (clearOnCommit) {
delegate.clear();
}
// Publish all staged entries to the shared cache in one pass.
flushPendingEntries();
reset(); // clears the staging state; defined in the full class (elided here)
}

private void flushPendingEntries() {
for (Map.Entry<Object, Object> entry :
entriesToAddOnCommit.entrySet()) {
delegate.putObject(entry.getKey(), entry.getValue());
}
// Keys that missed during the transaction and were never staged:
for (Object entry : entriesMissedInCache) {
if (!entriesToAddOnCommit.containsKey(entry)) {
delegate.putObject(entry, null); // caches null; NOTE(review): in MyBatis this releases BlockingCache locks — verify
}
}
}
}

3. 详细使用案例

3.1 基础配置与使用

<!-- mybatis-config.xml -->
<configuration>
<settings>
<!-- Enable the second-level cache globally (default: true) -->
<setting name="cacheEnabled" value="true"/>
<!-- Scope of the first-level (local) cache -->
<setting name="localCacheScope" value="SESSION"/>
</settings>

<!-- Register an alias for a third-party cache implementation -->
<typeAliases>
<typeAlias type="org.mybatis.caches.ehcache.EhcacheCache"
alias="EH_CACHE"/>
</typeAliases>
</configuration>

<!-- UserMapper.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.mapper.UserMapper">

<!-- Enable the second-level cache for this namespace, fully configured -->
<cache
type="org.mybatis.caches.ehcache.EhcacheCache"
eviction="LRU"
flushInterval="60000"
size="1000"
readOnly="false"
blocking="false"
properties="timeToLiveSeconds=3600,timeToIdleSeconds=1800"/>

<!-- Result mapping -->
<resultMap id="userResultMap" type="User">
<id property="id" column="id"/>
<result property="username" column="username"/>
<result property="email" column="email"/>
<!-- Note: associated objects are cached under their own mapper's namespace -->
<association property="department" column="dept_id"
select="com.example.mapper.DeptMapper.selectById"
fetchType="lazy"/>
</resultMap>

<!-- Query that uses the cache -->
<select id="selectById" resultMap="userResultMap" useCache="true">
SELECT * FROM users WHERE id = #{id}
</select>

<!-- Query that bypasses the cache (locking read must hit the DB) -->
<select id="selectForUpdate" resultMap="userResultMap" useCache="false">
SELECT * FROM users WHERE id = #{id} FOR UPDATE
</select>

<!-- Query that flushes the cache before executing -->
<select id="selectAfterRefresh" resultMap="userResultMap"
flushCache="true">
SELECT * FROM users WHERE id = #{id}
</select>

<!-- Update statement; flushCache defaults to "true" for writes -->
<update id="updateUser" parameterType="User">
UPDATE users
SET username=#{username}, email=#{email}
WHERE id=#{id}
</update>
</mapper>

3.2 注解方式配置

package com.example.mapper;

import org.apache.ibatis.annotations.*;
import org.apache.ibatis.cache.decorators.LruCache;
import org.apache.ibatis.cache.impl.PerpetualCache;
import org.apache.ibatis.mapping.StatementType;

// Option 1: configure the cache directly with @CacheNamespace
@CacheNamespace(
implementation = PerpetualCache.class, // base cache implementation
eviction = LruCache.class, // eviction decorator
flushInterval = 60000L, // periodic clear interval (ms)
size = 512, // maximum number of entries
readWrite = true, // read/write cache (hits return serialized copies)
blocking = false, // whether misses block until the value is loaded
properties = {
@Property(name = "timeToLiveSeconds", value = "3600"),
@Property(name = "timeToIdleSeconds", value = "1800")
}
)
// Option 2: reference a cache configured in another namespace
// @CacheNamespaceRef(UserMapper.class)
public interface UserMapper {

// Uses the cache by default
@Select("SELECT * FROM users WHERE id = #{id}")
@Results(id = "userResult", value = {
@Result(property = "id", column = "id", id = true),
@Result(property = "username", column = "username"),
@Result(property = "email", column = "email")
})
User selectById(Long id);

// Explicitly enable caching, never flush before this query
@Select("SELECT * FROM users WHERE username = #{username}")
@Options(useCache = true, flushCache = Options.FlushCachePolicy.FALSE)
User selectByUsername(String username);

// Explicitly bypass the cache
@Select("SELECT * FROM users WHERE id = #{id}")
@Options(useCache = false)
User selectForUpdate(Long id);

// Write operation: flush the namespace cache
@Update("UPDATE users SET username = #{username} WHERE id = #{id}")
@Options(flushCache = Options.FlushCachePolicy.TRUE)
int updateUsername(@Param("id") Long id, @Param("username") String username);

// Stored-procedure call with caching enabled
@Select(value = "{CALL get_user_by_id(#{id, mode=IN})}",
statementType = StatementType.CALLABLE)
@Options(useCache = true)
User selectByProcedure(Long id);
}

3.3 复杂场景:关联查询缓存

/**
 * Caching strategies for association (join) queries.
 */
public class AssociationCacheExample {

// DeptMapper.java
@CacheNamespace
public interface DeptMapper {
@Select("SELECT * FROM departments WHERE id = #{id}")
Department selectById(Long id);
}

// UserMapper.java - option 1: nested select (beware the N+1 query problem)
public interface UserMapper {
@Select("SELECT * FROM users WHERE id = #{id}")
@Results({
@Result(property = "id", column = "id"),
@Result(property = "deptId", column = "dept_id"),
@Result(property = "department", column = "dept_id",
one = @One(select = "com.example.mapper.DeptMapper.selectById",
fetchType = FetchType.LAZY))
})
User selectUserWithDept(Long id);
}

// Option 2: single join query (one cache entry holds user + dept)
// NOTE(review): in real code this method belongs on a mapper interface,
// not directly inside a class — shown here only for illustration.
@Select("SELECT u.*, d.name as dept_name, d.code as dept_code " +
"FROM users u LEFT JOIN departments d ON u.dept_id = d.id " +
"WHERE u.id = #{id}")
@Results(id = "userWithDept", value = {
@Result(property = "id", column = "id", id = true),
@Result(property = "username", column = "username"),
@Result(property = "department", javaType = Department.class,
resultMap = "com.example.mapper.DeptMapper.deptResult")
})
@Options(useCache = true)
User selectUserWithDeptJoin(Long id);

/**
 * Exercises the association cache: the second call should be served
 * from the second-level cache after the first session commits.
 */
@Test
public void testAssociationCache() {
try (SqlSession session = sqlSessionFactory.openSession()) {
UserMapper mapper = session.getMapper(UserMapper.class);

// First query: hits the DB for both user and dept
System.out.println("第一次查询:");
User user1 = mapper.selectUserWithDept(1L);
session.commit(); // commit is required for entries to reach the shared cache

// Second query: user served from cache; dept may also be cached
System.out.println("\n第二次查询:");
User user2 = mapper.selectUserWithDept(1L);

// Verify caching — note readWrite caches return copies, so == may be false
System.out.println("User对象相同:" + (user1 == user2));
System.out.println("Dept对象相同:" +
(user1.getDepartment() == user2.getDepartment()));
}
}
}

3.4 自定义缓存实现

/**
* 自定义Redis缓存实现
*/
package com.example.cache;

import org.apache.ibatis.cache.Cache;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Custom second-level cache backed by Redis. Values are stored with a TTL;
 * all keys for this namespace are tracked in a Redis set so clear() can
 * delete them in one sweep.
 */
public class CustomRedisCache implements Cache {

    private final String id;
    private final JedisPool jedisPool;
    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();

    // Serializer for both keys and values (JDK serialization by default).
    private final Serializer serializer = new JdkSerializer();

    public CustomRedisCache(String id) {
        this.id = id;
        // NOTE(review): host/port are hard-coded; inject them via <cache properties> in production.
        this.jedisPool = new JedisPool("localhost", 6379);
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public void putObject(Object key, Object value) {
        try (Jedis jedis = jedisPool.getResource()) {
            byte[] keyBytes = serializer.serialize(key);
            byte[] valueBytes = serializer.serialize(value);

            jedis.setex(keyBytes, 3600, valueBytes); // 1-hour TTL per entry

            // Track the key so clear() can find it later.
            // Fix: the original mixed a String set name with byte[] members,
            // which does not compile against the Jedis API — use byte[] throughout.
            jedis.sadd(keysSetNameBytes(), keyBytes);
        }
    }

    @Override
    public Object getObject(Object key) {
        try (Jedis jedis = jedisPool.getResource()) {
            byte[] keyBytes = serializer.serialize(key);
            byte[] valueBytes = jedis.get(keyBytes);

            return valueBytes == null ? null : serializer.deserialize(valueBytes);
        }
    }

    @Override
    public Object removeObject(Object key) {
        try (Jedis jedis = jedisPool.getResource()) {
            byte[] keyBytes = serializer.serialize(key);
            byte[] valueBytes = jedis.get(keyBytes); // read first so we can return the evicted value

            jedis.del(keyBytes);
            jedis.srem(keysSetNameBytes(), keyBytes); // fix: byte[] set key, matching sadd()

            return valueBytes == null ? null : serializer.deserialize(valueBytes);
        }
    }

    @Override
    public void clear() {
        try (Jedis jedis = jedisPool.getResource()) {
            Set<byte[]> keys = jedis.smembers(keysSetNameBytes());

            if (!keys.isEmpty()) {
                jedis.del(keys.toArray(new byte[0][]));
            }
            jedis.del(keysSetNameBytes()); // drop the tracking set under the same key bytes
        }
    }

    @Override
    public int getSize() {
        try (Jedis jedis = jedisPool.getResource()) {
            Long size = jedis.scard(keysSetNameBytes()); // same key bytes as sadd/srem
            return size != null ? size.intValue() : 0;
        }
    }

    @Override
    public ReadWriteLock getReadWriteLock() {
        return readWriteLock;
    }

    private String getKeysSetName() {
        return id + ":keys";
    }

    // Consistent byte[] form of the tracking-set key for the binary Jedis API.
    private byte[] keysSetNameBytes() {
        return getKeysSetName().getBytes(java.nio.charset.StandardCharsets.UTF_8);
    }

    // Pluggable serialization strategy.
    interface Serializer {
        byte[] serialize(Object obj);
        Object deserialize(byte[] bytes);
    }

    // JDK serialization implementation (cached objects must be Serializable).
    static class JdkSerializer implements Serializer {
        @Override
        public byte[] serialize(Object obj) {
            try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(baos)) {
                oos.writeObject(obj);
                return baos.toByteArray();
            } catch (IOException e) {
                throw new CacheException("Serialization failed", e);
            }
        }

        @Override
        public Object deserialize(byte[] bytes) {
            if (bytes == null) return null;
            try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
            ObjectInputStream ois = new ObjectInputStream(bais)) {
                return ois.readObject();
            } catch (IOException | ClassNotFoundException e) {
                throw new CacheException("Deserialization failed", e);
            }
        }
    }
}

3.5 分布式环境下的缓存同步

/**
 * Cache synchronization strategies for multi-node deployments.
 */
public class DistributedCacheSync {

/**
 * Option 1: broadcast cache-invalidation messages via Redis Pub/Sub.
 */
@Component
public class CacheInvalidationListener {

private final JedisPool jedisPool;
private final SqlSessionFactory sqlSessionFactory;

@PostConstruct
public void init() {
// Dedicated thread: JedisPubSub.subscribe() blocks for the lifetime of the subscription.
new Thread(() -> {
try (Jedis jedis = jedisPool.getResource()) {
jedis.subscribe(new JedisPubSub() {
@Override
public void onMessage(String channel, String message) {
// An invalidation message arrived from another node.
handleCacheInvalidation(message);
}
}, "cache:invalidation");
}
}).start();
}

private void handleCacheInvalidation(String message) {
// Message format: mapperName:cacheKey
// NOTE(review): split(":") also splits on dots-free colons only, but mapper
// names with extra colons would break this — a dedicated delimiter is safer.
String[] parts = message.split(":");
if (parts.length == 2) {
String mapperName = parts[0];
String cacheKey = parts[1];

try (SqlSession session = sqlSessionFactory.openSession()) {
Configuration config = session.getConfiguration();
Cache cache = config.getCache(mapperName);

if (cache != null) {
// Evict the single entry; NOTE(review): real cache keys are CacheKey
// objects, not Strings — verify the key representation used here.
cache.removeObject(cacheKey);
System.out.println("清除缓存:" + mapperName + " - " + cacheKey);
}
}
}
}

/**
 * Publishes an invalidation notice to every subscribed node.
 */
public void notifyCacheInvalidation(String mapperName, String cacheKey) {
try (Jedis jedis = jedisPool.getResource()) {
String message = mapperName + ":" + cacheKey;
jedis.publish("cache:invalidation", message);
}
}
}

/**
 * Option 2: a Cache decorator that serializes writes with a distributed lock.
 */
public class DistributedLockCache implements Cache {

private final Cache delegate;
private final RedissonClient redisson;

@Override
public void putObject(Object key, Object value) {
String lockKey = "lock:cache:" + getId() + ":" + key.hashCode();
RLock lock = redisson.getLock(lockKey);

try {
lock.lock(5, TimeUnit.SECONDS); // auto-release after 5s to avoid deadlock on crash
delegate.putObject(key, value);
} finally {
if (lock.isHeldByCurrentThread()) {
lock.unlock();
}
}
}
}
}

3.6 性能监控与调优

/**
 * Second-level cache performance monitoring utilities: per-cache hit/miss
 * counters plus a decorator that records them transparently.
 */
public class CachePerformanceMonitor {

// One metrics record per cache id, shared across all MonitoredCache instances.
private static final Map<String, CacheMetrics> metricsMap =
new ConcurrentHashMap<>();

@Data // Lombok: generates the getters/setters used below
public static class CacheMetrics {
private String cacheId;
private AtomicLong hitCount = new AtomicLong();
private AtomicLong missCount = new AtomicLong();
private AtomicLong putCount = new AtomicLong();
private AtomicLong evictionCount = new AtomicLong();
private AtomicLong totalTime = new AtomicLong(); // cumulative access time in nanoseconds

// Hit ratio in [0,1]; 0 when no accesses have been recorded yet.
public double getHitRate() {
long total = hitCount.get() + missCount.get();
return total == 0 ? 0 : (double) hitCount.get() / total;
}

// Mean access latency in nanoseconds; 0 when no accesses recorded.
public double getAverageAccessTime() {
long totalAccess = hitCount.get() + missCount.get();
return totalAccess == 0 ? 0 : (double) totalTime.get() / totalAccess;
}
}

/**
 * Decorator that counts hits/misses/puts and accumulates access time.
 */
public static class MonitoredCache implements Cache {
private final Cache delegate;
private final CacheMetrics metrics;

public MonitoredCache(Cache delegate) {
this.metrics = metricsMap.computeIfAbsent(delegate.getId(),
id -> new CacheMetrics());
this.metrics.setCacheId(delegate.getId());
this.delegate = delegate;
}

@Override
public Object getObject(Object key) {
long start = System.nanoTime();
try {
Object value = delegate.getObject(key);
// null return counts as a miss (MyBatis contract: null means not cached)
if (value != null) {
metrics.getHitCount().incrementAndGet();
} else {
metrics.getMissCount().incrementAndGet();
}
return value;
} finally {
long time = System.nanoTime() - start;
metrics.getTotalTime().addAndGet(time);
}
}

@Override
public void putObject(Object key, Object value) {
long start = System.nanoTime();
try {
delegate.putObject(key, value);
metrics.getPutCount().incrementAndGet();
} finally {
long time = System.nanoTime() - start;
metrics.getTotalTime().addAndGet(time);
}
}

// Remaining Cache methods delegate straight through (elided) ...
}

/**
 * Prints a formatted hit-rate report for every monitored cache.
 */
public static void generateReport() {
System.out.println("=== 二级缓存性能报告 ===");
System.out.printf("%-30s %-10s %-10s %-10s %-10s%n",
"Cache ID", "Hit Rate", "Hit", "Miss", "Avg Time(ns)");
System.out.println("-".repeat(80));

for (CacheMetrics metrics : metricsMap.values()) {
System.out.printf("%-30s %-10.2f %-10d %-10d %-10.2f%n",
metrics.getCacheId(),
metrics.getHitRate() * 100,
metrics.getHitCount().get(),
metrics.getMissCount().get(),
metrics.getAverageAccessTime());
}
}
}

/**
 * AOP-based cache monitoring: intercepts every Cache.getObject call and
 * records hit/miss plus latency to an external monitoring system.
 */
@Aspect
@Component
public class CacheMonitorAspect {

// Matches getObject on any Cache implementation managed by the container.
@Pointcut("execution(* org.apache.ibatis.cache.Cache.getObject(..))")
public void cacheGetPointcut() {}

@Around("cacheGetPointcut()")
public Object monitorCacheAccess(ProceedingJoinPoint pjp) throws Throwable {
Cache cache = (Cache) pjp.getTarget();
Object key = pjp.getArgs()[0];

String cacheId = cache.getId();
String keyStr = key.toString();

long start = System.currentTimeMillis();
Object result = pjp.proceed(); // run the actual cache lookup
long time = System.currentTimeMillis() - start;

// Build the monitoring record (LogRecord here is a project type, not java.util.logging's).
LogRecord record = new LogRecord(cacheId, keyStr,
result != null ? "HIT" : "MISS", time);

// Ship to the monitoring backend (sendToMonitor defined elsewhere).
sendToMonitor(record);

return result;
}
}

4. 最佳实践总结

4.1 配置建议

<!-- Recommended production configuration.
     Fix: the original placed XML comments INSIDE the start tag, which is
     malformed XML; attribute rationale is documented here instead:
       flushInterval="0"  - no periodic flush in a distributed setup
       size="0"           - Redis manages capacity itself
       blocking="true"    - guards against cache stampede (击穿) -->
<cache
type="org.mybatis.caches.redis.RedisCache"
eviction="LRU"
flushInterval="0"
size="0"
readOnly="false"
blocking="true"
properties="
maxTotal=100,
maxIdle=10,
minIdle=5,
testOnBorrow=true,
timeBetweenEvictionRunsMillis=30000
"
/>

4.2 代码规范

// 1. Explicit transaction boundary
// NOTE(review): @Transactional (container-managed) combined with a manually
// opened SqlSession is contradictory — with Spring, commit is handled by the
// transaction manager; this form is for standalone MyBatis usage. Verify intent.
@Transactional
public User getUserWithCache(Long id) {
// Must run inside a single session/transaction
try (SqlSession session = sqlSessionFactory.openSession()) {
UserMapper mapper = session.getMapper(UserMapper.class);
User user = mapper.selectById(id);
session.commit(); // crucial: results only enter the second-level cache on commit!
return user;
}
}

// 2. Batch updates: commit periodically so the transactional cache buffer stays small
@Transactional
public void batchUpdateUsers(List<User> users) {
try (SqlSession session = sqlSessionFactory.openSession()) {
UserMapper mapper = session.getMapper(UserMapper.class);

int processed = 0; // fix: the original referenced an undefined loop counter `i`
for (User user : users) {
mapper.updateUser(user);
processed++;
// Commit every 100 rows to cap the staged-entry buffer
if (processed % 100 == 0) {
session.commit();
session.clearCache(); // optional: also drop the session-local (first-level) cache
}
}
session.commit(); // commit the remaining tail (< 100 rows)
}
}

4.3 常见问题解决方案

| 问题 | 现象 | 解决方案 |
| --- | --- | --- |
| 缓存穿透 | 查询不存在的数据 | 1. 缓存空对象 2. 布隆过滤器 |
| 缓存击穿 | 热点key失效 | 1. 永不过期 2. 互斥锁 |
| 缓存雪崩 | 大量key同时失效 | 1. 随机过期时间 2. 多级缓存兜底 |
| 数据不一致 | 缓存与DB不一致 | 1. 合理设置过期时间 2. 更新时清除缓存 |

5. 源码调试技巧

5.1 调试缓存流程

// 在IDE中设置断点:
// 1. CachingExecutor.query() - 入口
// 2. TransactionalCacheManager.getObject() - 获取缓存
// 3. CacheKey.update() - 查看缓存键生成
// 4. BaseExecutor.createCacheKey() - 创建缓存键

// 启用MyBatis缓存日志(log4j.properties 写法)
log4j.logger.org.apache.ibatis.cache.decorators.LoggingCache=DEBUG
log4j.logger.org.apache.ibatis.executor.CachingExecutor=TRACE

5.2 查看缓存状态

/**
 * Debug helper: prints every named second-level cache registered in the
 * given SqlSessionFactory together with its decorator chain, discovered by
 * reflectively following each decorator's private "delegate" field.
 */
public class CacheInspector {

    public static void inspectCache(SqlSessionFactory factory) {
        Configuration config = factory.getConfiguration();

        // All namespace caches known to this configuration.
        Map<String, Cache> caches = config.getCaches();

        for (Map.Entry<String, Cache> namedCache : caches.entrySet()) {
            System.out.println("Cache: " + namedCache.getKey());

            // Unwrap decorators until we reach the base cache (no delegate field).
            for (Cache current = namedCache.getValue(); current != null; ) {
                System.out.println("|- " + current.getClass().getSimpleName());

                try {
                    Field inner = current.getClass()
                            .getDeclaredField("delegate");
                    inner.setAccessible(true);
                    current = (Cache) inner.get(current);
                } catch (Exception e) {
                    break; // base cache reached (or field inaccessible)
                }
            }
        }
    }
}

总结

MyBatis二级缓存的源码设计体现了多个设计模式的精妙应用:

  1. 装饰器模式:灵活组合缓存功能
  2. 模板方法模式:CachingExecutor的查询流程
  3. 策略模式:不同的缓存淘汰策略
  4. 代理模式:TransactionalCache的事务管理

在实际使用中,需要根据业务场景选择合适的缓存策略,并注意:事务提交后缓存才真正生效、多表关联更新可能导致脏读,以及分布式环境下需要额外的缓存同步机制。

通过深入理解源码,可以更好地利用二级缓存提升系统性能,同时避免常见的数据一致性问题。

以上为个人经验,希望能给大家一个参考,也希望大家多多支持脚本之家。

您可能感兴趣的文章:
阅读全文