When reads vastly outnumber writes, use a read-write lock so that read-read access can proceed concurrently and throughput improves. It is similar to the database statement
select ... from ... lock in share mode
Basic usage
Define a data container class that guards its read() method with the read lock and its write() method with the write lock:

```java
@Slf4j(topic = "c.TestReadWriteLock")
public class TestReadWriteLock {
    public static void main(String[] args) throws InterruptedException {
    }
}

@Slf4j(topic = "c.DataContainer")
class DataContainer {
    private Object data;
    private ReentrantReadWriteLock rw = new ReentrantReadWriteLock();
    private ReentrantReadWriteLock.ReadLock r = rw.readLock();
    private ReentrantReadWriteLock.WriteLock w = rw.writeLock();

    public Object read() {
        log.debug("acquiring read lock...");
        r.lock();
        try {
            log.debug("reading");
            sleep(1);
            return data;
        } finally {
            log.debug("releasing read lock...");
            r.unlock();
        }
    }

    public void write() {
        log.debug("acquiring write lock...");
        w.lock();
        try {
            log.debug("writing");
            sleep(1);
        } finally {
            log.debug("releasing write lock...");
            w.unlock();
        }
    }
}
```
Read lock vs. read lock: concurrent
```java
DataContainer dataContainer = new DataContainer();
new Thread(() -> {
    dataContainer.read();
}, "t1").start();

new Thread(() -> {
    dataContainer.read();
}, "t2").start();
```
Output: while t1 holds the read lock, t2's read operation is not affected.
```
14:05:14.341 c.DataContainer [t2] - acquiring read lock...
14:05:14.341 c.DataContainer [t1] - acquiring read lock...
14:05:14.345 c.DataContainer [t1] - reading
14:05:14.345 c.DataContainer [t2] - reading
14:05:15.365 c.DataContainer [t2] - releasing read lock...
14:05:15.386 c.DataContainer [t1] - releasing read lock...
```
Read lock vs. write lock: mutually blocking
```java
DataContainer dataContainer = new DataContainer();
new Thread(() -> {
    dataContainer.read();
}, "t1").start();

Thread.sleep(100);

new Thread(() -> {
    dataContainer.write();
}, "t2").start();
```
```
14:04:21.838 c.DataContainer [t1] - acquiring read lock...
14:04:21.838 c.DataContainer [t2] - acquiring write lock...
14:04:21.841 c.DataContainer [t2] - writing
14:04:22.843 c.DataContainer [t2] - releasing write lock...
14:04:22.843 c.DataContainer [t1] - reading
14:04:23.843 c.DataContainer [t1] - releasing read lock...
```
Write lock vs. write lock is also mutually blocking; the original does not test it, but a quick sketch is shown below.
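A minimal sketch of my own, reusing the DataContainer above: since write access is exclusive, the second thread's "writing" line should only appear after the first thread logs "releasing write lock...".

```java
DataContainer dataContainer = new DataContainer();
new Thread(() -> {
    dataContainer.write();
}, "t1").start();

new Thread(() -> {
    dataContainer.write();
}, "t2").start();
```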
Caveats
The read lock does not support condition variables.
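A quick way to see this restriction (a sketch of my own, not from the original): ReadLock.newCondition() is specified to throw UnsupportedOperationException, while the write lock's condition behaves like ReentrantLock's.

```java
ReentrantReadWriteLock rw = new ReentrantReadWriteLock();
Condition ok  = rw.writeLock().newCondition();   // supported, works like ReentrantLock's condition
Condition bad = rw.readLock().newCondition();    // throws UnsupportedOperationException
```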
Upgrading on reentry is not supported: acquiring the write lock while holding the read lock makes the write-lock acquisition wait forever.
```java
r.lock();
try {
    // ...
    w.lock();   // blocks forever: the write lock cannot be granted while this thread's read lock is held
    try {
        // ...
    } finally {
        w.unlock();
    }
} finally {
    r.unlock();
}
```
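For a concrete demonstration (a runnable sketch of my own, not the original's code), the thread below takes the read lock and then tries to upgrade; it hangs on w.lock() because the write lock can never be granted while any read lock, including its own, is still held.

```java
ReentrantReadWriteLock rw = new ReentrantReadWriteLock();
new Thread(() -> {
    rw.readLock().lock();
    try {
        System.out.println("holding read lock, trying to upgrade...");
        rw.writeLock().lock();          // never returns: waits for its own read lock to be released
        System.out.println("never printed");
    } finally {
        rw.readLock().unlock();
    }
}, "t1").start();
```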
Downgrading on reentry is supported: the read lock can be acquired while holding the write lock, as in the CachedData example below (adapted from the ReentrantReadWriteLock javadoc).

```java
class CachedData {
    Object data;
    // whether the cache is valid; if not, data must be recomputed
    volatile boolean cacheValid;
    final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();

    void processCachedData() {
        rwl.readLock().lock();
        if (!cacheValid) {
            // Must release read lock before acquiring write lock
            rwl.readLock().unlock();
            rwl.writeLock().lock();
            try {
                // Recheck state because another thread might have
                // acquired write lock and changed state before we did,
                // to avoid recomputing the cache.
                if (!cacheValid) {
                    data = ...
                    cacheValid = true;
                }
                // Downgrade by acquiring read lock before releasing write lock,
                // so that other threads can read the cache right away
                rwl.readLock().lock();
            } finally {
                rwl.writeLock().unlock(); // Unlock write, still hold read
            }
        }

        try {
            use(data);
        } finally {
            rwl.readLock().unlock();
        }
    }
}
```
Application: cache and database consistency
Example code
GenericDao wraps the JDBC code for connecting to MySQL and executing SQL statements:
```java
public class GenericDao {
    static String URL = "jdbc:mysql://localhost:3306/test";
    static String USERNAME = "root";
    static String PASSWORD = "root";

    public <T> List<T> queryList(Class<T> beanClass, String sql, Object... args) {
        System.out.println("sql: [" + sql + "] params:" + Arrays.toString(args));
        BeanRowMapper<T> mapper = new BeanRowMapper<>(beanClass);
        return queryList(sql, mapper, args);
    }

    public <T> T queryOne(Class<T> beanClass, String sql, Object... args) {
        System.out.println("sql: [" + sql + "] params:" + Arrays.toString(args));
        BeanRowMapper<T> mapper = new BeanRowMapper<>(beanClass);
        return queryOne(sql, mapper, args);
    }

    private <T> List<T> queryList(String sql, RowMapper<T> mapper, Object... args) {
        try (Connection conn = DriverManager.getConnection(URL, USERNAME, PASSWORD)) {
            try (PreparedStatement psmt = conn.prepareStatement(sql)) {
                if (args != null) {
                    for (int i = 0; i < args.length; i++) {
                        psmt.setObject(i + 1, args[i]);
                    }
                }
                List<T> list = new ArrayList<>();
                try (ResultSet rs = psmt.executeQuery()) {
                    while (rs.next()) {
                        T obj = mapper.map(rs);
                        list.add(obj);
                    }
                }
                return list;
            }
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }

    private <T> T queryOne(String sql, RowMapper<T> mapper, Object... args) {
        List<T> list = queryList(sql, mapper, args);
        return list.size() == 0 ? null : list.get(0);
    }

    public int update(String sql, Object... args) {
        System.out.println("sql: [" + sql + "] params:" + Arrays.toString(args));
        try (Connection conn = DriverManager.getConnection(URL, USERNAME, PASSWORD)) {
            try (PreparedStatement psmt = conn.prepareStatement(sql)) {
                if (args != null) {
                    for (int i = 0; i < args.length; i++) {
                        psmt.setObject(i + 1, args[i]);
                    }
                }
                return psmt.executeUpdate();
            }
        } catch (SQLException e) {
            throw new RuntimeException(e);
        }
    }

    interface RowMapper<T> {
        T map(ResultSet rs);
    }

    static class BeanRowMapper<T> implements RowMapper<T> {
        private Class<T> beanClass;
        private Map<String, PropertyDescriptor> propertyMap = new HashMap<>();

        public BeanRowMapper(Class<T> beanClass) {
            this.beanClass = beanClass;
            try {
                BeanInfo beanInfo = Introspector.getBeanInfo(beanClass);
                PropertyDescriptor[] propertyDescriptors = beanInfo.getPropertyDescriptors();
                for (PropertyDescriptor pd : propertyDescriptors) {
                    propertyMap.put(pd.getName().toLowerCase(), pd);
                }
            } catch (IntrospectionException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public T map(ResultSet rs) {
            try {
                ResultSetMetaData metaData = rs.getMetaData();
                int columnCount = metaData.getColumnCount();
                T t = beanClass.newInstance();
                for (int i = 1; i <= columnCount; i++) {
                    String columnLabel = metaData.getColumnLabel(i);
                    PropertyDescriptor pd = propertyMap.get(columnLabel.toLowerCase());
                    if (pd != null) {
                        pd.getWriteMethod().invoke(t, rs.getObject(i));
                    }
                }
                return t;
            } catch (SQLException | InstantiationException | IllegalAccessException | InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }
    }
}
```
```java
// Cache key: the SQL text plus its bound parameters
class SqlPair {
    private String sql;
    private Object[] args;

    public SqlPair(String sql, Object[] args) {
        this.sql = sql;
        this.args = args;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SqlPair sqlPair = (SqlPair) o;
        return Objects.equals(sql, sqlPair.sql) && Arrays.equals(args, sqlPair.args);
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(sql);
        result = 31 * result + Arrays.hashCode(args);
        return result;
    }
}
```
Decorator pattern: GenericDaoCached adds caching on top of GenericDao.
```java
class GenericDaoCached extends GenericDao {
    private GenericDao dao = new GenericDao();
    private Map<SqlPair, Object> map = new HashMap<>();
    private ReentrantReadWriteLock rw = new ReentrantReadWriteLock();

    @Override
    public <T> List<T> queryList(Class<T> beanClass, String sql, Object... args) {
        return dao.queryList(beanClass, sql, args);
    }

    @Override
    public <T> T queryOne(Class<T> beanClass, String sql, Object... args) {
        // check the cache first; if present, return it directly
        SqlPair key = new SqlPair(sql, args);
        rw.readLock().lock();
        try {
            T value = (T) map.get(key);
            if (value != null) {
                return value;
            }
        } finally {
            rw.readLock().unlock();
        }
        rw.writeLock().lock();
        try {
            // several threads may have queued here; re-check so the query runs only once
            T value = (T) map.get(key);
            if (value == null) {
                // cache miss: query the database and cache the result
                value = dao.queryOne(beanClass, sql, args);
                map.put(key, value);
            }
            return value;
        } finally {
            rw.writeLock().unlock();
        }
    }

    @Override
    public int update(String sql, Object... args) {
        rw.writeLock().lock();
        try {
            // update the database first
            int update = dao.update(sql, args);
            // then clear the cache
            map.clear();
            return update;
        } finally {
            rw.writeLock().unlock();
        }
    }
}
```
```java
public class TestGenericDao {
    public static void main(String[] args) {
        GenericDao dao = new GenericDaoCached();
        System.out.println("============> query");
        String sql = "select * from emp where empno = ?";
        int empno = 7369;
        Emp emp = dao.queryOne(Emp.class, sql, empno);
        System.out.println(emp);
        emp = dao.queryOne(Emp.class, sql, empno);
        System.out.println(emp);
        emp = dao.queryOne(Emp.class, sql, empno);
        System.out.println(emp);
        System.out.println("============> update");
        dao.update("update emp set sal = ? where empno = ?", 800, empno);
        emp = dao.queryOne(Emp.class, sql, empno);
        System.out.println(emp);
    }
}
```
Cache update strategies
- Clear the cache first, then update the database

```java
@Override
public int update(String sql, Object... args) {
    rw.writeLock().lock();
    try {
        // clear the cache first
        map.clear();
        // then update the database
        return dao.update(sql, args);
    } finally {
        rw.writeLock().unlock();
    }
}
```
- Update the database first, then clear the cache

```java
@Override
public int update(String sql, Object... args) {
    rw.writeLock().lock();
    try {
        // update the database first
        int update = dao.update(sql, args);
        // then clear the cache
        map.clear();
        return update;
    } finally {
        rw.writeLock().unlock();
    }
}
```
With the first ordering, consider the unguarded interleaving: after B clears the cache but before it finishes updating the database, A may query, miss the cache, read the old value from the database, and put that old value back into the cache; once B's update completes, the cache keeps serving stale data until the next invalidation.
With the second ordering, the bad interleaving requires query thread A to miss the cache at exactly that moment (the entry has expired, or it is the first query), read the old value from the database, and only write it into the cache after B has already updated the database and cleared the cache.
That window is very small, which is why the second ordering is preferred; in the implementation above, the write lock additionally covers both the database update and the cache clear, so queries cannot interleave with them at all.
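To make the first ordering's problem concrete, here is a self-contained sketch of my own (StaleCacheDemo, the cache map, the database field and the sleep-based timing are all hypothetical stand-ins, and the query/update paths are deliberately left unlocked). It forces the interleaving and ends with the database holding the new value while the cache still serves the old one.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class StaleCacheDemo {
    static final Map<String, Integer> cache = new ConcurrentHashMap<>();
    static volatile int database = 1; // stands in for the real table row

    public static void main(String[] args) throws InterruptedException {
        Thread b = new Thread(() -> {              // updater: clear cache, then (slowly) update the DB
            cache.clear();
            sleep(100);                            // window in which A can re-populate the cache
            database = 2;
        }, "B");
        Thread a = new Thread(() -> {              // reader: cache miss -> read old DB value -> cache it
            sleep(50);
            Integer v = cache.computeIfAbsent("x", k -> database);
            System.out.println("A cached " + v);
        }, "A");
        b.start(); a.start();
        b.join(); a.join();
        // prints database=2, cache=1 -> the cache is stale
        System.out.println("database=" + database + ", cache=" + cache.get("x"));
    }

    static void sleep(long ms) {
        try { Thread.sleep(ms); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
    }
}
```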
A consistent cache built with the read-write lock
Use the read-write lock to implement a simple load-on-demand cache:
```java
@Override
public <T> T queryOne(Class<T> beanClass, String sql, Object... args) {
    // check the cache first; if present, return it directly
    SqlPair key = new SqlPair(sql, args);
    rw.readLock().lock();
    try {
        T value = (T) map.get(key);
        if (value != null) {
            return value;
        }
    } finally {
        rw.readLock().unlock();
    }
    rw.writeLock().lock();
    try {
        // double-checked locking: several threads may have queued for the write lock
        T value = (T) map.get(key);
        if (value == null) {
            // cache miss: query the database and cache the result
            value = dao.queryOne(beanClass, sql, args);
            map.put(key, value);
        }
        return value;
    } finally {
        rw.writeLock().unlock();
    }
}

@Override
public int update(String sql, Object... args) {
    rw.writeLock().lock();
    try {
        // update the database first
        int update = dao.update(sql, args);
        // then clear the cache
        map.clear();
        return update;
    } finally {
        rw.writeLock().unlock();
    }
}
```
Note
The implementation above demonstrates the read-write lock and keeps the cache consistent with the database, but it ignores the following issues:
- It suits read-heavy workloads; with frequent writes its performance is poor
- Cache capacity is not bounded
- Cache entries never expire
- It only works on a single machine
- Concurrency is still limited: a single lock guards the whole cache
- The update method is crude: it clears every key (consider partitioning by type or redesigning the keys)
- An optimistic alternative: update with CAS (see the sketch after this list)
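One direction for the last two bullets, as a minimal sketch of my own (PerKeyCache, get and invalidateAll are hypothetical names, not part of the code above): ConcurrentHashMap.computeIfAbsent updates the map with CAS plus per-bin locking, so independent keys no longer contend on one global lock and each missing key is loaded at most once. Invalidation here is still as coarse as before.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class PerKeyCache<K, V> {
    private final ConcurrentHashMap<K, V> map = new ConcurrentHashMap<>();

    V get(K key, Function<K, V> loader) {
        // computeIfAbsent runs the loader at most once per missing key;
        // concurrent callers for the same key wait for that single load,
        // while callers for other keys proceed in parallel
        return map.computeIfAbsent(key, loader);
    }

    void invalidateAll() {
        map.clear();   // still "clear everything"; a real design would invalidate per key or per table
    }
}
```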
Read-write lock internals
UML (class diagram omitted)
The read lock and the write lock share a single Sync synchronizer, so they also share the same wait queue, the same state, and so on.
t1 w.lock, t2 r.lock
t1 locks successfully. The flow is no different from ReentrantLock's acquire, except that the write-lock count occupies the low 16 bits of state while the read-lock count uses the high 16 bits.
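For reference, the split is defined in ReentrantReadWriteLock.Sync roughly as follows (quoted from JDK 8; the same exclusiveCount/sharedCount/SHARED_UNIT/MAX_COUNT names appear in the source below, and the trailing comments are mine):

```java
static final int SHARED_SHIFT   = 16;
static final int SHARED_UNIT    = (1 << SHARED_SHIFT);          // adding this to state bumps the read count by 1
static final int MAX_COUNT      = (1 << SHARED_SHIFT) - 1;      // 65535: maximum read or write hold count
static final int EXCLUSIVE_MASK = (1 << SHARED_SHIFT) - 1;

/** Returns the number of shared holds represented in count. */
static int sharedCount(int c)    { return c >>> SHARED_SHIFT; }  // read-lock count: high 16 bits
/** Returns the number of exclusive holds represented in count. */
static int exclusiveCount(int c) { return c & EXCLUSIVE_MASK; }  // write-lock count: low 16 bits
```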
```java
/**
 * The lock returned by method {@link ReentrantReadWriteLock#writeLock}.
 */
public static class WriteLock implements Lock, java.io.Serializable {
    private static final long serialVersionUID = -4992448646407690164L;
    private final Sync sync;

    /**
     * Acquires the write lock.
     *
     * <p>Acquires the write lock if neither the read nor write lock
     * are held by another thread
     * and returns immediately, setting the write lock hold count to
     * one.
     *
     * <p>If the current thread already holds the write lock then the
     * hold count is incremented by one and the method returns
     * immediately.
     *
     * <p>If the lock is held by another thread then the current
     * thread becomes disabled for thread scheduling purposes and
     * lies dormant until the write lock has been acquired, at which
     * time the write lock hold count is set to one.
     */
    public void lock() {
        sync.acquire(1);
    }
    // ...
}
```
acquire() is inherited from AQS; the concrete logic lives in tryAcquire() of the read-write lock's own Sync (its AQS subclass):
```java
protected final boolean tryAcquire(int acquires) {
    /*
     * Walkthrough:
     * 1. If read count nonzero or write count nonzero
     *    and owner is a different thread, fail.
     * 2. If count would saturate, fail. (This can only
     *    happen if count is already nonzero.)
     * 3. Otherwise, this thread is eligible for lock if
     *    it is either a reentrant acquire or
     *    queue policy allows it. If so, update state
     *    and set owner.
     */
    Thread current = Thread.currentThread();
    int c = getState();
    int w = exclusiveCount(c);
    if (c != 0) {
        // (Note: if c != 0 and w == 0 then shared count != 0)
        // a read lock is held, or the write lock is held by another thread
        if (w == 0 || current != getExclusiveOwnerThread())
            return false;
        if (w + exclusiveCount(acquires) > MAX_COUNT)
            throw new Error("Maximum lock count exceeded");
        // Reentrant acquire
        setState(c + acquires);
        return true;
    }
    if (writerShouldBlock() ||
        !compareAndSetState(c, c + acquires))
        return false;
    setExclusiveOwnerThread(current);
    return true;
}
```
t2 then calls r.lock(), which enters the read lock's sync.acquireShared(1) flow and first goes through tryAcquireShared. If the write lock is held by another thread, tryAcquireShared returns -1, meaning failure. The return value of tryAcquireShared means:
- -1: failure
- 0: success, but subsequent nodes will not be woken up
- a positive number: success, and the value is how many following nodes still need to be woken up; the read-write lock always returns 1
```java
public void lock() {
    sync.acquireShared(1);
}
```

```java
public final void acquireShared(int arg) {
    if (tryAcquireShared(arg) < 0)
        doAcquireShared(arg);
}
```
```java
protected final int tryAcquireShared(int unused) {
    /*
     * Walkthrough:
     * 1. If write lock held by another thread, fail.
     * 2. Otherwise, this thread is eligible for
     *    lock wrt state, so ask if it should block
     *    because of queue policy. If not, try
     *    to grant by CASing state and updating count.
     *    Note that step does not check for reentrant
     *    acquires, which is postponed to full version
     *    to avoid having to check hold count in
     *    the more typical non-reentrant case.
     * 3. If step 2 fails either because thread
     *    apparently not eligible or CAS fails or count
     *    saturated, chain to version with full retry loop.
     */
    Thread current = Thread.currentThread();
    int c = getState();
    if (exclusiveCount(c) != 0 &&
        getExclusiveOwnerThread() != current)
        return -1;
    int r = sharedCount(c);
    if (!readerShouldBlock() &&
        r < MAX_COUNT &&
        compareAndSetState(c, c + SHARED_UNIT)) {
        if (r == 0) {
            firstReader = current;
            firstReaderHoldCount = 1;
        } else if (firstReader == current) {
            firstReaderHoldCount++;
        } else {
            HoldCounter rh = cachedHoldCounter;
            if (rh == null || rh.tid != getThreadId(current))
                cachedHoldCounter = rh = readHolds.get();
            else if (rh.count == 0)
                readHolds.set(rh);
            rh.count++;
        }
        return 1;
    }
    return fullTryAcquireShared(current);
}
```
If the attempt fails, t2 enters the sync.doAcquireShared(1) flow. As with the exclusive path, it first calls addWaiter to enqueue a node, but the node is created in Node.SHARED mode rather than Node.EXCLUSIVE mode. Note that at this point t2 is still running (not yet parked).

```java
/**
 * Acquires in shared uninterruptible mode.
 * @param arg the acquire argument
 */
private void doAcquireShared(int arg) {
    final Node node = addWaiter(Node.SHARED);
    boolean failed = true;
    try {
        boolean interrupted = false;
        for (;;) {
            final Node p = node.predecessor();
            if (p == head) {
                int r = tryAcquireShared(arg);
                if (r >= 0) {
                    setHeadAndPropagate(node, r);
                    p.next = null; // help GC
                    if (interrupted)
                        selfInterrupt();
                    failed = false;
                    return;
                }
            }
            if (shouldParkAfterFailedAcquire(p, node) &&
                parkAndCheckInterrupt())
                interrupted = true;
        }
    } finally {
        if (failed)
            cancelAcquire(node);
    }
}
```
t2 checks whether its node is second in the queue (right behind the head); if so, it calls tryAcquireShared(1) again to retry.
If that fails, one pass of the for (;;) loop in doAcquireShared sets the predecessor's waitStatus to -1; the next pass retries tryAcquireShared(1), and if it still fails, the thread parks at parkAndCheckInterrupt().
t3 r.lock, t4 w.lock
In this state, suppose t3 now requests the read lock and t4 the write lock while t1 still holds the write lock; the queue then grows as described (diagram omitted).
t1 w.unlock
t1 now goes through the write lock's sync.release(1) flow; sync.tryRelease(1) succeeds (diagram omitted).
```java
public void unlock() {
    sync.release(1);
}
```

```java
public final boolean release(int arg) {
    if (tryRelease(arg)) {
        Node h = head;
        if (h != null && h.waitStatus != 0)
            unparkSuccessor(h);
        return true;
    }
    return false;
}
```
Next comes the wake-up step, sync.unparkSuccessor, which resumes the second node in the queue. t2 wakes up at parkAndCheckInterrupt() inside doAcquireShared, goes around the for (;;) loop once more, and this time tryAcquireShared succeeds, incrementing the read-lock count.
Now that t2 is running again, it calls setHeadAndPropagate(node, 1), which makes its own node the new head. That is not the end of it: inside setHeadAndPropagate the next node is checked, and since it is a shared node, doReleaseShared() is called.
doReleaseShared() changes the head's waitStatus from -1 to 0 and wakes the new second node. t3 then resumes at parkAndCheckInterrupt() inside doAcquireShared, goes around the for (;;) loop once more, and tryAcquireShared succeeds, incrementing the read-lock count again.
With t3 running again, it too calls setHeadAndPropagate(node, 1), and its node becomes the new head.
The next node is no longer a shared one, so t4's node is not woken up.
t2 r.unlock, t3 r.unlock
t2 enters sync.releaseShared(1) and calls tryReleaseShared(1), which decrements the count; since the count is not yet zero, nothing further happens.
t3 then enters sync.releaseShared(1) and calls tryReleaseShared(1) to decrement the count. This time the count reaches zero, so it proceeds to doReleaseShared(), which changes the head's waitStatus from -1 to 0 and wakes the second node. t4 resumes at parkAndCheckInterrupt inside acquireQueued, goes around the for (;;) loop again; now it is the second node and there is no other competition, so tryAcquire(1) succeeds, the head node is updated, and the flow is complete.
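As a small runnable sketch of my own that reproduces the t1..t4 scenario above (WalkthroughDemo and the sleep-based timings are purely illustrative), the expected output is: t1 locks first, t2 and t3 lock together once t1 unlocks, and t4 locks only after both read locks are released.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class WalkthroughDemo {
    static final ReentrantReadWriteLock rw = new ReentrantReadWriteLock();

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = worker("t1", rw.writeLock()::lock, rw.writeLock()::unlock, 300);
        t1.start();
        Thread.sleep(50);                                  // let t1 take the write lock first
        Thread t2 = worker("t2", rw.readLock()::lock, rw.readLock()::unlock, 300);
        Thread t3 = worker("t3", rw.readLock()::lock, rw.readLock()::unlock, 300);
        Thread t4 = worker("t4", rw.writeLock()::lock, rw.writeLock()::unlock, 100);
        t2.start(); Thread.sleep(50);
        t3.start(); Thread.sleep(50);
        t4.start();                                        // queues behind t2 and t3
    }

    static Thread worker(String name, Runnable lock, Runnable unlock, long holdMillis) {
        return new Thread(() -> {
            lock.run();
            try {
                System.out.println(name + " locked");
                Thread.sleep(holdMillis);                  // hold the lock for a while
            } catch (InterruptedException ignored) {
            } finally {
                System.out.println(name + " unlocking");
                unlock.run();
            }
        }, name);
    }
}
```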
Source-code flow recap
Write-lock acquisition flow
```java
static final class NonfairSync extends Sync {
    // ... irrelevant code omitted

    // method of the outer WriteLock class, moved here for readability
    public void lock() {
        sync.acquire(1);
    }

    // inherited from AQS, moved here for readability
    public final void acquire(int arg) {
        if (
            // trying to acquire the write lock failed, and...
            !tryAcquire(arg) &&
            // ...the current thread is wrapped in a Node in exclusive mode
            // and blocks in the AQS queue
            acquireQueued(addWaiter(Node.EXCLUSIVE), arg)
        ) {
            selfInterrupt();
        }
    }

    // inherited from Sync, moved here for readability
    protected final boolean tryAcquire(int acquires) {
        Thread current = Thread.currentThread();
        int c = getState();
        // the low 16 bits hold the write-lock count
        int w = exclusiveCount(c);
        if (c != 0) {
            if (
                // c != 0 and w == 0 means a read lock is held, or...
                w == 0 ||
                // ...the exclusive owner is not the current thread
                current != getExclusiveOwnerThread()
            ) {
                // acquisition fails
                return false;
            }
            // the write-lock count would overflow the low 16 bits
            if (w + exclusiveCount(acquires) > MAX_COUNT)
                throw new Error("Maximum lock count exceeded");
            // write-lock reentry, acquisition succeeds
            setState(c + acquires);
            return true;
        }
        if (
            // should the writer block, or...
            writerShouldBlock() ||
            // ...did the CAS on the count fail
            !compareAndSetState(c, c + acquires)
        ) {
            // acquisition fails
            return false;
        }
        // acquisition succeeds
        setExclusiveOwnerThread(current);
        return true;
    }

    // for the non-fair lock, writerShouldBlock always returns false: never back off up front
    final boolean writerShouldBlock() {
        return false;
    }
}
```
Write-lock release flow
```java
static final class NonfairSync extends Sync {
    // ... irrelevant code omitted

    // WriteLock method, moved here for readability
    public void unlock() {
        sync.release(1);
    }

    // inherited from AQS, moved here for readability
    public final boolean release(int arg) {
        // releasing the write lock succeeded
        if (tryRelease(arg)) {
            // unpark a thread waiting in the AQS queue
            Node h = head;
            if (h != null && h.waitStatus != 0)
                unparkSuccessor(h);
            return true;
        }
        return false;
    }

    // inherited from Sync, moved here for readability
    protected final boolean tryRelease(int releases) {
        if (!isHeldExclusively())
            throw new IllegalMonitorStateException();
        int nextc = getState() - releases;
        // because of reentrancy, the lock only counts as released once the write-lock count drops to 0
        boolean free = exclusiveCount(nextc) == 0;
        if (free) {
            setExclusiveOwnerThread(null);
        }
        setState(nextc);
        return free;
    }
}
```
Read-lock acquisition flow
```java
static final class NonfairSync extends Sync {

    // ReadLock method, moved here for readability
    public void lock() {
        sync.acquireShared(1);
    }

    // inherited from AQS, moved here for readability
    public final void acquireShared(int arg) {
        // a negative return from tryAcquireShared means the read lock was not acquired
        if (tryAcquireShared(arg) < 0) {
            doAcquireShared(arg);
        }
    }

    // inherited from Sync, moved here for readability
    protected final int tryAcquireShared(int unused) {
        Thread current = Thread.currentThread();
        int c = getState();
        // if another thread holds the write lock, acquiring the read lock fails
        if (
            exclusiveCount(c) != 0 &&
            getExclusiveOwnerThread() != current
        ) {
            return -1;
        }
        int r = sharedCount(c);
        if (
            // the reader should not block (it should if the first waiter wants the write lock), and
            !readerShouldBlock() &&
            // the read count is below the maximum, and
            r < MAX_COUNT &&
            // the CAS that increments the count succeeds
            compareAndSetState(c, c + SHARED_UNIT)
        ) {
            // ... unimportant code omitted
            return 1;
        }
        return fullTryAcquireShared(current);
    }

    // for the non-fair lock, readerShouldBlock checks whether the first waiting node
    // in the AQS queue wants the write lock: true means the reader should back off
    final boolean readerShouldBlock() {
        return apparentlyFirstQueuedIsExclusive();
    }

    // inherited from Sync, moved here for readability
    // similar to tryAcquireShared, but keeps retrying in a for (;;) loop; it never blocks while running
    final int fullTryAcquireShared(Thread current) {
        HoldCounter rh = null;
        for (;;) {
            int c = getState();
            if (exclusiveCount(c) != 0) {
                if (getExclusiveOwnerThread() != current)
                    return -1;
            } else if (readerShouldBlock()) {
                // ... unimportant code omitted
            }
            if (sharedCount(c) == MAX_COUNT)
                throw new Error("Maximum lock count exceeded");
            if (compareAndSetState(c, c + SHARED_UNIT)) {
                // ... unimportant code omitted
                return 1;
            }
        }
    }

    // inherited from AQS, moved here for readability
    private void doAcquireShared(int arg) {
        // wrap the current thread in a Node in shared mode
        final Node node = addWaiter(Node.SHARED);
        boolean failed = true;
        try {
            boolean interrupted = false;
            for (;;) {
                final Node p = node.predecessor();
                if (p == head) {
                    // try once more to acquire the read lock
                    int r = tryAcquireShared(arg);
                    // success
                    if (r >= 0) {
                        // (1)
                        // r is the number of available resources; here it is always 1,
                        // which allows propagation (waking the next shared node in the AQS queue)
                        setHeadAndPropagate(node, r);
                        p.next = null; // help GC
                        if (interrupted)
                            selfInterrupt();
                        failed = false;
                        return;
                    }
                }
                if (
                    // should we park after the failed acquire (predecessor's waitStatus == Node.SIGNAL)
                    shouldParkAfterFailedAcquire(p, node) &&
                    // park the current thread
                    parkAndCheckInterrupt()
                ) {
                    interrupted = true;
                }
            }
        } finally {
            if (failed)
                cancelAcquire(node);
        }
    }

    // (1) inherited from AQS, moved here for readability
    private void setHeadAndPropagate(Node node, int propagate) {
        Node h = head; // Record old head for check below
        // make this node the new head
        setHead(node);
        // propagate > 0 means shared resources remain (e.g. a shared read lock or semaphore permits)
        // the old head's waitStatus was Node.SIGNAL or Node.PROPAGATE
        // the new head's waitStatus is Node.SIGNAL or Node.PROPAGATE
        if (propagate > 0 || h == null || h.waitStatus < 0 ||
            (h = head) == null || h.waitStatus < 0) {
            Node s = node.next;
            // if this is the last node, or the next node is waiting for the shared read lock
            if (s == null || s.isShared()) {
                // go to (2)
                doReleaseShared();
            }
        }
    }

    // (2) inherited from AQS, moved here for readability
    private void doReleaseShared() {
        // if head.waitStatus == Node.SIGNAL, CAS it to 0 and unpark the next node
        // if head.waitStatus == 0, CAS it to Node.PROPAGATE (works around a propagation bug, analysed later)
        for (;;) {
            Node h = head;
            // the queue still has nodes
            if (h != null && h != tail) {
                int ws = h.waitStatus;
                if (ws == Node.SIGNAL) {
                    if (!compareAndSetWaitStatus(h, Node.SIGNAL, 0))
                        continue;            // loop to recheck cases
                    // unpark the next node; if it acquires the read lock and the node
                    // after it is also shared, doReleaseShared continues the chain
                    unparkSuccessor(h);
                }
                else if (ws == 0 &&
                         !compareAndSetWaitStatus(h, 0, Node.PROPAGATE))
                    continue;                // loop on failed CAS
            }
            if (h == head)                   // loop if head changed
                break;
        }
    }
}
```
Read-lock release flow
```java
static final class NonfairSync extends Sync {

    // ReadLock method, moved here for readability
    public void unlock() {
        sync.releaseShared(1);
    }

    // inherited from AQS, moved here for readability
    public final boolean releaseShared(int arg) {
        if (tryReleaseShared(arg)) {
            doReleaseShared();
            return true;
        }
        return false;
    }

    // inherited from Sync, moved here for readability
    protected final boolean tryReleaseShared(int unused) {
        // ... unimportant code omitted
        for (;;) {
            int c = getState();
            int nextc = c - SHARED_UNIT;
            if (compareAndSetState(c, nextc)) {
                // the read-lock count does not affect other threads acquiring the read lock,
                // but it does affect threads trying to acquire the write lock:
                // only when the count reaches 0 is the lock truly released
                return nextc == 0;
            }
        }
    }

    // inherited from AQS, moved here for readability
    private void doReleaseShared() {
        // if head.waitStatus == Node.SIGNAL, CAS it to 0 and unpark the next node
        // if head.waitStatus == 0, CAS it to Node.PROPAGATE
        for (;;) {
            Node h = head;
            if (h != null && h != tail) {
                int ws = h.waitStatus;
                // if other threads are also releasing the read lock, waitStatus must be CASed to 0 first
                // so that unparkSuccessor is not executed more than once
                if (ws == Node.SIGNAL) {
                    if (!compareAndSetWaitStatus(h, Node.SIGNAL, 0))
                        continue;            // loop to recheck cases
                    unparkSuccessor(h);
                }
                // if it is already 0, change it to -3 (PROPAGATE) to preserve propagation;
                // see the semaphore bug analysis later
                else if (ws == 0 &&
                         !compareAndSetWaitStatus(h, 0, Node.PROPAGATE))
                    continue;                // loop on failed CAS
            }
            if (h == head)                   // loop if head changed
                break;
        }
    }
}
```