// Let's look at the constructors first
public ConcurrentHashMap() {
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
DEFAULT_INITIAL_CAPACITY),
DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
putAll(m);
}
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (concurrencyLevel > MAX_SEGMENTS)
concurrencyLevel = MAX_SEGMENTS;
// Find power-of-two sizes best matching arguments
int sshift = 0;
int ssize = 1;
while (ssize < concurrencyLevel) {
++sshift;
ssize <<= 1;
}
// 28 by default (32 - sshift, with sshift = 4)
this.segmentShift = 32 - sshift;
// 15 by default
this.segmentMask = ssize - 1;
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
// work out how many entries each segment needs to hold
int c = initialCapacity / ssize;
if (c * ssize < initialCapacity)
++c;
// 2 by default (MIN_SEGMENT_TABLE_CAPACITY)
int cap = MIN_SEGMENT_TABLE_CAPACITY;
while (cap < c)
cap <<= 1;
// create segments and segments[0]
// create the prototype segment s0
Segment<K,V> s0 =
new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
(HashEntry<K,V>[])new HashEntry[cap]);
// create the segments array
Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
// publish s0 as segments[0]
UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
this.segments = ss;
}
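To make the sizing math above concrete, here is a small standalone sketch (the class name SegmentIndexDemo is made up for illustration) that recomputes segmentShift and segmentMask for the default concurrencyLevel of 16 and shows how a hash is mapped to a segment index, the same way put does below. The real map also runs the key's hashCode through an extra bit-spreading hash(), which is omitted here.
public class SegmentIndexDemo {
    public static void main(String[] args) {
        int concurrencyLevel = 16;           // DEFAULT_CONCURRENCY_LEVEL
        int sshift = 0, ssize = 1;
        while (ssize < concurrencyLevel) {   // round up to a power of two
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;      // 28 by default
        int segmentMask = ssize - 1;         // 15 by default

        int hash = "someKey".hashCode();     // stand-in for the internal hash(key)
        int j = (hash >>> segmentShift) & segmentMask;
        System.out.println("segmentShift=" + segmentShift
                + " segmentMask=" + segmentMask + " segment index=" + j);
    }
}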
public V put(K key, V value) {
Segment<K,V> s;
if (value == null)
throw new NullPointerException();
// compute the hash of the key
int hash = hash(key);
// locate the segment within the segments array
int j = (hash >>> segmentShift) & segmentMask;
// read the segment
if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck
(segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
// segment not created yet, create it
s = ensureSegment(j);
return s.put(key, hash, value, false);
}
// create the segment at offset u of the segments array via CAS if it does not exist yet
private Segment<K,V> ensureSegment(int k) {
final Segment<K,V>[] ss = this.segments;
long u = (k << SSHIFT) + SBASE; // raw offset
Segment<K,V> seg;
if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
Segment<K,V> proto = ss[0]; // use segment 0 as prototype
int cap = proto.table.length;
float lf = proto.loadFactor;
int threshold = (int)(cap * lf);
HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
== null) { // recheck
Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
== null) {
if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
break;
}
}
}
return seg;
}
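ensureSegment boils down to a recheck-then-CAS pattern: read the slot with volatile semantics, build a candidate only if the slot is still empty, and let compareAndSwapObject pick a single winner so every racing thread ends up with the same Segment. The sketch below (a hypothetical LazySlots class, using AtomicReferenceArray instead of Unsafe) shows the same idea in plain Java; it is an illustration of the pattern, not the JDK code.
import java.util.concurrent.atomic.AtomicReferenceArray;

class LazySlots<T> {
    private final AtomicReferenceArray<T> slots;

    LazySlots(int size) { this.slots = new AtomicReferenceArray<T>(size); }

    // Returns the element at slot k, installing 'candidate' with CAS if the
    // slot was empty. All racing threads end up seeing the same instance.
    T ensure(int k, T candidate) {
        T existing;
        while ((existing = slots.get(k)) == null) {      // volatile read, like getObjectVolatile
            if (slots.compareAndSet(k, null, candidate)) // only one thread's CAS succeeds
                return candidate;
        }
        return existing;                                 // another thread already installed one
    }
}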
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
// try to acquire the lock; on success node is null, otherwise scanAndLockForPut runs and returns only once the lock is held (possibly with a pre-created node)
HashEntry<K,V> node = tryLock() ? null :
scanAndLockForPut(key, hash, value);
// the lock is held from this point on
V oldValue;
try {
HashEntry<K,V>[] tab = table;
int index = (tab.length - 1) & hash;
// read the head entry of the bucket
HashEntry<K,V> first = entryAt(tab, index);
for (HashEntry<K,V> e = first;;) {
if (e != null) {
K k;
// the key already exists, just overwrite the value (unless onlyIfAbsent)
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
oldValue = e.value;
if (!onlyIfAbsent) {
e.value = value;
++modCount;
}
break;
}
e = e.next;
}
// the key is not in the chain
else {
// node was created speculatively in scanAndLockForPut while waiting for the lock (the key was not found in the chain)
if (node != null)
node.setNext(first);
else
node = new HashEntry<K,V>(hash, key, value, first);
int c = count + 1;
if (c > threshold && tab.length < MAXIMUM_CAPACITY)
// grow the table
rehash(node);
else
// install the node as the new bucket head
setEntryAt(tab, index, node);
++modCount;
count = c;
oldValue = null;
break;
}
}
} finally {
unlock();
}
return oldValue;
}
// while spinning for the lock, keep scanning the bucket to see whether the key is already there
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
// read the head HashEntry of the bucket this hash maps to
HashEntry<K,V> first = entryForHash(this, hash);
HashEntry<K,V> e = first;
HashEntry<K,V> node = null;
int retries = -1; // negative while locating node
// spin until the lock is acquired
while (!tryLock()) {
HashEntry<K,V> f; // to recheck first below
if (retries < 0) {
// reached the end of the chain (or the bucket is empty): the key is not there
if (e == null) {
if (node == null) // speculatively create node
// create the candidate node
node = new HashEntry<K,V>(hash, key, value, null);
retries = 0;
}
else if (key.equals(e.key))
retries = 0;
else
e = e.next;
}
else if (++retries > MAX_SCAN_RETRIES) {
lock();
break;
}
else if ((retries & 1) == 0 &&
(f = entryForHash(this, hash)) != first) {
e = first = f; // re-traverse if entry changed
retries = -1;
}
}
return node;
}
// volatile read of the bucket head for hash h within a segment
static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
HashEntry<K,V>[] tab;
return (seg == null || (tab = seg.table) == null) ? null :
(HashEntry<K,V>) UNSAFE.getObjectVolatile
(tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
}
// volatile read of the entry at index i of the table
@SuppressWarnings("unchecked")
static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
return (tab == null) ? null :
(HashEntry<K,V>) UNSAFE.getObjectVolatile
(tab, ((long)i << TSHIFT) + TBASE);
}
// double the table size and redistribute the entries
private void rehash(HashEntry<K,V> node) {
HashEntry<K,V>[] oldTable = table;
int oldCapacity = oldTable.length;
int newCapacity = oldCapacity << 1;
threshold = (int)(newCapacity * loadFactor);
HashEntry<K,V>[] newTable =
(HashEntry<K,V>[]) new HashEntry[newCapacity];
int sizeMask = newCapacity - 1;
for (int i = 0; i < oldCapacity ; i++) {
HashEntry<K,V> e = oldTable[i];
if (e != null) {
HashEntry<K,V> next = e.next;
int idx = e.hash & sizeMask;
if (next == null) // Single node on list
newTable[idx] = e;
else { // Reuse consecutive sequence at same slot
HashEntry<K,V> lastRun = e;
int lastIdx = idx;
// find lastRun: the first node of the trailing run whose nodes all hash to the same new index; that run keeps its order, so it can be reused in the new table without cloning
for (HashEntry<K,V> last = next;
last != null;
last = last.next) {
int k = last.hash & sizeMask;
if (k != lastIdx) {
lastIdx = k;
lastRun = last;
}
}
// put lastRun at its new index; the whole trailing run moves with it
newTable[lastIdx] = lastRun;
// clone and move the nodes before lastRun; everything from lastRun onwards was already placed by the line above
for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
V v = p.value;
int h = p.hash;
int k = h & sizeMask;
HashEntry<K,V> n = newTable[k];
newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
}
}
}
}
// finally insert the new node into the new table
int nodeIndex = node.hash & sizeMask; // add the new node
node.setNext(newTable[nodeIndex]);
newTable[nodeIndex] = node;
table = newTable;
}
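The lastRun trick in rehash is easiest to see on a plain linked list: walk the chain once to find the last point after which every node maps to the same new bucket, reuse that whole tail as-is, and clone only the nodes in front of it. The sketch below uses a made-up minimal Node class purely to illustrate the idea; it is not the JDK implementation.
class LastRunDemo {
    static final class Node {
        final int hash;
        Node next;
        Node(int hash, Node next) { this.hash = hash; this.next = next; }
    }

    // Moves the chain starting at head into newTable (whose length is a power of two).
    static void transfer(Node head, Node[] newTable) {
        int sizeMask = newTable.length - 1;
        Node lastRun = head;
        int lastIdx = head.hash & sizeMask;
        for (Node p = head.next; p != null; p = p.next) {
            int k = p.hash & sizeMask;
            if (k != lastIdx) {              // the run is broken, restart it here
                lastIdx = k;
                lastRun = p;
            }
        }
        newTable[lastIdx] = lastRun;         // reuse the whole trailing run as-is
        for (Node p = head; p != lastRun; p = p.next) {
            int k = p.hash & sizeMask;
            newTable[k] = new Node(p.hash, newTable[k]); // clone the remaining nodes
        }
    }
}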
static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
HashEntry<K,V> e) {
UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
}
/**
Summary of put:
1. Compute the hash and locate a segment in the segments array; if it has not been created yet, ensureSegment creates it (a segment can be thought of as a small HashMap guarded by its own lock).
2. Inside the segment, try to acquire the lock; if that fails, spin in scanAndLockForPut until the lock is held, speculatively creating the node when the key is not found in the bucket.
3. With the lock held, read the bucket head for the hash and walk the chain; if an entry with an equal key is found, replace its value and return the old one.
4. If the key is not found, reuse the node created in step 2 (just point its next at the current head), or create a new node if none was created.
5. If the new count exceeds the threshold, rehash (double the table) and insert during the rehash; otherwise install the node as the new bucket head.
*/
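A quick usage sketch of the behaviour those steps produce, assuming the standard java.util.concurrent.ConcurrentHashMap (the class name PutDemo is made up): put returns the previous value when a key is overwritten, and null values are rejected up front.
import java.util.concurrent.ConcurrentHashMap;

public class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<String, Integer>();
        System.out.println(map.put("a", 1));   // null: the key was absent
        System.out.println(map.put("a", 2));   // 1: the previous value is returned
        try {
            map.put("b", null);                // values must not be null
        } catch (NullPointerException expected) {
            System.out.println("null values are rejected");
        }
    }
}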
// if the key is already present return its value, otherwise insert; the implementation mirrors put (onlyIfAbsent = true)
public V putIfAbsent(K key, V value) {
Segment<K,V> s;
if (value == null)
throw new NullPointerException();
int hash = hash(key);
int j = (hash >>> segmentShift) & segmentMask;
if ((s = (Segment<K,V>)UNSAFE.getObject
(segments, (j << SSHIFT) + SBASE)) == null)
s = ensureSegment(j);
return s.put(key, hash, value, true);
}
public void putAll(Map<? extends K, ? extends V> m) {
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
put(e.getKey(), e.getValue());
}
// get takes no lock, so how is visibility guaranteed? The table slot is read with getObjectVolatile, and HashEntry's value and next fields are volatile, so reads establish a happens-before relationship with the writes that published them.
public V get(Object key) {
Segment<K,V> s; // manually integrate access methods to reduce overhead
HashEntry<K,V>[] tab;
int h = hash(key);
long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
(tab = s.table) != null) {
for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
(tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
e != null; e = e.next) {
K k;
if ((k = e.key) == key || (e.hash == h && key.equals(k)))
return e.value;
}
}
return null;
}
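The visibility argument above rests on the volatile happens-before rule: once a writer publishes a node through a volatile reference, a reader that observes the node also observes the writes made before the publication. A tiny sketch of that rule, using a made-up VolatilePublishDemo class in which the volatile field head stands in for a table slot:
class VolatilePublishDemo {
    static final class Node {
        final String key;
        final String value;
        Node(String key, String value) { this.key = key; this.value = value; }
    }

    private volatile Node head;              // plays the role of the volatile table slot

    void writer() {
        head = new Node("k", "v");           // volatile write publishes the node
    }

    String reader() {
        Node n = head;                       // volatile read
        return (n == null) ? null : n.value; // if n is visible, its fields are visible too
    }
}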
// remove the entry for the given key
public V remove(Object key) {
int hash = hash(key);
// locate the segment
Segment<K,V> s = segmentForHash(hash);
return s == null ? null : s.remove(key, hash, null);
}
final V remove(Object key, int hash, Object value) {
// try to acquire the lock; if that fails, scanAndLock spins until the lock is held
if (!tryLock())
scanAndLock(key, hash);
V oldValue = null;
try {
HashEntry<K,V>[] tab = table;
int index = (tab.length - 1) & hash;
HashEntry<K,V> e = entryAt(tab, index);
HashEntry<K,V> pred = null;
while (e != null) {
K k;
HashEntry<K,V> next = e.next;
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
V v = e.value;
if (value == null || value == v || value.equals(v)) {
if (pred == null)
// removing the head: next becomes the new bucket head
setEntryAt(tab, index, next);
else
// unlink e from the chain
pred.setNext(next);
++modCount;
--count;
oldValue = v;
}
break;
}
pred = e;
e = next;
}
} finally {
unlock();
}
return oldValue;
}
// The purpose of this pre-scan is not obvious at first sight; the JDK's own comment on scanAndLockForPut suggests that traversing the chain while spinning mainly warms up the code paths and cache lines for the locked operation that follows.
private void scanAndLock(Object key, int hash) {
// similar to but simpler than scanAndLockForPut
// read the bucket head
HashEntry<K,V> first = entryForHash(this, hash);
HashEntry<K,V> e = first;
int retries = -1;
// spin until the lock is acquired
while (!tryLock()) {
HashEntry<K,V> f;
if (retries < 0) {
if (e == null || key.equals(e.key))
retries = 0;
else
e = e.next;
}
else if (++retries > MAX_SCAN_RETRIES) {
lock();
break;
}
else if ((retries & 1) == 0 &&
(f = entryForHash(this, hash)) != first) {
e = first = f;
retries = -1;
}
}
}
// remove only when both the given key and value match; the implementation mirrors remove(key)
public boolean remove(Object key, Object value) {
int hash = hash(key);
Segment<K,V> s;
return value != null && (s = segmentForHash(hash)) != null &&
s.remove(key, hash, value) != null;
}
// replace the value for a key, but only when the key is already mapped (unlike put, it never inserts)
public V replace(K key, V value) {
int hash = hash(key);
if (value == null)
throw new NullPointerException();
Segment<K,V> s = segmentForHash(hash);
return s == null ? null : s.replace(key, hash, value);
}
final V replace(K key, int hash, V value) {
if (!tryLock())
scanAndLock(key, hash);
V oldValue = null;
try {
HashEntry<K,V> e;
for (e = entryForHash(this, hash); e != null; e = e.next) {
K k;
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
oldValue = e.value;
e.value = value;
++modCount;
break;
}
}
} finally {
unlock();
}
return oldValue;
}
// replace only when the key is present and its current value equals oldValue
public boolean replace(K key, V oldValue, V newValue) {
int hash = hash(key);
if (oldValue == null || newValue == null)
throw new NullPointerException();
Segment<K,V> s = segmentForHash(hash);
return s != null && s.replace(key, hash, oldValue, newValue);
}
final boolean replace(K key, int hash, V oldValue, V newValue) {
if (!tryLock())
scanAndLock(key, hash);
boolean replaced = false;
try {
HashEntry<K,V> e;
for (e = entryForHash(this, hash); e != null; e = e.next) {
K k;
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
if (oldValue.equals(e.value)) {
e.value = newValue;
++modCount;
replaced = true;
}
break;
}
}
} finally {
unlock();
}
return replaced;
}
// is the map empty?
public boolean isEmpty() {
long sum = 0L;
final Segment<K,V>[] segments = this.segments;
// first pass over all segments
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
if (seg.count != 0)
return false;
// this segment's count is 0; accumulate its modCount
sum += seg.modCount;
}
}
// recheck, in case entries were added and removed concurrently during the first pass
if (sum != 0L) { // recheck unless no modifications
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
if (seg.count != 0)
return false;
sum -= seg.modCount;
}
}
// the modCount sums from the two passes differ, so something was inserted in between
if (sum != 0L)
return false;
}
return true;
}
// returns the number of entries (a neat trick: first make up to three unlocked passes, and if two consecutive passes see the same total modCount return that size; otherwise lock every segment and count)
public int size() {
// Try a few times to get accurate count. On failure due to
// continuous async changes in table, resort to locking.
final Segment<K,V>[] segments = this.segments;
int size;
boolean overflow; // true if size overflows 32 bits
long sum; // sum of modCounts
long last = 0L; // previous sum
int retries = -1; // first iteration isn't retry
try {
for (;;) {
// the unlocked passes failed to produce two matching sums, so lock everything and recount
if (retries++ == RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
// to get an exact value, force creation of every segment and lock it; the lock is reentrant, so if the loop comes around again the same thread simply re-enters
ensureSegment(j).lock(); // force creation
}
sum = 0L;
size = 0;
overflow = false;
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
sum += seg.modCount;
int c = seg.count;
if (c < 0 || (size += c) < 0)
overflow = true;
}
}
// two consecutive passes saw the same modCount sum, so the size is consistent for that instant
if (sum == last)
break;
last = sum;
}
} finally {
// unlock every segment if we locked them
if (retries > RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
segmentAt(segments, j).unlock();
}
}
return overflow ? Integer.MAX_VALUE : size;
}
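The same optimistic strategy can be shown in miniature: take unlocked snapshots, trust them only when two consecutive passes see the same modification count, and fall back to the lock after a few retries. The sketch below (a hypothetical OptimisticCounter with a single lock instead of per-segment locks) is only meant to illustrate the retry-then-lock idea, not to reproduce the JDK code.
import java.util.concurrent.locks.ReentrantLock;

class OptimisticCounter {
    private static final int RETRIES_BEFORE_LOCK = 2;
    private final ReentrantLock lock = new ReentrantLock();
    private volatile int modCount;
    private volatile int count;

    void add(int n) {
        lock.lock();
        try {
            count += n;
            modCount++;                      // bump the version on every modification
        } finally {
            lock.unlock();
        }
    }

    int size() {
        int last = -1;
        for (int retries = -1; ; retries++) {
            if (retries == RETRIES_BEFORE_LOCK) {
                lock.lock();                 // give up on optimism, read under the lock
                try {
                    return count;
                } finally {
                    lock.unlock();
                }
            }
            int sum = modCount;              // unlocked snapshot of the version
            int size = count;                // unlocked snapshot of the value
            if (sum == last)                 // two stable passes in a row: trust it
                return size;
            last = sum;
        }
    }
}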
// remove all entries from every segment
public void clear() {
final Segment<K,V>[] segments = this.segments;
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> s = segmentAt(segments, j);
if (s != null)
s.clear();
}
}
final void clear() {
// acquire the segment's lock
lock();
try {
HashEntry<K,V>[] tab = table;
for (int i = 0; i < tab.length ; i++)
// null out each slot
setEntryAt(tab, i, null);
++modCount;
count = 0;
} finally {
// release the lock
unlock();
}
}
// does the map contain the key? (same traversal as get, it just does not need to read the value)
public boolean containsKey(Object key) {
Segment<K,V> s; // same as get() except no need for volatile value read
HashEntry<K,V>[] tab;
int h = hash(key);
long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
(tab = s.table) != null) {
for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
(tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
e != null; e = e.next) {
K k;
if ((k = e.key) == key || (e.hash == h && key.equals(k)))
return true;
}
}
return false;
}
// does the map contain the given value? There is no hash to narrow the search, so every segment has to be scanned, which makes this slower than containsKey. The strategy mirrors size(): scan without locks first, then lock all segments if the unlocked passes do not stabilize.
public boolean containsValue(Object value) {
// Same idea as size()
if (value == null)
throw new NullPointerException();
final Segment<K,V>[] segments = this.segments;
boolean found = false;
long last = 0;
int retries = -1;
try {
outer: for (;;) {
if (retries++ == RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
ensureSegment(j).lock(); // force creation
}
long hashSum = 0L;
int sum = 0;
for (int j = 0; j < segments.length; ++j) {
HashEntry<K,V>[] tab;
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null && (tab = seg.table) != null) {
for (int i = 0 ; i < tab.length; i++) {
HashEntry<K,V> e;
for (e = entryAt(tab, i); e != null; e = e.next) {
V v = e.value;
if (v != null && value.equals(v)) {
found = true;
break outer;
}
}
}
sum += seg.modCount;
}
}
if (retries > 0 && sum == last)
break;
last = sum;
}
} finally {
if (retries > RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
segmentAt(segments, j).unlock();
}
}
return found;
}
public boolean contains(Object value) {
return containsValue(value);
}
// returns a Set view of the keys
public Set<K> keySet() {
Set<K> ks = keySet;
return (ks != null) ? ks : (keySet = new KeySet());
}
final class KeySet extends AbstractSet<K> {
public Iterator<K> iterator() {
return new KeyIterator();
}
public int size() {
return ConcurrentHashMap.this.size();
}
public boolean isEmpty() {
return ConcurrentHashMap.this.isEmpty();
}
public boolean contains(Object o) {
return ConcurrentHashMap.this.containsKey(o);
}
public boolean remove(Object o) {
return ConcurrentHashMap.this.remove(o) != null;
}
public void clear() {
ConcurrentHashMap.this.clear();
}
}
final class KeyIterator
extends HashIterator
implements Iterator<K>, Enumeration<K>
{
public final K next() { return super.nextEntry().key; }
public final K nextElement() { return super.nextEntry().key; }
}
abstract class HashIterator {
int nextSegmentIndex;
int nextTableIndex;
HashEntry<K,V>[] currentTable;
HashEntry<K, V> nextEntry;
HashEntry<K, V> lastReturned;
HashIterator() {
nextSegmentIndex = segments.length - 1;
nextTableIndex = -1;
advance();
}
/**
Starting from the last segment and the last table index, advance to the first non-null entry.
*/
final void advance() {
for (;;) {
if (nextTableIndex >= 0) {
if ((nextEntry = entryAt(currentTable,
nextTableIndex--)) != null)
break;
}
else if (nextSegmentIndex >= 0) {
// walk the segments starting from the last one
Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
if (seg != null && (currentTable = seg.table) != null)
nextTableIndex = currentTable.length - 1;
}
else
break;
}
}
final HashEntry<K,V> nextEntry() {
HashEntry<K,V> e = nextEntry;
if (e == null)
throw new NoSuchElementException();
lastReturned = e; // cannot assign until after null check
// if e.next is null, advance() moves on to the next non-empty bucket or segment
if ((nextEntry = e.next) == null)
advance();
return e;
}
public final boolean hasNext() { return nextEntry != null; }
public final boolean hasMoreElements() { return nextEntry != null; }
public final void remove() {
if (lastReturned == null)
throw new IllegalStateException();
ConcurrentHashMap.this.remove(lastReturned.key);
lastReturned = null;
}
}
// returns a Collection view of the values
public Collection<V> values() {
Collection<V> vs = values;
return (vs != null) ? vs : (values = new Values());
}
final class Values extends AbstractCollection<V> {
public Iterator<V> iterator() {
return new ValueIterator();
}
public int size() {
return ConcurrentHashMap.this.size();
}
public boolean isEmpty() {
return ConcurrentHashMap.this.isEmpty();
}
public boolean contains(Object o) {
return ConcurrentHashMap.this.containsValue(o);
}
public void clear() {
ConcurrentHashMap.this.clear();
}
}
final class ValueIterator
extends HashIterator
implements Iterator<V>, Enumeration<V>
{
public final V next() { return super.nextEntry().value; }
public final V nextElement() { return super.nextEntry().value; }
}
// returns a Set view of the entries (Map.Entry)
public Set<Map.Entry<K,V>> entrySet() {
Set<Map.Entry<K,V>> es = entrySet;
return (es != null) ? es : (entrySet = new EntrySet());
}
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
public Iterator<Map.Entry<K,V>> iterator() {
return new EntryIterator();
}
public boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
V v = ConcurrentHashMap.this.get(e.getKey());
return v != null && v.equals(e.getValue());
}
public boolean remove(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
}
public int size() {
return ConcurrentHashMap.this.size();
}
public boolean isEmpty() {
return ConcurrentHashMap.this.isEmpty();
}
public void clear() {
ConcurrentHashMap.this.clear();
}
}
final class EntryIterator
extends HashIterator
implements Iterator<Entry<K,V>>
{
public Map.Entry<K,V> next() {
HashEntry<K,V> e = super.nextEntry();
return new WriteThroughEntry(e.key, e.value);
}
}
final class WriteThroughEntry
extends AbstractMap.SimpleEntry<K,V>
{
WriteThroughEntry(K k, V v) {
super(k,v);
}
public V setValue(V value) {
if (value == null) throw new NullPointerException();
V v = super.setValue(value);
ConcurrentHashMap.this.put(getKey(), value);
return v;
}
}
// the plain Entry implementation (AbstractMap.SimpleEntry, which WriteThroughEntry extends)
public static class SimpleEntry<K,V>
implements Entry<K,V>, java.io.Serializable
{
private static final long serialVersionUID = -8499721149061103585L;
private final K key;
private V value;
public SimpleEntry(K key, V value) {
this.key = key;
this.value = value;
}
public SimpleEntry(Entry<? extends K, ? extends V> entry) {
this.key = entry.getKey();
this.value = entry.getValue();
}
public K getKey() {
return key;
}
public V getValue() {
return value;
}
public V setValue(V value) {
V oldValue = this.value;
this.value = value;
return oldValue;
}
public boolean equals(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry e = (Map.Entry)o;
return eq(key, e.getKey()) && eq(value, e.getValue());
}
public int hashCode() {
return (key == null ? 0 : key.hashCode()) ^
(value == null ? 0 : value.hashCode());
}
public String toString() {
return key + "=" + value;
}
}
/**
Summary: a ConcurrentHashMap (JDK 7) is essentially an array of arrays of linked lists. The outermost level is the segments array; each segment owns a table of HashEntry buckets, and each bucket is a singly linked list. get takes no lock, because the table slots are read with volatile semantics and the entry fields it depends on are volatile; put first hashes to a segment and then locks only that segment, so writes to different segments proceed in parallel, which is what makes the map more scalable than a fully synchronized table.
*/
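As a closing usage sketch (assuming the standard java.util.concurrent.ConcurrentHashMap; the class name ConcurrentPutDemo is made up): several threads can write concurrently without any external synchronization, since in this JDK 7 design each write locks only one segment and reads never block.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrentPutDemo {
    public static void main(String[] args) throws InterruptedException {
        final ConcurrentHashMap<Integer, Integer> map =
                new ConcurrentHashMap<Integer, Integer>();
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int t = 0; t < 4; t++) {
            final int base = t * 1000;       // disjoint key ranges per thread
            pool.execute(new Runnable() {
                public void run() {
                    for (int i = 0; i < 1000; i++)
                        map.put(base + i, i); // no external locking needed
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println(map.size());       // 4000
    }
}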