HashSet Source Code Analysis

The HashSet Inheritance Hierarchy

HashSet extends AbstractSet<E> and implements the Set<E>, Cloneable, and java.io.Serializable interfaces.

Main Fields

// The HashMap used internally for storage
private transient HashMap<E,Object> map;

// Dummy object stored as the value for every key in the map
private static final Object PRESENT = new Object();
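
This delegation pattern is easy to reproduce: every element becomes a key of the map, and the shared PRESENT object fills the value slot. A minimal sketch of the idea (SimpleSet is a made-up name, not part of the JDK):

import java.util.HashMap;

// A toy Set built on a HashMap, mirroring HashSet's delegation pattern.
public class SimpleSet<E> {
    // Every element is stored as a key; the value is always the same dummy object.
    private final HashMap<E, Object> map = new HashMap<>();
    private static final Object PRESENT = new Object();

    // Returns true only if the element was not present before (put() returns the previous value).
    public boolean add(E e) {
        return map.put(e, PRESENT) == null;
    }

    public boolean contains(Object o) {
        return map.containsKey(o);
    }

    public int size() {
        return map.size();
    }
}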

Constructors

public HashSet() {
    map = new HashMap<>();
}
public HashSet(Collection<? extends E> c) {
    map = new HashMap<>(Math.max((int) (c.size()/.75f) + 1, 16));
    addAll(c);
}
public HashSet(int initialCapacity, float loadFactor) {
    map = new HashMap<>(initialCapacity, loadFactor);
}
public HashSet(int initialCapacity) {
    map = new HashMap<>(initialCapacity);
}
// Package-private constructor, intended for use by LinkedHashSet
HashSet(int initialCapacity, float loadFactor, boolean dummy) {
    map = new LinkedHashMap<>(initialCapacity, loadFactor);
}
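
The Collection constructor sizes the backing map as max((int)(c.size()/.75f) + 1, 16), so all of c can usually be added without an immediate resize under the default 0.75 load factor. A small check of the formula (the numbers and the class name CapacityDemo are only illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class CapacityDemo {
    public static void main(String[] args) {
        // Same formula the Collection constructor uses to size the backing HashMap.
        System.out.println(Math.max((int) (100 / .75f) + 1, 16)); // 134
        // For tiny collections the floor of 16 wins: (int)(3/0.75) + 1 = 5.
        System.out.println(Math.max((int) (3 / .75f) + 1, 16));   // 16

        // Using the constructor itself:
        List<String> source = Arrays.asList("a", "b", "c");
        HashSet<String> set = new HashSet<>(source);
        System.out.println(set.size());                            // 3
    }
}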

Adding Elements

public boolean add(E e) {
    return map.put(e, PRESENT)==null;
}
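
Because HashMap.put() returns the value previously mapped to the key (or null if there was none), add() returns true only when the element was not already in the set. A quick illustration (AddDemo is just an example class name):

import java.util.HashSet;

public class AddDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        System.out.println(set.add("java")); // true  - no previous mapping, put() returned null
        System.out.println(set.add("java")); // false - put() returned the existing PRESENT value
        System.out.println(set.size());      // 1     - duplicates are never stored twice
    }
}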

Removing Elements

public boolean remove(Object o) {
    return map.remove(o)==PRESENT;
}
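
Every key in the backing map is associated with the same PRESENT instance, so comparing the return value of map.remove(o) against PRESENT with == is enough to tell whether the element was actually removed. Seen from the caller's side (RemoveDemo is an illustrative name):

import java.util.HashSet;

public class RemoveDemo {
    public static void main(String[] args) {
        HashSet<Integer> set = new HashSet<>();
        set.add(1);
        System.out.println(set.remove(1)); // true  - map.remove(1) returned PRESENT
        System.out.println(set.remove(1)); // false - map.remove(1) returned null
        System.out.println(set.remove(2)); // false - 2 was never in the set
    }
}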

Querying Elements

public boolean contains(Object o) {
    return map.containsKey(o);
}
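
containsKey() locates the element by its hashCode() and then confirms the match with equals(), so membership is based on value equality rather than reference identity and costs O(1) on average. For instance (ContainsDemo is an illustrative name):

import java.util.HashSet;

public class ContainsDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        set.add("hello");

        // A distinct String object with the same content is still found.
        String other = new String("hello");
        System.out.println(set.contains(other));   // true
        System.out.println(set.contains("world")); // false
    }
}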

Iterating Elements

public Iterator<E> iterator() {
    return map.keySet().iterator();
}
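
Iteration simply walks the key set of the backing map, so elements come back in bucket order, not in insertion order. For example (the output order is typical but not guaranteed):

import java.util.HashSet;
import java.util.Iterator;

public class IterateDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        set.add("banana");
        set.add("apple");
        set.add("cherry");

        // The iterator is exactly map.keySet().iterator().
        Iterator<String> it = set.iterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // order depends on the hash buckets
        }
    }
}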

Full Source Code

package java.util;

import java.io.InvalidObjectException;
import sun.misc.SharedSecrets;

public class HashSet<E>
    extends AbstractSet<E>
    implements Set<E>, Cloneable, java.io.Serializable
{
    static final long serialVersionUID = -5024744406713321676L;

    // Elements are stored as the keys of this internal HashMap
    private transient HashMap<E,Object> map;

    // Dummy object used as the value for every entry in the map; it carries no meaning of its own
    private static final Object PRESENT = new Object();

    /**
     * No-arg constructor
     */
    public HashSet() {
        map = new HashMap<>();
    }

    /**
     * Adds every element of the given collection to this Set.
     *
     * The initial capacity of the backing map is computed from the collection's size.
     */
    public HashSet(Collection<? extends E> c) {
        map = new HashMap<>(Math.max((int) (c.size()/.75f) + 1, 16));
        addAll(c);
    }

    /**
     * Specifies both the initial capacity and the load factor
     */
    public HashSet(int initialCapacity, float loadFactor) {
        map = new HashMap<>(initialCapacity, loadFactor);
    }

    /**
     * Specifies only the initial capacity
     */
    public HashSet(int initialCapacity) {
        map = new HashMap<>(initialCapacity);
    }

    /**
     * Constructor used only by LinkedHashSet.
     *
     * The dummy parameter has no meaning of its own; it exists only to give this
     * constructor a different signature from the public (int, float) one above.
     */
    HashSet(int initialCapacity, float loadFactor, boolean dummy) {
        map = new LinkedHashMap<>(initialCapacity, loadFactor);
    }

    /**
     * Returns an iterator over the elements
     */
    public Iterator<E> iterator() {
        return map.keySet().iterator();
    }

    /**
     * Returns the number of elements
     */
    public int size() {
        return map.size();
    }

    /**
     * Checks whether the set is empty
     */
    public boolean isEmpty() {
        return map.isEmpty();
    }

    /**
     * Checks whether the set contains the given element
     */
    public boolean contains(Object o) {
        return map.containsKey(o);
    }

    /**
     * Adds an element
     */
    public boolean add(E e) {
        return map.put(e, PRESENT)==null;
    }

    /**
     * Removes an element
     */
    public boolean remove(Object o) {
        return map.remove(o)==PRESENT;
    }

    /**
     * Removes all elements
     */
    public void clear() {
        map.clear();
    }

    /**
     * Returns a shallow copy (the backing map is cloned, the elements themselves are not)
     */
    @SuppressWarnings("unchecked")
    public Object clone() {
        try {
            HashSet<E> newSet = (HashSet<E>) super.clone();
            newSet.map = (HashMap<E, Object>) map.clone();
            return newSet;
        } catch (CloneNotSupportedException e) {
            throw new InternalError(e);
        }
    }

    /**
     * Saves this set to a stream (that is, serializes it)
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // Write out any hidden serialization magic
        s.defaultWriteObject();

        // Write out HashMap capacity and load factor
        s.writeInt(map.capacity());
        s.writeFloat(map.loadFactor());

        // Write out size
        s.writeInt(map.size());

        // Write out all elements in the proper order.
        for (E e : map.keySet())
            s.writeObject(e);
    }

    /**
     * Reconstitute the <tt>HashSet</tt> instance from a stream (that is,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        // Read in any hidden serialization magic
        s.defaultReadObject();

        // Read capacity and verify non-negative.
        int capacity = s.readInt();
        if (capacity < 0) {
            throw new InvalidObjectException("Illegal capacity: " +
                                             capacity);
        }

        // Read load factor and verify positive and non NaN.
        float loadFactor = s.readFloat();
        if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        }

        // Read size and verify non-negative.
        int size = s.readInt();
        if (size < 0) {
            throw new InvalidObjectException("Illegal size: " +
                                             size);
        }
        // Set the capacity according to the size and load factor ensuring that
        // the HashMap is at least 25% full but clamping to maximum capacity.
        capacity = (int) Math.min(size * Math.min(1 / loadFactor, 4.0f),
                HashMap.MAXIMUM_CAPACITY);

        // Constructing the backing map will lazily create an array when the first element is
        // added, so check it before construction. Call HashMap.tableSizeFor to compute the
        // actual allocation size. Check Map.Entry[].class since it's the nearest public type to
        // what is actually created.

        SharedSecrets.getJavaOISAccess()
                     .checkArray(s, Map.Entry[].class, HashMap.tableSizeFor(capacity));

        // Create backing HashMap
        map = (((HashSet<?>)this) instanceof LinkedHashSet ?
               new LinkedHashMap<E,Object>(capacity, loadFactor) :
               new HashMap<E,Object>(capacity, loadFactor));

        // Read in all elements in the proper order.
        for (int i=0; i<size; i++) {
            @SuppressWarnings("unchecked")
                E e = (E) s.readObject();
            map.put(e, PRESENT);
        }
    }

    // Splittable iterator, mainly used for parallel traversal (e.g. parallel streams)
    public Spliterator<E> spliterator() {
        return new HashMap.KeySpliterator<E,Object>(map, 0, -1, 0, 0);
    }
}
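
Two details in the full source are worth calling out: clone() also clones the backing map, so the copy's membership is independent of the original, and writeObject()/readObject() persist only the capacity, load factor, size, and elements rather than the map object itself. A quick check of the clone behavior (CloneDemo is an illustrative name):

import java.util.HashSet;

public class CloneDemo {
    public static void main(String[] args) {
        HashSet<String> original = new HashSet<>();
        original.add("a");

        // clone() copies the backing HashMap, so later changes don't propagate.
        @SuppressWarnings("unchecked")
        HashSet<String> copy = (HashSet<String>) original.clone();
        copy.add("b");

        System.out.println(original); // [a]
        System.out.println(copy);     // [a, b] (order not guaranteed)
    }
}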

Summary

  • HashSet stores its elements as the keys of an internal HashMap, which is what guarantees that elements are unique;
  • HashSet is unordered, because the keys of a HashMap are unordered;
  • HashSet allows at most one null element, because HashMap allows a null key;
  • HashSet is not thread-safe;
  • HashSet has no get() method; membership is tested with contains() instead (see the sketch below).
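
A short sketch exercising the points above: a single null element is accepted, membership is checked with contains() because there is no get(), and for concurrent use the set has to be wrapped externally, for example with Collections.synchronizedSet (SummaryDemo is an illustrative name):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SummaryDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        System.out.println(set.add(null));       // true  - one null element is allowed
        System.out.println(set.add(null));       // false - but only one

        // There is no get(); membership is tested with contains().
        System.out.println(set.contains(null));  // true

        // HashSet itself is not thread-safe; wrap it for multi-threaded access.
        Set<String> syncSet = Collections.synchronizedSet(set);
        syncSet.add("a");
        System.out.println(syncSet.size());      // 2
    }
}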