I am writing a Python program to find and remove duplicate files from a folder.
I have multiple copies of mp3 files, and some other files. I am using the sha1 algorithm.
When many large files have to be analyzed (images, mp3 files, pdf documents), it is faster to use the following two-stage comparison algorithm:
- a first, fast hash is computed on the first N bytes of the file (1 MB in the code below). If two such hashes differ, the files are different without doubt, but a match does not prove that the files are identical (accuracy of the hash, limited amount of data read from disk);
- a second, slower and more accurate hash is computed on the whole content of the file, but only if a collision occurs in the first stage.
Here is an implementation of this algorithm:
import hashlib
import os     # used by getFiles below
import types  # used by find_duplicates below

def Checksum(current_file_name, check_type = 'sha512', first_block = False):
    """Computes the hash for the given file. If first_block is True,
    only the first block of size size_block is hashed."""
    size_block = 1024 * 1024  # size of a read block and of the "first block" (1 MB)
    d = {'sha1': hashlib.sha1, 'md5': hashlib.md5, 'sha512': hashlib.sha512}
    if check_type not in d:
        raise Exception("Unknown checksum method")
    with open(current_file_name, 'rb') as f:
        key = d[check_type]()
        while True:
            s = f.read(size_block)
            key.update(s)
            # stop at end of file, or after one block if only the fast hash is wanted
            if len(s) < size_block or first_block:
                break
    return key.hexdigest().upper()
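As a quick illustration (the path is just a placeholder, not part of the program), the two kinds of hashes can be obtained like this:

# minimal usage sketch; '/tmp/song.mp3' is only an example path
quick = Checksum('/tmp/song.mp3', check_type = 'sha1', first_block = True)
full = Checksum('/tmp/song.mp3', check_type = 'sha512')
print 'first-block sha1:', quick
print 'full sha512     :', full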
def find_duplicates(files):
    """Finds duplicates among a set of files.
    The implementation uses two types of hashes:
    - a small and fast one on the first block of the file (the first 1 MB),
    - and, in case of collision, a complete hash of the file. The complete
      hash is never computed twice for the same file.
    At the end, it prints the files that seem to have the same content
    (according to the hash method).
    """
    print 'Analyzing', len(files), 'files'

    # this dictionary maps a small hash to the list of files sharing it
    d = {}

    # this dictionary maps a full hash to the list of files sharing it.
    # It is filled only in case of collision on the small hash (a bucket
    # containing at least two elements)
    duplicates = {}

    for f in files:
        # small hash, to be fast
        check = Checksum(f, first_block = True, check_type = 'sha1')

        if check not in d:
            # d[check] is a list of (file, full_hash) pairs with the same small hash
            d[check] = [(f, None)]
        else:
            l = d[check]
            l.append((f, None))

            for index, (ff, checkfull) in enumerate(l):
                if checkfull is None:
                    # computes the full hash in case of collision
                    checkfull = Checksum(ff, first_block = False)
                    l[index] = (ff, checkfull)

                    # for each new full hash computed, check if there is
                    # a collision in the duplicates dictionary.
                    if checkfull not in duplicates:
                        duplicates[checkfull] = [ff]
                    else:
                        duplicates[checkfull].append(ff)

    # prints the detected duplicates
    if len(duplicates) != 0:
        print
        print "The following files have the same sha512 hash"

        for h, lf in duplicates.items():
            if len(lf) == 1:
                continue
            print 'Hash value', h
            for f in lf:
                print '\t', f.encode('unicode_escape') if \
                    type(f) is types.UnicodeType else f

    return duplicates
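Since the goal of the program is to remove the duplicate files, the returned dictionary can then be used along the following lines. This is only a sketch: remove_duplicates is a hypothetical helper that keeps the first file of each group, and the dry_run flag allows checking what would be deleted before removing anything.

def remove_duplicates(files, dry_run = True):
    """Keeps the first file of each duplicate group and removes the others.
    With dry_run = True, only prints what would be deleted."""
    duplicates = find_duplicates(files)
    for h, lf in duplicates.items():
        if len(lf) < 2:
            continue
        # keep lf[0], remove the remaining copies
        for f in lf[1:]:
            print 'Removing', f
            if not dry_run:
                os.remove(f)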
The find_duplicates function takes a list of files, so it can also be used to compare two directories (for instance, to better synchronize their content). Below is an example of a function that builds such a list of files, filtered by extension, and avoids descending into some directories:
def getFiles(_path, extensions = ['.png'],
             subdirs = False, avoid_directories = None):
    """Returns the list of files under '_path' whose extension is
    in 'extensions'. 'subdirs' indicates if the search should also be
    performed in the subdirectories.
    If extensions is [] or None, all files are returned.
    avoid_directories: if set, do not descend into subdirectories whose
    name matches an element of avoid_directories."""
    l = []
    extensions = [p.lower() for p in extensions] if extensions is not None \
        else None
    for root, dirs, files in os.walk(_path, topdown=True):
        for name in files:
            if extensions is None or len(extensions) == 0 or \
               os.path.splitext(name)[1].lower() in extensions:
                l.append(os.path.join(root, name))
        if not subdirs:
            # do not descend into any subdirectory
            del dirs[:]
        elif avoid_directories is not None:
            for d in avoid_directories:
                if d in dirs:
                    dirs.remove(d)
    return l
This function is convenient for skipping .svn directories, for instance, which would otherwise surely produce colliding files in find_duplicates.
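For instance, to look for duplicate mp3 files across two directories (the paths below are only placeholders), getFiles and find_duplicates can be combined like this:

# the two directory paths are only examples
files = getFiles('/data/music', extensions = ['.mp3'], subdirs = True,
                 avoid_directories = ['.svn'])
files += getFiles('/backup/music', extensions = ['.mp3'], subdirs = True,
                  avoid_directories = ['.svn'])
duplicates = find_duplicates(files)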
Feedback is welcome.