We can tweak the well-known first-fit decreasing algorithm to do this. The trick is to hold back min_chunk_size of capacity as a reserve in the most recently opened chunks: a new chunk starts with only max_chunk_size - min_chunk_size of room, a chunk gets its reserve back once it is no longer among the last two, and if the final chunk ends up below min_chunk_size, its files are folded into the chunk before it, which releases its reserve to make space.
import random

K = 1000
B = 1
KB = K * B
MB = K * KB


class File:
    def __init__(self):
        # Simulated file with a random size between 3 KB and 4 MB.
        self.size = random.randrange(3 * KB, 4 * MB)
class Chunk:
    def __init__(self, max_chunk_size):
        self.free = max_chunk_size
        self.files = []

    def add(self, file):
        # Place the file in this chunk if it fits into the remaining free space.
        if self.free < file.size:
            return False
        self.free -= file.size
        self.files.append(file)
        return True

    def enlarge(self, delta_free):
        # Release reserved capacity so the chunk can hold more data.
        assert delta_free >= 0
        self.free += delta_free

    def size(self):
        return sum(file.size for file in self.files)
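To make the reserve mechanism concrete, here is a quick illustration that is not part of the original listing. It uses a hypothetical FixedFile stand-in with a deterministic size instead of the random File above, and the 15 MB reserve and 150 MB maximum from the test further down:

class FixedFile:
    # Hypothetical stand-in for File with a fixed size, for illustration only.
    def __init__(self, size):
        self.size = size

chunk = Chunk(150 * MB - 15 * MB)          # new chunk with the reserve held back
assert chunk.add(FixedFile(130 * MB))      # fits within the reduced capacity
assert not chunk.add(FixedFile(10 * MB))   # rejected: only 5 MB of free space left
chunk.enlarge(15 * MB)                     # hand the reserve back
assert chunk.add(FixedFile(10 * MB))       # now there is room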
def first_fit_decreasing(files, min_chunk_size, max_chunk_size):
    chunks = []
    for file in sorted(files, key=lambda file: file.size, reverse=True):
        # First fit: put the file into the first existing chunk that has room.
        for existing_chunk in chunks:
            if existing_chunk.add(file):
                break
        else:
            # No existing chunk can take the file: open a new chunk that holds
            # back min_chunk_size of capacity, and give the chunk that is about
            # to drop out of the last two its reserve back.
            if len(chunks) >= 2:
                chunks[-2].enlarge(min_chunk_size)
            new_chunk = Chunk(max_chunk_size - min_chunk_size)
            new_chunk.add(file)
            chunks.append(new_chunk)
    if chunks[-1].size() < min_chunk_size:
        # The last chunk came out too small: release the reserve of the chunk
        # before it and move the last chunk's files into it.
        chunks[-2].enlarge(min_chunk_size)
        for file in chunks[-1].files:
            chunks[-2].add(file)
        del chunks[-1]
    return chunks
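Two implicit assumptions are worth calling out: the final merge indexes chunks[-1] and chunks[-2], which breaks on an empty input or when the total size of all files is below min_chunk_size, and the largest file must fit into max_chunk_size - min_chunk_size, otherwise it is silently dropped when it fails to fit into its own fresh chunk. A defensive wrapper, offered as a sketch rather than part of the original, could check these up front:

def first_fit_decreasing_safe(files, min_chunk_size, max_chunk_size):
    # Hypothetical guard, not in the original: reject inputs that the
    # algorithm above cannot pack into valid chunks.
    if not files:
        return []
    if sum(file.size for file in files) < min_chunk_size:
        raise ValueError("total size of files is below min_chunk_size")
    if max(file.size for file in files) > max_chunk_size - min_chunk_size:
        raise ValueError("a single file is too large to open a new chunk")
    return first_fit_decreasing(files, min_chunk_size, max_chunk_size)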
def test(n):
    files = [File() for _ in range(n)]
    min_chunk_size = 15 * MB
    max_chunk_size = 150 * MB
    chunks = first_fit_decreasing(files, min_chunk_size, max_chunk_size)
    # Every input file must end up in exactly one chunk...
    assert sorted(id(file) for file in files) == sorted(
        id(file) for chunk in chunks for file in chunk.files
    )
    # ...and every chunk must respect the size bounds.
    for chunk in chunks:
        assert min_chunk_size <= chunk.size() <= max_chunk_size
    print(len(chunks), "chunks")
    print(sum(chunk.free for chunk in chunks) / MB, "MB free")


if __name__ == "__main__":
    for _ in range(1000):
        test(150)
    test(10000)
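To run this against real data instead of the synthetic File objects, a small adapter is enough. The DiskFile class and files_from_directory helper below are illustrative assumptions, not part of the original code; they only need to expose a size attribute, which is all first_fit_decreasing looks at:

import os

class DiskFile:
    # Hypothetical File-compatible wrapper around an on-disk file.
    def __init__(self, path, size):
        self.path = path
        self.size = size

def files_from_directory(path):
    # Collect regular files (non-recursively) together with their sizes.
    return [
        DiskFile(entry.path, entry.stat().st_size)
        for entry in os.scandir(path)
        if entry.is_file()
    ]

# Example: chunks = first_fit_decreasing(files_from_directory("."), 15 * MB, 150 * MB)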