Master Python's powerful standard library modules: itertools, functools, and collections.
Unlock the full power of Python's built-in modules for efficient programming.
# itertools for efficient iteration and combinatorics with lazy evaluation.
from itertools import count, cycle, repeat

# count(start, step) - infinite counter; must break out manually.
for i in count(10, 2):
    if i > 20:
        break
    print(i)  # 10, 12, 14, 16, 18, 20

# cycle(iterable) - infinite cycling over the elements.
colors = cycle(['red', 'green', 'blue'])
for _ in range(5):
    print(next(colors))

# repeat(elem, n) - repeat an element n times.
list(repeat('A', 3))  # ['A', 'A', 'A']
from itertools import permutations, combinations, product

# All arrangements (order matters)
list(permutations('ABC', 2))
# [('A','B'), ('A','C'), ('B','A'), ('B','C'), ('C','A'), ('C','B')]

# All combinations (no repetition, order doesn't matter)
list(combinations('ABC', 2))
# [('A','B'), ('A','C'), ('B','C')]

# Cartesian product of two iterables
list(product('AB', '12'))
# [('A','1'), ('A','2'), ('B','1'), ('B','2')]
from itertools import chain, count, groupby, islice, takewhile

# Chain multiple iterables into one sequence.
list(chain([1, 2], [3, 4]))  # [1, 2, 3, 4]

# Slice an (even infinite) iterator.
list(islice(count(), 5))  # [0, 1, 2, 3, 4]

# Group consecutive elements by a key function.
data = [('a', 1), ('a', 2), ('b', 3)]
for key, group in groupby(data, lambda x: x[0]):
    print(key, list(group))
from itertools import accumulate, starmap
import operator

# Running totals (default operation is addition).
list(accumulate([1, 2, 3, 4]))  # [1, 3, 6, 10]

# With a custom binary function.
list(accumulate([1, 2, 3, 4], operator.mul))  # [1, 2, 6, 24]

# Apply a function to pre-zipped (unpacked) argument tuples.
list(starmap(pow, [(2, 3), (3, 2)]))  # [8, 9]

# Tips:
# - Use list() to materialize iterators.
# - combinations_with_replacement allows repeats.
# Sample output:
# Permutations: [('A', 'B'), ('A', 'C'), ('B', 'A'), ...]
Combinations: [('A', 'B'), ('A', 'C'), ('B', 'C')]
Accumulated: [1, 3, 6, 10, 15]

Key takeaways:
- from itertools import permutations, combinations
- permutations('ABC', 2) — order matters
- combinations('ABC', 2) — order doesn't matter
- chain(list1, list2) — combine iterables
- accumulate([1, 2, 3]) — running totals

Review feedback below
# functools for higher-order functions, caching, and function manipulation.
from functools import lru_cache

@lru_cache(maxsize=128)
def fibonacci(n):
    """Return the n-th Fibonacci number, memoized via lru_cache."""
    if n < 2:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)

print(fibonacci(100))  # Instant with caching!
print(fibonacci.cache_info())  # Cache statistics
from functools import reduce

# Fold a list into its sum.
total = reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
print(total)  # 15

# Fold a list into its maximum.
maximum = reduce(lambda x, y: x if x > y else y, [3, 1, 4, 1, 5])
print(maximum)  # 5

# Third argument seeds the fold with an initial value.
product = reduce(lambda x, y: x * y, [1, 2, 3, 4], 10)
print(product)  # 240 (10 * 1 * 2 * 3 * 4)
from functools import partial

def power(base, exponent):
    """Return base raised to exponent."""
    return base ** exponent

# partial pre-binds arguments, creating specialized functions.
square = partial(power, exponent=2)
cube = partial(power, exponent=3)
print(square(5))  # 25
print(cube(3))  # 27

# Partial can mix positional and keyword pre-binding.
def greet(greeting, name, punctuation):
    """Return a formatted greeting string."""
    return f"{greeting}, {name}{punctuation}"

say_hello = partial(greet, "Hello", punctuation="!")
print(say_hello("Alice"))  # Hello, Alice!
from functools import wraps

def my_decorator(func):
    """Decorator that prints before and after each call."""
    @wraps(func)  # Preserves __name__, __doc__
    def wrapper(*args, **kwargs):
        print("Before call")
        result = func(*args, **kwargs)
        print("After call")
        return result
    return wrapper

@my_decorator
def say_hello():
    """Says hello"""
    print("Hello!")

print(say_hello.__name__)  # 'say_hello' (not 'wrapper')
print(say_hello.__doc__)  # 'Says hello'

# Key takeaways:
# - @lru_cache dramatically speeds up recursive functions.
# - reduce is great for folding/aggregating sequences.
# - partial creates specialized versions of functions.
# - Use @wraps in decorators.
# Sample output:
# Fibonacci(30): 832040
Sum: 15
Square of 5: 25

Key takeaways:
- from functools import lru_cache, reduce, partial
- Place @lru_cache(maxsize=128) above a function definition
- reduce(lambda x, y: x + y, [1, 2, 3]) folds a sequence
- square = partial(power, exponent=2) specializes a function
- Use @wraps(func) inside decorators

Review feedback below
# collections module for efficient data handling.
from collections import Counter

# Count occurrences of each element.
words = ['apple', 'banana', 'apple', 'cherry', 'banana', 'apple']
counter = Counter(words)
print(counter)  # Counter({'apple': 3, 'banana': 2, 'cherry': 1})
print(counter.most_common(2))  # [('apple', 3), ('banana', 2)]
counter.update(['apple', 'date'])  # Add more
print(counter['apple'])  # 4
from collections import defaultdict

# list as the default factory: group values by key.
groups = defaultdict(list)
for key, value in [('a', 1), ('b', 2), ('a', 3)]:
    groups[key].append(value)
print(dict(groups))  # {'a': [1, 3], 'b': [2]}

# int as the default factory: counting without key checks.
counts = defaultdict(int)
for char in 'mississippi':
    counts[char] += 1
print(dict(counts))  # {'m': 1, 'i': 4, 's': 4, 'p': 2}
from collections import namedtuple

# Define a named tuple type with attribute access.
Point = namedtuple('Point', ['x', 'y'])
p = Point(3, 4)
print(p.x, p.y)  # 3 4
print(p[0], p[1])  # 3 4 (also indexable)

# defaults apply to the rightmost fields (Python 3.7+).
Person = namedtuple('Person', ['name', 'age', 'city'], defaults=['Unknown'])
alice = Person('Alice', 30)
print(alice)  # Person(name='Alice', age=30, city='Unknown')
from collections import deque

# Double-ended queue: O(1) appends and pops at both ends.
d = deque([1, 2, 3])
d.appendleft(0)  # O(1) prepend
d.append(4)  # O(1) append
print(d)  # deque([0, 1, 2, 3, 4])
d.popleft()  # Remove from left
d.rotate(1)  # Rotate right
print(d)  # deque([4, 1, 2, 3])

# Fixed-size deque: old items fall off the left (sliding window).
recent = deque(maxlen=3)
for i in range(5):
    recent.append(i)
print(recent)  # deque([2, 3, 4])

# Key takeaways:
# - Counter is great for frequency analysis.
# - defaultdict eliminates key existence checks.
# - namedtuple is like a lightweight class.
# - deque is faster than list for left operations.
# Sample output:
# Counter: Counter({'apple': 3, 'banana': 2, 'cherry': 1})
Groups: {'a': [1, 3], 'b': [2]}
Point: Point(x=3, y=4)

Key takeaways:
- from collections import Counter, defaultdict, namedtuple, deque
- Counter(['a', 'b', 'a']) → {'a': 2, 'b': 1}
- Use defaultdict(list) for grouping or defaultdict(int) for counting
- Point = namedtuple('Point', ['x', 'y'])
- deque([1, 2, 3], maxlen=5) for a fixed-size sliding window

Review feedback below