Why Testing Matters
Testing is not optional for professional software development. A bug that reaches production typically costs orders of magnitude more to fix than one caught during development. Tests give you confidence that your code works correctly, freedom to refactor without fear, and executable documentation of how your code is supposed to behave.
Python has excellent testing tools, with pytest being the gold standard. This guide covers everything from writing your first test to building a comprehensive test suite that runs in CI/CD pipelines.
Getting Started with pytest
pytest is the most popular testing framework in Python. It is simple to use, powerful, and extensible.
# Install pytest
# pip install pytest
# calculator.py - The code to test
def add(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
def subtract(a, b):
    """Return a minus b."""
    difference = a - b
    return difference
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def divide(a, b):
    """Return a / b as a float.

    Raises ValueError when b is zero instead of letting ZeroDivisionError
    escape, so callers get a domain-specific error.
    """
    if b != 0:
        return a / b
    raise ValueError("Cannot divide by zero")
# test_calculator.py - The tests
import pytest
from calculator import add, subtract, multiply, divide
def test_add():
    """add() handles positive, negative, and zero operands."""
    cases = [((2, 3), 5), ((-1, 1), 0), ((0, 0), 0)]
    for args, expected in cases:
        assert add(*args) == expected

def test_subtract():
    """subtract() can produce negative results."""
    for a, b, expected in [(5, 3, 2), (0, 5, -5)]:
        assert subtract(a, b) == expected

def test_multiply():
    """multiply() handles sign combinations and the zero annihilator."""
    for a, b, expected in [(3, 4, 12), (-2, 3, -6), (0, 100, 0)]:
        assert multiply(a, b) == expected

def test_divide():
    """divide() always returns a float, even for exact divisions."""
    assert divide(10, 2) == 5.0
    assert divide(7, 2) == 3.5

def test_divide_by_zero():
    """Dividing by zero must raise ValueError with the documented message."""
    with pytest.raises(ValueError, match="Cannot divide by zero"):
        divide(10, 0)
Running Tests
# Run all tests
pytest
# Run with verbose output
pytest -v
# Run a specific test file
pytest test_calculator.py
# Run a specific test function
pytest test_calculator.py::test_add
# Run tests matching a pattern
pytest -k "add or subtract"
# Show print output
pytest -s
# Stop on first failure
pytest -x
# Run last failed tests
pytest --lf
Test Organization
Project Structure
# Recommended project structure
# myproject/
# src/
# myproject/
# __init__.py
# models.py
# services.py
# utils.py
# tests/
# __init__.py
# conftest.py # Shared fixtures
# test_models.py
# test_services.py
# test_utils.py
# integration/
# __init__.py
# test_api.py
# test_database.py
Fixtures: Setup and Teardown
Fixtures provide reusable test setup and teardown logic. They are one of pytest's most powerful features.
import pytest
# conftest.py - Shared fixtures available to all tests
@pytest.fixture
def sample_user():
    """A single admin user record for tests that need one user."""
    user = dict(
        id=1,
        name="Alice",
        email="alice@example.com",
        role="admin",
    )
    return user
@pytest.fixture
def sample_users():
    """Three distinct user records for list-oriented tests."""
    alice = {"id": 1, "name": "Alice", "email": "alice@example.com"}
    bob = {"id": 2, "name": "Bob", "email": "bob@example.com"}
    charlie = {"id": 3, "name": "Charlie", "email": "charlie@example.com"}
    return [alice, bob, charlie]
@pytest.fixture
def temp_database(tmp_path):
    """Yield a sqlite3 connection to a throwaway per-test database file.

    Setup creates the file and a `users` table; teardown (after the yield)
    closes the connection once the test has finished.
    """
    import sqlite3

    connection = sqlite3.connect(str(tmp_path / "test.db"))
    connection.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
    connection.commit()
    yield connection  # the test body runs here
    connection.close()
# Using fixtures in tests
def test_user_has_email(sample_user):
    """The sample user carries an email address containing '@'."""
    email = sample_user["email"]
    assert "@" in email

def test_users_count(sample_users):
    """Exactly three sample users are provided by the fixture."""
    expected_count = 3
    assert len(sample_users) == expected_count

def test_database_insert(temp_database):
    """A row inserted through the fixture connection shows up in a COUNT."""
    temp_database.execute("INSERT INTO users (name) VALUES (?)", ("Alice",))
    temp_database.commit()
    row = temp_database.execute("SELECT COUNT(*) FROM users").fetchone()
    assert row[0] == 1
Parametrized Tests
Run the same test with different inputs to reduce code duplication.
import pytest
def is_palindrome(s):
    """Return True if *s* reads the same both ways, ignoring case and spaces."""
    normalized = s.replace(" ", "").lower()
    return normalized == "".join(reversed(normalized))
# Each tuple is (input string, expected verdict); pytest generates one test
# case per tuple, so a failure names the exact input that broke.
@pytest.mark.parametrize("input_str, expected", [
    ("racecar", True),
    ("hello", False),
    ("A man a plan a canal Panama", True),  # spaces and case are ignored
    ("", True),   # empty string is trivially a palindrome
    ("a", True),  # single character
    ("ab", False),
    ("Madam", True),  # mixed case
])
def test_is_palindrome(input_str, expected):
    """is_palindrome() returns the expected verdict for each sample input."""
    assert is_palindrome(input_str) == expected
# Parametrize with explicit per-case IDs for clearer test output.
# FIX: the original passed BOTH pytest.param(..., id=...) and ids=str; the
# explicit per-param ids take precedence, so ids=str was dead and misleading —
# it has been removed.
@pytest.mark.parametrize("a, b, expected", [
    pytest.param(2, 3, 5, id="positive"),
    pytest.param(-1, -1, -2, id="negative"),
    pytest.param(0, 0, 0, id="zeros"),
    pytest.param(100, -50, 50, id="mixed"),
])
def test_add_parametrized(a, b, expected):
    """add() is exercised across sign combinations, each with a readable id."""
    assert add(a, b) == expected
Mocking and Patching
Mocking lets you replace external dependencies with controlled fake objects. This is essential for testing code that calls APIs, databases, or file systems.
from unittest.mock import patch, MagicMock
import pytest
# The code to test
class WeatherService:
    """Thin client for a remote weather HTTP API."""

    def __init__(self, api_key):
        # Key is interpolated into the request URL on every call.
        self.api_key = api_key

    def get_temperature(self, city):
        """Fetch the current temperature for *city* from the remote API."""
        import requests

        payload = requests.get(
            f"https://api.weather.com/temp?city={city}&key={self.api_key}"
        ).json()
        return payload["temperature"]

    def is_hot(self, city):
        """A city counts as hot strictly above 30 degrees."""
        return self.get_temperature(city) > 30
# Tests with mocking
class TestWeatherService:
    """Unit tests for WeatherService with the network layer mocked out."""

    @patch("requests.get")
    def test_get_temperature(self, mock_get):
        # Arrange: requests.get(...) returns an object whose .json() yields
        # a canned payload, so no real HTTP request is made.
        mock_response = MagicMock()
        mock_response.json.return_value = {"temperature": 25}
        mock_get.return_value = mock_response
        # Act + Assert
        service = WeatherService("fake-key")
        temp = service.get_temperature("London")
        assert temp == 25
        mock_get.assert_called_once()

    @patch.object(WeatherService, "get_temperature")
    def test_is_hot_true(self, mock_temp):
        # 35 is above the >30 threshold used by is_hot.
        mock_temp.return_value = 35
        service = WeatherService("fake-key")
        assert service.is_hot("Dubai") is True

    @patch.object(WeatherService, "get_temperature")
    def test_is_hot_false(self, mock_temp):
        # 15 is below the threshold.
        mock_temp.return_value = 15
        service = WeatherService("fake-key")
        assert service.is_hot("London") is False
Testing Classes and OOP
# shopping_cart.py
class ShoppingCart:
    """In-memory cart of line items, each a dict with name, price, quantity."""

    def __init__(self):
        self.items = []

    def add_item(self, name, price, quantity=1):
        """Append one line item; repeated names create separate entries."""
        entry = {
            "name": name,
            "price": price,
            "quantity": quantity,
        }
        self.items.append(entry)

    def remove_item(self, name):
        """Drop every line item whose name matches exactly."""
        remaining = []
        for entry in self.items:
            if entry["name"] != name:
                remaining.append(entry)
        self.items = remaining

    @property
    def total(self):
        """Cart value: sum of quantity * price over all line items."""
        return sum(entry["quantity"] * entry["price"] for entry in self.items)

    @property
    def item_count(self):
        """Total number of units (not distinct products) in the cart."""
        count = 0
        for entry in self.items:
            count += entry["quantity"]
        return count

    def apply_discount(self, percentage):
        """Permanently reduce every item's price by *percentage* percent.

        Prices are rounded to 2 decimal places after discounting.
        Raises ValueError when percentage lies outside [0, 100].
        """
        if not 0 <= percentage <= 100:
            raise ValueError("Discount must be between 0 and 100")
        factor = 1 - (percentage / 100)
        for entry in self.items:
            entry["price"] = round(entry["price"] * factor, 2)
# test_shopping_cart.py
class TestShoppingCart:
    """Tests for ShoppingCart, grouped in a class so fixtures can be shared."""

    @pytest.fixture
    def cart(self):
        """A fresh, empty ShoppingCart for each test."""
        return ShoppingCart()

    @pytest.fixture
    def cart_with_items(self, cart):
        """A cart pre-loaded with 3 products totalling 9 units / 11.25."""
        cart.add_item("Apple", 1.50, 3)
        cart.add_item("Banana", 0.75, 5)
        cart.add_item("Cherry", 3.00, 1)
        return cart

    def test_empty_cart(self, cart):
        """A new cart reports zero total and zero units."""
        assert cart.total == 0
        assert cart.item_count == 0

    def test_add_item(self, cart):
        """Adding 3 Apples at 1.50 yields 3 units and a 4.50 total."""
        cart.add_item("Apple", 1.50, 3)
        assert cart.item_count == 3
        assert cart.total == 4.50

    def test_remove_item(self, cart_with_items):
        """Removing Banana (5 units) leaves 4 units in the cart."""
        cart_with_items.remove_item("Banana")
        assert cart_with_items.item_count == 4

    def test_total_calculation(self, cart_with_items):
        """Total equals the sum of price * quantity per line item."""
        expected = (1.50 * 3) + (0.75 * 5) + (3.00 * 1)
        assert cart_with_items.total == expected

    def test_apply_discount(self, cart_with_items):
        """A 10% discount shrinks the total by ~10% (approx absorbs per-item rounding)."""
        original_total = cart_with_items.total
        cart_with_items.apply_discount(10)
        assert cart_with_items.total == pytest.approx(original_total * 0.9, rel=1e-2)

    def test_invalid_discount(self, cart_with_items):
        """Discounts above 100% are rejected with ValueError."""
        with pytest.raises(ValueError):
            cart_with_items.apply_discount(150)
Test-Driven Development (TDD)
TDD is a development methodology where you write tests before writing the actual code. The cycle is:
- Red - Write a failing test that describes the desired behavior
- Green - Write the minimum code needed to make the test pass
- Refactor - Clean up the code while keeping all tests passing
TDD forces you to think about the interface and behavior of your code before implementation. This leads to better-designed, more testable code.
Test Coverage
# Install coverage
# pip install pytest-cov
# Run tests with coverage report
# pytest --cov=myproject --cov-report=html
# Example output:
# Name Stmts Miss Cover
# -------------------------------------------
# myproject/__init__.py 0 0 100%
# myproject/models.py 45 3 93%
# myproject/services.py 78 12 85%
# myproject/utils.py 23 0 100%
# -------------------------------------------
# TOTAL 146 15 90%
Aim for 80% or higher coverage, but remember that coverage measures which lines were executed, not whether they were tested correctly. A test that runs code without meaningful assertions gives false confidence.
Best Practices Summary
- Test one thing per test - Each test should verify a single behavior. This makes failures easy to diagnose.
- Use descriptive names - test_user_cannot_login_with_wrong_password is much better than test_login_2.
- Follow AAA pattern - Arrange (setup), Act (execute), Assert (verify).
- Keep tests independent - Tests should not depend on each other or on execution order.
- Test edge cases - Empty inputs, None values, boundary conditions, error cases.
- Run tests in CI/CD - Automate test execution on every commit.
- Mock external dependencies - Do not call real APIs or databases in unit tests.
- Write tests you trust - If you do not trust your tests, they are worthless.
Testing is a skill that improves with practice. Start by testing the critical paths in your application, then gradually increase coverage. A well-tested codebase is a joy to work with, and your future self will thank you for every test you write today.