Skip to content

Commit

Permalink
feat: evaluate why specific candidates were filtered
Browse files Browse the repository at this point in the history
  • Loading branch information
Otto-AA committed Aug 7, 2024
1 parent 02c7159 commit 92b98ba
Show file tree
Hide file tree
Showing 6 changed files with 724 additions and 6 deletions.

Large diffs are not rendered by default.

53 changes: 53 additions & 0 deletions tests/integration/snapshots/snap_test_snapshots.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,3 +76,56 @@
"state_diffs": {"balance": 2577, "code": 3, "nonce": 880, "storage": 2594},
"transactions": 1002,
}

# NOTE(review): auto-generated snapshot (snapshottest) — the expected output of
# test_tod_attack_miner_evaluation. Each entry records which filter removed the
# (tx_a, tx_b) candidate pair, or None if no filter removed it. Presumably
# regenerated via `pytest --snapshot-update`; do not edit by hand — confirm.
snapshots["test_tod_attack_miner_evaluation evaluation results"] = [
    {
        "filter": "indirect_dependencies_recursive",
        "tx_a": "0x1b99d2ee257a00b5196b39534366fde8fded0cd3bfe639f161d12a9ca011adaf",
        "tx_b": "0x9213ae52ca8bf02107081b9340dd0d03ff533ac1040abeb873b686ed6427b303",
    },
    {
        "filter": "recipient_eth_transfer",
        "tx_a": "0x3904535cb0f60bc6a3bf7082d05fb363a4375eb0ca1c85dee827007192b00413",
        "tx_b": "0x6016a7e14ef9b7ecc80985ace338e8894de892a58e941dd45dbb90c729079cb4",
    },
    {
        "filter": "collision",
        "tx_a": "0x40ca117ccc4933dd5b30e399f64673068c622802bdbd728f84b212cd197bf51a",
        "tx_b": "0x31c5b4782a75a1f5f513cd86ede1a8c0af54baa9511982f43a57988036ed0fed",
    },
    {
        "filter": "nonces",
        "tx_a": "0x5e6e4bae8fa62e86b85b2c5f74f67ee97628112e3b12c20f348ace13a2548ca3",
        "tx_b": "0xd884d83c00fa8ddc79306c290988c5b8c81c6fceeae1986add056cd1bf7d5f88",
    },
    {
        "filter": "same_sender",
        "tx_a": "0x6d936541a9e0befab9bf765c153fe1f0b9728dd6a6b6ba67cc757a56d115a2e8",
        "tx_b": "0x112ac55e0122204165ab94bad060ffcee026e4db572f74b3325d56bd0950ade1",
    },
    {
        "filter": None,
        "tx_a": "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "tx_b": "0x1ea1709059406a15686edef98de051fdfb5e854cc0991687b9573cba9005b021",
    },
    {
        "filter": "same-value collision",
        "tx_a": "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "tx_b": "0x65df49728edca9888255b262f082c1a13beaf1dca58bcb8004920b1fbb53e86b",
    },
    {
        "filter": "indirect_dependencies_recursive",
        "tx_a": "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "tx_b": "0x94029b952ede83cd26f48ab40fad24851a517696b2785b631cdacb02795934cf",
    },
    {
        "filter": "block_producers",
        "tx_a": "0x98054dd5b5973d3b953029fb286e416994736313f4f986ba3d11143f4b9fbc72",
        "tx_b": "0xa68290ce02acc90bd3abd5ab1dd7b6fa26dd92d0b4e43fbb29737e8d6d54d926",
    },
    {
        "filter": "indirect_dependencies_quick",
        "tx_a": "0xf812335569705e032f8eca9fff94d3348e29d748bd9ed4e2acd76fe54dbec42d",
        "tx_b": "0xd2b8e8cff425ae336c6084a9d7fc1c7d33d599fdf00c93eda40339e37ca6b12d",
    },
]
62 changes: 61 additions & 1 deletion tests/integration/test_snapshots.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

import pytest
from tod_attack_miner.db.db import DB
from tod_attack_miner.db.filters import (
Expand Down Expand Up @@ -35,3 +34,64 @@ def test_tod_attack_miner_e2e(postgresql: Connection, snapshot: PyTestSnapshotTe
snapshot.assert_match(len(candidates), "num_candidates")
snapshot.assert_match(candidates[0], "first_candidate")
snapshot.assert_match(stats, "stats")


# (tx_a, tx_b) transaction-hash pairs fed into Miner.evaluate_candidates in the
# test below; the snapshot records which filter eliminates each pair.
# NOTE(review): presumably drawn from blocks 19895500-19895504 so the fetched
# data actually contains them — confirm against the recorded VCR cassette.
evaluation_candidates = [
    (
        "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "0x65df49728edca9888255b262f082c1a13beaf1dca58bcb8004920b1fbb53e86b",
    ),
    (
        "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "0x94029b952ede83cd26f48ab40fad24851a517696b2785b631cdacb02795934cf",
    ),
    (
        "0x775232180c49821d4208b3c5470d6367de5b96810c79ae0993aeb98ada762ee8",
        "0x1ea1709059406a15686edef98de051fdfb5e854cc0991687b9573cba9005b021",
    ),
    (
        "0x40ca117ccc4933dd5b30e399f64673068c622802bdbd728f84b212cd197bf51a",
        "0x31c5b4782a75a1f5f513cd86ede1a8c0af54baa9511982f43a57988036ed0fed",
    ),
    (
        "0x98054dd5b5973d3b953029fb286e416994736313f4f986ba3d11143f4b9fbc72",
        "0xa68290ce02acc90bd3abd5ab1dd7b6fa26dd92d0b4e43fbb29737e8d6d54d926",
    ),
    (
        "0x5e6e4bae8fa62e86b85b2c5f74f67ee97628112e3b12c20f348ace13a2548ca3",
        "0xd884d83c00fa8ddc79306c290988c5b8c81c6fceeae1986add056cd1bf7d5f88",
    ),
    (
        "0xf812335569705e032f8eca9fff94d3348e29d748bd9ed4e2acd76fe54dbec42d",
        "0xd2b8e8cff425ae336c6084a9d7fc1c7d33d599fdf00c93eda40339e37ca6b12d",
    ),
    (
        "0x1b99d2ee257a00b5196b39534366fde8fded0cd3bfe639f161d12a9ca011adaf",
        "0x9213ae52ca8bf02107081b9340dd0d03ff533ac1040abeb873b686ed6427b303",
    ),
    (
        "0x6d936541a9e0befab9bf765c153fe1f0b9728dd6a6b6ba67cc757a56d115a2e8",
        "0x112ac55e0122204165ab94bad060ffcee026e4db572f74b3325d56bd0950ade1",
    ),
    (
        "0x3904535cb0f60bc6a3bf7082d05fb363a4375eb0ca1c85dee827007192b00413",
        "0x6016a7e14ef9b7ecc80985ace338e8894de892a58e941dd45dbb90c729079cb4",
    ),
]


@pytest.mark.vcr
def test_tod_attack_miner_evaluation(
    postgresql: Connection, snapshot: PyTestSnapshotTest
):
    """Snapshot-test which filter eliminates each known candidate pair."""
    blocks = BlockRange(19895500, 19895504)
    miner = Miner(RPC(test_provider_url), DB(postgresql))

    # Populate the DB from the recorded RPC responses, then evaluate the
    # fixed candidate list against the filter pipeline.
    miner.fetch(blocks.start, blocks.end)
    miner.find_collisions()
    evaluation = miner.evaluate_candidates(
        get_filters_except_duplicate_limits(3), evaluation_candidates
    )

    snapshot.assert_match(evaluation, "evaluation results")
47 changes: 47 additions & 0 deletions tod_attack_miner/cli.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""CLI interface for tod_attack_miner project."""

from argparse import ArgumentParser, BooleanOptionalAction
import csv
from importlib.metadata import version
import json
from pathlib import Path

import psycopg

Expand Down Expand Up @@ -35,6 +37,17 @@ def main():
action="store_true",
help="Delete data from previous runs before starting to mine",
)
parser.add_argument(
"--evaluate-candidates-csv",
type=Path,
help="If passed, evaluate up to which filter candidates exist",
)
parser.add_argument(
"--evaluation-result-csv",
type=Path,
default=Path("evaluations.csv"),
help="Path, where evaluation results should be stored",
)
parser.add_argument("--postgres-user", type=str, default="postgres")
parser.add_argument("--postgres-password", type=str, default="password")
parser.add_argument("--postgres-host", type=str, default="localhost")
Expand All @@ -48,6 +61,8 @@ def main():
help="Skip data fetching and processing and only output stats",
)
args = parser.parse_args()
evaluate_candidates_csv: Path | None = args.evaluate_candidates_csv
evaluation_results_csv: Path = args.evaluation_result_csv

with psycopg.connect(
f"user={args.postgres_user} password={args.postgres_password} host={args.postgres_host} port={args.postgres_port}"
Expand All @@ -56,6 +71,38 @@ def main():

if args.stats_only:
print(json.dumps(miner.get_stats(args.quick_stats)))
elif evaluate_candidates_csv:
if args.reset_db:
miner.reset_db()
with open(evaluate_candidates_csv, newline="") as csv_file, open(
evaluation_results_csv, "w"
) as results_csv_file:
csv_reader = csv.DictReader(csv_file)
candidates = [(c["tx_a"], c["tx_b"]) for c in csv_reader]
if args.reset_db:
miner.reset_db()
miner.fetch(int(args.from_block), int(args.to_block))
miner.find_collisions()
results = miner.evaluate_candidates(
get_filters_except_duplicate_limits(25)
+ get_filters_duplicate_limits(10),
candidates,
)

csv_writer = csv.DictWriter(
results_csv_file, ["tx_a", "tx_b", "filtered_by"]
)
csv_writer.writeheader()
rows = [
{
"tx_a": c["tx_a"],
"tx_b": c["tx_b"],
"filtered_by": c["filter"] or "",
}
for c in results
]
csv_writer.writerows(rows)
print(f"Saved results to {evaluation_results_csv}")
else:
if args.reset_db:
miner.reset_db()
Expand Down
76 changes: 76 additions & 0 deletions tod_attack_miner/db/db.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
"state_diffs": "(block_number INTEGER, tx_index INTEGER, tx_hash TEXT, type TEXT, key TEXT, pre_value TEXT, post_value TEXT)",
"collisions": "(tx_write_hash TEXT, tx_access_hash TEXT, type TEXT, key TEXT, block_dist INTEGER, PRIMARY KEY(tx_write_hash, tx_access_hash, type, key))",
"candidates": "(tx_write_hash TEXT, tx_access_hash TEXT, PRIMARY KEY(tx_write_hash, tx_access_hash))",
"evaluation_candidates": "(tx_write_hash TEXT, tx_access_hash TEXT, filtered_by TEXT, PRIMARY KEY(tx_write_hash, tx_access_hash))",
"codes": "(addr TEXT, code TEXT, hash TEXT, PRIMARY KEY(addr))",
"skeletons": "(addr TEXT, family TEXT, hash TEXT, PRIMARY KEY(addr))",
}
Expand All @@ -37,6 +38,12 @@ class Candidate(TypedDict):
types: Sequence[ACCESS_TYPE]


class EvaluationCandidate(TypedDict):
    """Evaluation result for one candidate pair: which filter (if any) removed it."""

    # hash of the state-writing transaction (tx_write_hash column)
    tx_a: str
    # hash of the state-accessing transaction (tx_access_hash column)
    tx_b: str
    # name of the filter that removed the pair, or None if it survived all filters
    filter: str | None


class DB:
def __init__(self, conn: psycopg.Connection) -> None:
self._con: psycopg.Connection = conn
Expand Down Expand Up @@ -228,6 +235,14 @@ def insert_state_diff(
)
self._con.commit()

def insert_evaluation_candidates(self, candidates: Iterable[tuple[str, str]]):
    """Store candidate pairs for evaluation, with an empty "not filtered" marker."""
    # The third column is filtered_by; '' means no filter has removed the pair yet.
    rows = [(write_hash, access_hash, "") for write_hash, access_hash in candidates]
    with self._con.cursor() as cursor:
        cursor.executemany(
            "INSERT INTO evaluation_candidates VALUES (%s, %s, %s)", rows
        )
    self._con.commit()

def insert_skeletons(self):
codes = self.get_codes()
mapped_codes = [(addr, code_skeleton_hash(c), hash) for addr, c, hash in codes]
Expand Down Expand Up @@ -341,6 +356,54 @@ def count_candidates_original(self):
"""
return cursor.execute(sql).fetchall()[0][0]

def evaluate_candidates_for_collisions(self):
    """Mark evaluation candidates whose transactions share no state collision.

    Sets filtered_by = 'no collision' for every pair where the accessing
    transaction neither reads (accesses) nor writes (state_diffs) any
    (type, key) that the writing transaction writes.
    """
    with self._con.cursor() as cursor:
        # First NOT EXISTS: no read-write collision (tx_access reads a key
        # that tx_write changes). Second NOT EXISTS: no write-write collision
        # (both transactions change the same key).
        sql = """
            UPDATE evaluation_candidates
            SET filtered_by = 'no collision'
            FROM (
                SELECT c.tx_write_hash, c.tx_access_hash
                FROM evaluation_candidates c
                WHERE NOT EXISTS (
                    SELECT 1
                    FROM accesses
                    INNER JOIN state_diffs
                    ON accesses.type = state_diffs.type
                    AND accesses.key = state_diffs.key
                    AND accesses.tx_hash = tx_access_hash
                    AND state_diffs.tx_hash = tx_write_hash
                ) AND NOT EXISTS (
                    SELECT 1
                    FROM state_diffs s1
                    INNER JOIN state_diffs s2
                    ON s1.type = s2.type
                    AND s1.key = s2.key
                    AND s1.tx_hash = tx_access_hash
                    AND s2.tx_hash = tx_write_hash
                )
            ) x
            WHERE evaluation_candidates.tx_write_hash = x.tx_write_hash
            AND evaluation_candidates.tx_access_hash = x.tx_access_hash
        """
        cursor.execute(sql)
    self._con.commit()

def update_filtered_evaluation_candidates(self, filter_name: str):
    """Attribute newly removed candidate pairs to *filter_name*.

    Every evaluation candidate that has not yet been attributed to a filter
    (filtered_by = '') and no longer appears in the candidates table is
    marked as removed by *filter_name*.
    """
    # Bind filter_name as a query parameter instead of f-string interpolation:
    # interpolation breaks the statement (and is injectable) as soon as the
    # name contains a single quote, and forced a `# type: ignore` before.
    sql = """
        UPDATE evaluation_candidates
        SET filtered_by = %s
        WHERE filtered_by = ''
        AND NOT EXISTS (
            SELECT 1
            FROM candidates
            WHERE candidates.tx_write_hash = evaluation_candidates.tx_write_hash
            AND candidates.tx_access_hash = evaluation_candidates.tx_access_hash
        )
    """
    with self._con.cursor() as cursor:
        cursor.execute(sql, (filter_name,))
    self._con.commit()

def count_transactions(self) -> int:
with self._con.cursor() as cursor:
return cursor.execute("SELECT COUNT(*) FROM transactions").fetchall()[0][0]
Expand Down Expand Up @@ -426,6 +489,19 @@ def get_candidates(self) -> Sequence[Candidate]:
for tx_a, tx_b, block_dist, types in candidates
]

def get_evaluation_candidates(self) -> Sequence[EvaluationCandidate]:
    """Return all evaluation results, ordered by (tx_write_hash, tx_access_hash).

    Maps DB columns to the EvaluationCandidate shape: tx_write_hash -> tx_a,
    tx_access_hash -> tx_b, and filtered_by -> filter ('' becomes None).
    """
    with self._con.cursor() as cursor:
        sql = "SELECT tx_write_hash, tx_access_hash, filtered_by FROM evaluation_candidates ORDER BY tx_write_hash, tx_access_hash"
        # fetchall() (consistent with the other getters) instead of lazily
        # iterating the cursor; also avoid shadowing the builtin `filter`.
        rows: Sequence[tuple[str, str, str]] = cursor.execute(sql).fetchall()
        return [
            {
                "tx_a": tx_a,
                "tx_b": tx_b,
                "filter": filtered_by or None,
            }
            for tx_a, tx_b, filtered_by in rows
        ]

def get_accesses_stats(self):
return dict(
self._con.cursor()
Expand Down
25 changes: 20 additions & 5 deletions tod_attack_miner/miner/miner.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from typing import Callable, Sequence
from tod_attack_miner.db.db import DB, Candidate
from typing import Callable, Iterable, Sequence
from tod_attack_miner.db.db import DB, Candidate, EvaluationCandidate
from tod_attack_miner.fetcher.fetcher import BlockRange, fetch_block_range
from tod_attack_miner.rpc.rpc import RPC

# A filter pipeline: (name, callable) pairs where the callable prunes
# candidates in the DB — presumably returning how many it removed, given the
# count is stored under _filter_stats["filtered"]; TODO confirm.
Filters = Sequence[tuple[str, Callable[[DB], int]]]


class Miner:
def __init__(self, rpc: RPC, db: DB) -> None:
Expand All @@ -25,13 +27,26 @@ def find_collisions(self) -> None:
self.db.insert_candidates()
self._original_collisions = self.db.get_collisions_stats()

def filter_candidates(
self, filters: Sequence[tuple[str, Callable[[DB], int]]]
) -> None:
def filter_candidates(self, filters: Filters) -> None:
    """Apply each filter in order, recording per-filter removal counts."""
    stats = self._filter_stats
    stats["candidates"]["before_filters"] = self.db.count_candidates()
    for filter_name, apply_filter in filters:
        stats["filtered"][filter_name] = apply_filter(self.db)
    stats["candidates"]["final"] = self.db.count_candidates()

def evaluate_candidates(
    self, filters: Filters, candidates: Iterable[tuple[str, str]]
) -> Iterable[EvaluationCandidate]:
    """Run the filter pipeline and report which filter removed each pair.

    Candidates without a same-value collision are attributed first; after
    each subsequent filter runs, any pair that disappeared from the
    candidates table is attributed to that filter's name.
    """
    self.db.insert_evaluation_candidates(candidates)
    self.db.evaluate_candidates_for_collisions()
    self.db.update_filtered_evaluation_candidates("same-value collision")

    stats = self._filter_stats
    stats["candidates"]["before_filters"] = self.db.count_candidates()
    for filter_name, apply_filter in filters:
        stats["filtered"][filter_name] = apply_filter(self.db)
        self.db.update_filtered_evaluation_candidates(filter_name)
    stats["candidates"]["final"] = self.db.count_candidates()

    return self.db.get_evaluation_candidates()

def count_candidates(self) -> int:
    """Return the number of candidate pairs currently stored in the database."""
    return self.db.count_candidates()
Expand Down

0 comments on commit 92b98ba

Please sign in to comment.