diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 00000000..9f42b3ac
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,18 @@
+FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+    apt-get install -y python3 python3-pip git htop vim
+
+# Make sure to clone the repo recursively (qtree is a submodule) before building
+WORKDIR /app
+RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
+RUN pip install --no-binary pynauty pynauty
+# Run the commands below after the container starts; the volume is not mounted at build time
+# RUN cd qtree && pip install .
+# RUN pip install .
+RUN pip install pdbpp
+RUN pip install tensornetwork
+
+ENTRYPOINT ["bash"]
\ No newline at end of file
diff --git a/dev.sh b/dev.sh
new file mode 100755
index 00000000..729e3c14
--- /dev/null
+++ b/dev.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+docker build -f Dockerfile.dev -t dev .
+docker run -v "$(pwd)":/app -it dev
\ No newline at end of file
diff --git a/qtensor/Simulate.py b/qtensor/Simulate.py
index 0e271bd1..c2925031 100644
--- a/qtensor/Simulate.py
+++ b/qtensor/Simulate.py
@@ -155,3 +155,77 @@ def simulate(self, qc, **params):
         sim = cirq.Simulator(**params)
         return sim.simulate(qc)
 
+if __name__ == "__main__":
+    import networkx as nx
+    import numpy as np
+    import tensornetwork as tn
+
+    G = nx.random_regular_graph(3, 10)
+    gamma, beta = [np.pi/3], [np.pi/2]
+
+    from qtensor import QtreeQAOAComposer, QAOAQtreeSimulator
+    composer = QtreeQAOAComposer(graph=G, gamma=gamma, beta=beta)
+    composer.ansatz_state()
+
+    sim = QAOAQtreeSimulator(composer)
+
+    # run simulate_batch to populate the simulator's internal tensor network (buckets)
+    sim.simulate_batch(composer.circuit)
+    buckets = sim.tn.buckets
+
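+    # A toy illustration (hedged sketch, not qtensor API): in the
+    # tensornetwork library, "squaring" means wiring every dangling edge of
+    # a node to the matching dangling edge of its conjugate; contracting the
+    # pair then gives <psi|psi>. The _demo names below are illustrative only.
+    _demo = tn.Node(np.random.rand(2, 2, 2))
+    _demo_conj = tn.Node(np.conj(_demo.tensor))
+    _norm2 = np.sum(np.abs(_demo.tensor) ** 2)
+    for _e, _e_conj in zip(_demo.get_all_dangling(), _demo_conj.get_all_dangling()):
+        tn.connect(_e, _e_conj)
+    assert np.isclose(tn.contractors.greedy([_demo, _demo_conj]).tensor, _norm2)
+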
+    # now use these buckets to square the TN
+    def conj(buckets):
+        # turn each bucket into a node (assumes each bucket is array-like;
+        # qtree buckets may need an explicit conversion first)
+        nodes = []
+        for bucket in buckets:
+            node = tn.Node(np.array(bucket))
+            nodes.append(node)
+
+        # for each node, build its conjugate node
+        conj_nodes = []
+        for node in nodes:
+            conj_tensor = np.conj(node.tensor)
+            conj_nodes.append(tn.Node(conj_tensor))
+
+        # wire each node to its conjugate along the open (dangling) edges;
+        # note that tn.connect joins two dangling *edges*, not two nodes
+        for node, conj_node in zip(nodes, conj_nodes):
+            for e, e_conj in zip(node.get_all_dangling(), conj_node.get_all_dangling()):
+                tn.connect(e, e_conj)
+
+        # TODO: TNAdapter should support tensornetwork.Node
+        # so that we can contract this resulting tensor network directly
+        return nodes + conj_nodes
+
+    tn_with_conj = conj(buckets)
+
+    # TODO: contract or sample using tn_with_conj based on method in other branch
+
+    log.debug('hello world')
+    import pdb; pdb.set_trace()
\ No newline at end of file
diff --git a/scratchpad/tn_api/test_tn_api.py b/scratchpad/tn_api/test_tn_api.py
new file mode 100644
index 00000000..a7ec7d0c
--- /dev/null
+++ b/scratchpad/tn_api/test_tn_api.py
@@ -0,0 +1,51 @@
+import numpy as np
+from tn import TensorNetwork
+
+def test_add_numpy_array():
+    a = TensorNetwork()
+    t = np.random.randn(2, 2)
+    a.add(t)
+    b = TensorNetwork()
+    b.add(a)
+    assert b == a
+
+
+def test_composition():
+    """
+    tensor network addition is associative
+    """
+    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
+    stack = TensorNetwork()
+    # left fold: (((0+A)+B)+C)+D
+    for tn in tns:
+        stack.add(tn)
+    # right fold: A+(B+(C+D))
+    for i in range(len(tns) - 1):
+        l = tns[len(tns)-2-i]
+        r = tns[len(tns)-1-i]
+        l.add(r)
+
+    assert stack == tns[0]
+
+def test_edges_consistent_ports():
+    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
+    tn = TensorNetwork()
+    # left fold: (((0+A)+B)+C)+D
+    for t in tns:
+        tn.add(t)
+
+    # every tensor should have exactly one port per index
+    port_data = {}
+    for e in tn._edges:
+        for p in e:
+            port_data[p.tensor_ref] = port_data.get(p.tensor_ref, [])
+            port_data[p.tensor_ref].append(p.ix)
+    for i, t in enumerate(tn._tensors):
+        assert len(t.shape) == len(port_data[i])
+
+
+if __name__ == "__main__":
+    test_add_numpy_array()
+    test_edges_consistent_ports()
+    test_composition()
diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
new file mode 100644
index 00000000..aed4ceca
--- /dev/null
+++ b/scratchpad/tn_api/tn.py
@@ -0,0 +1,322 @@
+import numpy as np
+import math
+import string
+from dataclasses import dataclass
+from typing import TypeVar, Generic, Iterable, Tuple
+
+class Array(np.ndarray):
+    shape: tuple
+
+D = TypeVar('D')  # tensor data type (numpy, torch, etc.)
+
+CHARS = string.ascii_lowercase + string.ascii_uppercase
+
+N = TypeVar('N', bound=np.ndarray)
+
+@dataclass
+class Port:
+    tensor_ref: int
+    ix: int
+
+@dataclass
+class ContractionInfo:
+    # default allows the placeholder ContractionInfo() below
+    result_indices: Iterable[int] = ()
+
+class TensorNetworkIFC(Generic[D]):
+    def __init__(self, *args, **kwargs):
+        ...
+
+    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
+        return ContractionInfo()
+
+    # slice, not in place
+    def slice(self, slice_dict: dict) -> 'TensorNetwork':
+        ...
+
+    # contract to produce a new tensor
+    def contract(self, contraction_info: ContractionInfo) -> D:
+        ...
+
+    def copy(self):
+        ...
+
+    def add(self, other: "TensorNetworkIFC[D] | D"):
+        ...
+
+    @classmethod
+    def new_random_cpu(cls, count: int, size: int, dim: int) -> 'TensorNetworkIFC[D]':
+        ...
+
+    def __eq__(self, other):
+        ...
+
+
+class TensorNetwork(TensorNetworkIFC[np.ndarray]):
+    tensors: Iterable[np.ndarray]
+    shape: tuple
+    edges: tuple
+
+    def __init__(self, *args, **kwargs):
+        self._tensors = []
+        self._edges = tuple()
+        self.shape = tuple()
+
+    # slice, not in place
+    def slice(self, slice_dict: dict) -> 'TensorNetwork':
+        tn = self.copy()
+        new_edge_list = []
+        # pop higher edge indices first so earlier pops don't shift later ones
+        for idx, slice_val in sorted(slice_dict.items(), reverse=True):
+            # skip out-of-range edge indices
+            if idx >= len(tn._edges):
+                continue
+
+            edge_list = list(tn._edges)
+            edge = edge_list.pop(idx)
+            # put the remaining edges back on the copy
+            tn._edges = tuple(edge_list)  # TODO @dallon - is this the issue, that i'm getting rid of all of the edges?
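+            # Possible root cause (hedged guess): popping the edge removes the
+            # sliced index from the einsum expression entirely, which matches
+            # the empty-edges failure recorded at the bottom of this file. When
+            # slice_val keeps a range (e.g. slice(0, 2)) rather than fixing a
+            # single value, the index survives and its edge probably should too.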
+            # get all tensors indexed by this edge
+            tensors_to_slice = set(port.tensor_ref for port in edge)
+            # store the slice index and value for each tensor
+            local_slices_dict = {}
+            for current_tensor_ref in tensors_to_slice:
+                # per-tensor dict, named so it doesn't shadow the slice_dict argument
+                tensor_slice_dict = {}  # TODO: make sure this handles multiple ports pointing to the same tensor
+                # get all ports for the current tensor
+                current_tensor_ref_ports = [port for port in edge if port.tensor_ref == current_tensor_ref]
+                for current_port in current_tensor_ref_ports:
+                    tensor_slice_dict[current_port.ix] = slice_val
+                # store the slice params for this tensor in the local dict
+                local_slices_dict[current_tensor_ref] = tensor_slice_dict
+
+            # now use the local slice dict to slice each tensor
+            for current_tensor_ref, tensor_slice_dict in local_slices_dict.items():
+                slice_bounds = []
+                current_tensor = tn._tensors[current_tensor_ref]
+                for dim_ix in range(current_tensor.ndim):
+                    slice_bounds.append(tensor_slice_dict.get(dim_ix, slice(None)))
+                tn._tensors[current_tensor_ref] = current_tensor[tuple(slice_bounds)]
+
+            # TODO: this is just a guess - add the ports from the popped edge
+            # back so sliced-but-not-fixed indices keep their edges
+            # for port in edge:
+            #     if port.ix in tensor_slice_dict and port.tensor_ref == current_tensor_ref:
+            #         new_edge_list.append(Port(tensor_ref=current_tensor_ref, ix=port.ix))
+            # # update the tensor network's edges with the new edges
+            # tn._edges = tuple(new_edge_list)
+
+        return tn
+
+    def copy(self):
+        new = TensorNetwork()
+        new._tensors = self._tensors[:]
+        new._edges = self._edges[:]
+        new.shape = self.shape[:]
+        return new
+
+    def add(self, other: "TensorNetwork | np.ndarray"):
+        if not isinstance(other, TensorNetwork):
+            self._tensors.append(other)
+            self.shape = self.shape + other.shape
+        else:
+            m = len(self._tensors)
+            n = len(self.shape)
+            # -- other's edges will refer to shifted tensor locations
+            enew = []
+            for e in other._edges:
+                e_ = []
+                for p in e:
+                    if p.tensor_ref == -1:
+                        # tensor_ref == -1 marks the network's own open indices
+                        e_.append(Port(tensor_ref=-1, ix=p.ix+n))
+                    else:
+                        e_.append(Port(tensor_ref=p.tensor_ref+m, ix=p.ix))
+                enew.append(tuple(e_))
+
+            self._edges += tuple(enew)
+            self._tensors += other._tensors
+            self.shape += other.shape
+
+    def _get_random_indices_to_contract(self, count=2):
+        import random
+        # pick up to `count` distinct edge indices at random
+        return sorted(random.sample(range(len(self._edges)), min(count, len(self._edges))))
+
+    # contract to produce a new tensor
+    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
+        einsum_expr = self._get_einsum_expr(contraction_info)
+        return np.einsum(einsum_expr, *self._tensors)
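+    # Worked example for _get_einsum_expr below (illustrative edge layout):
+    #   edges = ((Port(0, 1), Port(1, 0)),   # edge 0 -> 'a'
+    #            (Port(0, 0),),              # edge 1 -> 'b'
+    #            (Port(1, 1),))              # edge 2 -> 'c'
+    # Tensor 0 carries edge 1 at ix 0 and edge 0 at ix 1 -> subscript 'ba';
+    # tensor 1 carries edge 0 at ix 0 and edge 2 at ix 1 -> subscript 'ac'.
+    # With result_indices = (1, 2) the expression becomes "ba,ac->bc".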
+    # for reference, see qtensor/contraction_backends/numpy.py -> get_einsum_expr
+    def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
+        # map each tensor index to the edge indices it touches, sorted by
+        # port.ix, s.t. we can look up tix -> tuple(edge indices)
+        t_ref_to_edges = {}
+        # TODO: could build this in a single pass by looping over edges
+        for t_idx in range(len(self._tensors)):
+            connected_edges = []
+            for edge_index, edge in enumerate(self._edges):
+                for port in edge:
+                    if port.tensor_ref == t_idx:
+                        connected_edges.append((edge_index, port.ix))
+            # sort by port ix, then keep only the global edge index
+            connected_edges_sorted = sorted(connected_edges, key=lambda x: x[1])
+            t_ref_to_edges[t_idx] = [edge_index for edge_index, port_ix in connected_edges_sorted]
+
+        # each edge index maps to one subscript character: i:0, j:1, k:2, ...
+        edge_to_char = {i: CHARS[i] for i in range(len(self._edges))}
+
+        substrs_to_join = []
+        for t_idx in range(len(self._tensors)):
+            substrs_to_join.append(''.join(edge_to_char[edge_idx] for edge_idx in t_ref_to_edges[t_idx]))
+
+        for ix in contraction_info.result_indices:
+            if ix not in edge_to_char:
+                raise ValueError("result_indices contains an invalid edge index")
+
+        # e.g. np.einsum('ijk,jil->jkl', a, b)
+        expr = ','.join(substrs_to_join) + '->' + ''.join(edge_to_char[ix] for ix in contraction_info.result_indices)
+        return expr
+
+    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
+        raise NotImplementedError()
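+    # One plausible direction for optimize() (a sketch, not wired up): feed
+    # the expression from _get_einsum_expr to opt_einsum, which the dev
+    # image already installs, and derive ContractionInfo from the best path:
+    #   import opt_einsum as oe
+    #   expr = self._get_einsum_expr(ContractionInfo(result_indices=()))
+    #   path, path_info = oe.contract_path(expr, *self._tensors)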
+    @classmethod
+    def new_random_cpu(cls, count, size, dim: int):
+        out = cls()
+        for i in range(count):
+            t: np.ndarray = np.random.random((dim, )*size)
+            out.add(t)
+        # arbitrary number of output indices
+        out_dims = np.random.randint(low=0, high=len(out.shape))
+        tensor_dims = len(out.shape)
+        out.shape = (dim, )*out_dims
+        # -- random connectivity
+        # A hypergraph can be generated as a partition of the ports into E
+        # parts, where E is the number of edges. Isolated vertices are
+        # equivalent to vertices whose single edge contains only them.
+        # arbitrary max number of edges; must be less than the total number of indices
+        edges_cnt = np.random.randint(low=1, high=tensor_dims+out_dims)
+        # the partition is implemented with a random assignment function
+        partition_fn = lambda: np.random.randint(low=0, high=edges_cnt)
+        partition_dict = {}
+        for t_ref, t in enumerate(out._tensors):
+            for i in range(t.ndim):
+                eix = partition_fn()
+                new_port = Port(tensor_ref=t_ref, ix=i)
+                partition_dict[eix] = partition_dict.get(eix, [])
+                partition_dict[eix].append(new_port)
+
+        # add "self" (open) tensor indices to the partition
+        # commented out to debug the einsum error recorded at the bottom of this file
+        # TODO: fix the einsum issue and re-enable
+        # for i in range(len(out.shape)):
+        #     eix = partition_fn()
+        #     new_port = Port(tensor_ref=-1, ix=i)
+        #     partition_dict[eix] = partition_dict.get(eix, [])
+        #     partition_dict[eix].append(new_port)
+
+        edges = []
+        for i in range(edges_cnt):
+            p = partition_dict.get(i)
+            if p is not None:
+                edges.append(tuple(p))
+        out._edges = tuple(edges)
+        return out
+
+    def __eq__(self, other):
+        if self.shape != other.shape:
+            return False
+        if self._edges != other._edges:
+            return False
+        return all((a == b).all() for a, b in zip(self._tensors, other._tensors))
+
+    def __repr__(self):
+        return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"
+
+
+if __name__ == "__main__":
+    dim = 3
+    tn = TensorNetwork.new_random_cpu(2, dim, 4)
+    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
+    sliced_tn = tn.slice(slice_dict)
+    print(len(sliced_tn._edges))
+    import pdb; pdb.set_trace()
+    # TODO: step through slice() in the debugger to make sure edges sharing a port aren't skipped
+    # TODO: so that contract() can run on a sliced tn without breaking
+    """
+    # TODO: the issue is that slicing results in no edges:
+    ,->                     # einsum expression
+    [(2, 2, 2), (2, 2, 2)]  # tensor shapes
+    ()                      # edges
+    2                       # tensor count
+    """
+
+    # can also do "contract all except ..." by knowing the indices of edges in the tn
+    # generate random edge indices to contract
+    random_indices_to_contract = sliced_tn._get_random_indices_to_contract(2)
+    contraction_info = ContractionInfo(tuple(random_indices_to_contract))
+    print("starting non-sliced tensor")
+    contracted_tensor_not_sliced = tn.contract(contraction_info)
+    print("finished non-sliced tensor")
+    print("starting sliced tensor")
+    contracted_tensor = sliced_tn.contract(contraction_info)
+    print("finished sliced tensor")
+    import pdb; pdb.set_trace()
+
+# Recorded failure (kept for reference): edges holding tensor_ref=-1 ports
+# contribute output subscripts that no input tensor carries, so einsum
+# rejects the expression:
+"""
+dae,dca->be
+[(4, 4, 4), (4, 4, 4)]
+((Port(tensor_ref=0, ix=1), Port(tensor_ref=1, ix=2)), (Port(tensor_ref=-1, ix=2), Port(tensor_ref=-1, ix=3)), (Port(tensor_ref=1, ix=1),), (Port(tensor_ref=0, ix=0), Port(tensor_ref=1, ix=0)), (Port(tensor_ref=0, ix=2), Port(tensor_ref=-1, ix=1)), (Port(tensor_ref=-1, ix=0),))
+2
+--Return--
+[1] > /app/scratchpad/tn_api/tn.py(160)contract()->None
+-> import pdb; pdb.set_trace()
+(Pdb++) np.einsum(einsum_expr, *self._tensors)
+*** ValueError: einstein sum subscripts string included output subscript 'b' which never appeared in an input
+"""
\ No newline at end of file