diff --git a/implementation.typ b/implementation.typ
index 0cbde7d..c3ad1d8 100644
--- a/implementation.typ
+++ b/implementation.typ
@@ -1,4 +1,89 @@
+#import "@preview/fletcher:0.5.3" as fletcher: diagram, node, edge
+#import fletcher.shapes: rect, diamond
+#import "utils.typ": todo
+
= Implementation
+The three methods described (ResNet50, CAML, P>M>F) were implemented in a Jupyter notebook and compared with each other.
+
+== Experiments
+For each of the three methods the following use cases are tested: #todo[maybe write more to each test]
+- Detection of the anomaly class (1, 3, 5 shots)
+- Imbalanced target class prediction (5, 10, 15, 30 good shots, 5 bad shots)
+- 2-way classification (1, 3, 5 shots)
+- Imbalanced 2-way classification (5, 10, 15, 30 good shots, 5 bad shots)
+- Detection of only the anomaly classes (1, 3, 5 shots)
+
+These experiments were conducted on the bottle and cable classes of the MVTec AD dataset.
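+
+To give an impression of how these settings can be organized in code, a small configuration sketch is shown below (the names and structure are illustrative and not the actual notebook code):
+
+```python
+# Illustrative sketch of the tested shot configurations; the names are
+# assumptions, not the actual notebook code.
+SHOT_SETTINGS = {
+    "anomaly_detection":    {"shots": [1, 3, 5]},
+    "imbalanced_target":    {"good_shots": [5, 10, 15, 30], "bad_shots": 5},
+    "two_way":              {"shots": [1, 3, 5]},
+    "imbalanced_two_way":   {"good_shots": [5, 10, 15, 30], "bad_shots": 5},
+    "anomaly_classes_only": {"shots": [1, 3, 5]},
+}
+MVTEC_CLASSES = ["bottle", "cable"]
+```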
+
+== ResNet50
+=== Approach
+The simplest approach is to use a pre-trained ResNet50 model as a feature extractor.
+Features are extracted from both the support and the query set to obtain a lower-dimensional representation of the images.
+The support set embeddings are then compared to the query set embeddings.
+To predict the class of a query, the class whose support embedding is closest to the query embedding is chosen.
+If a class has more than one support embedding, the mean of these embeddings is used as the class center.
+This approach is similar to a prototypical network @snell2017prototypicalnetworksfewshotlearning.
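+
+As an illustration, a minimal PyTorch sketch of this prototype-based prediction could look as follows (the function and variable names are illustrative, not the exact notebook code):
+
+```python
+import torch
+
+def predict_by_class_center(support_emb, support_labels, query_emb):
+    # Build one prototype (class center) per class as the mean of its
+    # support embeddings.
+    classes = support_labels.unique()
+    prototypes = torch.stack(
+        [support_emb[support_labels == c].mean(dim=0) for c in classes]
+    )
+    # Euclidean distances between every query embedding and every prototype.
+    dists = torch.cdist(query_emb, prototypes)  # shape: (n_query, n_classes)
+    # Assign each query to the class with the closest prototype.
+    return classes[dists.argmin(dim=1)]
+```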
+
+In this bachelor thesis, a pre-trained ResNet50 (IMAGENET1K_V2) PyTorch model was used.
+It is pre-trained on the ImageNet dataset and consists of 50 layers organized into residual blocks.
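+
+A sketch of how such a model can be loaded with torchvision is shown below (the exact notebook code may differ slightly):
+
+```python
+from torchvision.models import resnet50, ResNet50_Weights
+
+# Load the ResNet50 with the IMAGENET1K_V2 pre-trained weights.
+weights = ResNet50_Weights.IMAGENET1K_V2
+model = resnet50(weights=weights)
+model.eval()
+
+# The weights ship with the matching preprocessing pipeline
+# (resize, crop, normalization).
+preprocess = weights.transforms()
+```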
+
+To obtain the embeddings, the last layer of the model was removed and the output of the second-to-last layer was used as the embedding.
+The diagram below visualizes the ResNet50 architecture and marks the cut-off point.
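+
+One simple way to realize this cut in PyTorch is to replace the final fully connected layer with an identity mapping, so the model returns the pooled 2048-dimensional features instead of the 1000 ImageNet logits (a sketch building on the model loaded above, not the exact notebook code):
+
+```python
+import torch
+import torch.nn as nn
+
+# Replace the classification head so forward() yields the embedding.
+model.fc = nn.Identity()
+
+with torch.no_grad():
+    dummy = torch.randn(1, 3, 224, 224)  # one dummy RGB image
+    embedding = model(dummy)             # shape: (1, 2048)
+```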
+
+#diagram(
+ spacing: (5mm, 5mm),
+ node-stroke: 1pt,
+ node-fill: eastern,
+ edge-stroke: 1pt,
+
+ // Input
+ node((1, 1), "Input", shape: rect, width: 30mm, height: 10mm, name: <input>),
+
+ // Conv1
+ node((1, 0), "Conv1\n7x7, 64", shape: rect, width: 30mm, height: 15mm, name: <conv1>),
+ edge(<input>, <conv1>, "->"),
+
+ // MaxPool
+ node((1, -1), "MaxPool\n3x3", shape: rect, width: 30mm, height: 15mm, name: <maxpool>),
+ edge(<conv1>, <maxpool>, "->"),
+
+ // Residual Blocks
+ node((3, -1), "Residual Block 1\n3x [64, 64, 256]", shape: rect, width: 40mm, height: 15mm, name: <res1>),
+ edge(<maxpool>, <res1>, "->"),
+
+ node((3, 0), "Residual Block 2\n4x [128, 128, 512]", shape: rect, width: 40mm, height: 15mm, name: <res2>),
+ edge(<res1>, <res2>, "->"),
+
+ node((3, 1), "Residual Block 3\n6x [256, 256, 1024]", shape: rect, width: 40mm, height: 15mm, name: <res3>),
+ edge(<res2>, <res3>, "->"),
+
+ node((3, 2), "Residual Block 4\n3x [512, 512, 2048]", shape: rect, width: 40mm, height: 15mm, name: <res4>),
+ edge(<res3>, <res4>, "->"),
+
+ // Cutting Line
+ edge(<res4>, <avgpool>, marks: "..|..>", stroke: 1pt, label: "Cut here", label-pos: 0.5, label-side: left),
+
+ // AvgPool + FC
+ node((7, 2), "AvgPool\n1x1", shape: rect, width: 30mm, height: 10mm, name: <avgpool>),
+ //edge(<res4>, <avgpool>, "->"),
+
+ node((7, 1), "Fully Connected\n1000 classes", shape: rect, width: 40mm, height: 10mm, name: <fc>),
+ edge(<avgpool>, <fc>, "->"),
+
+ // Output
+ node((7, 0), "Output", shape: rect, width: 30mm, height: 10mm, name: <output>),