diff --git a/LICENSE b/LICENSE
index 7ad34d9d54818ca098cf90ce0e9a8d2fd52c34e7..8efb5264ef1be5ddbc3bf0c5e5fa5941e4bc55ed 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018 ewanbarr
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/mpikat/Untitled.ipynb b/mpikat/Untitled.ipynb
deleted file mode 100644
index f7a6a26c905374d6a919ec7365dc843c3cea3b56..0000000000000000000000000000000000000000
--- a/mpikat/Untitled.ipynb
+++ /dev/null
@@ -1,578 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import ipaddress\n",
-    "\n",
-    "def ip_range_from_stream(stream):\n",
-    "    stream = stream.lstrip(\"spead://\")\n",
-    "    ip_range, port = stream.split(\":\")\n",
-    "    port = int(port)\n",
-    "    try:\n",
-    "        base_ip, ip_count = ip_range.split(\"+\")\n",
-    "        ip_count = int(ip_count)\n",
-    "    except ValueError:\n",
-    "        base_ip, ip_count = ip_range, 1\n",
-    "    return [ipaddress.ip_address(unicode(base_ip))+i for i in range(ip_count)], port"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ips,_ = ip_range_from_stream(\"spead://123.1.1.1+10:5000\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "[IPv4Address(u'123.1.1.1'),\n",
-       " IPv4Address(u'123.1.1.2'),\n",
-       " IPv4Address(u'123.1.1.3'),\n",
-       " IPv4Address(u'123.1.1.4'),\n",
-       " IPv4Address(u'123.1.1.5'),\n",
-       " IPv4Address(u'123.1.1.6'),\n",
-       " IPv4Address(u'123.1.1.7'),\n",
-       " IPv4Address(u'123.1.1.8'),\n",
-       " IPv4Address(u'123.1.1.9'),\n",
-       " IPv4Address(u'123.1.1.10')]"
-      ]
-     },
-     "execution_count": 3,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "ips"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "ips.reverse()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 36,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def ip_range_from_stream(stream):\n",
-    "    stream = stream.lstrip(\"spead://\")\n",
-    "    ip_range, port = stream.split(\":\")\n",
-    "    port = int(port)\n",
-    "    try:\n",
-    "        base_ip, ip_count = ip_range.split(\"+\")\n",
-    "        ip_count = int(ip_count)\n",
-    "    except ValueError:\n",
-    "        base_ip, ip_count = ip_range, 1\n",
-    "    return ContiguousIpRange(base_ip, port, ip_count)\n",
-    "\n",
-    "class IpRangeAllocationError(Exception):\n",
-    "    pass\n",
-    "\n",
-    "class ContiguousIpRange(object):\n",
-    "    def __init__(self, base_ip, port, count):\n",
-    "        self._base_ip = ipaddress.ip_address(unicode(base_ip))\n",
-    "        self._ips = [self._base_ip+ii for ii in range(count)]\n",
-    "        self._port = port\n",
-    "        self._count = count\n",
-    "\n",
-    "    @property\n",
-    "    def count(self):\n",
-    "        return self._count\n",
-    "\n",
-    "    @property\n",
-    "    def port(self):\n",
-    "        return self._port\n",
-    "\n",
-    "    @property\n",
-    "    def base_ip(self):\n",
-    "        return self._base_ip\n",
-    "\n",
-    "    def index(self, ip):\n",
-    "        return self._ips.index(ip)\n",
-    "\n",
-    "    def __hash__(self):\n",
-    "        return hash(self.format_katcp())\n",
-    "\n",
-    "    def __iter__(self):\n",
-    "        return self._ips.__iter__()\n",
-    "\n",
-    "    def __repr__(self):\n",
-    "        return \"<{} {}>\".format(self.__class__.__name__, self.format_katcp())\n",
-    "\n",
-    "    def format_katcp(self):\n",
-    "        return \"spead://{}+{}:{}\".format(str(self._base_ip), self._count, self._port)\n",
-    "\n",
-    "\n",
-    "class IpRangeManager(object):\n",
-    "    def __init__(self, ip_range):\n",
-    "        self._ip_range = ip_range\n",
-    "        self._allocated = [False for _ in ip_range]\n",
-    "        self._allocated_ranges = set()\n",
-    "\n",
-    "    def __repr__(self):\n",
-    "        return \"<{} {}>\".format(self.__class__.__name__, self._ip_range.format_katcp())\n",
-    "\n",
-    "    def _free_ranges(self):\n",
-    "        state_ranges = {True:[], False:[]}\n",
-    "        def find_state_range(idx, state):\n",
-    "            start_idx = idx\n",
-    "            while idx < len(self._allocated):\n",
-    "                if self._allocated[idx] == state:\n",
-    "                    idx+=1\n",
-    "                else:\n",
-    "                    state_ranges[state].append((start_idx, idx-start_idx))\n",
-    "                    return find_state_range(idx, not state)\n",
-    "            else:\n",
-    "                state_ranges[state].append((start_idx, idx-start_idx))\n",
-    "        find_state_range(0, self._allocated[0])\n",
-    "        return state_ranges[False]\n",
-    "\n",
-    "    def allocate(self, n):\n",
-    "        ranges = self._free_ranges()\n",
-    "        best_fit = None\n",
-    "        for start,span in ranges:\n",
-    "            if span<n:\n",
-    "                continue\n",
-    "            elif best_fit is None:\n",
-    "                best_fit = (start, span)\n",
-    "            elif (span-n) < (best_fit[1]-n):\n",
-    "                best_fit = (start, span)\n",
-    "        if best_fit is None:\n",
-    "            raise IpRangeAllocationError(\"Could not allocate contiguous range of {} addresses\".format(n))\n",
-    "        else:\n",
-    "            start,span = best_fit\n",
-    "            for ii in range(n):\n",
-    "                offset = start+ii\n",
-    "                self._allocated[offset] = True\n",
-    "            allocated_range = ContiguousIpRange(str(self._ip_range.base_ip + start), self._ip_range.port, n)\n",
-    "            self._allocated_ranges.add(allocated_range)\n",
-    "            return allocated_range\n",
-    "\n",
-    "    def free(self, ip_range):\n",
-    "        self._allocated_ranges.remove(ip_range)\n",
-    "        for ip in ip_range:\n",
-    "            self._allocated[self._ip_range.index(ip)] = False\n",
-    "\n",
-    "        \n",
-    "        "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 37,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "<IpRangeManager spead://239.1.1.150+128:7147>\n"
-     ]
-    }
-   ],
-   "source": [
-    "x = IpRangeManager(ip_range_from_stream('spead://239.1.1.150+128:7147'))\n",
-    "print x"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 38,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x._allocated[5:9] = [True for _ in range(4)]\n",
-    "x._allocated[10:11] = [True for _ in range(1)]\n",
-    "x._allocated[56:77] = [True for _ in range(77-56)]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 39,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[(0, 5), (9, 1), (11, 45), (77, 51)]\n",
-      "set([<ContiguousIpRange spead://239.1.1.161+40:7147>])\n",
-      "<IpRangeManager spead://239.1.1.150+128:7147>\n"
-     ]
-    }
-   ],
-   "source": [
-    "x = IpRangeManager(ip_range_from_stream('spead://239.1.1.150+128:7147'))\n",
-    "x._allocated[5:9] = [True for _ in range(4)]\n",
-    "x._allocated[10:11] = [True for _ in range(1)]\n",
-    "x._allocated[56:77] = [True for _ in range(77-56)]\n",
-    "print x._free_ranges()\n",
-    "a = x.allocate(40)\n",
-    "print x._allocated_ranges\n",
-    "print x"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 40,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "spead://239.1.1.161+40:7147\n"
-     ]
-    }
-   ],
-   "source": [
-    "print a.format_katcp()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 41,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[(0, 5), (9, 1), (51, 5), (77, 51)]\n"
-     ]
-    }
-   ],
-   "source": [
-    "print x._free_ranges()\n",
-    "x.free(a)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 42,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[(0, 5), (9, 1), (11, 45), (77, 51)]\n",
-      "set([])\n"
-     ]
-    }
-   ],
-   "source": [
-    "print x._free_ranges()\n",
-    "print x._allocated_ranges"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x = [1,2,3]\n",
-    "x.index(2)\n",
-    "a = x.__iter__()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "['__class__',\n",
-       " '__delattr__',\n",
-       " '__doc__',\n",
-       " '__format__',\n",
-       " '__getattribute__',\n",
-       " '__hash__',\n",
-       " '__init__',\n",
-       " '__iter__',\n",
-       " '__length_hint__',\n",
-       " '__new__',\n",
-       " '__reduce__',\n",
-       " '__reduce_ex__',\n",
-       " '__repr__',\n",
-       " '__setattr__',\n",
-       " '__sizeof__',\n",
-       " '__str__',\n",
-       " '__subclasshook__',\n",
-       " 'next']"
-      ]
-     },
-     "execution_count": 13,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "dir(a)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 32,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import logging\n",
-    "\n",
-    "logging.basicConfig(level=logging.DEBUG)\n",
-    "log = logging\n",
-    "\n",
-    "class Config(object):\n",
-    "    def __init__(self):\n",
-    "        self.n_servers = 64\n",
-    "        self.n_mcast_groups = 128\n",
-    "        \n",
-    "    def _sanitize_sb_configuration(self,  tscrunch, fscrunch, desired_nbeams, beam_granularity=None):\n",
-    "\n",
-    "        # What are the key constraints:\n",
-    "        # 1. The data rate per multicast group\n",
-    "        # 2. The aggregate data rate out of the instrument (not as important)\n",
-    "        # 3. The processing limitations (has to be determined empirically)\n",
-    "        # 4. The number of multicast groups available\n",
-    "        # 5. The possible numbers of beams per multicast group (such that TUSE can receive N per node)\n",
-    "        # 6. Need to use at least 16 multicast groups\n",
-    "        # 7. Should have even flow across multicast groups, so same number of beams in each\n",
-    "        # 8. Multicast groups should be contiguous\n",
-    "\n",
-    "\n",
-    "        # Constants for data rates and bandwidths\n",
-    "        # these are hardcoded here for the moment but ultimately\n",
-    "        # they should be moved to a higher level or even dynamically\n",
-    "        # specified\n",
-    "        MAX_RATE_PER_MCAST = 6.8e9 # bits/s\n",
-    "        MAX_RATE_PER_SERVER = 4.375e9 # bits/s, equivalent to 280 Gb/s over 64 (virtual) nodes\n",
-    "        MAX_OUTPUT_RATE = 280.0e9 # bits/s\n",
-    "        BANDWIDTH = 856e6 # MHz\n",
-    "\n",
-    "        # Calculate the data rate for each beam assuming 8-bit packing and\n",
-    "        # no metadata overheads\n",
-    "        data_rate_per_beam = BANDWIDTH / tscrunch / fscrunch * 8 # bits/s\n",
-    "        log.debug(\"Data rate per coherent beam: {} Gb/s\".format(data_rate_per_beam/1e9))\n",
-    "\n",
-    "        if data_rate_per_beam > MAX_RATE_PER_MCAST:\n",
-    "            raise Exception(\"Data rate per beam is greater than the data rate per multicast group\")\n",
-    "            \n",
-    "        if data_rate_per_beam * desired_nbeams > MAX_OUTPUT_RATE:\n",
-    "            desired_nbeams = MAX_OUTPUT_RATE // data_rate_per_beam\n",
-    "            log.warning(\"Aggregate data rate larger than supported, reducing nbeams to {}\".format(desired_nbeams))\n",
-    "             \n",
-    "        # Calculate the maximum number of beams that will fit in one multicast\n",
-    "        # group assuming. Each multicast group must be receivable on a 10 GbE\n",
-    "        # connection so the max rate must be < 8 Gb/s\n",
-    "        max_beams_per_mcast = MAX_RATE_PER_MCAST // data_rate_per_beam\n",
-    "        log.debug(\"Maximum number of beams per multicast group: {}\".format(int(max_beams_per_mcast)))\n",
-    "\n",
-    "        # For instuments such as TUSE, they require a fixed number of beams per node. For their\n",
-    "        # case we assume that they will only acquire one multicast group per node and as such\n",
-    "        # the minimum number of beams per multicast group should be whatever TUSE requires.\n",
-    "        # Multicast groups can contain more beams than this but only in integer multiples of\n",
-    "        # the minimum\n",
-    "        if beam_granularity:\n",
-    "            if max_beams_per_mcast < beam_granularity:\n",
-    "                log.warning(\"Cannot fit {} beams into one multicast group, updating number of beams per multicast group to {}\".format(\n",
-    "                    beam_granularity, max_beams_per_mcast))\n",
-    "                while np.modf(beam_granularity/max_beams_per_mcast)[0] != 0.0:\n",
-    "                    max_beams_per_mcast -= 1\n",
-    "                beam_granularity = max_beams_per_mcast\n",
-    "            beams_per_mcast = beam_granularity * (max_beams_per_mcast // beam_granularity)\n",
-    "            log.debug(\"Number of beams per multicast group, accounting for granularity: {}\".format(int(beams_per_mcast)))\n",
-    "        else:\n",
-    "            beams_per_mcast = max_beams_per_mcast\n",
-    "\n",
-    "        # Calculate the total number of beams that could be produced assuming the only\n",
-    "        # rate limit was that limit per multicast groups\n",
-    "        max_beams = self.n_mcast_groups * beams_per_mcast\n",
-    "        log.debug(\"Maximum possible beams (assuming on multicast group rate limit): {}\".format(max_beams))\n",
-    "\n",
-    "        if desired_nbeams > max_beams:\n",
-    "            log.warning(\"Requested number of beams is greater than theoretical maximum, \"\n",
-    "                \"updating setting the number of beams of beams to {}\".format(max_beams))\n",
-    "            desired_nbeams = max_beams\n",
-    "\n",
-    "        # Calculate the total number of multicast groups that are required to satisfy\n",
-    "        # the requested number of beams\n",
-    "        num_mcast_groups_required = round(desired_nbeams / beams_per_mcast + 0.5)\n",
-    "        log.debug(\"Number of multicast groups required for {} beams: {}\".format(desired_nbeams, num_mcast_groups_required))\n",
-    "        actual_nbeams = num_mcast_groups_required * beams_per_mcast\n",
-    "        nmcast_groups = num_mcast_groups_required\n",
-    "\n",
-    "        # Now we need to check the server rate limits\n",
-    "        if (actual_nbeams * data_rate_per_beam)/self.n_servers > MAX_RATE_PER_SERVER:\n",
-    "            log.warning(\"Number of beams limited by output data rate per server\")\n",
-    "        actual_nbeams = MAX_RATE_PER_SERVER*self.n_servers // data_rate_per_beam\n",
-    "        log.info(\"Number of beams that can be generated: {}\".format(actual_nbeams))\n",
-    "        return actual_nbeams"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 33,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "DEBUG:root:Data rate per coherent beam: 1.3696 Gb/s\n",
-      "DEBUG:root:Maximum number of beams per multicast group: 4\n",
-      "WARNING:root:Cannot fit 6 beams into one multicast group, updating number of beams per multicast group to 4.0\n",
-      "DEBUG:root:Number of beams per multicast group, accounting for granularity: 3\n",
-      "DEBUG:root:Maximum possible beams (assuming on multicast group rate limit): 384.0\n",
-      "DEBUG:root:Number of multicast groups required for 16 beams: 6.0\n",
-      "INFO:root:Number of beams that can be generated: 204.0\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "204.0"
-      ]
-     },
-     "execution_count": 33,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "x = Config()\n",
-    "x._sanitize_sb_configuration(5,1,16,6)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 35,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class MulticastGroupConfig(object):\n",
-    "    def __init__(self, data_rate_per_beam, granularity):\n",
-    "        self.data_rate_per_beam = data_rate_per_beam\n",
-    "        self.granularity = granularity\n",
-    "        self.group_count = 16\n",
-    "    \n",
-    "    \n",
-    "    \n",
-    "    def add_group(self):\n",
-    "        prior_nbeams = self.group_count * self.beam_count\n",
-    "        self.group_count += 1\n",
-    "        self.beam_count = round(prior_nbeams / self.group_count + 0.5)\n",
-    "    \n",
-    "    def nbeams(self):\n",
-    "        return self.group_count * self.beam_count\n",
-    "    \n",
-    "    def add_beam(self):\n",
-    "        self.beam_count+=1\n",
-    "        \n",
-    "    def remove_beam(self):\n",
-    "        self.beam_count-=1\n",
-    "        \n",
-    "    def matches_granularity(self, granularity):\n",
-    "        if self.beam_count % granularity == 0:\n",
-    "            return True\n",
-    "        elif granularity % self.beam_count == 0:\n",
-    "            return True\n",
-    "        else:\n",
-    "            return False\n",
-    "        \n",
-    "\n",
-    "def test():\n",
-    "    group = MulticastGroup()\n",
-    "        \n",
-    "class MulticastConfiguration(object):\n",
-    "    def __init__(self, nservers, max_groups):\n",
-    "        self.nservers = nservers\n",
-    "        self.max_groups = max_groups\n",
-    "        self.base_groups = [[] for _ in range(16)]\n",
-    "        \n",
-    "    def config(self, tscrunch, fscrunch, desired_nbeams, beam_granularity):\n",
-    "        MAX_RATE_PER_MCAST = 6.8e9 # bits/s\n",
-    "        MAX_RATE_PER_SERVER = 4.375e9 # bits/s, equivalent to 280 Gb/s over 64 (virtual) nodes\n",
-    "        BANDWIDTH = 856e6 # MHz\n",
-    "\n",
-    "        # Calculate the data rate for each beam assuming 8-bit packing and\n",
-    "        # no metadata overheads\n",
-    "        data_rate_per_beam = BANDWIDTH / tscrunch / fscrunch * 8 # bits/s\n",
-    "        log.debug(\"Data rate per coherent beam: {} Gb/s\".format(data_rate_per_beam/1e9))\n",
-    "        # Calculate the maximum number of beams that will fit in one multicast\n",
-    "        # group assuming. Each multicast group must be receivable on a 10 GbE\n",
-    "        # connection so the max rate must be < 8 Gb/s\n",
-    "        max_beams_per_mcast = MAX_RATE_PER_MCAST // data_rate_per_beam\n",
-    "        log.debug(\"Maximum number of beams per multicast group: {}\".format(int(max_beams_per_mcast)))\n",
-    "\n",
-    "        if max_beams_per_mcast == 0:\n",
-    "            raise Exception(\"Data rate per beam is greater than the data rate per multicast group\")\n",
-    "        \n",
-    "        \n",
-    "    "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "anaconda-cloud": {},
-  "kernelspec": {
-   "display_name": "Python [conda env:py27]",
-   "language": "python",
-   "name": "conda-env-py27-py"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.12"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
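The deleted notebook prototyped the multicast IP-range parsing and best-fit allocation that now live in the split-out fbfuse modules. A minimal Python 3 sketch of just the parsing step (the notebook targets Python 2, hence `unicode` and `print`; note too that `lstrip("spead://")` strips a leading run of the characters s, p, e, a, d, :, / rather than the literal prefix, which happens to be harmless for numeric IPs but is safer done with an explicit prefix check):

```python
import ipaddress

def ip_range_from_stream(stream):
    """Parse 'spead://<ip>[+<count>]:<port>' into ([ips], port)."""
    # Strip the scheme explicitly; lstrip("spead://") would remove a
    # leading run of the characters {s, p, e, a, d, ':', '/'} instead.
    prefix = "spead://"
    if stream.startswith(prefix):
        stream = stream[len(prefix):]
    ip_range, _, port = stream.partition(":")
    base_ip, _, count = ip_range.partition("+")
    base = ipaddress.ip_address(base_ip)
    return [base + ii for ii in range(int(count) if count else 1)], int(port)

ips, port = ip_range_from_stream("spead://239.1.1.150+4:7147")
# ips == [IPv4Address('239.1.1.150'), ..., IPv4Address('239.1.1.153')], port == 7147
```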
diff --git a/mpikat/__init__.py b/mpikat/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5ed613325f7f0b2ccdc2e77bb36b252104de5e47 100644
--- a/mpikat/__init__.py
+++ b/mpikat/__init__.py
@@ -0,0 +1,8 @@
+# FBFUSE specific imports
+from .fbfuse_master_controller import FbfMasterController
+from .fbfuse_product_controller import FbfProductController
+#from .fbfuse_worker_server import FbfWorkerServer
+from .fbfuse_ca_server import BaseFbfConfigurationAuthority, DefaultConfigurationAuthority
+from .fbfuse_delay_engine import DelayEngine
+from .fbfuse_beam_manager import BeamManager
+from .fbfuse_worker_wrapper import FbfWorkerWrapper, FbfWorkerPool
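With these re-exports, the FBFUSE components are importable straight from the package root. A short usage sketch, assuming the new `fbfuse_master_controller` module keeps the `(ip, port, dummy)` constructor signature of the deleted monolithic `fbfuse.py`:

```python
from mpikat import FbfMasterController

# Dummy mode: the controller serves its KATCP interface but sends no
# requests to worker nodes (signature assumed from the old fbfuse.py).
server = FbfMasterController("127.0.0.1", 5000, dummy=True)
server.start()
```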
diff --git a/mpikat/fbfuse.py b/mpikat/fbfuse.py
deleted file mode 100644
index 4bfb839f6bd3449815315230fc7cbf83b4b46ba6..0000000000000000000000000000000000000000
--- a/mpikat/fbfuse.py
+++ /dev/null
@@ -1,2123 +0,0 @@
-import logging
-import json
-import tornado
-import signal
-import time
-import numpy as np
-import ipaddress
-import mosaic
-from threading import Lock
-from optparse import OptionParser
-from tornado.gen import Return, coroutine
-from katcp import Sensor, Message, AsyncDeviceServer, KATCPClientResource, AsyncReply
-from katcp.kattypes import request, return_reply, Int, Str, Discrete, Float
-from katportalclient import KATPortalClient
-from katpoint import Antenna, Target
-
-
-# ?halt message means shutdown everything and power off all machines
-
-
-log = logging.getLogger("mpikat.fbfuse")
-
-lock = Lock()
-
-PORTAL = "monctl.devnmk.camlab.kat.ac.za"
-
-FBF_IP_RANGE = "spead://239.11.1.0+127:7147"
-
-###################
-# Utility functions
-###################
-
-def is_power_of_two(n):
-    """
-    @brief  Test if number is a power of two
-
-    @return True|False
-    """
-    return n != 0 and ((n & (n - 1)) == 0)
-
-def next_power_of_two(n):
-    """
-    @brief  Round a number up to the next power of two
-    """
-    return 2**(n-1).bit_length()
-
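Both helpers are standard bit tricks; for example, using the definitions above:

```python
# is_power_of_two: a power of two has a single set bit, so n & (n - 1) == 0.
assert [n for n in range(1, 9) if is_power_of_two(n)] == [1, 2, 4, 8]
# next_power_of_two: 2 ** bit_length(n - 1).
assert next_power_of_two(1) == 1   # (0).bit_length() == 0 -> 2**0
assert next_power_of_two(5) == 8   # (4).bit_length() == 3 -> 2**3
assert next_power_of_two(8) == 8   # exact powers map to themselves
```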
-def parse_csv_antennas(antennas_csv):
-    antennas = antennas_csv.split(",")
-    nantennas = len(antennas)
-    if nantennas == 1 and antennas[0] == '':
-        raise AntennaValidationError("Provided antenna list was empty")
-    names = [antenna.strip() for antenna in antennas]
-    if len(names) != len(set(names)):
-        raise AntennaValidationError("Not all provided antennas were unqiue")
-    return names
-
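For example, using `parse_csv_antennas` as defined above:

```python
assert parse_csv_antennas("m007, m008,m009") == ["m007", "m008", "m009"]
# An empty list or duplicate names raise AntennaValidationError
# (defined further down in this module).
```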
-def ip_range_from_stream(stream):
-    stream = stream.lstrip("spead://")
-    ip_range, port = stream.split(":")
-    port = int(port)
-    try:
-        base_ip, ip_count = ip_range.split("+")
-        ip_count = int(ip_count)
-    except ValueError:
-        base_ip, ip_count = ip_range, 1
-    return ContiguousIpRange(base_ip, port, ip_count)
-
-class IpRangeAllocationError(Exception):
-    pass
-
-class ContiguousIpRange(object):
-    def __init__(self, base_ip, port, count):
-        self._base_ip = ipaddress.ip_address(unicode(base_ip))
-        self._ips = [self._base_ip+ii for ii in range(count)]
-        self._port = port
-        self._count = count
-
-    @property
-    def count(self):
-        return self._count
-
-    @property
-    def port(self):
-        return self._port
-
-    @property
-    def base_ip(self):
-        return self._base_ip
-
-    def index(self, ip):
-        return self._ips.index(ip)
-
-    def __hash__(self):
-        return hash(self.format_katcp())
-
-    def __iter__(self):
-        return self._ips.__iter__()
-
-    def __repr__(self):
-        return "<{} {}>".format(self.__class__.__name__, self.format_katcp())
-
-    def format_katcp(self):
-        return "spead://{}+{}:{}".format(str(self._base_ip), self._count, self._port)
-
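A stream string therefore round-trips through parsing and `format_katcp` (a quick sketch using the definitions above; under Python 2, where this module runs, `unicode` is the built-in):

```python
ip_range = ip_range_from_stream("spead://239.11.1.0+127:7147")
assert ip_range.count == 127 and ip_range.port == 7147
assert str(ip_range.base_ip) == "239.11.1.0"
assert ip_range.format_katcp() == "spead://239.11.1.0+127:7147"
# Iteration yields consecutive addresses:
assert [str(ip) for ip in ip_range][:2] == ["239.11.1.0", "239.11.1.1"]
```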
-
-class IpRangeManager(object):
-    def __init__(self, ip_range):
-        self._ip_range = ip_range
-        self._allocated = [False for _ in ip_range]
-        self._allocated_ranges = set()
-
-    def __repr__(self):
-        return "<{} {}>".format(self.__class__.__name__, self._ip_range.format_katcp())
-
-    def format_katcp(self):
-        return self._ip_range.format_katcp()
-
-    def _free_ranges(self):
-        state_ranges = {True:[], False:[]}
-        def find_state_range(idx, state):
-            start_idx = idx
-            while idx < len(self._allocated):
-                if self._allocated[idx] == state:
-                    idx+=1
-                else:
-                    state_ranges[state].append((start_idx, idx-start_idx))
-                    return find_state_range(idx, not state)
-            else:
-                state_ranges[state].append((start_idx, idx-start_idx))
-        find_state_range(0, self._allocated[0])
-        return state_ranges[False]
-
-    def allocate(self, n):
-        ranges = self._free_ranges()
-        best_fit = None
-        for start,span in ranges:
-            if span<n:
-                continue
-            elif best_fit is None:
-                best_fit = (start, span)
-            elif (span-n) < (best_fit[1]-n):
-                best_fit = (start, span)
-        if best_fit is None:
-            raise IpRangeAllocationError("Could not allocate contiguous range of {} addresses".format(n))
-        else:
-            start,span = best_fit
-            for ii in range(n):
-                offset = start+ii
-                self._allocated[offset] = True
-            allocated_range = ContiguousIpRange(str(self._ip_range.base_ip + start), self._ip_range.port, n)
-            self._allocated_ranges.add(allocated_range)
-            return allocated_range
-
-    def free(self, ip_range):
-        self._allocated_ranges.remove(ip_range)
-        for ip in ip_range:
-            self._allocated[self._ip_range.index(ip)] = False
-
-
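The allocator's best-fit behaviour matches the experiment recorded in the deleted notebook: with free spans of width 5, 1, 45 and 51, a request for 40 addresses lands in the smallest span that fits (the 45-wide gap starting at index 11, i.e. 239.1.1.161), and freeing the block restores the original free list:

```python
pool = IpRangeManager(ip_range_from_stream("spead://239.1.1.150+128:7147"))
pool._allocated[5:9] = [True] * 4           # punch three holes in the pool
pool._allocated[10:11] = [True] * 1
pool._allocated[56:77] = [True] * (77 - 56)
assert pool._free_ranges() == [(0, 5), (9, 1), (11, 45), (77, 51)]
block = pool.allocate(40)                   # best fit: smallest span >= 40
assert block.format_katcp() == "spead://239.1.1.161+40:7147"
pool.free(block)
assert pool._free_ranges() == [(0, 5), (9, 1), (11, 45), (77, 51)]
```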
-###################
-# Custom exceptions
-###################
-
-class ServerAllocationError(Exception):
-    pass
-
-class ServerDeallocationError(Exception):
-    pass
-
-class ProductLookupError(Exception):
-    pass
-
-class ProductExistsError(Exception):
-    pass
-
-class AntennaValidationError(Exception):
-    pass
-
-class FbfStateError(Exception):
-    def __init__(self, expected_states, current_state):
-        message = "Possible states for this operation are '{}', but current state is '{}'".format(
-            expected_states, current_state)
-        super(FbfStateError, self).__init__(message)
-
-
-class KatportalClientWrapper(object):
-    def __init__(self, host, sub_nr=1):
-        self._client = KATPortalClient('http://{host}/api/client/{sub_nr}'.format(
-            host=host, sub_nr=sub_nr),
-            on_update_callback=None, logger=logging.getLogger('katcp'))
-
-    @coroutine
-    def _query(self, component, sensor):
-        sensor_name = yield self._client.sensor_subarray_lookup(
-            component=component, sensor=sensor, return_katcp_name=False)
-        sensor_sample = yield self._client.sensor_value(sensor_name,
-            include_value_ts=False)
-        raise Return(sensor_sample)
-
-    @coroutine
-    def get_observer_string(self, antenna):
-        sensor_sample = yield self._query(antenna, "observer")
-        raise Return(sensor_sample.value)
-
-    @coroutine
-    def get_antenna_feng_id_map(self, instrument_name, antennas):
-        sensor_sample = yield self._query('cbf', '{}.input-labelling'.format(instrument_name))
-        labels = eval(sensor_sample.value)
-        mapping = {}
-        for input_label, input_index, _, _ in labels:
-            antenna_name = input_label.strip("vh").lower()
-            if antenna_name.startswith("m") and antenna_name in antennas:
-                mapping[antenna_name] = input_index//2
-        print mapping
-        raise Return(mapping)
-
-    @coroutine
-    def get_bandwidth(self, stream):
-        sensor_sample = yield self._query('sub', 'streams.{}.bandwidth'.format(stream))
-        raise Return(sensor_sample.value)
-
-    @coroutine
-    def get_cfreq(self, stream):
-        sensor_sample = yield self._query('sub', 'streams.{}.centre-frequency'.format(stream))
-        raise Return(sensor_sample.value)
-
-    @coroutine
-    def get_sideband(self, stream):
-        sensor_sample = yield self._query('sub', 'streams.{}.sideband'.format(stream))
-        raise Return(sensor_sample.value)
-
-    @coroutine
-    def get_sync_epoch(self):
-        sensor_sample = yield self._query('sub', 'synchronisation-epoch')
-        raise Return(sensor_sample.value)
-
-    @coroutine
-    def get_itrf_reference(self):
-        sensor_sample = yield self._query('sub', 'array-position-itrf')
-        x, y, z = [float(i) for i in sensor_sample.value.split(",")]
-        raise Return((x, y, z))
-
-
-
-####################
-# Classes for communicating with and wrapping
-# the functionality of the processing servers
-# on each NUMA node.
-####################
-
-class FbfWorkerWrapper(object):
-    """Wrapper around a client to an FbfWorkerServer
-    instance.
-    """
-    def __init__(self, hostname, port):
-        """
-        @brief  Create a new wrapper around a client to a worker server
-
-        @params hostname The hostname for the worker server
-        @params port     The port number that the worker server serves on
-        """
-        log.debug("Building client to FbfWorkerServer at {}:{}".format(hostname, port))
-        self._client = KATCPClientResource(dict(
-            name="worker-server-client",
-            address=(hostname, port),
-            controlled=True))
-        self.hostname = hostname
-        self.port = port
-        self.priority = 0 # Currently no priority mechanism is implemented
-        self._started = False
-
-    def start(self):
-        """
-        @brief  Start the client to the worker server
-        """
-        log.debug("Starting client to FbfWorkerServer at {}:{}".format(self.hostname, self.port))
-        self._client.start()
-        self._started = True
-
-    def __repr__(self):
-        return "<{} for {}:{}>".format(self.__class__, self.hostname, self.port)
-
-    def __hash__(self):
-        # This hash override is required to allow these wrappers
-        # to be used with set() objects. The implication is that
-        # the combination of hostname and port is unique for a
-        # worker server
-        return hash((self.hostname, self.port))
-
-    def __eq__(self, other):
-        # Also implemented to help with hashing
-        # for sets
-        return self.__hash__() == hash(other)
-
-    def __del__(self):
-        if self._started:
-            try:
-                self._client.stop()
-            except Exception as error:
-                log.exception(str(error))
-
-
-class FbfWorkerPool(object):
-    """Wrapper class for managing server
-    allocation and deallocation to subarray/products
-    """
-    def __init__(self):
-        """
-        @brief   Construct a new instance
-        """
-        self._servers = set()
-        self._allocated = set()
-
-    def add(self, hostname, port):
-        """
-        @brief  Add a new FbfWorkerServer to the server pool
-
-        @params hostname The hostname for the worker server
-        @params port     The port number that the worker server serves on
-        """
-        wrapper = FbfWorkerWrapper(hostname,port)
-        if wrapper not in self._servers:
-            wrapper.start()
-            log.debug("Adding {} to server set".format(wrapper))
-            self._servers.add(wrapper)
-
-    def remove(self, hostname, port):
-        """
-        @brief  Remove an FbfWorkerServer from the server pool
-
-        @params hostname The hostname for the worker server
-        @params port     The port number that the worker server serves on
-        """
-        wrapper = FbfWorkerWrapper(hostname,port)
-        if wrapper in self._allocated:
-            raise ServerDeallocationError("Cannot remove allocated server from pool")
-        try:
-            self._servers.remove(wrapper)
-        except KeyError:
-            log.warning("Could not find {}:{} in server pool".format(hostname, port))
-
-    def allocate(self, count):
-        """
-        @brief    Allocate a number of servers from the pool.
-
-        @note     Free servers will be allocated by priority order
-                  with 0 being highest priority
-
-        @return   A list of FbfWorkerWrapper objects
-        """
-        with lock:
-            log.debug("Request to allocate {} servers".format(count))
-            available_servers = list(self._servers.difference(self._allocated))
-            log.debug("{} servers available".format(len(available_servers)))
-            available_servers.sort(key=lambda server: server.priority, reverse=True)
-            if len(available_servers) < count:
-                raise ServerAllocationError("Cannot allocate {0} servers, only {1} available".format(
-                    count, len(available_servers)))
-            allocated_servers = []
-            for _ in range(count):
-                server = available_servers.pop()
-                log.debug("Allocating server: {}".format(server))
-                allocated_servers.append(server)
-                self._allocated.add(server)
-            return allocated_servers
-
-    def deallocate(self, servers):
-        """
-        @brief    Deallocate servers and return them to the pool.
-
-        @param    servers   A list of FbfWorkerWrapper objects
-        """
-        for server in servers:
-            log.debug("Deallocating server: {}".format(server))
-            self._allocated.remove(server)
-
-    def reset(self):
-        """
-        @brief   Deallocate all servers
-        """
-        log.debug("Reseting server pool allocations")
-        self._allocated = set()
-
-    def available(self):
-        """
-        @brief   Return list of available servers
-        """
-        return list(self._servers.difference(self._allocated))
-
-    def used(self):
-        """
-        @brief   Return list of allocated servers
-        """
-        return list(self._allocated)
-
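A sketch of the pool life cycle (host names here are hypothetical, and `add` immediately starts a KATCP client to each worker, so outside of tests these should be reachable servers):

```python
pool = FbfWorkerPool()
pool.add("fbfnode00.example", 5100)   # hypothetical worker host/port
pool.add("fbfnode01.example", 5100)
workers = pool.allocate(2)            # highest-priority free servers first
try:
    pool.remove("fbfnode00.example", 5100)
except ServerDeallocationError:
    pass                              # allocated servers cannot be removed
pool.deallocate(workers)              # return them to the pool
```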
-####################
-# The main CAM interface for FBFUSE
-####################
-
-class FbfMasterController(AsyncDeviceServer):
-    """This is the main KATCP interface for the FBFUSE
-    multi-beam beamformer on MeerKAT.
-
-    This interface satisfies the following ICDs:
-    CAM-FBFUSE: <link>
-    TUSE-FBFUSE: <link>
-    """
-    VERSION_INFO = ("mpikat-fbf-api", 0, 1)
-    BUILD_INFO = ("mpikat-fbf-implementation", 0, 1, "rc1")
-    DEVICE_STATUSES = ["ok", "degraded", "fail"]
-    def __init__(self, ip, port, dummy=True,
-        ip_range = FBF_IP_RANGE):
-        """
-        @brief       Construct new FbfMasterController instance
-
-        @params  ip       The IP address on which the server should listen
-        @params  port     The port that the server should bind to
-        @params  dummy    Specifies if the instance is running in a dummy mode
-
-        @note   In dummy mode, the controller will act as a mock interface only, sending no requests to nodes.
-                A valid node pool must still be provided to the instance, but this may point to non-existent nodes.
-
-        """
-        self._ip_pool = IpRangeManager(ip_range_from_stream(ip_range))
-        super(FbfMasterController, self).__init__(ip,port)
-        self._products = {}
-        self._dummy = dummy
-        self._server_pool = FbfWorkerPool()
-
-    def start(self):
-        """
-        @brief  Start the FbfMasterController server
-        """
-        super(FbfMasterController,self).start()
-
-    def setup_sensors(self):
-        """
-        @brief  Set up monitoring sensors.
-
-        @note   The following sensors are made available on top of default sensors
-                implemented in AsyncDeviceServer and its base classes.
-
-                device-status:  Reports the health status of the FBFUSE and associated devices:
-                                Among other things report HW failure, SW failure and observation failure.
-
-                local-time-synced:  Indicates whether the local time of FBFUSE servers
-                                    is synchronised to the master time reference (using NTP).
-                                    This sensor is aggregated from all nodes that are part
-                                    of FBF and will return "not sync'd" if any nodes are
-                                    unsynchronised.
-
-                products:   The list of product_ids that FBFUSE is currently handling
-        """
-        self._device_status = Sensor.discrete(
-            "device-status",
-            description="Health status of FBFUSE",
-            params=self.DEVICE_STATUSES,
-            default="ok",
-            initial_status=Sensor.UNKNOWN)
-        self.add_sensor(self._device_status)
-
-        self._local_time_synced = Sensor.boolean(
-            "local-time-synced",
-            description="Indicates FBF is NTP syncronised.",
-            default=True,
-            initial_status=Sensor.UNKNOWN)
-        self.add_sensor(self._local_time_synced)
-
-        self._products_sensor = Sensor.string(
-            "products",
-            description="The names of the currently configured products",
-            default="",
-            initial_status=Sensor.UNKNOWN)
-        self.add_sensor(self._products_sensor)
-
-        self._ip_pool_sensor = Sensor.string(
-            "output-ip-range",
-            description="The multicast address allocation for coherent beams",
-            default=self._ip_pool.format_katcp(),
-            initial_status=Sensor.NOMINAL)
-        self.add_sensor(self._ip_pool_sensor)
-
-
-
-    def _update_products_sensor(self):
-        self._products_sensor.set_value(",".join(self._products.keys()))
-
-    def _get_product(self, product_id):
-        if product_id not in self._products:
-            raise ProductLookupError("No product configured with ID: {}".format(product_id))
-        else:
-            return self._products[product_id]
-
-    @request(Str(), Int())
-    @return_reply()
-    def request_register_worker_server(self, req, hostname, port):
-        """
-        @brief   Register an FbfWorker instance
-
-        @params hostname The hostname for the worker server
-        @params port     The port number that the worker server serves on
-
-        @detail  Register an FbfWorker instance that can be used for FBFUSE
-                 computation. FBFUSE has no preference for the order in which control
-                 servers are allocated to a subarray. An FbfWorker wraps an atomic
-                 unit of compute comprised of one CPU, one GPU and one NIC (i.e. one NUMA
-                 node on an FBFUSE compute server).
-        """
-        self._server_pool.add(hostname, port)
-        return ("ok",)
-
-    @request(Str(), Int())
-    @return_reply()
-    def request_deregister_worker_server(self, req, hostname, port):
-        """
-        @brief   Deregister an FbfWorker instance
-
-        @params hostname The hostname for the worker server
-        @params port     The port number that the worker server serves on
-
-        @detail  The graceful way of removing a server from rotation. If the server is
-                 currently actively processing an exception will be raised.
-        """
-        try:
-            self._server_pool.remove(hostname, port)
-        except ServerDeallocationError as error:
-            return ("fail", str(error))
-        else:
-            return ("ok",)
-
-    @request()
-    @return_reply(Int())
-    def request_worker_server_list(self, req):
-        """
-        @brief   List all control servers and provide minimal metadata
-        """
-        for server in self._server_pool.used():
-            req.inform("{} allocated".format(server))
-        for server in self._server_pool.available():
-            req.inform("{} free".format(server))
-        return ("ok", len(self._server_pool.used()) + len(self._server_pool.available()))
-
-
-    @request(Str(), Str(), Int(), Str(), Str())
-    @return_reply()
-    def request_configure(self, req, product_id, antennas_csv, n_channels, streams_json, proxy_name):
-        """
-        @brief      Configure FBFUSE to receive and process data from a subarray
-
-        @detail     REQUEST ?configure product_id antennas_csv n_channels streams_json proxy_name
-                    Configure FBFUSE for the particular data products
-
-        @param      req               A katcp request object
-
-        @param      product_id        This is a name for the data product, which is a useful tag to include
-                                      in the data, but should not be analysed further. For example "array_1_bc856M4k".
-
-        @param      antennas_csv      A comma separated list of physical antenna names used in particular sub-array
-                                      to which the data products belongs (e.g. m007,m008,m009).
-
-        @param      n_channels        The integer number of frequency channels provided by the CBF.
-
-        @param      streams_json      a JSON struct containing config keys and values describing the streams.
-
-                                      For example:
-
-                                      @code
-                                         {'stream_type1': {
-                                             'stream_name1': 'stream_address1',
-                                             'stream_name2': 'stream_address2',
-                                             ...},
-                                             'stream_type2': {
-                                             'stream_name1': 'stream_address1',
-                                             'stream_name2': 'stream_address2',
-                                             ...},
-                                          ...}
-                                      @endcode
-
-                                      The stream type keys indicate the source of the data and the type, e.g. cam.http.
-                                      stream_address will be a URI.  For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>,
-                                      representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth
-                                      to accommodate as a single multicast group, the count parameter indicates the number of additional
-                                      consecutively numbered multicast group IP addresses, all sharing the same UDP port number.
-                                      stream_name is the name used to identify the stream in CAM.
-                                      A Python example is shown below, for five streams:
-                                      One CAM stream, with type cam.http.  The camdata stream provides the connection string for katportalclient
-                                      (for the subarray that this FBFUSE instance is being configured on).
-                                      One F-engine stream, with type:  cbf.antenna_channelised_voltage.
-                                      One X-engine stream, with type:  cbf.baseline_correlation_products.
-                                      Two beam streams, with type: cbf.tied_array_channelised_voltage.  The stream names ending in x are
-                                      horizontally polarised, and those ending in y are vertically polarised.
-
-                                      @code
-                                         pprint(streams_dict)
-                                         {'cam.http':
-                                             {'camdata':'http://10.8.67.235/api/client/1'},
-                                          'cbf.antenna_channelised_voltage':
-                                             {'i0.antenna-channelised-voltage':'spead://239.2.1.150+15:7148'},
-                                          ...}
-                                      @endcode
-
-                                      If using katportalclient to get information from CAM, then reconnect and re-subscribe to all sensors
-                                      of interest at this time.
-
-        @param      proxy_name        The CAM name for the instance of the FBFUSE data proxy that is being configured.
-                                      For example, "FBFUSE_3".  This can be used to query sensors on the correct proxy,
-                                      in the event that there are multiple instances in the same subarray.
-
-        @note       A configure call will result in the generation of a new subarray instance in FBFUSE that will be added to the clients list.
-
-        @return     katcp reply object [[[ !configure ok | (fail [error description]) ]]]
-        """
-        # Test if product_id already exists
-        if product_id in self._products:
-            return ("fail", "FBF already has a configured product with ID: {}".format(product_id))
-        # Determine number of nodes required based on number of antennas in subarray
-        # Note: this is a crude heuristic that may be updated later. In theory
-        # there is a throughput measure as a function of bandwidth, polarisations and number
-        # of antennas that allows one to determine the number of nodes to run. Currently we
-        # just assume one antenna's worth of data per NIC on our servers, so two antennas per
-        # node.
-        try:
-            antennas = parse_csv_antennas(antennas_csv)
-        except AntennaValidationError as error:
-            return ("fail", str(error))
-
-        valid_n_channels = [1024, 4096, 32768]
-        if n_channels not in valid_n_channels:
-            return ("fail", "The provided number of channels ({}) is not valid. Valid options are {}".format(n_channels, valid_n_channels))
-
-        streams = json.loads(streams_json)
-        try:
-            streams['cam.http']['camdata']
-            # Need to check for endswith('.antenna-channelised-voltage') as the i0 is not
-            # guaranteed to stay the same.
-            # i0 = instrument name
-            # Need to keep this for future sensor lookups
-            streams['cbf.antenna_channelised_voltage']
-        except KeyError as error:
-            return ("fail", "JSON streams object does not contain required key: {}".format(str(error)))
-
-        for key in streams['cbf.antenna_channelised_voltage'].keys():
-            if key.endswith('.antenna-channelised-voltage'):
-                instrument_name, _ = key.split('.')
-                feng_stream_name = key
-                break
-        else:
-            return ("fail", "Could not determine instrument name (e.g. 'i0') from streams")
-
-        # TODO: change this request to @async_reply and make the whole thing a coroutine
-        @coroutine
-        def configure():
-            nantennas = len(antennas)
-            if not is_power_of_two(nantennas):
-                log.warning("Number of antennas was not a power of two. Rounding up to next power of two for resource allocation.")
-            if next_power_of_two(nantennas)//2 < 4:
-                log.warning("Number of antennas was less than than 4 but resources will be allocated assuming 4 antennas.")
-            required_servers = max(4, next_power_of_two(nantennas)//2)
-
-            # Want to make all the katportalclient calls here, retrieving:
-            # - observer strings
-            # - reference position
-            # - bandwidth
-            # - centre frequency
-            # - sideband
-
-            kpc = KatportalClientWrapper(PORTAL)
-
-            # Get all antenna observer strings
-            futures, observers = [],[]
-            for antenna in antennas:
-                log.debug("Fetching katpoint string for antenna {}".format(antenna))
-                futures.append(kpc.get_observer_string(antenna))
-            for ii,future in enumerate(futures):
-                try:
-                    observer = yield future
-                except Exception as error:
-                    log.error("Error on katportalclient call: {}".format(str(error)))
-                    req.reply("fail", "Error retrieving katpoint string for antenna {}".format(antennas[ii]))
-                    return
-                else:
-                    log.debug("Fetched katpoint antenna: {}".format(observer))
-                    observers.append(Antenna(observer))
-
-            # Get bandwidth, cfreq, sideband, f-eng mapping
-            bandwidth_future = kpc.get_bandwidth(feng_stream_name)
-            cfreq_future = kpc.get_cfreq(feng_stream_name)
-            sideband_future = kpc.get_sideband(feng_stream_name)
-            feng_antenna_map_future = kpc.get_antenna_feng_id_map(instrument_name, antennas)
-            bandwidth = yield bandwidth_future
-            cfreq = yield cfreq_future
-            sideband = yield sideband_future
-            feng_antenna_map = yield feng_antenna_map_future
-
-
-            # This may be removed in future.
-            # Currently if self._dummy is set no actual server allocation will be requested.
-            if not self._dummy:
-                servers = self._server_pool.allocate(required_servers)
-            else:
-                servers = []
-            product = FbfProductController(self, product_id, observers, n_channels, streams, proxy_name, servers)
-            self._products[product_id] = product
-            self._update_products_sensor()
-            req.reply("ok",)
-        self.ioloop.add_callback(configure)
-        raise AsyncReply
-
-    @request(Str())
-    @return_reply()
-    def request_deconfigure(self, req, product_id):
-        """
-        @brief      Deconfigure the FBFUSE instance.
-
-        @note       Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information
-                    from CAM, then it should disconnect at this time.
-
-        @param      req               A katcp request object
-
-        @param      product_id        This is a name for the data product, used to track which subarray is being deconfigured.
-                                      For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]]
-        """
-        # Test if product exists
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            product.stop_beams()
-        except Exception as error:
-            return ("fail", str(error))
-        self._server_pool.deallocate(product.servers)
-        product.teardown_sensors()
-        del self._products[product_id]
-        self._update_products_sensor()
-        return ("ok",)
-
-
-    @request(Str(), Str())
-    @return_reply()
-    @coroutine
-    def request_target_start(self, req, product_id, target):
-        """
-        @brief      Notify FBFUSE that a new target is being observed
-
-        @param      product_id      This is a name for the data product, used to track which subarray is being addressed.
-                                    For example "array_1_bc856M4k".
-
-        @param      target          A KATPOINT target string
-
-        @return     katcp reply object [[[ !target-start ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            raise Return(("fail", str(error)))
-        try:
-            target = Target(target)
-        except Exception as error:
-            raise Return(("fail", str(error)))
-        yield product.target_start(target)
-        raise Return(("ok",))
-
-
-    # DELETE this
-
-    @request(Str())
-    @return_reply()
-    @coroutine
-    def request_target_stop(self, req, product_id):
-        """
-        @brief      Notify FBFUSE that the telescope has stopped observing a target
-
-        @param      product_id      This is a name for the data product, used to track which subarray is being addressed.
-                                    For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !target-stop ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            raise Return(("fail", str(error)))
-        yield product.target_stop()
-        raise Return(("ok",))
-
-
-    @request(Str(), Int(), Str(), Int(), Int())
-    @return_reply()
-    def request_configure_coherent_beams(self, req, product_id, nbeams, antennas_csv, fscrunch, tscrunch):
-        """
-        @brief      Request that FBFUSE configure parameters for coherent beams
-
-        @note       This call can only be made prior to a call to start-beams for the configured product.
-                    This is due to FBFUSE requiring static information up front in order to compile beamformer
-                    kernels, allocate the correct size memory buffers and subscribe to the correct number of
-                    multicast groups.
-
-        @note       The particular configuration passed at this stage will only be evaluated on a call to start-beams.
-                    If the requested configuration is not possible due to hardware and bandwidth limits, an error will
-                    be raised on the start-beams call.
-
-        @param      req             A katcp request object
-
-        @param      product_id      This is a name for the data product, used to track which subarray is being addressed.
-                                    For example "array_1_bc856M4k".
-
-        @param      nbeams          The number of beams that will be produced for the provided product_id
-
-        @param      antennas_csv    A comma separated list of physical antenna names. Only these antennas will be used
-                                    when generating coherent beams (e.g. m007,m008,m009). The antennas provided here must
-                                    be a subset of the antennas in the current subarray. If not an exception will be
-                                    raised.
-
-        @param      fscrunch        The number of frequency channels to integrate over when producing coherent beams.
-
-        @param      tscrunch        The number of time samples to integrate over when producing coherent beams.
-
-        @return     katcp reply object [[[ !configure-coherent-beams ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            product.configure_coherent_beams(nbeams, antennas_csv, fscrunch, tscrunch)
-        except Exception as error:
-            return ("fail", str(error))
-        else:
-            return ("ok",)
-
-    @request(Str(), Str(), Int(), Int())
-    @return_reply()
-    def request_configure_incoherent_beam(self, req, product_id, antennas_csv, fscrunch, tscrunch):
-        """
-        @brief      Request that FBFUSE sets the parameters for the incoherent beam
-
-        @note       The particular configuration passed at this stage will only be evaluated on a call to start-beams.
-                    If the requested configuration is not possible due to hardware and bandwidth limits, an error will
-                    be raised on the start-beams call.
-
-        @note       Currently FBFUSE is only set to produce one incoherent beam per instantiation. This may change in future.
-
-        @param      req             A katcp request object
-
-        @param      product_id      This is a name for the data product, used to track which subarray is being addressed.
-                                    For example "array_1_bc856M4k".
-
-        @param      antennas_csv    A comma separated list of physical antenna names. Only these antennas will be used
-                                    when generating the incoherent beam (e.g. m007,m008,m009). The antennas provided here must
-                                    be a subset of the antennas in the current subarray. If not an exception will be
-                                    raised.
-
-        @param      fscrunch        The number of frequency channels to integrate over when producing the incoherent beam.
-
-        @param      tscrunch        The number of time samples to integrate over when producing the incoherent beam.
-
-        @return     katcp reply object [[[ !configure-incoherent-beam ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            product.configure_incoherent_beam(antennas_csv, fscrunch, tscrunch)
-        except Exception as error:
-            return ("fail", str(error))
-        else:
-            return ("ok",)
-
-    @request(Str())
-    @return_reply()
-    def request_capture_start(self, req, product_id):
-        """
-        @brief      Request that FBFUSE start beams streaming
-
-        @detail     Upon this call the provided coherent and incoherent beam configurations will be evaluated
-                    to determine whether they are physically realisable with the existing hardware. If the configurations
-                    are acceptable then servers allocated to this instance will be triggered to begin production of beams.
-
-        @param      req               A katcp request object
-
-        @param      product_id        This is a name for the data product, used to identify the subarray to which the request applies.
-                                      For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !capture-start ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        @coroutine
-        def start():
-            try:
-                product.start_capture()
-            except Exception as error:
-                req.reply("fail", str(error))
-            else:
-                req.reply("ok",)
-        self.ioloop.add_callback(start)
-        raise AsyncReply
-
-    @request(Str())
-    @return_reply()
-    def request_provision_beams(self, req, product_id):
-        """
-        @brief      Request that FBFUSE asynchronously prepare to start beams streaming
-
-        @detail     Upon this call the provided coherent and incoherent beam configurations will be evaluated
-                    to determine whether they are physically realisable with the existing hardware. If the configurations
-                    are acceptable then servers allocated to this instance will be triggered to prepare for the production of beams.
-                    Unlike a call to ?capture-start, ?provision-beams will not trigger a connection to multicast groups and will not
-                    wait for completion before returning; instead it will start the process of beamformer resource allocation and compilation.
-                    To determine when the process is complete, the user must wait on the value of the product "state" sensor becoming "ready",
-                    e.g.
-
-                    @code
-                        client.sensor['{}-state'.format(proxy_name)].wait(
-                            lambda reading: reading.value == 'ready')
-                    @endcode
-
-        @param      req               A katcp request object
-
-        @param      product_id        This is a name for the data product, used to identify the subarray to which the request applies.
-                                      For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !provision-beams ok | (fail [error description]) ]]]
-        """
-        # Note: the state of the product won't be updated until the start call hits the top of the
-        # event loop. It may be preferable to keep a self.starting_future object and yield on it
-        # in capture-start if it exists. The current implementation may or may not be a bug...
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        # This check needs to happen here as this call
-        # should return immediately
-        if not product.idle:
-            return ("fail", "Can only provision beams on an idle FBF product")
-        self.ioloop.add_callback(product.prepare)
-        return ("ok",)
-
-    @request(Str())
-    @return_reply()
-    def request_capture_stop(self, req, product_id):
-        """
-        @brief      Stop FBFUSE streaming
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !capture-stop ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        @coroutine
-        def stop():
-            product.stop_beams()
-            req.reply("ok",)
-        self.ioloop.add_callback(stop)
-        raise AsyncReply
-
-    @request(Str(), Str(), Int())
-    @return_reply()
-    def request_set_configuration_authority(self, req, product_id, hostname, port):
-        """
-        @brief     Set the configuration authority for an FBF product
-
-        @detail    The parameters passed here specify the address of a server that
-                   can be triggered to provide FBFUSE with configuration information
-                   at schedule block and target boundaries. The configuration authority
-                   must be a valid KATCP server.
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        product.set_configuration_authority(hostname, port)
-        return ("ok",)
-
-    @request(Str())
-    @return_reply()
-    def request_reset_beams(self, req, product_id):
-        """
-        @brief      Reset the positions of all allocated beams
-
-        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
-                    allocated to the instance.
-
-        @param      req             A katcp request object
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @return     katcp reply object [[[ !reset-beams ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        else:
-            product.reset_beams()
-            return ("ok",)
-
-    @request(Str(), Str())
-    @return_reply(Str())
-    def request_add_beam(self, req, product_id, target):
-        """
-        @brief      Configure the parameters of one beam
-
-        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
-                    allocated to the instance. If all beams are currently allocated an exception will be raised.
-
-        @param      req             A katcp request object
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @param      target          A KATPOINT target string
-
-        @return     katcp reply object [[[ !add-beam ok | (fail [error description]) ]]]
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            target = Target(target)
-        except Exception as error:
-            return ("fail", str(error))
-        beam = product.add_beam(target)
-        return ("ok", beam.idx)
-
-    @request(Str(), Str(), Int(), Float(), Float(), Float())
-    @return_reply(Str())
-    def request_add_tiling(self, req, product_id, target, nbeams, reference_frequency, overlap, epoch):
-        """
-        @brief      Configure the parameters of a static beam tiling
-
-        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
-                    allocated to the instance. If there are not enough free beams to satisfy the request an
-                    exception will be raised.
-
-        @note       Beam shapes calculated for tiling are always assumed to be 2D elliptical Gaussians.
-
-        @param      req             A katcp request object
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @param      target          A KATPOINT target string
-
-        @param      nbeams          The number of beams in this tiling pattern.
-
-        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
-                                            and thus the tiling pattern. Typically this would be chosen to be the
-                                            centre frequency of the current observation.
-
-        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
-                                    at what power point neighbouring beams in the tiling pattern will meet. For
-                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
-                                    10%-power points. Similarly an overlap of 0.5 corresponds to beams overlapping
-                                    at their half-power points. [Note: This is currently a tricky parameter to use
-                                    when values are close to zero. In future this may be defined in sigma units or
-                                    in multiples of the FWHM of the beam.]
-
-        @param      epoch           The desired epoch for the tiling pattern as a unix time. A typical usage would
-                                    be to set the epoch to halfway into the coming observation in order to minimise
-                                    the effect of parallactic angle and array projection changes altering the shape
-                                    and position of the beams and thus changing the efficiency of the tiling pattern.
-
-
-        @return     katcp reply object [[[ !add-tiling ok | (fail [error description]) ]]]
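-
-        @code
-            # Illustrative client-side usage; `client` and the target string
-            # are assumptions. Tile 36 beams around the Vela pulsar at the
-            # half-power overlap point, with the tiling epoch set to now
-            client.req.add_tiling(
-                'array_1_bc856M4k',
-                'PSR J0835-4510, radec, 08:35:20.6, -45:10:34.9',
-                36, 1.4e9, 0.5, time.time())
-        @endcode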
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            target = Target(target)
-        except Exception as error:
-            return ("fail", str(error))
-        tiling = product.add_tiling(target, nbeams, reference_frequency, overlap, epoch)
-        return ("ok", tiling.idxs())
-
-    @request()
-    @return_reply(Int())
-    def request_product_list(self, req):
-        """
-        @brief      List all currently registered products and their states
-
-        @param      req               A katcp request object
-
-        @note       The details of each product are provided via an #inform
-                    as a JSON string containing information on the product state.
-
-        @return     katcp reply object [[[ !product-list ok | (fail [error description]) <number of configured products> ]]]
-        """
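-        # Each inform carries one product's info() dictionary as JSON,
-        # e.g. (values illustrative):
-        #   {"array_1_bc856M4k": {"antennas": "m007,m008,m009",
-        #    "nservers": 64, "capturing": false, "streams": {...},
-        #    "nchannels": 4096, "proxy_name": "FBFUSE_1"}}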
-        for product_id, product in self._products.items():
-            info = {}
-            info[product_id] = product.info()
-            as_json = json.dumps(info)
-            req.inform(as_json)
-        return ("ok", len(self._products))
-
-    @request(Str(), Str())
-    @return_reply()
-    def request_set_default_target_configuration(self, req, product_id, target):
-        """
-        @brief      Set a default target configuration for the given FBF product
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @param      target          A KATPOINT target string
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        try:
-            target = Target(target)
-        except Exception as error:
-            return ("fail", str(error))
-        if not product.capturing:
-            return ("fail", "Product must be capturing before a target configuration can be set.")
-        product.reset_beams()
-        # TBD: Here we connect to some database and request the default configurations
-        # For example this may return a secondary target in the FoV
-        #
-        # As a default the current system will put one beam directly on target and
-        # the rest of the beams in a static tiling pattern around this target
-        now = time.time()
-        nbeams = product._beam_manager.nbeams
-        product.add_tiling(target, nbeams-1, 1.4e9, 0.5, now)
-        product.add_beam(target)
-        return ("ok",)
-
-    @request(Str(), Str())
-    @return_reply()
-    def request_set_default_sb_configuration(self, req, product_id, sb_id):
-        """
-        @brief      Set a default schedule block configuration for the given FBF product
-
-        @param      product_id      This is a name for the data product, used to identify the subarray to which the request applies.
-                                    For example "array_1_bc856M4k".
-
-        @param      sb_id           The schedule block ID. Decisions about the configuration of FBFUSE will be made
-                                    based on the configuration of the current subarray, the active primary and secondary
-                                    science projects, and the targets expected to be visited during the schedule block.
-        """
-        try:
-            product = self._get_product(product_id)
-        except ProductLookupError as error:
-            return ("fail", str(error))
-        if product.capturing:
-            return ("fail", "Cannot reconfigure a currently capturing instance.")
-        product.configure_coherent_beams(400, product._antennas, 1, 16)
-        product.configure_incoherent_beam(product._antennas, 1, 16)
-        # Beam and tiling placement is deferred to the target configuration
-        # (see request_set_default_target_configuration) as no target is
-        # available at schedule block boundaries
-        return ("ok",)
-
-####################
-# Classes representing the different
-# beams and tilings that can be provided
-# by FBFUSE
-####################
-
-DEFAULT_KATPOINT_TARGET = "unset, radec, 0, 0"
-
-class Beam(object):
-    """Wrapper class for a single beam to be produced
-    by FBFUSE"""
-    def __init__(self, idx, target=DEFAULT_KATPOINT_TARGET):
-        """
-        @brief   Create a new Beam object
-
-        @param      idx             A unique identifier for this beam.
-
-        @param      target          A KATPOINT target object
-        """
-        self.idx = idx
-        self._target = target
-        self._observers = set()
-
-    @property
-    def target(self):
-        return self._target
-
-    @target.setter
-    def target(self, new_target):
-        self._target = new_target
-        self.notify()
-
-    def notify(self):
-        """
-        @brief  Notify all observers of a change to the beam parameters
-        """
-        for observer in self._observers:
-            observer(self)
-
-    def register_observer(self, func):
-        """
-        @brief   Register an observer to be called on a notify
-
-        @param  func  Any function that takes a Beam object as its only argument
-        """
-        self._observers.add(func)
-
-    def deregister_observer(self, func):
-        """
-        @brief   Deregister an observer to be called on a notify
-
-        @param  func  Any function that takes a Beam object as its only argument
-        """
-        self._observers.remove(func)
-
-    def reset(self):
-        """
-        @brief   Reset the beam to default parameters
-        """
-        self.target = Target(DEFAULT_KATPOINT_TARGET)
-
-    def __repr__(self):
-        return "{}, {}".format(
-            self.idx, self.target.format_katcp())
-
-
-class Tiling(object):
-    """Wrapper class for a collection of beams in a tiling pattern
-    """
-    def __init__(self, target, reference_frequency, overlap):
-        """
-        @brief   Create a new tiling object
-
-        @param      target          A KATPOINT target object
-
-        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
-                                            and thus the tiling pattern. Typically this would be chosen to be the
-                                            centre frequency of the current observation.
-
-        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
-                                    at what power point neighbouring beams in the tiling pattern will meet. For
-                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
-                                    10%-power points. Similarly an overlap of 0.5 corresponds to beams overlapping
-                                    at their half-power points. [Note: This is currently a tricky parameter to use
-                                    when values are close to zero. In future this may be defined in sigma units or
-                                    in multiples of the FWHM of the beam.]
-        """
-        self._beams = []
-        self.target = target
-        self.reference_frequency = reference_frequency
-        self.overlap = overlap
-        self.tiling = None
-
-    @property
-    def nbeams(self):
-        return len(self._beams)
-
-    def add_beam(self, beam):
-        """
-        @brief   Add a beam to the tiling pattern
-
-        @param   beam   A Beam object
-        """
-        self._beams.append(beam)
-
-    def generate(self, antennas, epoch):
-        """
-        @brief   Calculate and update RA and Dec positions of all
-                 beams in the tiling object.
-
-        @param      antennas  The antennas to use when calculating the beam shape.
-                              Note these are the antennas in katpoint CSV format.
-
-        @param      epoch     The epoch of tiling (unix time)
-        """
-        psfsim = mosaic.PsfSim(antennas, self.reference_frequency)
-        beam_shape = psfsim.get_beam_shape(self.target, epoch)
-        tiling = mosaic.generate_nbeams_tiling(beam_shape, self.nbeams, self.overlap)
-        for ii in range(tiling.beam_num):
-            ra, dec = tiling.coordinates[ii]
-            self._beams[ii].target = Target('{},radec,{},{}'.format(self.target.name, ra, dec))
-        log.warning("Current mosaic implementation returns incorrect tiling positions")
-
-    def __repr__(self):
-        return ", ".join([repr(beam) for beam in self._beams])
-
-    def idxs(self):
-        return ",".join([beam.idx for beam in self._beams])
-
-
-
-class BeamManager(object):
-    """Manager class for allocation, deallocation and tracking of
-    individual beams and static tilings.
-    """
-    def __init__(self, nbeams, antennas):
-        """
-        @brief  Create a new beam manager object
-
-        @param  nbeams    The number of beams managed by this object
-
-        @param  antennas  A list of antennas to use for tilings. Note these should
-                          be in KATPOINT CSV format.
-        """
-        self._nbeams = nbeams
-        self._antennas = antennas
-        self._beams = [Beam("cfbf%05d"%(i)) for i in range(self._nbeams)]
-        self._free_beams = [beam for beam in self._beams]
-        self._allocated_beams = []
-
-        self.reset()
-
-    @property
-    def nbeams(self):
-        return self._nbeams
-
-    @property
-    def antennas(self):
-        return self._antennas
-
-    def reset(self):
-        """
-        @brief  reset and deallocate all beams and tilings managed by this instance
-
-        @note   All tilings will be lost on this call and must be remade for subsequent observations
-        """
-        for beam in self._beams:
-            beam.reset()
-        self._free_beams = [beam for beam in self._beams]
-        self._allocated_beams = []
-        self._tilings = []
-        self._dynamic_tilings = []
-
-    def __add_beam(self, target):
-        if not self._free_beams:
-            raise Exception("No free beams available")
-        beam = self._free_beams.pop(0)
-        beam.target = target
-        self._allocated_beams.append(beam)
-        return beam
-
-    def add_beam(self, target):
-        """
-        @brief   Specify the parameters of one managed beam
-
-        @param      target          A KATPOINT target object
-
-        @return     Returns the allocated Beam object
-        """
-        beam = self.__add_beam(target)
-        return beam
-
-    def __make_tiling(self, nbeams, tiling_type, *args):
-        if len(self._free_beams) < nbeams:
-            raise Exception("More beams requested than are available.")
-        tiling = tiling_type(*args)
-        for _ in range(nbeams):
-            beam = self._free_beams.pop(0)
-            tiling.add_beam(beam)
-            self._allocated_beams.append(beam)
-        return tiling
-
-    def add_tiling(self, target, nbeams, reference_frequency, overlap):
-        """
-        @brief   Add a tiling to be managed
-
-        @param      target          A KATPOINT target object
-
-        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
-                                            and thus the tiling pattern. Typically this would be chosen to be the
-                                            centre frequency of the current observation.
-
-        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
-                                    at what power point neighbouring beams in the tiling pattern will meet. For
-                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
-                                    10%-power points. Similarly an overlap of 0.5 corresponds to beams overlapping
-                                    at their half-power points. [Note: This is currently a tricky parameter to use
-                                    when values are close to zero. In future this may be defined in sigma units or
-                                    in multiples of the FWHM of the beam.]
-
-        @returns    The created Tiling object
-        """
-        if len(self._free_beams) < nbeams:
-            raise Exception("More beams requested than are available.")
-        tiling = Tiling(target, reference_frequency, overlap)
-        for _ in range(nbeams):
-            beam = self._free_beams.pop(0)
-            tiling.add_beam(beam)
-            self._allocated_beams.append(beam)
-        self._tilings.append(tiling)
-        return tiling
-
-    def get_beams(self):
-        """
-        @brief  Return all managed beams
-        """
-        return self._allocated_beams + self._free_beams
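-
-    # Illustrative usage of this class (targets are placeholders):
-    #   manager = BeamManager(4, antennas)
-    #   beam = manager.add_beam(Target('psr1, radec, 08:35, -45:10'))
-    #   tiling = manager.add_tiling(Target('psr1, radec, 08:35, -45:10'),
-    #                               3, 1.4e9, 0.5)
-    #   manager.reset()  # deallocates the beam and discards the tiling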
-
-
-####################
-# Classes for wrapping an individual
-# configuration of FBFUSE (i.e. an FBF product)
-####################
-
-class DelayEngine(AsyncDeviceServer):
-    """A server for maintaining delay models used
-    by FbfWorkerServers.
-    """
-    VERSION_INFO = ("delay-engine-api", 0, 1)
-    BUILD_INFO = ("delay-engine-implementation", 0, 1, "rc1")
-    DEVICE_STATUSES = ["ok", "degraded", "fail"]
-
-    def __init__(self, ip, port, beam_manager):
-        """
-        @brief  Create a new DelayEngine instance
-
-        @param   ip   The interface that the DelayEngine should serve on
-
-        @param   port The port that the DelayEngine should serve on
-
-        @param   beam_manager  A BeamManager instance that will be used to create delays
-        """
-        self._beam_manager = beam_manager
-        super(DelayEngine, self).__init__(ip,port)
-
-    def setup_sensors(self):
-        """
-        @brief    Set up monitoring sensors.
-
-        @note     The key sensor here is the delay sensor which is stored in JSON format
-
-                  @code
-                  {
-                  'antennas':['m007','m008','m009'],
-                  'beams':['cfbf00001','cfbf00002'],
-                  'model': [[[0,2],[0,5],[2,3]],[[4,4],[8,8],[8,8]]]
-                  }
-                  @endcode
-
-                  Here the delay model is stored as a 3-dimensional array
-                  with dimensions of beam, antenna, model (delay, rate) from
-                  outer to inner dimension.
-        """
-        self._update_rate_sensor = Sensor.float(
-            "update-rate",
-            description="The delay update rate",
-            default=2.0,
-            initial_status=Sensor.NOMINAL)
-        self.add_sensor(self._update_rate_sensor)
-
-        self._nbeams_sensor = Sensor.integer(
-            "nbeams",
-            description="Number of beams that this delay engine handles",
-            default=0,
-            initial_status=Sensor.NOMINAL)
-        self.add_sensor(self._nbeams_sensor)
-
-        self._antennas_sensor = Sensor.string(
-            "antennas",
-            description="JSON breakdown of the antennas (in KATPOINT format) associated with this delay engine",
-            default=json.dumps([a.format_katcp() for a in self._beam_manager.antennas]),
-            initial_status=Sensor.NOMINAL)
-        self.add_sensor(self._antennas_sensor)
-
-        self._delays_sensor = Sensor.string(
-            "delays",
-            description="JSON object containing delays for each beam for each antenna at the current epoch",
-            default="",
-            initial_status=Sensor.UNKNOWN)
-        self.update_delays()
-        self.add_sensor(self._delays_sensor)
-
-    def update_delays(self):
-        reference_antenna = Antenna("reference,{ref.lat},{ref.lon},{ref.elev}".format(
-            ref=self._beam_manager.antennas[0].ref_observer))
-        targets = [beam.target for beam in self._beam_manager.get_beams()]
-        delay_calc = mosaic.DelayPolynomial(self._beam_manager.antennas, targets, reference_antenna)
-        poly = delay_calc.get_delay_polynomials(time.time(), duration=self._update_rate_sensor.value()*2)
-        #poly has format: beam, antenna, (delay, rate)
-        output = {}
-        output["beams"] = [beam.idx for beam in self._beam_manager.get_beams()]
-        output["antennas"] = [ant.name for ant in self._beam_manager.antennas]
-        output["model"] = poly.tolist()
-        self._delays_sensor.set_value(json.dumps(output))
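-        # A client can recover the (delay, rate) model for one beam and
-        # antenna with, e.g. (illustrative):
-        #   model = np.array(json.loads(sensor_value)["model"])
-        #   delay, rate = model[beam_index][antenna_index]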
-
-    def start(self):
-        super(DelayEngine, self).start()
-
-
-    @request(Float())
-    @return_reply()
-    def request_set_update_rate(self, req, rate):
-        """
-        @brief    Set the update rate for delay calculations
-
-        @param    rate  The update rate for recalculation of delay polynomials
-        """
-        self._update_rate_sensor.set_value(rate)
-        # This should make a change to the beam manager object
-
-        self.update_delays()
-        return ("ok",)
-
-
-class FbfProductController(object):
-    """
-    Wrapper class for an FBFUSE product.
-    """
-    STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping"]
-    IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING = STATES
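-
-    # Typical state flow, as driven by prepare(), start_capture() and
-    # stop_beams() below:
-    #   idle -> preparing -> ready -> starting -> capturing -> stopping -> idle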
-
-    def __init__(self, parent, product_id, katpoint_antennas, n_channels, streams, proxy_name, servers):
-        """
-        @brief      Construct new instance
-
-        @param      parent            The parent FbfMasterController instance
-
-        @param      product_id        The name of the product
-
-        @param      katpoint_antennas A list of katpoint.Antenna objects
-
-        @param      n_channels        The integer number of frequency channels provided by the CBF.
-
-        @param      streams           A dictionary containing config keys and values describing the streams.
-
-        @param      proxy_name        The name of the proxy associated with this subarray (used as a sensor prefix)
-
-        @param      servers           A list of FbfWorkerServer instances allocated to this product controller
-        """
-        log.debug("Creating new FbfProductController with args: {}".format(
-            ", ".join([str(i) for i in (parent, product_id, katpoint_antennas, n_channels,
-                streams, proxy_name, servers)])))
-        self._parent = parent
-        self._product_id = product_id
-        self._antennas = ",".join([a.name for a in katpoint_antennas])
-        self._katpoint_antennas = katpoint_antennas
-        self._antenna_map = {a.name: a for a in self._katpoint_antennas}
-        self._n_channels = n_channels
-        self._streams = streams
-        self._proxy_name = proxy_name
-        self._servers = servers
-        self._beam_manager = None
-        self._delay_engine = None
-        self._coherent_beam_ip_range = None
-        self._ca_client = None
-        self._managed_sensors = []
-        self.setup_sensors()
-
-    def __del__(self):
-        self.teardown_sensors()
-
-    def info(self):
-        """
-        @brief    Return a metadata dictionary describing this product controller
-        """
-        out = {
-            "antennas":self._antennas,
-            "nservers":len(self.servers),
-            "capturing":self.capturing,
-            "streams":self._streams,
-            "nchannels":self._n_channels,
-            "proxy_name":self._proxy_name
-        }
-        return out
-
-    def add_sensor(self, sensor):
-        """
-        @brief    Add a sensor to the parent object
-
-        @note     This method is used to wrap calls to the add_sensor method
-                  on the parent FbfMasterController instance. In order to
-                  disambiguate between sensors describing different products,
-                  the product ID is used as a sensor prefix. For example
-                  the "servers" sensor will be seen by clients connected to the
-                  FbfMasterController server as "<product_id>.servers" (e.g.
-                  "array_1_bc856M4k.servers").
-        """
-        prefix = "{}.".format(self._product_id)
-        if sensor.name.startswith(prefix):
-            self._parent.add_sensor(sensor)
-        else:
-            sensor.name = "{}{}".format(prefix,sensor.name)
-            self._parent.add_sensor(sensor)
-        self._managed_sensors.append(sensor)
-
-    def setup_sensors(self):
-        """
-        @brief    Setup the default KATCP sensors.
-
-        @note     As this call is made only upon an FBFUSE configure call a mass inform
-                  is required to let connected clients know that the proxy interface has
-                  changed.
-        """
-        self._state_sensor = Sensor.discrete(
-            "state",
-            description = "Denotes the state of this FBF instance",
-            params = self.STATES,
-            default = self.IDLE,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._state_sensor)
-
-        self._ca_address_sensor = Sensor.string(
-            "configuration-authority",
-            description = "The address of the server that will be deferred to for configurations",
-            default = "",
-            initial_status = Sensor.UNKNOWN)
-        self.add_sensor(self._ca_address_sensor)
-
-        self._available_antennas_sensor = Sensor.string(
-            "available-antennas",
-            description = "The antennas that are currently available for beamforming",
-            default = self._antennas,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._available_antennas_sensor)
-
-        self._cbc_nbeams_sensor = Sensor.integer(
-            "coherent-beam-count",
-            description = "The number of coherent beams that this FBF instance can currently produce",
-            default = 400,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._cbc_nbeams_sensor)
-
-        self._cbc_tscrunch_sensor = Sensor.integer(
-            "coherent-beam-tscrunch",
-            description = "The number of time samples that will be integrated when producing coherent beams",
-            default = 16,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._cbc_tscrunch_sensor)
-
-        self._cbc_fscrunch_sensor = Sensor.integer(
-            "coherent-beam-fscrunch",
-            description = "The number of frequency channels that will be integrated when producing coherent beams",
-            default = 1,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._cbc_fscrunch_sensor)
-
-        self._cbc_antennas_sensor = Sensor.string(
-            "coherent-beam-antennas",
-            description = "The antennas that will be used when producing coherent beams",
-            default = self._antennas,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._cbc_antennas_sensor)
-
-        self._ibc_nbeams_sensor = Sensor.integer(
-            "incoherent-beam-count",
-            description = "The number of incoherent beams that this FBF instance can currently produce",
-            default = 1,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._ibc_nbeams_sensor)
-
-        self._ibc_tscrunch_sensor = Sensor.integer(
-            "incoherent-beam-tscrunch",
-            description = "The number of time samples that will be integrated when producing incoherent beams",
-            default = 16,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._ibc_tscrunch_sensor)
-
-        self._ibc_fscrunch_sensor = Sensor.integer(
-            "incoherent-beam-fscrunch",
-            description = "The number of frequency channels that will be integrated when producing incoherent beams",
-            default = 1,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._ibc_fscrunch_sensor)
-
-        self._ibc_antennas_sensor = Sensor.string(
-            "incoherent-beam-antennas",
-            description = "The antennas that will be used when producing incoherent beams",
-            default = self._antennas,
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._ibc_antennas_sensor)
-
-        self._servers_sensor = Sensor.string(
-            "servers",
-            description = "The server instances currently allocated to this product",
-            default = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers]),
-            initial_status = Sensor.NOMINAL)
-        self.add_sensor(self._servers_sensor)
-
-        self._delay_engine_sensor = Sensor.string(
-            "delay-engine",
-            description = "The address of the delay engine serving this product",
-            default = "",
-            initial_status = Sensor.UNKNOWN)
-        self.add_sensor(self._delay_engine_sensor)
-        self._parent.mass_inform(Message.inform('interface-changed'))
-
-    def teardown_sensors(self):
-        """
-        @brief    Remove all sensors created by this product from the parent server.
-
-        @note     This method is required for cleanup to stop the FBF sensor pool
-                  becoming swamped with unused sensors.
-        """
-        for sensor in self._managed_sensors:
-            self._parent.remove_sensor(sensor)
-        self._parent.mass_inform(Message.inform('interface-changed'))
-
-    @property
-    def servers(self):
-        return self._servers
-
-    @property
-    def capturing(self):
-        return self.state == self.CAPTURING
-
-    @property
-    def idle(self):
-        return self.state == self.IDLE
-
-    @property
-    def starting(self):
-        return self.state == self.STARTING
-
-    @property
-    def stopping(self):
-        return self.state == self.STOPPING
-
-    @property
-    def ready(self):
-        return self.state == self.READY
-
-    @property
-    def preparing(self):
-        return self.state == self.PREPARING
-
-    @property
-    def state(self):
-        return self._state_sensor.value()
-
-    def _verify_antennas(self, antennas):
-        """
-        @brief      Verify that a set of antennas is available to this instance.
-
-        @param      antennas   An iterable of antenna names (as produced by parse_csv_antennas)
-        """
-        antennas_set = set([ant.name for ant in self._katpoint_antennas])
-        requested_antennas = set(antennas)
-        return requested_antennas.issubset(antennas_set)
-
-    def set_configuration_authority(self, hostname, port):
-        if self._ca_client:
-            self._ca_client.stop()
-        self._ca_client = KATCPClientResource(dict(
-            name = 'configuration-authority-client',
-            address = (hostname, port),
-            controlled = True))
-        self._ca_client.start()
-        self._ca_address_sensor.set_value("{}:{}".format(hostname, port))
-
-    @coroutine
-    def get_ca_sb_configuration(self, sb_id):
-        yield self._ca_client.until_synced()
-        try:
-            response = yield self._ca_client.req.get_schedule_block_configuration(self._proxy_name, sb_id)
-        except Exception as error:
-            log.error("Request for SB configuration to CA failed with error: {}".format(str(error)))
-            raise error
-        config_dict = json.loads(response.reply.arguments[1])
-        sensor_map = {
-            ('coherent-beams','nbeams') : self._cbc_nbeams_sensor,
-            ('coherent-beams','antennas') : self._cbc_antennas_sensor,
-            ('coherent-beams','tscrunch') : self._cbc_tscrunch_sensor,
-            ('coherent-beams','fscrunch') : self._cbc_fscrunch_sensor,
-            ('incoherent-beam','antennas') : self._ibc_antennas_sensor,
-            ('incoherent-beam','tscrunch') : self._ibc_tscrunch_sensor,
-            ('incoherent-beam','fscrunch') : self._ibc_fscrunch_sensor,
-            }
-        for key, subconfig in config_dict.items():
-            for subkey, value in subconfig.items():
-                sensor = sensor_map[(key, subkey)]
-                log.info("CA set sensor {} to {}".format(sensor.name, value))
-                sensor.set_value(value)
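-
-    # An illustrative schedule block configuration reply from the CA, with
-    # the keys consumed by sensor_map above:
-    #   {"coherent-beams": {"nbeams": 400, "antennas": "m007,m008,m009",
-    #                       "fscrunch": 1, "tscrunch": 16},
-    #    "incoherent-beam": {"antennas": "m007,m008,m009",
-    #                        "fscrunch": 1, "tscrunch": 16}}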
-
-    def _sanitize_sb_configuration(self, tscrunch, fscrunch, desired_nbeams, beam_granularity=None):
-
-        # What are the key constraints:
-        # 1. The data rate per multicast group
-        # 2. The aggregate data rate out of the instrument (not as important)
-        # 3. The processing limitations (has to be determined empirically)
-        # 4. The number of multicast groups available
-        # 5. The possible numbers of beams per multicast group (such that TUSE can receive N per node)
-        # 6. Need to use at least 16 multicast groups
-        # 7. Should have even flow across multicast groups, so same number of beams in each
-        # 8. Multicast groups should be contiguous
-
-
-        # Constants for data rates and bandwidths
-        # these are hardcoded here for the moment but ultimately
-        # they should be moved to a higher level or even dynamically
-        # specified
-        MAX_RATE_PER_MCAST = 6.8e9 # bits/s
-        MAX_RATE_PER_SERVER = 4.375e9 # bits/s, equivalent to 280 Gb/s over 64 (virtual) nodes
-        BANDWIDTH = 856e6 # Hz
-
-        # Calculate the data rate for each beam assuming 8-bit packing and
-        # no metadata overheads
-        data_rate_per_beam = BANDWIDTH / tscrunch / fscrunch * 8 # bits/s
-        log.debug("Data rate per coherent beam: {} Gb/s".format(data_rate_per_beam/1e9))
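-        # For example, with tscrunch=16 and fscrunch=1:
-        #   856e6 / 16 / 1 * 8 = ~0.43 Gb/s per beam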
-
-        # Calculate the maximum number of beams that will fit in one multicast
-        # group. Each multicast group must be receivable on a 10 GbE
-        # connection so the max rate must be < 8 Gb/s
-        max_beams_per_mcast = MAX_RATE_PER_MCAST // data_rate_per_beam
-        log.debug("Maximum number of beams per multicast group: {}".format(int(max_beams_per_mcast)))
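-        # Continuing the example above: 6.8e9 // 0.428e9 = 15 beams per group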
-
-        if max_beams_per_mcast == 0:
-            raise Exception("Data rate per beam is greater than the data rate per multicast group")
-
-        # Instruments such as TUSE require a fixed number of beams per node. For their
-        # case we assume that they will only acquire one multicast group per node and as such
-        # the minimum number of beams per multicast group should be whatever TUSE requires.
-        # Multicast groups can contain more beams than this but only in integer multiples of
-        # the minimum
-        if beam_granularity:
-            if max_beams_per_mcast < beam_granularity:
-                log.warning("Cannot fit {} beams into one multicast group, updating number of beams per multicast group to {}".format(
-                    beam_granularity, max_beams_per_mcast))
-                while np.modf(beam_granularity/max_beams_per_mcast)[0] != 0.0:
-                    max_beams_per_mcast -= 1
-                beam_granularity = max_beams_per_mcast
-            beams_per_mcast = beam_granularity * (max_beams_per_mcast // beam_granularity)
-            log.debug("Number of beams per multicast group, accounting for granularity: {}".format(int(beams_per_mcast)))
-        else:
-            beams_per_mcast = max_beams_per_mcast
-
-        # Calculate the total number of beams that could be produced assuming the only
-        # rate limit was the per-multicast-group limit
-        max_beams = self.n_mcast_groups * beams_per_mcast
-        log.debug("Maximum possible beams (assuming only the multicast group rate limit): {}".format(max_beams))
-
-        if desired_nbeams > max_beams:
-            log.warning("Requested number of beams is greater than theoretical maximum, "
-                "setting the number of beams to {}".format(max_beams))
-            desired_nbeams = max_beams
-
-        # Calculate the total number of multicast groups that are required to satisfy
-        # the requested number of beams
-        num_mcast_groups_required = round(desired_nbeams / beams_per_mcast + 0.5)
-        log.debug("Number of multicast groups required for {} beams: {}".format(desired_nbeams, num_mcast_groups_required))
-        actual_nbeams = num_mcast_groups_required * beams_per_mcast
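-        # round(x + 0.5) rounds the group count up: e.g. 400 desired beams at
-        # 15 beams per group gives 27 groups and 27 * 15 = 405 beams, before
-        # the per-server rate limit below is applied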
-        nmcast_groups = num_mcast_groups_required
-
-        # Now we need to check the server rate limits
-        if (actual_nbeams * data_rate_per_beam)/self.n_servers > MAX_RATE_PER_SERVER:
-            log.warning("Number of beams limited by output data rate per server")
-            actual_nbeams = MAX_RATE_PER_SERVER*self.n_servers // data_rate_per_beam
-        log.info("Number of beams that can be generated: {}".format(actual_nbeams))
-        return actual_nbeams
-
-    @coroutine
-    def get_ca_target_configuration(self, target):
-        def ca_target_update_callback(received_timestamp, timestamp, status, value):
-            # TODO, should we really reset all the beams or should we have
-            # a mechanism to only update changed beams
-            config_dict = json.loads(value)
-            self.reset_beams()
-            for target_string in config_dict.get('beams',[]):
-                target = Target(target_string)
-                self.add_beam(target)
-            for tiling in config_dict.get('tilings',[]):
-                target  = Target(tiling['target']) #required
-                freq    = float(tiling.get('reference_frequency', 1.4e9))
-                nbeams  = int(tiling['nbeams'])
-                overlap = float(tiling.get('overlap', 0.5))
-                epoch   = float(tiling.get('epoch', time.time()))
-                self.add_tiling(target, nbeams, freq, overlap, epoch)
-        yield self._ca_client.until_synced()
-        try:
-            response = yield self._ca_client.req.target_configuration_start(self._proxy_name, target.format_katcp())
-        except Exception as error:
-            log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
-            raise error
-        if not response.reply.reply_ok():
-            error = Exception(response.reply.arguments[1])
-            log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
-            raise error
-        yield self._ca_client.until_synced()
-        sensor = self._ca_client.sensor["{}_beam_position_configuration".format(self._proxy_name)]
-        sensor.register_listener(ca_target_update_callback)
-        self._ca_client.set_sampling_strategy(sensor.name, "event")
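-
-    # An illustrative beam-position configuration payload, with the keys
-    # consumed by ca_target_update_callback above:
-    #   {"beams": ["psr1, radec, 08:35:20.6, -45:10:34.9"],
-    #    "tilings": [{"target": "psr1, radec, 08:35:20.6, -45:10:34.9",
-    #                 "nbeams": 36, "reference_frequency": 1.4e9,
-    #                 "overlap": 0.5, "epoch": 1514764800.0}]}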
-
-    def configure_coherent_beams(self, nbeams, antennas, fscrunch, tscrunch):
-        """
-        @brief      Set the configuration for coherent beams produced by this instance
-
-        @param      nbeams          The number of beams that will be produced for the provided product_id
-
-        @param      antennas        A comma separated list of physical antenna names. Only these antennas will be used
-                                    when generating coherent beams (e.g. m007,m008,m009). The antennas provided here must
-                                    be a subset of the antennas in the current subarray. If not an exception will be
-                                    raised.
-
-        @param      fscrunch        The number of frequency channels to integrate over when producing coherent beams.
-
-        @param      tscrunch        The number of time samples to integrate over when producing coherent beams.
-        """
-        if not self.idle:
-            raise FbfStateError([self.IDLE], self.state)
-        if not self._verify_antennas(parse_csv_antennas(antennas)):
-            raise AntennaValidationError("Requested antennas are not a subset of the current subarray")
-        self._cbc_nbeams_sensor.set_value(nbeams)
-        self._cbc_fscrunch_sensor.set_value(fscrunch)
-        self._cbc_tscrunch_sensor.set_value(tscrunch)
-        self._cbc_antennas_sensor.set_value(antennas)
-
-    def configure_incoherent_beam(self, antennas, fscrunch, tscrunch):
-        """
-        @brief      Set the configuration for incoherent beams produced by this instance
-
-        @param      antennas        A comma separated list of physical antenna names. Only these antennas will be used
-                                    when generating incoherent beams (e.g. m007,m008,m009). The antennas provided here must
-                                    be a subset of the antennas in the current subarray. If not an exception will be
-                                    raised.
-
-        @param      fscrunch        The number of frequency channels to integrate over when producing incoherent beams.
-
-        @param      tscrunch        The number of time samples to integrate over when producing incoherent beams.
-        """
-        if not self.idle:
-            raise FbfStateError([self.IDLE], self.state)
-        if not self._verify_antennas(parse_csv_antennas(antennas)):
-            raise AntennaValidationError("Requested antennas are not a subset of the current subarray")
-        self._ibc_fscrunch_sensor.set_value(fscrunch)
-        self._ibc_tscrunch_sensor.set_value(tscrunch)
-        self._ibc_antennas_sensor.set_value(antennas)
-
-    def _beam_to_sensor_string(self, beam):
-        return beam.target.format_katcp()
-
-    @coroutine
-    def target_start(self, target):
-        if self._ca_client:
-            yield self.get_ca_target_configuration(target)
-        else:
-            log.warning("No configuration authority is set, using default beam configuration")
-
-    @coroutine
-    def target_stop(self):
-        if self._ca_client:
-            sensor_name = "{}_beam_position_configuration".format(self._proxy_name)
-            self._ca_client.set_sampling_strategy(sensor_name, "none")
-
-    @coroutine
-    def prepare(self):
-        """
-        @brief      Prepare the beamformer for streaming
-
-        @detail     This method evaluates the current configuration, creates a new DelayEngine
-                    and passes a prepare call to all allocated servers.
-        """
-        if not self.idle:
-            raise FbfStateError([self.IDLE], self.state)
-        self._state_sensor.set_value(self.PREPARING)
-
-        # Here we need to parse the streams and assign beams to streams:
-        #mcast_addrs, mcast_port = parse_stream(self._streams['cbf.antenna_channelised_voltage']['i0.antenna-channelised-voltage'])
-
-        if not self._ca_client:
-            log.warning("No configuration authority found, using default configuration parameters")
-        else:
-            #TODO: get the schedule block ID into this call from somewhere (configure?)
-            yield self.get_ca_sb_configuration("default_subarray")
-
-
-        cbc_antennas_names = parse_csv_antennas(self._cbc_antennas_sensor.value())
-        cbc_antennas = [self._antenna_map[name] for name in cbc_antennas_names]
-        self._beam_manager = BeamManager(self._cbc_nbeams_sensor.value(), cbc_antennas)
-        self._delay_engine = DelayEngine("127.0.0.1", 0, self._beam_manager)
-        self._delay_engine.start()
-
-        for server in self._servers:
-            # each server will take 4 consecutive multicast groups
-            pass
-
-        # set up delay engine
-        # compile kernels
-        # start streaming
-        self._delay_engine_sensor.set_value(self._delay_engine.bind_address)
-
-
-        # Beam sensors from any previous configuration should be torn down here
-        self._beam_sensors = []
-        for beam in self._beam_manager.get_beams():
-            sensor = Sensor.string(
-                "coherent-beam-{}".format(beam.idx),
-                description="R.A. (deg), declination (deg) and source name for coherent beam with ID {}".format(beam.idx),
-                default=self._beam_to_sensor_string(beam),
-                initial_status=Sensor.UNKNOWN)
-            beam.register_observer(lambda beam, sensor=sensor:
-                sensor.set_value(self._beam_to_sensor_string(beam)))
-            self._beam_sensors.append(sensor)
-            self.add_sensor(sensor)
-        self._state_sensor.set_value(self.READY)
-
-        # Only make this call if the number of beams has changed
-        self._parent.mass_inform(Message.inform('interface-changed'))
-
-    def start_capture(self):
-        if not self.ready:
-            raise FbfStateError([self.READY], self.state)
-        self._state_sensor.set_value(self.STARTING)
-        """
-        futures = []
-        for server in self._servers:
-            futures.append(server.req.start_capture())
-        for future in futures:
-            try:
-                response = yield future
-            except:
-                pass
-        """
-        self._state_sensor.set_value(self.CAPTURING)
-
-    def stop_beams(self):
-        """
-        @brief      Stops the beamformer servers streaming.
-        """
-        if not self.capturing:
-            return
-        self._state_sensor.set_value(self.STOPPING)
-        for server in self._servers:
-            #yield server.req.deconfigure()
-            pass
-        self._state_sensor.set_value(self.IDLE)
-
-    def add_beam(self, target):
-        """
-        @brief      Specify the parameters of one managed beam
-
-        @param      target      A KATPOINT target object
-
-        @return     Returns the allocated Beam object
-        """
-        valid_states = [self.READY, self.CAPTURING, self.STARTING]
-        if self.state not in valid_states:
-            raise FbfStateError(valid_states, self.state)
-        return self._beam_manager.add_beam(target)
-
-    def add_tiling(self, target, number_of_beams, reference_frequency, overlap, epoch):
-        """
-        @brief   Add a tiling to be managed
-
-        @param      target      A KATPOINT target object
-
-        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
-                                            and thus the tiling pattern. Typically this would be chosen to be the
-                                            centre frequency of the current observation.
-
-        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
-                                    at what power point neighbouring beams in the tiling pattern will meet. For
-                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
-                                    10%-power points. Similarly an overlap of 0.5 corresponds to beams overlapping
-                                    at their half-power points. [Note: This is currently a tricky parameter to use
-                                    when values are close to zero. In future this may be defined in sigma units or
-                                    in multiples of the FWHM of the beam.]
-
-        @returns    The created Tiling object
-        """
-        valid_states = [self.READY, self.CAPTURING, self.STARTING]
-        if self.state not in valid_states:
-            raise FbfStateError(valid_states, self.state)
-        tiling = self._beam_manager.add_tiling(target, number_of_beams, reference_frequency, overlap)
-        tiling.generate(self._katpoint_antennas, epoch)
-        return tiling
-
-    def reset_beams(self):
-        """
-        @brief  reset and deallocate all beams and tilings managed by this instance
-
-        @note   All tilings will be lost on this call and must be remade for subsequent observations
-        """
-        valid_states = [self.READY, self.CAPTURING, self.STARTING]
-        if self.state not in valid_states:
-            raise FbfStateError(valid_states, self.state)
-        self._beam_manager.reset()
-
-
-@coroutine
-def on_shutdown(ioloop, server):
-    log.info("Shutting down server")
-    yield server.stop()
-    ioloop.stop()
-
-def main():
-    usage = "usage: %prog [options]"
-    parser = OptionParser(usage=usage)
-    parser.add_option('-H', '--host', dest='host', type=str,
-        help='Host interface to bind to')
-    parser.add_option('-p', '--port', dest='port', type=long,
-        help='Port number to bind to')
-    parser.add_option('', '--log_level', dest='log_level', type=str,
-        help='Logging level', default="INFO")
-    parser.add_option('', '--dummy', action="store_true", dest='dummy',
-        help='Run the master controller in dummy mode')
-    (opts, args) = parser.parse_args()
-    FORMAT = "[ %(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s"
-    logger = logging.getLogger('mpikat')
-    logging.basicConfig(format=FORMAT)
-    logger.setLevel(opts.log_level.upper())
-    logging.getLogger('katcp').setLevel('INFO')
-    ioloop = tornado.ioloop.IOLoop.current()
-    log.info("Starting FbfMasterController instance")
-    server = FbfMasterController(opts.host, opts.port, dummy=opts.dummy)
-    signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
-        on_shutdown, ioloop, server))
-    def start_and_display():
-        server.start()
-        log.info("Listening at {0}, Ctrl-C to terminate server".format(server.bind_address))
-
-    ioloop.add_callback(start_and_display)
-    ioloop.start()
-
-if __name__ == "__main__":
-    main()
-
diff --git a/mpikat/fbfuse_beam_manager.py b/mpikat/fbfuse_beam_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6dfb89f70711a4c2274df5195262858c6d5f619
--- /dev/null
+++ b/mpikat/fbfuse_beam_manager.py
@@ -0,0 +1,246 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import logging
+import mosaic
+from katpoint import Target
+
+log = logging.getLogger("mpikat.fbfuse_beam_manager")
+
+DEFAULT_KATPOINT_TARGET = "unset, radec, 0, 0"
+
+class BeamAllocationError(Exception):
+    pass
+
+class Beam(object):
+    """Wrapper class for a single beam to be produced
+    by FBFUSE"""
+    def __init__(self, idx, target=Target(DEFAULT_KATPOINT_TARGET)):
+        """
+        @brief   Create a new Beam object
+
+        @param      idx             A unique identifier for this beam.
+
+        @param      target          A KATPOINT target object
+        """
+        self.idx = idx
+        self._target = target
+        self._observers = set()
+
+    @property
+    def target(self):
+        return self._target
+
+    @target.setter
+    def target(self, new_target):
+        self._target = new_target
+        self.notify()
+
+    def notify(self):
+        """
+        @brief  Notify all observers of a change to the beam parameters
+        """
+        for observer in self._observers:
+            observer(self)
+
+    def register_observer(self, func):
+        """
+        @brief   Register an observer to be called on a notify
+
+        @param   func  Any function that takes a Beam object as its only argument
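+
+        @note    A minimal usage sketch; the target string below is illustrative
+                 only:
+
+                 @code
+                 beam = Beam("cfbf00000")
+                 beam.register_observer(lambda b: log.debug("Updated: %s", repr(b)))
+                 beam.target = Target("J0437-4715, radec, 04:37:15.8, -47:15:09")
+                 @endcode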
+        """
+        self._observers.add(func)
+
+    def deregister_observer(self, func):
+        """
+        @brief   Deregister an observer to be called on a notify
+
+        @param   func  Any function that takes a Beam object as its only argument
+        """
+        self._observers.remove(func)
+
+    def reset(self):
+        """
+        @brief   Reset the beam to default parameters
+        """
+        self.target = Target(DEFAULT_KATPOINT_TARGET)
+
+    def __repr__(self):
+        return "{}, {}".format(
+            self.idx, self.target.format_katcp())
+
+
+class Tiling(object):
+    """Wrapper class for a collection of beams in a tiling pattern
+    """
+    def __init__(self, target, reference_frequency, overlap):
+        """
+        @brief   Create a new tiling object
+
+        @param      target          A KATPOINT target object
+
+        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
+                                            and thus the tiling pattern. Typically this would be chosen to be the
+                                            centre frequency of the current observation.
+
+        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
+                                    at what power point neighbouring beams in the tiling pattern will meet. For
+                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
+                                    10%-power points. Similarly, an overlap of 0.5 corresponds to beams overlapping
+                                    at their half-power points. [Note: This is currently a tricky parameter to use
+                                    when values are close to zero. In future this may be defined in sigma units or
+                                    in multiples of the FWHM of the beam.]
+        """
+        self._beams = []
+        self.target = target
+        self.reference_frequency = reference_frequency
+        self.overlap = overlap
+        self.tiling = None
+
+    @property
+    def nbeams(self):
+        return len(self._beams)
+
+    def add_beam(self, beam):
+        """
+        @brief   Add a beam to the tiling pattern
+
+        @param   beam   A Beam object
+        """
+        self._beams.append(beam)
+
+    def generate(self, antennas, epoch):
+        """
+        @brief   Calculate and update RA and Dec positions of all
+                 beams in the tiling object.
+
+        @param      epoch     The epoch of tiling (unix time)
+
+        @param      antennas  The antennas to use when calculating the beam shape.
+                              Note these are the antennas in katpoint CSV format.
+        """
+        psfsim = mosaic.PsfSim(antennas, self.reference_frequency)
+        beam_shape = psfsim.get_beam_shape(self.target, epoch)
+        tiling = mosaic.generate_nbeams_tiling(beam_shape, self.nbeams, self.overlap)
+        for ii in range(tiling.beam_num):
+            ra, dec = tiling.coordinates[ii]
+            self._beams[ii].target = Target('{},radec,{},{}'.format(self.target.name, ra, dec))
+
+    def __repr__(self):
+        return ", ".join([repr(beam) for beam in self._beams])
+
+    def idxs(self):
+        return ",".join([beam.idx for beam in self._beams])
+
+
+class BeamManager(object):
+    """Manager class for allocation, deallocation and tracking of
+    individual beams and static tilings.
+    """
+    def __init__(self, nbeams, antennas):
+        """
+        @brief  Create a new beam manager object
+
+        @param  nbeams    The number of beams managed by this object
+
+        @param  antennas  A list of antennas to use for tilings. Note these should
+                          be in KATPOINT CSV format.
+        """
+        self._nbeams = nbeams
+        self._antennas = antennas
+        self._beams = [Beam("cfbf%05d"%(i)) for i in range(self._nbeams)]
+        self._free_beams = [beam for beam in self._beams]
+        self._allocated_beams = []
+        self.reset()
+
+    @property
+    def nbeams(self):
+        return self._nbeams
+
+    @property
+    def antennas(self):
+        return self._antennas
+
+    def reset(self):
+        """
+        @brief  Reset and deallocate all beams and tilings managed by this instance
+
+        @note   All tilings will be lost on this call and must be remade for subsequent observations
+        """
+        for beam in self._beams:
+            beam.reset()
+        self._free_beams = [beam for beam in self._beams]
+        self._allocated_beams = []
+        self._tilings = []
+        self._dynamic_tilings = []
+
+    def add_beam(self, target):
+        """
+        @brief   Specify the parameters of one managed beam
+
+        @param      target          A KATPOINT target object
+
+        @return     Returns the allocated Beam object
+        """
+        try:
+            beam = self._free_beams.pop(0)
+        except IndexError:
+            raise BeamAllocationError("No free beams remaining")
+        beam.target = target
+        self._allocated_beams.append(beam)
+        return beam
+
+    def add_tiling(self, target, nbeams, reference_frequency, overlap):
+        """
+        @brief   Add a tiling to be managed
+
+        @param      target          A KATPOINT target object
+
+        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
+                                            and thus the tiling pattern. Typically this would be chosen to be the
+                                            centre frequency of the current observation.
+
+        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
+                                    at what power point neighbouring beams in the tiling pattern will meet. For
+                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
+                                    10%-power points. Similarly, an overlap of 0.5 corresponds to beams overlapping
+                                    at their half-power points. [Note: This is currently a tricky parameter to use
+                                    when values are close to zero. In future this may be defined in sigma units or
+                                    in multiples of the FWHM of the beam.]
+
+        @returns    The created Tiling object
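+
+        @note       A hedged usage sketch; the target string, beam count, reference
+                    frequency and antenna list are illustrative values only:
+
+                    @code
+                    manager = BeamManager(32, katpoint_antennas)
+                    tiling = manager.add_tiling(
+                        Target("J0437-4715, radec, 04:37:15.8, -47:15:09"),
+                        nbeams=7, reference_frequency=1.4e9, overlap=0.5)
+                    tiling.generate(katpoint_antennas, time.time())
+                    @endcode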
+        """
+        if len(self._free_beams) < nbeams:
+            raise BeamAllocationError("Requested more beams than are available.")
+        tiling = Tiling(target, reference_frequency, overlap)
+        for _ in range(nbeams):
+            beam = self._free_beams.pop(0)
+            tiling.add_beam(beam)
+            self._allocated_beams.append(beam)
+        self._tilings.append(tiling)
+        return tiling
+
+    def get_beams(self):
+        """
+        @brief  Return all managed beams
+        """
+        return self._allocated_beams + self._free_beams
+
diff --git a/mpikat/fbfuse_ca_server.py b/mpikat/fbfuse_ca_server.py
index 8fdfff0410b16d7f59039064fba63f8db7692cab..1e32906192518e94b0451fa4c57439b2b1a4ba49 100644
--- a/mpikat/fbfuse_ca_server.py
+++ b/mpikat/fbfuse_ca_server.py
@@ -1,3 +1,24 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
 import logging
 import json
 import tornado
@@ -9,8 +30,6 @@ from katcp.kattypes import request, return_reply, Str
 from katportalclient import KATPortalClient
 from katpoint import Antenna, Target
 
-FORMAT = "[ %(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s"
-logging.basicConfig(format=FORMAT)
 log = logging.getLogger("mpikat.fbfuse_ca_server")
 
 class BaseFbfConfigurationAuthority(AsyncDeviceServer):
@@ -117,35 +136,16 @@ class DefaultConfigurationAuthority(BaseFbfConfigurationAuthority):
 
     @tornado.gen.coroutine
     def get_sb_config(self, proxy_id, sb_id):
-        config = {u'coherent-beams':
-                    {u'fscrunch': 16,
-                     u'nbeams': 400,
-                     u'tscrunch': 16},
-                  u'incoherent-beam':
-                    {u'fscrunch': 16,
-                     u'tscrunch': 1}}
-        raise Return(config)
-
-
-class DefaultConfigurationAuthority(BaseFbfConfigurationAuthority):
-    def __init__(self, host, port):
-        super(DefaultConfigurationAuthority, self).__init__(host, port)
-
-    @tornado.gen.coroutine
-    def get_target_config(self, proxy_id, target):
-        self.targets[proxy_id] = target
-        # Return just a boresight beam
-        raise Return({"beams":[target],})
-
-    @tornado.gen.coroutine
-    def get_sb_config(self, proxy_id, sb_id):
-        config = {u'coherent-beams':
-                    {u'fscrunch': 16,
-                     u'nbeams': 400,
-                     u'tscrunch': 16},
-                  u'incoherent-beam':
-                    {u'fscrunch': 16,
-                     u'tscrunch': 1}}
+        config = {
+            u'coherent-beams-nbeams':100,
+            u'coherent-beams-tscrunch':22,
+            u'coherent-beams-fscrunch':2,
+            u'coherent-beams-antennas':'m007',
+            u'coherent-beams-granularity':6,
+            u'incoherent-beam-tscrunch':16,
+            u'incoherent-beam-fscrunch':1,
+            u'incoherent-beam-antennas':'m008'
+            }
         raise Return(config)
 
 
diff --git a/mpikat/fbfuse_mcast_config.py b/mpikat/fbfuse_config.py
similarity index 60%
rename from mpikat/fbfuse_mcast_config.py
rename to mpikat/fbfuse_config.py
index e217115602fabe1ccda3068610aa0008066c9e64..dd280f3bea4a23ba7472c01f9ea866830ff14a24 100644
--- a/mpikat/fbfuse_mcast_config.py
+++ b/mpikat/fbfuse_config.py
@@ -30,18 +30,24 @@ MAX_OUTPUT_RATE = 300.0e9 # bits/s -- This is an artifical limit based on the or
 MAX_OUTPUT_RATE_PER_WORKER = 30.0e9 # bits/s
 MAX_OUTPUT_RATE_PER_MCAST_GROUP = 7.0e9 # bits/s
 MIN_MCAST_GROUPS = 16
+MIN_NBEAMS = 16
+MIN_ANTENNAS = 4
 
 class FbfConfigurationError(Exception):
     pass
 
 class FbfConfigurationManager(object):
-    def __init__(self, total_nantennas, total_bandwidth, total_nchans, worker_pool, ip_pool):
+    def __init__(self, total_nantennas, total_bandwidth, total_nchans, nworkers, nips):
+        if total_nchans != 4096:
+            raise NotImplementedError("Currently only the 4k channel mode is supported")
         self.total_nantennas = total_nantennas
         self.total_bandwidth = total_bandwidth
         self.total_nchans = total_nchans
-        self.worker_pool = worker_pool
-        self.ip_pool = ip_pool
+        self.nworkers = nworkers
+        self.nips = nips
         self.effective_nantennas = next_power_of_two(self.total_nantennas)
+        if self.effective_nantennas < MIN_ANTENNAS:
+            self.effective_nantennas = MIN_ANTENNAS
         self.nchans_per_worker = self.total_nchans / self.effective_nantennas
 
     def _get_minimum_required_workers(self, nchans):
@@ -51,25 +57,35 @@ class FbfConfigurationManager(object):
         if nchans < self.nchans_per_worker:
             nchans = self.nchans_per_worker
         elif nchans > self.total_nchans:
-            raise FbfConfigurationError("Requested more channels than are available")
+            nchans = self.total_nchans
         else:
             nchans = self.nchans_per_worker * int(ceil(nchans / float(self.nchans_per_worker)))
         return nchans
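+    # Worked example (illustrative numbers): with total_nchans=4096 and 8
+    # effective antennas, nchans_per_worker is 512; a request for 1000 channels
+    # is rounded up to 1024 (= 2 * 512), i.e. two workers' worth of channels.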
 
     def _max_nbeam_per_worker_by_performance(self, tscrunch, fscrunch, nantennas):
-        return 1000
+        #TODO replace with look up table
+        scrunch = tscrunch * fscrunch
+        if (scrunch) < 8:
+            scale = 1.0/scrunch
+        else:
+            scale = 1.0
+        nbeams = int(700*(self.nchans_per_worker/float(nantennas)) * scale)
+        nbeams -= nbeams%32
+        return nbeams
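+    # Worked example for the heuristic above (illustrative numbers): with
+    # nchans_per_worker=512, nantennas=64 and tscrunch*fscrunch >= 8 (scale=1.0),
+    # nbeams = int(700 * 512 / 64.0) = 5600, already a multiple of 32.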
 
     def _max_nbeam_per_worker_by_data_rate(self, rate_per_beam):
         scale = self.total_nchans/self.nchans_per_worker
         return int(MAX_OUTPUT_RATE_PER_WORKER / rate_per_beam) * scale
 
-    def _valid_nbeams_per_group(self, max_nbeams_per_group, granularity):
+    def _valid_nbeams_per_group(self, max_nbeams_per_group, granularity, nworker_sets=1):
         valid = []
         for ii in range(1, max_nbeams_per_group+1):
-            if (ii%granularity == 0) or (granularity%ii == 0):
+            if ((ii%granularity == 0) or (granularity%ii == 0)) and (ii%nworker_sets == 0):
                 valid.append(ii)
         if not valid:
-            raise Exception("No valid beam counts for the selected granularity")
+            raise Exception("No valid beam counts for the selected granularity "
+                "(max per group: {}, granularity: {}, nworker_sets: {})".format(
+                    max_nbeams_per_group, granularity, nworker_sets))
         else:
             return valid
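+    # Worked example: granularity=6, max_nbeams_per_group=8 and nworker_sets=1
+    # yields [1, 2, 3, 6] -- each valid count either divides 6 or is a
+    # multiple of 6.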
 
@@ -77,35 +93,48 @@ class FbfConfigurationManager(object):
         max_beams_per_mcast = int(MAX_OUTPUT_RATE_PER_MCAST_GROUP / rate_per_beam)
         valid_nbeam_per_mcast = self._valid_nbeams_per_group(max_beams_per_mcast, granularity)
         max_beams_per_mcast = max(valid_nbeam_per_mcast)
-        return self.ip_pool.largest_free_range() * max_beams_per_mcast
+        max_nbeams = self.nips * max_beams_per_mcast
+        return max_nbeams
 
-    def get_configuration(self, tscrunch, fscrunch, requested_nbeams, nantennas=None, nchans=None, granularity=1):
-        log.debug("Generating FBFUSE configuration")
+    def get_configuration(self, tscrunch, fscrunch, requested_nbeams, nantennas=None, bandwidth=None, granularity=1):
+        log.info("Generating FBFUSE configuration")
 
-        # Sanitise the user inputs with nchans and nantennas
+        # Sanitise the user inputs with bandwidth and nantennas
         # defaulting to the full subarray and complete band
-        if not nantennas:
+        if granularity <= 0:
+            raise FbfConfigurationError("granularity must have a positive value")
+
+        if tscrunch <= 0:
+            raise FbfConfigurationError("tscrunch must have a positive value")
+
+        if fscrunch <= 0:
+            raise FbfConfigurationError("fscrunch must have a positive value")
+
+        if not bandwidth or (bandwidth > self.total_bandwidth):
+            bandwidth = self.total_bandwidth
+            nchans = self.total_nchans
+        else:
+            nchans = self._sanitise_user_nchans(int(bandwidth/self.total_bandwidth * self.total_nchans))
+            bandwidth = self.total_bandwidth * nchans/float(self.total_nchans)
+        if not nantennas or nantennas > self.total_nantennas:
             nantennas = self.total_nantennas
-        if not nchans:
+        if not nchans or nchans > self.total_nchans:
             nchans = self.total_nchans
         else:
             nchans = self._sanitise_user_nchans(nchans)
-        log.debug("Requested number of antennas {}".format(nantennas))
-        log.debug("Requested number of channels sanitised to {}".format(nchans))
-
-        # Calculate the bandwidth, data rate per beam and total output rate
-        # of the instrument.
-        bandwidth = self.total_bandwidth * (nchans/float(self.total_nchans))
-        log.debug("Corresponding bandwidth {} MHz".format(bandwidth/1e6))
+        requested_nbeams = max(MIN_NBEAMS, requested_nbeams)
+        log.info("Sanitised number of antennas {}".format(nantennas))
+        log.info("Sanitised bandwidth to {} MHz".format(bandwidth/1e6))
+        log.info("Corresponing number of channels sanitised to {}".format(nchans))
         rate_per_beam = bandwidth / tscrunch / fscrunch * FBF_BEAM_NBITS # bits/s
-        log.debug("Data rate per beam: {} Gb/s".format(rate_per_beam/1e9))
+        log.info("Data rate per beam: {} Gb/s".format(rate_per_beam/1e9))
         total_output_rate = rate_per_beam * requested_nbeams
-        log.debug("Total data rate across all requested beams: {} Gb/s".format(
+        log.info("Total data rate across all requested beams: {} Gb/s".format(
             total_output_rate/1e9))
 
         if total_output_rate > MAX_OUTPUT_RATE:
             nbeams_after_total_rate_limit = int(MAX_OUTPUT_RATE/rate_per_beam)
-            log.warning("The requested configuration exceeds the maximum FBFUSE "
+            log.info("The requested configuration exceeds the maximum FBFUSE "
                 "output rate, limiting nbeams to {}".format(nbeams_after_total_rate_limit))
         else:
             nbeams_after_total_rate_limit = requested_nbeams
@@ -115,73 +144,83 @@ class FbfConfigurationManager(object):
                 " the data rate per multicast group")
 
         min_num_workers = self._get_minimum_required_workers(nchans)
-        log.debug("Minimum number of workers required to support "
+        log.info("Minimum number of workers required to support "
             "the input data rate: {}".format(min_num_workers))
 
-        num_workers_available = self.worker_pool.navailable()
+        num_workers_available = self.nworkers
         if min_num_workers > num_workers_available:
             raise FbfConfigurationError("Requested configuration requires at minimum {} "
                 "workers, but only {} available".format(
                 min_num_workers, num_workers_available))
         num_worker_sets_available = num_workers_available // min_num_workers
-        log.debug("Number of available worker sets: {}".format(num_worker_sets_available))
+        log.info("Number of available worker sets: {}".format(num_worker_sets_available))
 
         a = self._max_nbeam_per_worker_by_performance(tscrunch, fscrunch, nantennas)
-        log.debug("Maximum possible nbeams per worker by performance limit: {}".format(a))
+        log.info("Maximum possible nbeams per worker by performance limit: {}".format(a))
         b = self._max_nbeam_per_worker_by_data_rate(rate_per_beam)
-        log.debug("Maximum possible nbeams per worker by data rate limit: {}".format(b))
+        log.info("Maximum possible nbeams per worker by data rate limit: {}".format(b))
         max_nbeams_per_worker_set = min(a, b)
-        log.debug("Maximum nbeams per worker: {}".format(max_nbeams_per_worker_set))
+        log.info("Maximum nbeams per worker: {}".format(max_nbeams_per_worker_set))
         max_nbeams_over_all_worker_sets = max_nbeams_per_worker_set * num_worker_sets_available
 
         mcast_beam_limit = self._max_nbeam_by_mcast_and_granularity(rate_per_beam, granularity)
-        log.debug("Maximum number of beams from multicast groups and granularity: {}".format(
+        log.info("Maximum number of beams from multicast groups and granularity: {}".format(
             mcast_beam_limit))
+        if mcast_beam_limit < MIN_NBEAMS:
+            raise FbfConfigurationError("Multicast beam limit ({}) less than minimum generatable beams ({})".format(
+                mcast_beam_limit, MIN_NBEAMS))
 
         nbeams_after_mcast_limit = min(nbeams_after_total_rate_limit, mcast_beam_limit, max_nbeams_over_all_worker_sets)
-        log.debug("Maximum number of beams after accounting for multicast limit: {}".format(
+        log.info("Maximum number of beams after accounting for multicast limit: {}".format(
             nbeams_after_mcast_limit))
 
         num_required_worker_sets = int(ceil(nbeams_after_mcast_limit / float(max_nbeams_per_worker_set)))
-        log.debug("Number of required worker sets: {}".format(
+        log.info("Number of required worker sets: {}".format(
             num_required_worker_sets))
 
         num_worker_sets_to_be_used = min(num_required_worker_sets, num_required_worker_sets, num_worker_sets_available)
-        log.debug("Number of worker sets to be used: {}".format(
+        log.info("Number of worker sets to be used: {}".format(
             num_worker_sets_to_be_used))
 
         num_beams_per_worker_set = nbeams_after_mcast_limit//num_worker_sets_to_be_used
-        log.debug("Updated number of beams per worker set: {}".format(
+        log.info("Updated number of beams per worker set: {}".format(
             num_beams_per_worker_set))
 
         nbeams_after_mcast_limit = num_worker_sets_to_be_used * num_beams_per_worker_set
-        log.debug("Updated total output number of beams to: {}".format(
+        log.info("Updated total output number of beams to: {}".format(
             nbeams_after_mcast_limit))
 
         max_nbeams_per_group = int(MAX_OUTPUT_RATE_PER_MCAST_GROUP / rate_per_beam)
-        valid_nbeam_per_mcast = self._valid_nbeams_per_group(max_nbeams_per_group, granularity)
-        log.debug("Valid numbers of beams per multicast group: {}".format(valid_nbeam_per_mcast))
+        valid_nbeam_per_mcast = self._valid_nbeams_per_group(max_nbeams_per_group, granularity,
+            nworker_sets=num_worker_sets_to_be_used)
+        log.info("Valid numbers of beams per multicast group: {}".format(valid_nbeam_per_mcast))
 
         max_valid_nbeam_per_mcast = max(valid_nbeam_per_mcast)
         max_valid_nbeam_per_mcast_idx = len(valid_nbeam_per_mcast)-1
-        log.debug("Max valid numbers of beams per multicast group: {}".format(max_valid_nbeam_per_mcast))
+        log.info("Max valid numbers of beams per multicast group: {}".format(max_valid_nbeam_per_mcast))
 
         num_mcast_required_per_worker_set = int(floor(num_beams_per_worker_set / float(max_valid_nbeam_per_mcast)))
-        log.debug("Number of multicast groups required per worker set: {}".format(num_mcast_required_per_worker_set))
+        log.info("Number of multicast groups required per worker set: {}".format(num_mcast_required_per_worker_set))
 
         num_mcast_required = num_mcast_required_per_worker_set * num_worker_sets_to_be_used
-        log.debug("Total number of multicast groups required: {}".format(num_mcast_required))
+        log.info("Total number of multicast groups required: {}".format(num_mcast_required))
 
         while (num_mcast_required < MIN_MCAST_GROUPS):
             log.debug("Too few multicast groups used, trying fewer beams per group")
             max_valid_nbeam_per_mcast_idx -= 1
             max_valid_nbeam_per_mcast = valid_nbeam_per_mcast[max_valid_nbeam_per_mcast_idx]
             log.debug("Testing {} beams per group".format(max_valid_nbeam_per_mcast))
-            num_mcast_required = int(ceil(nbeams_after_mcast_limit / max_valid_nbeam_per_mcast + 0.5))
+            num_mcast_required = int(ceil(nbeams_after_mcast_limit / float(max_valid_nbeam_per_mcast)))
             log.debug("{} groups required".format(num_mcast_required))
-
         # Final check should be over the number of beams
         final_nbeams = num_mcast_required * max_valid_nbeam_per_mcast
+        if final_nbeams % num_worker_sets_to_be_used != 0:
+            raise Exception("Error during configuration, expected number of "
+                "beams ({}) to be a multiple of number of worker sets ({})".format(
+                    final_nbeams, num_worker_sets_to_be_used))
+        num_beams_per_worker_set = final_nbeams / num_worker_sets_to_be_used
         config = {
             "num_beams":final_nbeams,
             "num_chans":nchans,
@@ -189,7 +228,10 @@ class FbfConfigurationManager(object):
             "num_beams_per_mcast_group":max_valid_nbeam_per_mcast,
             "num_workers_per_set":min_num_workers,
             "num_worker_sets":num_worker_sets_to_be_used,
+            "num_workers_total":num_worker_sets_to_be_used*min_num_workers,
+            "num_beams_per_worker_set": num_beams_per_worker_set
         }
+        log.info("Final coniguration: {}".format(config))
         return config
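+    # Illustrative use of this class (all values are placeholders, not a
+    # recommended configuration):
+    #
+    #     mgr = FbfConfigurationManager(total_nantennas=64,
+    #         total_bandwidth=856e6, total_nchans=4096, nworkers=64, nips=128)
+    #     config = mgr.get_configuration(tscrunch=16, fscrunch=1,
+    #         requested_nbeams=400)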
 
 
diff --git a/mpikat/fbfuse_delay_engine.py b/mpikat/fbfuse_delay_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a57fbd261a0daa09fd84fd9f04ae15cd998234
--- /dev/null
+++ b/mpikat/fbfuse_delay_engine.py
@@ -0,0 +1,129 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+import json
+import time
+import mosaic
+from katcp import Sensor, AsyncDeviceServer
+from katcp.kattypes import request, return_reply, Float
+from katpoint import Antenna
+
+log = logging.getLogger("mpikat.delay_engine")
+
+class DelayEngine(AsyncDeviceServer):
+    """A server for maintining delay models used
+    by FbfWorkerServers.
+    """
+    VERSION_INFO = ("delay-engine-api", 0, 1)
+    BUILD_INFO = ("delay-engine-implementation", 0, 1, "rc1")
+    DEVICE_STATUSES = ["ok", "degraded", "fail"]
+
+    def __init__(self, ip, port, beam_manager):
+        """
+        @brief  Create a new DelayEngine instance
+
+        @param   ip   The interface that the DelayEngine should serve on
+
+        @param   port The port that the DelayEngine should serve on
+
+        @param   beam_manager  A BeamManager instance that will be used to create delays
+        """
+        self._beam_manager = beam_manager
+        super(DelayEngine, self).__init__(ip, port)
+
+    def setup_sensors(self):
+        """
+        @brief    Set up monitoring sensors.
+
+        @note     The key sensor here is the delay sensor which is stored in JSON format
+
+                  @code
+                  {
+                  'antennas':['m007','m008','m009'],
+                  'beams':['cfbf00001','cfbf00002'],
+                  'model': [[[0,2],[0,5]],[[2,3],[4,4]],[[8,8],[8,8]]]
+                  }
+                  @endcode
+
+                  Here the delay model is stored as a 3 dimensional array
+                  with dimensions of beam, antenna, model (delay, rate) from
+                  outer to inner dimension.
+        """
+        self._update_rate_sensor = Sensor.float(
+            "update-rate",
+            description="The delay update rate",
+            default=2.0,
+            initial_status=Sensor.NOMINAL)
+        self.add_sensor(self._update_rate_sensor)
+
+        self._nbeams_sensor = Sensor.integer(
+            "nbeams",
+            description="Number of beams that this delay engine handles",
+            default=0,
+            initial_status=Sensor.NOMINAL)
+        self.add_sensor(self._nbeams_sensor)
+
+        self._antennas_sensor = Sensor.string(
+            "antennas",
+            description="JSON breakdown of the antennas (in KATPOINT format) associated with this delay engine",
+            default=json.dumps([a.format_katcp() for a in self._beam_manager.antennas]),
+            initial_status=Sensor.NOMINAL)
+        self.add_sensor(self._antennas_sensor)
+
+        self._delays_sensor = Sensor.string(
+            "delays",
+            description="JSON object containing delays for each beam for each antenna at the current epoch",
+            default="",
+            initial_status=Sensor.UNKNOWN)
+        self.update_delays()
+        self.add_sensor(self._delays_sensor)
+
+    def update_delays(self):
+        reference_antenna = Antenna("reference,{ref.lat},{ref.lon},{ref.elev}".format(
+            ref=self._beam_manager.antennas[0].ref_observer))
+        targets = [beam.target for beam in self._beam_manager.get_beams()]
+        delay_calc = mosaic.DelayPolynomial(self._beam_manager.antennas, targets, reference_antenna)
+        poly = delay_calc.get_delay_polynomials(time.time(), duration=self._update_rate_sensor.value()*2)
+        # poly has format: beam, antenna, (delay, rate)
+        output = {}
+        output["beams"] = [beam.idx for beam in self._beam_manager.get_beams()]
+        output["antennas"] = [ant.name for ant in self._beam_manager.antennas]
+        output["model"] = poly.tolist()
+        self._delays_sensor.set_value(json.dumps(output))
+
+    def start(self):
+        super(DelayEngine, self).start()
+
+    @request(Float())
+    @return_reply()
+    def request_set_update_rate(self, req, rate):
+        """
+        @brief    Set the update rate for delay calculations
+
+        @param    rate  The update rate for recalculation of delay polynomials
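+
+        @note     Example request (the rate value is illustrative):
+
+                  @code
+                  ?set-update-rate 4.0
+                  @endcode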
+        """
+        self._update_rate_sensor.set_value(rate)
+        # This should make a change to the beam manager object
+
+        self.update_delays()
+        return ("ok",)
\ No newline at end of file
diff --git a/mpikat/fbfuse_master_controller.py b/mpikat/fbfuse_master_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..996954abc62df61d54a9a9a85c48524201c60432
--- /dev/null
+++ b/mpikat/fbfuse_master_controller.py
@@ -0,0 +1,852 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import logging
+import json
+import tornado
+import signal
+import time
+import numpy as np
+import ipaddress
+import mosaic
+from threading import Lock
+from optparse import OptionParser
+from tornado.gen import Return, coroutine
+from katcp import Sensor, Message, AsyncDeviceServer, KATCPClientResource, AsyncReply
+from katcp.kattypes import request, return_reply, Int, Str, Discrete, Float
+from katportalclient import KATPortalClient
+from katpoint import Antenna, Target
+from mpikat.ip_manager import IpRangeManager, ip_range_from_stream
+from mpikat.katportalclient_wrapper import KatportalClientWrapper
+# ServerDeallocationError and AntennaValidationError are raised below; they are
+# assumed to live alongside FbfWorkerPool and parse_csv_antennas respectively.
+from mpikat.fbfuse_worker_wrapper import FbfWorkerPool, ServerDeallocationError
+from mpikat.fbfuse_beam_manager import BeamManager
+from mpikat.fbfuse_product_controller import FbfProductController
+from mpikat.utils import parse_csv_antennas, is_power_of_two, next_power_of_two, AntennaValidationError
+
+# ?halt message means shutdown everything and power off all machines
+
+
+log = logging.getLogger("mpikat.fbfuse_master_controller")
+lock = Lock()
+
+PORTAL = "monctl.devnmk.camlab.kat.ac.za"
+
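+# SPEAD stream addresses follow spead://<base_ip>[+<count>]:<port>; the default
+# below denotes a block of consecutively numbered multicast groups starting at
+# 239.11.1.0, all sharing UDP port 7147.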
+FBF_IP_RANGE = "spead://239.11.1.0+127:7147"
+
+class ProductLookupError(Exception):
+    pass
+
+class ProductExistsError(Exception):
+    pass
+
+class FbfMasterController(AsyncDeviceServer):
+    """This is the main KATCP interface for the FBFUSE
+    multi-beam beamformer on MeerKAT.
+
+    This interface satisfies the following ICDs:
+    CAM-FBFUSE: <link>
+    TUSE-FBFUSE: <link>
+    """
+    VERSION_INFO = ("mpikat-fbf-api", 0, 1)
+    BUILD_INFO = ("mpikat-fbf-implementation", 0, 1, "rc1")
+    DEVICE_STATUSES = ["ok", "degraded", "fail"]
+    def __init__(self, ip, port, dummy=True, ip_range=FBF_IP_RANGE):
+        """
+        @brief       Construct new FbfMasterController instance
+
+        @param   ip       The IP address on which the server should listen
+        @param   port     The port that the server should bind to
+        @param   dummy    Specifies if the instance is running in a dummy mode
+
+        @note   In dummy mode, the controller will act as a mock interface only, sending no requests to nodes.
+                A valid node pool must still be provided to the instance, but this may point to non-existent nodes.
+
+        """
+        self._ip_pool = IpRangeManager(ip_range_from_stream(ip_range))
+        super(FbfMasterController, self).__init__(ip, port)
+        self._products = {}
+        self._dummy = dummy
+        self._katportal_wrapper_type = KatportalClientWrapper
+        self._server_pool = FbfWorkerPool()
+
+    def start(self):
+        """
+        @brief  Start the FbfMasterController server
+        """
+        super(FbfMasterController,self).start()
+
+    def setup_sensors(self):
+        """
+        @brief  Set up monitoring sensors.
+
+                implemented in AsyncDeviceServer and its base classes.
+                implemented in AsynDeviceServer and its base classes.
+
+                device-status:  Reports the health status of the FBFUSE and associated devices:
+                                Among other things report HW failure, SW failure and observation failure.
+
+                local-time-synced:  Indicates whether the local time of FBFUSE servers
+                                    is synchronised to the master time reference (using NTP).
+                                    This sensor is aggregated from all nodes that are part
+                                    of FBF and will return "not sync'd" if any nodes are
+                                    unsynchronised.
+
+                products:   The list of product_ids that FBFUSE is currently handling
+        """
+        self._device_status = Sensor.discrete(
+            "device-status",
+            description="Health status of FBFUSE",
+            params=self.DEVICE_STATUSES,
+            default="ok",
+            initial_status=Sensor.UNKNOWN)
+        self.add_sensor(self._device_status)
+
+        self._local_time_synced = Sensor.boolean(
+            "local-time-synced",
+            description="Indicates FBF is NTP syncronised.",
+            default=True,
+            initial_status=Sensor.UNKNOWN)
+        self.add_sensor(self._local_time_synced)
+
+        self._products_sensor = Sensor.string(
+            "products",
+            description="The names of the currently configured products",
+            default="",
+            initial_status=Sensor.UNKNOWN)
+        self.add_sensor(self._products_sensor)
+
+        self._ip_pool_sensor = Sensor.string(
+            "output-ip-range",
+            description="The multicast address allocation for coherent beams",
+            default=self._ip_pool.format_katcp(),
+            initial_status=Sensor.NOMINAL)
+        self.add_sensor(self._ip_pool_sensor)
+
+    def _update_products_sensor(self):
+        self._products_sensor.set_value(",".join(self._products.keys()))
+
+    def _get_product(self, product_id):
+        if product_id not in self._products:
+            raise ProductLookupError("No product configured with ID: {}".format(product_id))
+        else:
+            return self._products[product_id]
+
+    @request(Str(), Int())
+    @return_reply()
+    def request_register_worker_server(self, req, hostname, port):
+        """
+        @brief   Register an FbfWorker instance
+
+        @param  hostname The hostname for the worker server
+        @param  port     The port number that the worker server serves on
+
+        @detail  Register an FbfWorker instance that can be used for FBFUSE
+                 computation. FBFUSE has no preference for the order in which control
+                 servers are allocated to a subarray. An FbfWorker wraps an atomic
+                 unit of compute comprised of one CPU, one GPU and one NIC (i.e. one NUMA
+                 node on an FBFUSE compute server).
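+
+        @note    Example request (hostname and port are illustrative):
+
+                 @code
+                 ?register-worker-server fbfwn00.mpifr-bonn.mpg.de 5000
+                 @endcode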
+        """
+        self._server_pool.add(hostname, port)
+        return ("ok",)
+
+    @request(Str(), Int())
+    @return_reply()
+    def request_deregister_worker_server(self, req, hostname, port):
+        """
+        @brief   Deregister an FbfWorker instance
+
+        @param  hostname The hostname for the worker server
+        @param  port     The port number that the worker server serves on
+
+        @detail  The graceful way of removing a server from rotation. If the server is
+                 currently actively processing an exception will be raised.
+        """
+        try:
+            self._server_pool.remove(hostname, port)
+        except ServerDeallocationError as error:
+            return ("fail", str(error))
+        else:
+            return ("ok",)
+
+    @request()
+    @return_reply(Int())
+    def request_worker_server_list(self, req):
+        """
+        @brief   List all control servers and provide minimal metadata
+        """
+        for server in self._server_pool.used():
+            req.inform("{} allocated".format(server))
+        for server in self._server_pool.available():
+            req.inform("{} free".format(server))
+        return ("ok", len(self._server_pool.used()) + len(self._server_pool.available()))
+
+
+    @request(Str(), Str(), Int(), Str(), Str())
+    @return_reply()
+    def request_configure(self, req, product_id, antennas_csv, n_channels, streams_json, proxy_name):
+        """
+        @brief      Configure FBFUSE to receive and process data from a subarray
+
+        @detail     REQUEST ?configure product_id antennas_csv n_channels streams_json proxy_name
+                    Configure FBFUSE for the particular data products
+
+        @param      req               A katcp request object
+
+        @param      product_id        This is a name for the data product, which is a useful tag to include
+                                      in the data, but should not be analysed further. For example "array_1_bc856M4k".
+
+        @param      antennas_csv      A comma separated list of physical antenna names used in particular sub-array
+                                      to which the data products belongs (e.g. m007,m008,m009).
+
+        @param      n_channels        The integer number of frequency channels provided by the CBF.
+
+        @param      streams_json      a JSON struct containing config keys and values describing the streams.
+
+                                      For example:
+
+                                      @code
+                                         {'stream_type1': {
+                                             'stream_name1': 'stream_address1',
+                                             'stream_name2': 'stream_address2',
+                                             ...},
+                                             'stream_type2': {
+                                             'stream_name1': 'stream_address1',
+                                             'stream_name2': 'stream_address2',
+                                             ...},
+                                          ...}
+                                      @endcode
+
+                                      The stream type keys indicate the source of the data and the type, e.g. cam.http.
+                                      stream_address will be a URI.  For SPEAD streams, the format will be spead://<ip>[+<count>]:<port>,
+                                      representing SPEAD stream multicast groups. When a single logical stream requires too much bandwidth
+                                      to accommodate as a single multicast group, the count parameter indicates the number of additional
+                                      consecutively numbered multicast group IP addresses, all sharing the same UDP port number.
+                                      stream_name is the name used to identify the stream in CAM.
+                                      A Python example is shown below, for five streams:
+                                      One CAM stream, with type cam.http.  The camdata stream provides the connection string for katportalclient
+                                      (for the subarray that this FBFUSE instance is being configured on).
+                                      One F-engine stream, with type:  cbf.antenna_channelised_voltage.
+                                      One X-engine stream, with type:  cbf.baseline_correlation_products.
+                                      Two beam streams, with type: cbf.tied_array_channelised_voltage.  The stream names ending in x are
+                                      horizontally polarised, and those ending in y are vertically polarised.
+
+                                      @code
+                                         pprint(streams_dict)
+                                         {'cam.http':
+                                             {'camdata':'http://10.8.67.235/api/client/1'},
+                                          'cbf.antenna_channelised_voltage':
+                                             {'i0.antenna-channelised-voltage':'spead://239.2.1.150+15:7148'},
+                                          ...}
+                                      @endcode
+
+                                      If using katportalclient to get information from CAM, then reconnect and re-subscribe to all sensors
+                                      of interest at this time.
+
+        @param      proxy_name        The CAM name for the instance of the FBFUSE data proxy that is being configured.
+                                      For example, "FBFUSE_3".  This can be used to query sensors on the correct proxy,
+                                      in the event that there are multiple instances in the same subarray.
+
+        @note       A configure call will result in the generation of a new subarray instance in FBFUSE that will be added to the clients list.
+
+        @return     katcp reply object [[[ !configure ok | (fail [error description]) ]]]
+        """
+        # Test if product_id already exists
+        if product_id in self._products:
+            return ("fail", "FBF already has a configured product with ID: {}".format(product_id))
+        # Determine number of nodes required based on number of antennas in subarray
+        # Note this is a poor way of handling this that may be updated later. In theory
+        # there is a throughput measure as a function of bandwidth, polarisations and number
+        # of antennas that allows one to determine the number of nodes to run. Currently we
+        # just assume one antenna's worth of data per NIC on our servers, so two antennas per
+        # node.
+        try:
+            antennas = parse_csv_antennas(antennas_csv)
+        except AntennaValidationError as error:
+            return ("fail", str(error))
+
+        valid_n_channels = [1024, 4096, 32768]
+        if n_channels not in valid_n_channels:
+            return ("fail", "The provided number of channels ({}) is not valid. Valid options are {}".format(n_channels, valid_n_channels))
+
+        streams = json.loads(streams_json)
+        try:
+            streams['cam.http']['camdata']
+            # Need to check for endswith('.antenna-channelised-voltage') as the i0 is not
+            # guaranteed to stay the same.
+            # i0 = instrument name
+            # Need to keep this for future sensor lookups
+            streams['cbf.antenna_channelised_voltage']
+        except KeyError as error:
+            return ("fail", "JSON streams object does not contain required key: {}".format(str(error)))
+
+        for key in streams['cbf.antenna_channelised_voltage'].keys():
+            if key.endswith('.antenna-channelised-voltage'):
+                instrument_name, _ = key.split('.')
+                feng_stream_name = key
+                break
+        else:
+            return ("fail", "Could not determine instrument name (e.g. 'i0') from streams")
+
+        # TODO: change this request to @async_reply and make the whole thing a coroutine
+        @coroutine
+        def configure():
+            kpc = self._katportal_wrapper_type(streams['cam.http']['camdata'])
+
+            # Get all antenna observer strings
+            futures, observers = [],[]
+            for antenna in antennas:
+                log.debug("Fetching katpoint string for antenna {}".format(antenna))
+                futures.append(kpc.get_observer_string(antenna))
+            for ii,future in enumerate(futures):
+                try:
+                    observer = yield future
+                except Exception as error:
+                    log.error("Error on katportalclient call: {}".format(str(error)))
+                    req.reply("fail", "Error retrieving katpoint string for antenna {}".format(antennas[ii]))
+                    return
+                else:
+                    log.debug("Fetched katpoint antenna: {}".format(observer))
+                    observers.append(Antenna(observer))
+
+            # Get bandwidth, cfreq, sideband, f-eng mapping
+            bandwidth_future = kpc.get_bandwidth(feng_stream_name)
+            cfreq_future = kpc.get_cfreq(feng_stream_name)
+            sideband_future = kpc.get_sideband(feng_stream_name)
+            feng_antenna_map_future = kpc.get_antenna_feng_id_map(instrument_name, antennas)
+            bandwidth = yield bandwidth_future
+            cfreq = yield cfreq_future
+            sideband = yield sideband_future
+            feng_antenna_map = yield feng_antenna_map_future
+            feng_config = {
+                'bandwidth':bandwidth,
+                'centre-frequency':cfreq,
+                'sideband':sideband,
+                'feng_antenna_map':feng_antenna_map
+            }
+            product = FbfProductController(self, product_id, observers, n_channels, streams, proxy_name, feng_config)
+            self._products[product_id] = product
+            self._update_products_sensor()
+            req.reply("ok",)
+        self.ioloop.add_callback(configure)
+        raise AsyncReply
+
+    @request(Str())
+    @return_reply()
+    def request_deconfigure(self, req, product_id):
+        """
+        @brief      Deconfigure the FBFUSE instance.
+
+        @note       Deconfigure the FBFUSE instance. If FBFUSE uses katportalclient to get information
+                    from CAM, then it should disconnect at this time.
+
+        @param      req               A katcp request object
+
+        @param      product_id        This is a name for the data product, used to track which subarray is being deconfigured.
+                                      For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !deconfigure ok | (fail [error description]) ]]]
+        """
+        # Test if product exists
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            product.stop_beams()
+        except Exception as error:
+            return ("fail", str(error))
+        self._server_pool.deallocate(product.servers)
+        product.teardown_sensors()
+        del self._products[product_id]
+        self._update_products_sensor()
+        return ("ok",)
+
+
+    @request(Str(), Str())
+    @return_reply()
+    @coroutine
+    def request_target_start(self, req, product_id, target):
+        """
+        @brief      Notify FBFUSE that a new target is being observed
+
+        @param      product_id      This is a name for the data product, used to identify the subarray that this request applies to.
+                                    For example "array_1_bc856M4k".
+
+        @param      target          A KATPOINT target string
+
+        @return     katcp reply object [[[ !target-start ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            raise Return(("fail", str(error)))
+        try:
+            target = Target(target)
+        except Exception as error:
+            raise Return(("fail", str(error)))
+        yield product.target_start(target)
+        raise Return(("ok",))
+
+
+    # DELETE this
+
+    @request(Str())
+    @return_reply()
+    @coroutine
+    def request_target_stop(self, req, product_id):
+        """
+        @brief      Notify FBFUSE that the telescope has stopped observing a target
+
+        @param      product_id      This is a name for the data product, used to identify the subarray that this request applies to.
+                                    For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !target-stop ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            raise Return(("fail", str(error)))
+        yield product.target_stop()
+        raise Return(("ok",))
+
+
+    @request(Str(), Int(), Str(), Int(), Int())
+    @return_reply()
+    def request_configure_coherent_beams(self, req, product_id, nbeams, antennas_csv, fscrunch, tscrunch):
+        """
+        @brief      Request that FBFUSE configure parameters for coherent beams
+
+        @note       This call can only be made prior to a call to start-beams for the configured product.
+                    This is due to FBFUSE requiring static information up front in order to compile beamformer
+                    kernels, allocate the correct size memory buffers and subscribe to the correct number of
+                    multicast groups.
+
+        @note       The particular configuration passed at this stage will only be evaluated on a call to start-beams.
+                    If the requested configuration is not possible due to hardware and bandwidth limits, an error will
+                    be raised on the start-beams call.
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray that this request applies to.
+                                    For example "array_1_bc856M4k".
+
+        @param      nbeams          The number of beams that will be produced for the provided product_id
+
+        @param      antennas_csv    A comma separated list of physical antenna names. Only these antennas will be used
+                                    when generating coherent beams (e.g. m007,m008,m009). The antennas provided here must
+                                    be a subset of the antennas in the current subarray. If not an exception will be
+                                    raised.
+
+        @param      fscrunch        The number of frequency channels to integrate over when producing coherent beams.
+
+        @param      tscrunch        The number of time samples to integrate over when producing coherent beams.
+
+        @return     katcp reply object [[[ !configure-coherent-beams ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            product.configure_coherent_beams(nbeams, antennas_csv, fscrunch, tscrunch)
+        except Exception as error:
+            return ("fail", str(error))
+        else:
+            return ("ok",)
+
+    @request(Str(), Str(), Int(), Int())
+    @return_reply()
+    def request_configure_incoherent_beam(self, req, product_id, antennas_csv, fscrunch, tscrunch):
+        """
+        @brief      Request that FBFUSE sets the parameters for the incoherent beam
+
+        @note       The particular configuration passed at this stage will only be evaluated on a call to start-beams.
+                    If the requested configuration is not possible due to hardware and bandwidth limits, an error will
+                    be raised on the start-beams call.
+
+        @note       Currently FBFUSE is only set to produce one incoherent beam per instantiation. This may change in future.
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray that this request applies to.
+                                    For example "array_1_bc856M4k".
+
+        @param      antennas_csv    A comma separated list of physical antenna names. Only these antennas will be used
+                                    when generating the incoherent beam (e.g. m007,m008,m009). The antennas provided here must
+                                    be a subset of the antennas in the current subarray. If not an exception will be
+                                    raised.
+
+        @param      fscrunch        The number of frequency channels to integrate over when producing the incoherent beam.
+
+        @param      tscrunch        The number of time samples to integrate over when producing the incoherent beam.
+
+        @return     katcp reply object [[[ !configure-incoherent-beam ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            product.configure_incoherent_beam(antennas_csv, fscrunch, tscrunch)
+        except Exception as error:
+            return ("fail", str(error))
+        else:
+            return ("ok",)
+
+    @request(Str())
+    @return_reply()
+    def request_capture_start(self, req, product_id):
+        """
+        @brief      Request that FBFUSE start beams streaming
+
+        @detail     Upon this call the provided coherent and incoherent beam configurations will be evaluated
+                    to determine if they are physical and can be met with the existing hardware. If the configurations
+                    are acceptable then servers allocated to this instance will be triggered to begin production of beams.
+
+        @param      req               A katcp request object
+
+        @param      product_id        This is a name for the data product, used to identify the subarray to which this
+                                      request applies. For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !capture-start ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        @coroutine
+        def start():
+            try:
+                product.start_capture()
+            except Exception as error:
+                req.reply("fail", str(error))
+            else:
+                req.reply("ok",)
+        self.ioloop.add_callback(start)
+        raise AsyncReply
+
+    @request(Str())
+    @return_reply()
+    def request_provision_beams(self, req, product_id):
+        """
+        @brief      Request that FBFUSE asynchronously prepare to start beams streaming
+
+        @detail     Upon this call the provided coherent and incoherent beam configurations will be evaluated
+                    to determine if they are physical and can be met with the existing hardware. If the configurations
+                    are acceptable then servers allocated to this instance will be triggered to prepare for the production of beams.
+                    Unlike a call to ?capture-start, ?provision-beams will not trigger a connection to multicast
+                    groups and will not wait for completion before returning; instead it starts the process of
+                    beamformer resource allocation and compilation. To determine when the process is complete, the
+                    user must wait for the product "state" sensor to become "ready",
+                    e.g.
+
+                    @code
+                        client.sensor['{}-state'.format(proxy_name)].wait(
+                            lambda reading: reading.value == 'ready')
+                    @endcode
+
+        @param      req               A katcp request object
+
+        @param      product_id        This is a name for the data product, used to identify the subarray to which this
+                                      request applies. For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !provision-beams ok | (fail [error description]) ]]]
+        """
+        # Note: the state of the product won't be updated until the start call hits the top of the
+        # event loop. It may be preferable to keep a self.starting_future object and yield on it
+        # in capture-start if it exists. The current implementation may or may not be a bug...
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        # This check needs to happen here as this call
+        # should return immediately
+        if not product.idle:
+            return ("fail", "Can only provision beams on an idle FBF product")
+        self.ioloop.add_callback(product.prepare)
+        return ("ok",)
+
+    @request(Str())
+    @return_reply()
+    def request_capture_stop(self, req, product_id):
+        """
+        @brief      Stop FBFUSE streaming
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !capture-stop ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        @coroutine
+        def stop():
+            product.stop_beams()
+            req.reply("ok",)
+        self.ioloop.add_callback(stop)
+        raise AsyncReply
+
+    @request(Str(), Str(), Int())
+    @return_reply()
+    def request_set_configuration_authority(self, req, product_id, hostname, port):
+        """
+        @brief     Set the configuration authority for an FBF product
+
+        @detail    The parameters passed here specify the address of a server that
+                   can be triggered to provide FBFUSE with configuration information
+                   at schedule block and target boundaries. The configuration authority
+                   must be a valid KATCP server.
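+
+        @code
+            # A hypothetical client-side example (client name and address illustrative):
+            client.req.set_configuration_authority(
+                'array_1_bc856M4k', 'fbfuse-ca.example.org', 5000)
+        @endcode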
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        product.set_configuration_authority(hostname, port)
+        return ("ok",)
+
+    @request(Str())
+    @return_reply()
+    def request_reset_beams(self, req, product_id):
+        """
+        @brief      Reset the positions of all allocated beams
+
+        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
+                    allocated to the instance.
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @return     katcp reply object [[[ !reset-beams ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        else:
+            product.reset_beams()
+            return ("ok",)
+
+    @request(Str(), Str())
+    @return_reply(Str())
+    def request_add_beam(self, req, product_id, target):
+        """
+        @brief      Configure the parameters of one beam
+
+        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
+                    allocated to the instance. If all beams are currently allocated an exception will be raised.
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @param      target          A KATPOINT target string
+
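+        @code
+            # A hypothetical client-side example with a katpoint target string:
+            client.req.add_beam('array_1_bc856M4k',
+                'PSR J0835-4510,radec,08:35:20.6,-45:10:34.9')
+        @endcode
+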
+        @return     katcp reply object [[[ !add-beam ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            target = Target(target)
+        except Exception as error:
+            return ("fail", str(error))
+        beam = product.add_beam(target)
+        return ("ok", beam.idx)
+
+    @request(Str(), Str(), Int(), Float(), Float(), Float())
+    @return_reply(Str())
+    def request_add_tiling(self, req, product_id, target, nbeams, reference_frequency, overlap, epoch):
+        """
+        @brief      Configure the parameters of a static beam tiling
+
+        @note       This call may only be made AFTER a successful call to start-beams. Before this point no beams are
+                    allocated to the instance. If there are not enough free beams to satisfy the request an
+                    exception will be raised.
+
+        @note       Beam shapes calculated for tiling are always assumed to be 2D elliptical Gaussians.
+
+        @param      req             A katcp request object
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @param      target          A KATPOINT target string
+
+        @param      nbeams          The number of beams in this tiling pattern.
+
+        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
+                                            and thus the tiling pattern. Typically this would be chosen to be the
+                                            centre frequency of the current observation.
+
+        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
+                                    at what power point neighbouring beams in the tiling pattern will meet. For
+                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
+                                    10%-power points. Similarly, an overlap of 0.5 corresponds to beams overlapping
+                                    at their half-power points. [Note: This is currently a tricky parameter to use
+                                    when values are close to zero. In future this may be defined in sigma units or
+                                    in multiples of the FWHM of the beam.]
+
+        @param      epoch           The desired epoch for the tiling pattern as a unix time. A typical usage would
+                                    be to set the epoch to halfway into the coming observation in order to minimise
+                                    the effect of parallactic angle and array projection changes altering the shape
+                                    and position of the beams and thus changing the efficiency of the tiling pattern.
+
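+        @code
+            # A hypothetical client-side example: tile 16 beams around a target,
+            # overlapping at their half-power points, for an epoch 30 minutes from now:
+            client.req.add_tiling('array_1_bc856M4k',
+                'PSR J0835-4510,radec,08:35:20.6,-45:10:34.9',
+                16, 1.4e9, 0.5, time.time() + 1800)
+        @endcode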
+
+        @return     katcp reply object [[[ !add-tiling ok | (fail [error description]) ]]]
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            target = Target(target)
+        except Exception as error:
+            return ("fail", str(error))
+        tiling = product.add_tiling(target, nbeams, reference_frequency, overlap, epoch)
+        return ("ok", tiling.idxs())
+
+    @request()
+    @return_reply(Int())
+    def request_product_list(self, req):
+        """
+        @brief      List all currently registered products and their states
+
+        @param      req               A katcp request object
+
+        @note       The details of each product are provided via an #inform
+                    as a JSON string containing information on the product state.
+
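+        @code
+            # Example inform payload (illustrative values; keys follow
+            # FbfProductController.info()):
+            # {"array_1_bc856M4k": {"antennas": "m007,m008,m009", "nservers": 4,
+            #  "capturing": false, "streams": {...}, "nchannels": 4096,
+            #  "proxy_name": "FBFUSE_1"}}
+        @endcode
+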
+        @return     katcp reply object [[[ !product-list ok | (fail [error description]) <number of configured products> ]]],
+        """
+        for product_id, product in self._products.items():
+            info = {}
+            info[product_id] = product.info()
+            as_json = json.dumps(info)
+            req.inform(as_json)
+        return ("ok", len(self._products))
+
+    @request(Str(), Str())
+    @return_reply()
+    def request_set_default_target_configuration(self, req, product_id, target):
+        """
+        @brief      Set the configuration of FBFUSE from the FBFUSE configuration server
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @param      target          A KATPOINT target string
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        try:
+            target = Target(target)
+        except Exception as error:
+            return ("fail", str(error))
+        if not product.capturing:
+            return ("fail","Product must be capturing before a target confiugration can be set.")
+        product.reset_beams()
+        # TBD: Here we connect to some database and request the default configurations
+        # For example this may return secondary target in the FoV
+        #
+        # As a default the current system will put one beam directly on target and
+        # the rest of the beams in a static tiling pattern around this target
+        now = time.time()
+        nbeams = product._beam_manager.nbeams
+        product.add_tiling(target, nbeams-1, 1.4e9, 0.5, now)
+        product.add_beam(target)
+        return ("ok",)
+
+    @request(Str(), Str())
+    @return_reply()
+    def request_set_default_sb_configuration(self, req, product_id, sb_id):
+        """
+        @brief      Set the configuration of FBFUSE from the FBFUSE configuration server
+
+        @param      product_id      This is a name for the data product, used to identify the subarray to which this
+                                    request applies. For example "array_1_bc856M4k".
+
+        @param      sb_id           The schedule block ID. Decisions on the configuration of FBFUSE will be made dependent on
+                                    the configuration of the current subarray, the primary and secondary science projects
+                                    active and the targets expected to be visited during the execution of the schedule block.
+        """
+        try:
+            product = self._get_product(product_id)
+        except ProductLookupError as error:
+            return ("fail", str(error))
+        if product.capturing:
+            return ("fail", "Cannot reconfigure a currently capturing instance.")
+        product.configure_coherent_beams(400, product._antennas, 1, 16)
+        product.configure_incoherent_beam(product._antennas, 1, 16)
+        # Note: no default tiling is generated here as no target is available at
+        # schedule block configuration time; beam placement is handled by the
+        # set-default-target-configuration request.
+        return ("ok",)
+
+@coroutine
+def on_shutdown(ioloop, server):
+    log.info("Shutting down server")
+    yield server.stop()
+    ioloop.stop()
+
+def main():
+    usage = "usage: %prog [options]"
+    parser = OptionParser(usage=usage)
+    parser.add_option('-H', '--host', dest='host', type=str,
+        help='Host interface to bind to')
+    parser.add_option('-p', '--port', dest='port', type=long,
+        help='Port number to bind to')
+    parser.add_option('', '--log_level', dest='log_level', type=str,
+        help='Logging level for the server', default="INFO")
+    parser.add_option('', '--dummy',action="store_true", dest='dummy',
+        help='Set status server to dummy')
+    (opts, args) = parser.parse_args()
+    FORMAT = "[ %(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s"
+    logger = logging.getLogger('mpikat')
+    logging.basicConfig(format=FORMAT)
+    logger.setLevel(opts.log_level.upper())
+    logging.getLogger('katcp').setLevel('INFO')
+    ioloop = tornado.ioloop.IOLoop.current()
+    log.info("Starting FbfMasterController instance")
+    server = FbfMasterController(opts.host, opts.port, dummy=opts.dummy)
+    signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
+        on_shutdown, ioloop, server))
+    def start_and_display():
+        server.start()
+        log.info("Listening at {0}, Ctrl-C to terminate server".format(server.bind_address))
+
+    ioloop.add_callback(start_and_display)
+    ioloop.start()
+
+if __name__ == "__main__":
+    main()
+
diff --git a/mpikat/fbfuse_product_controller.py b/mpikat/fbfuse_product_controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..efd5dbe73615d5c7ea451087c7c867f9d60cef9e
--- /dev/null
+++ b/mpikat/fbfuse_product_controller.py
@@ -0,0 +1,667 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+import json
+import time
+from copy import deepcopy
+from tornado.gen import coroutine
+from katcp import Sensor, Message, KATCPClientResource
+from katpoint import Target
+from mpikat.fbfuse_beam_manager import BeamManager
+from mpikat.fbfuse_delay_engine import DelayEngine
+from mpikat.fbfuse_config import FbfConfigurationManager
+from mpikat.utils import parse_csv_antennas
+
+log = logging.getLogger("mpikat.fbfuse_product_controller")
+
+class AntennaValidationError(Exception):
+    # Raised when the requested antennas are not a subset of the current subarray.
+    # Defined here for self-containment: this exception is raised below but was
+    # neither defined nor imported (in the wider codebase it may live in a
+    # shared module such as mpikat.utils).
+    pass
+
+class FbfProductStateError(Exception):
+    def __init__(self, expected_states, current_state):
+        message = "Possible states for this operation are '{}', but current state is '{}'".format(
+            expected_states, current_state)
+        super(FbfProductStateError, self).__init__(message)
+
+class FbfProductController(object):
+    """
+    Wrapper class for an FBFUSE product.
+    """
+    STATES = ["idle", "preparing", "ready", "starting", "capturing", "stopping"]
+    IDLE, PREPARING, READY, STARTING, CAPTURING, STOPPING = STATES
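+    # Typical lifecycle (as implemented by prepare/start_capture/stop_beams):
+    #   idle -> preparing -> ready -> starting -> capturing -> stopping -> idle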
+
+    def __init__(self, parent, product_id, katpoint_antennas,
+                 n_channels, streams, proxy_name, feng_config):
+        """
+        @brief      Construct new instance
+
+        @param      parent            The parent FbfMasterController instance
+
+        @param      product_id        The name of the product
+
+        @param      katpoint_antennas A list of katpoint.Antenna objects
+
+        @param      n_channels        The integer number of frequency channels provided by the CBF.
+
+        @param      streams           A dictionary containing config keys and values describing the streams.
+
+        @param      proxy_name        The name of the proxy associated with this subarray (used as a sensor prefix)
+
+        @param      feng_config       A dictionary of F-engine configuration parameters; the 'bandwidth'
+                                      and 'centre-frequency' keys are used below.
+        """
+        log.debug("Creating new FbfProductController with args: {}".format(
+            ", ".join([str(i) for i in (parent, product_id, katpoint_antennas, n_channels,
+                streams, proxy_name, feng_config)])))
+        self._parent = parent
+        self._product_id = product_id
+        self._antennas = ",".join([a.name for a in katpoint_antennas])
+        self._katpoint_antennas = katpoint_antennas
+        self._antenna_map = {a.name: a for a in self._katpoint_antennas}
+        self._n_channels = n_channels
+        self._streams = streams
+        self._proxy_name = proxy_name
+        self._feng_config = feng_config
+        self._servers = []
+        self._beam_manager = None
+        self._delay_engine = None
+        self._coherent_beam_ip_range = None
+        self._ca_client = None
+        self._managed_sensors = []
+        self._ip_allocations = []
+        self._default_sb_config = {
+            u'coherent-beams-nbeams':400,
+            u'coherent-beams-tscrunch':16,
+            u'coherent-beams-fscrunch':1,
+            u'coherent-beams-antennas':self._antennas,
+            u'coherent-beams-granularity':6,
+            u'incoherent-beam-tscrunch':16,
+            u'incoherent-beam-fscrunch':1,
+            u'incoherent-beam-antennas':self._antennas,
+            u'bandwidth':self._feng_config['bandwidth'],
+            u'centre-frequency':self._feng_config['centre-frequency']}
+        self.setup_sensors()
+
+    def __del__(self):
+        self.teardown_sensors()
+
+    def info(self):
+        """
+        @brief    Return a metadata dictionary describing this product controller
+        """
+        out = {
+            "antennas":self._antennas,
+            "nservers":len(self.servers),
+            "capturing":self.capturing,
+            "streams":self._streams,
+            "nchannels":self._n_channels,
+            "proxy_name":self._proxy_name
+        }
+        return out
+
+    def add_sensor(self, sensor):
+        """
+        @brief    Add a sensor to the parent object
+
+        @note     This method is used to wrap calls to the add_sensor method
+                  on the parent FbfMasterController instance. In order to
+                  disambiguate between sensors describing different products,
+                  the product ID is used as a sensor prefix. For example
+                  the "servers" sensor will be seen by clients connected to the
+                  FbfMasterController server as "<product_id>.servers" (e.g.
+                  "array_1_bc856M4k.servers").
+        """
+        prefix = "{}.".format(self._product_id)
+        if sensor.name.startswith(prefix):
+            self._parent.add_sensor(sensor)
+        else:
+            sensor.name = "{}{}".format(prefix, sensor.name)
+            self._parent.add_sensor(sensor)
+        self._managed_sensors.append(sensor)
+
+    def setup_sensors(self):
+        """
+        @brief    Setup the default KATCP sensors.
+
+        @note     As this call is made only upon an FBFUSE configure call a mass inform
+                  is required to let connected clients know that the proxy interface has
+                  changed.
+        """
+        self._state_sensor = Sensor.discrete(
+            "state",
+            description = "Denotes the state of this FBF instance",
+            params = self.STATES,
+            default = self.IDLE,
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._state_sensor)
+
+        self._ca_address_sensor = Sensor.string(
+            "configuration-authority",
+            description = "The address of the server that will be deferred to for configurations",
+            default = "",
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._ca_address_sensor)
+
+        self._available_antennas_sensor = Sensor.string(
+            "available-antennas",
+            description = "The antennas that are currently available for beamforming",
+            default = self._antennas,
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._available_antennas_sensor)
+
+        self._bandwidth_sensor = Sensor.float(
+            "bandwidth",
+            description = "The bandwidth this product is configured to process",
+            default = self._default_sb_config['bandwidth'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._bandwidth_sensor)
+
+        self._nchans_sensor = Sensor.integer(
+            "nchannels",
+            description = "The number of channels to be processesed",
+            default = self._n_channels,
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._nchans_sensor)
+
+        self._cfreq_sensor = Sensor.float(
+            "centre-frequency",
+            description = "The centre frequency of the band this product configured to process",
+            default = self._default_sb_config['centre-frequency'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._cfreq_sensor)
+
+        self._cbc_nbeams_sensor = Sensor.integer(
+            "coherent-beam-count",
+            description = "The number of coherent beams that this FBF instance can currently produce",
+            default = self._default_sb_config['coherent-beams-nbeams'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._cbc_nbeams_sensor)
+
+        self._cbc_nbeams_per_group = Sensor.integer(
+            "coherent-beam-count-per-group",
+            description = "The number of coherent beams packed into a multicast group",
+            default = 1,
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._cbc_nbeams_per_group)
+
+        self._cbc_ngroups = Sensor.integer(
+            "coherent-beam-ngroups",
+            description = "The number of multicast groups used for coherent beam transmission",
+            default = 1,
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._cbc_ngroups)
+
+        self._cbc_nbeams_per_server_set = Sensor.integer(
+            "coherent-beam-nbeams-per-server-set",
+            description = "The number of beams produced by each server set",
+            default = 1,
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._cbc_nbeams_per_server_set)
+
+        self._cbc_tscrunch_sensor = Sensor.integer(
+            "coherent-beam-tscrunch",
+            description = "The number time samples that will be integrated when producing coherent beams",
+            default = self._default_sb_config['coherent-beams-tscrunch'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._cbc_tscrunch_sensor)
+
+        self._cbc_fscrunch_sensor = Sensor.integer(
+            "coherent-beam-fscrunch",
+            description = "The number frequency channels that will be integrated when producing coherent beams",
+            default = self._default_sb_config['coherent-beams-fscrunch'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._cbc_fscrunch_sensor)
+
+        self._cbc_antennas_sensor = Sensor.string(
+            "coherent-beam-antennas",
+            description = "The antennas that will be used when producing coherent beams",
+            default = self._default_sb_config['coherent-beams-antennas'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._cbc_antennas_sensor)
+
+        self._cbc_mcast_groups_sensor = Sensor.string(
+            "coherent-beam-multicast-groups",
+            description = "Multicast groups used by this instance for sending coherent beam data",
+            default = "",
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._cbc_mcast_groups_sensor)
+
+        self._ibc_nbeams_sensor = Sensor.integer(
+            "incoherent-beam-count",
+            description = "The number of incoherent beams that this FBF instance can currently produce",
+            default = 1,
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._ibc_nbeams_sensor)
+
+        self._ibc_tscrunch_sensor = Sensor.integer(
+            "incoherent-beam-tscrunch",
+            description = "The number time samples that will be integrated when producing incoherent beams",
+            default = self._default_sb_config['incoherent-beam-tscrunch'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._ibc_tscrunch_sensor)
+
+        self._ibc_fscrunch_sensor = Sensor.integer(
+            "incoherent-beam-fscrunch",
+            description = "The number frequency channels that will be integrated when producing incoherent beams",
+            default = self._default_sb_config['incoherent-beam-fscrunch'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._ibc_fscrunch_sensor)
+
+        self._ibc_antennas_sensor = Sensor.string(
+            "incoherent-beam-antennas",
+            description = "The antennas that will be used when producing incoherent beams",
+            default = self._default_sb_config['incoherent-beam-antennas'],
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._ibc_antennas_sensor)
+
+        self._ibc_mcast_group_sensor = Sensor.string(
+            "incoherent-beam-multicast-group",
+            description = "Multicast group used by this instance for sending incoherent beam data",
+            default = "",
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._ibc_mcast_group_sensor)
+
+        self._servers_sensor = Sensor.string(
+            "servers",
+            description = "The worker server instances currently allocated to this product",
+            default = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers]),
+            initial_status = Sensor.NOMINAL)
+        self.add_sensor(self._servers_sensor)
+
+        self._nserver_sets_sensor = Sensor.integer(
+            "nserver-sets",
+            description = "The number of server sets (independent subscriptions to the F-engines)",
+            default = 1,
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._nserver_sets_sensor)
+
+        self._nservers_per_set_sensor = Sensor.integer(
+            "nservers-per-set",
+            description = "The number of servers per server set",
+            default = 1,
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._nservers_per_set_sensor)
+
+        self._delay_engine_sensor = Sensor.string(
+            "delay-engines",
+            description = "The addresses of the delay engines serving this product",
+            default = "",
+            initial_status = Sensor.UNKNOWN)
+        self.add_sensor(self._delay_engine_sensor)
+        self._parent.mass_inform(Message.inform('interface-changed'))
+
+    def teardown_sensors(self):
+        """
+        @brief    Remove all sensors created by this product from the parent server.
+
+        @note     This method is required for cleanup to stop the FBF sensor pool
+                  becoming swamped with unused sensors.
+        """
+        for sensor in self._managed_sensors:
+            self._parent.remove_sensor(sensor)
+        self._parent.mass_inform(Message.inform('interface-changed'))
+
+    @property
+    def servers(self):
+        return self._servers
+
+    @property
+    def capturing(self):
+        return self.state == self.CAPTURING
+
+    @property
+    def idle(self):
+        return self.state == self.IDLE
+
+    @property
+    def starting(self):
+        return self.state == self.STARTING
+
+    @property
+    def stopping(self):
+        return self.state == self.STOPPING
+
+    @property
+    def ready(self):
+        return self.state == self.READY
+
+    @property
+    def preparing(self):
+        return self.state == self.PREPARING
+
+    @property
+    def state(self):
+        return self._state_sensor.value()
+
+    def _verify_antennas(self, antennas):
+        """
+        @brief      Verify that a set of antennas is available to this instance.
+
+        @param      antennas   A CSV list of antenna names
+        """
+        antennas_set = set([ant.name for ant in self._katpoint_antennas])
+        requested_antennas = set(antennas)
+        return requested_antennas.issubset(antennas_set)
+
+    def set_configuration_authority(self, hostname, port):
+        if self._ca_client:
+            self._ca_client.stop()
+        self._ca_client = KATCPClientResource(dict(
+            name = 'configuration-authority-client',
+            address = (hostname, port),
+            controlled = True))
+        self._ca_client.start()
+        self._ca_address_sensor.set_value("{}:{}".format(hostname, port))
+
+    @coroutine
+    def get_ca_sb_configuration(self, sb_id):
+        yield self._ca_client.until_synced()
+        try:
+            response = yield self._ca_client.req.get_schedule_block_configuration(self._proxy_name, sb_id)
+        except Exception as error:
+            log.error("Request for SB configuration to CA failed with error: {}".format(str(error)))
+            raise error
+        try:
+            config_dict = json.loads(response.reply.arguments[1])
+        except Exception as error:
+            log.error("Could not parse CA SB configuration with error: {}".format(str(error)))
+            raise error
+        self.set_sb_configuration(config_dict)
+
+    def reset_sb_configuration(self):
+        # Deallocate any servers and multicast groups held by this product and
+        # clear the local records to avoid double-freeing on reconfiguration.
+        self._parent._server_pool.deallocate(self._servers)
+        self._servers = []
+        for ip_range in self._ip_allocations:
+            self._parent._ip_pool.free(ip_range)
+        self._ip_allocations = []
+
+    def set_sb_configuration(self, config_dict):
+        """
+        @brief  Set the schedule block configuration for this product
+
+        @param  config_dict  A dictionary specifying configuration parameters
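+
+        @code
+            # Example (keys as in self._default_sb_config; values illustrative):
+            product.set_sb_configuration({
+                u'coherent-beams-nbeams': 800,
+                u'coherent-beams-tscrunch': 16,
+                u'bandwidth': 856e6,
+                u'centre-frequency': 1284e6})
+        @endcode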
+        """
+        self.reset_sb_configuration()
+        config = deepcopy(self._default_sb_config)
+        config.update(config_dict)
+        # first we need to get one ip address for the incoherent beam
+        ibc_mcast_group = self._parent._ip_pool.allocate(1)
+        self._ip_allocations.append(ibc_mcast_group)
+        self._ibc_mcast_group_sensor.set_value(ibc_mcast_group.format_katcp())
+        largest_ip_range = self._parent._ip_pool.largest_free_range()
+        nworkers_available = self._parent._server_pool.navailable()
+        cm = FbfConfigurationManager(len(self._katpoint_antennas),
+            self._feng_config['bandwidth'], self._n_channels,
+            nworkers_available, largest_ip_range)
+        requested_nantennas = len(parse_csv_antennas(config['coherent-beams-antennas']))
+        mcast_config = cm.get_configuration(
+            config['coherent-beams-tscrunch'],
+            config['coherent-beams-fscrunch'],
+            config['coherent-beams-nbeams'],
+            requested_nantennas,
+            config['bandwidth'],
+            config['coherent-beams-granularity'])
+        self._bandwidth_sensor.set_value(config['bandwidth'])
+        self._cfreq_sensor.set_value(config['centre-frequency'])
+        self._nchans_sensor.set_value(mcast_config['num_chans'])
+        self._cbc_nbeams_sensor.set_value(mcast_config['num_beams'])
+        self._cbc_nbeams_per_group.set_value(mcast_config['num_beams_per_mcast_group'])
+        self._cbc_ngroups.set_value(mcast_config['num_mcast_groups'])
+        self._cbc_nbeams_per_server_set.set_value(mcast_config['num_beams_per_worker_set'])
+        self._cbc_tscrunch_sensor.set_value(config['coherent-beams-tscrunch'])
+        self._cbc_fscrunch_sensor.set_value(config['coherent-beams-fscrunch'])
+        self._cbc_antennas_sensor.set_value(config['coherent-beams-antennas'])
+        self._ibc_tscrunch_sensor.set_value(config['incoherent-beam-tscrunch'])
+        self._ibc_fscrunch_sensor.set_value(config['incoherent-beam-fscrunch'])
+        self._ibc_antennas_sensor.set_value(config['incoherent-beam-antennas'])
+        self._servers = self._parent._server_pool.allocate(mcast_config['num_workers_total'])
+        server_str = ",".join(["{s.hostname}:{s.port}".format(s=server) for server in self._servers])
+        self._servers_sensor.set_value(server_str)
+        self._nserver_sets_sensor.set_value(mcast_config['num_worker_sets'])
+        self._nservers_per_set_sensor.set_value(mcast_config['num_workers_per_set'])
+        cbc_mcast_groups = self._parent._ip_pool.allocate(mcast_config['num_mcast_groups'])
+        self._ip_allocations.append(cbc_mcast_groups)
+        self._cbc_mcast_groups_sensor.set_value(cbc_mcast_groups.format_katcp())
+
+    @coroutine
+    def get_ca_target_configuration(self, target):
+        def ca_target_update_callback(received_timestamp, timestamp, status, value):
+            # TODO, should we really reset all the beams or should we have
+            # a mechanism to only update changed beams
+            config_dict = json.loads(value)
+            self.reset_beams()
+            for target_string in config_dict.get('beams',[]):
+                target = Target(target_string)
+                self.add_beam(target)
+            for tiling in config_dict.get('tilings',[]):
+                target  = Target(tiling['target']) #required
+                freq    = float(tiling.get('reference_frequency', 1.4e9))
+                nbeams  = int(tiling['nbeams'])
+                overlap = float(tiling.get('overlap', 0.5))
+                epoch   = float(tiling.get('epoch', time.time()))
+                self.add_tiling(target, nbeams, freq, overlap, epoch)
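+        # Expected 'beam_position_configuration' sensor payload, inferred from
+        # the parsing in ca_target_update_callback above (a sketch):
+        # {"beams": ["<katpoint target string>", ...],
+        #  "tilings": [{"target": "<katpoint target string>",   # required
+        #               "nbeams": 16,
+        #               "reference_frequency": 1.4e9,           # optional
+        #               "overlap": 0.5,                         # optional
+        #               "epoch": 1530000000.0}]}                # optional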
+        yield self._ca_client.until_synced()
+        try:
+            response = yield self._ca_client.req.target_configuration_start(self._proxy_name, target.format_katcp())
+        except Exception as error:
+            log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
+            raise error
+        if not response.reply.reply_ok():
+            error = Exception(response.reply.arguments[1])
+            log.error("Request for target configuration to CA failed with error: {}".format(str(error)))
+            raise error
+        yield self._ca_client.until_synced()
+        sensor = self._ca_client.sensor["{}_beam_position_configuration".format(self._proxy_name)]
+        sensor.register_listener(ca_target_update_callback)
+        self._ca_client.set_sampling_strategy(sensor.name, "event")
+
+    def configure_coherent_beams(self, nbeams, antennas, fscrunch, tscrunch):
+        """
+        @brief      Set the configuration for coherent beams produced by this instance
+
+        @param      nbeams          The number of beams that will be produced for the provided product_id
+
+        @param      antennas        A comma separated list of physical antenna names. Only these antennas will be used
+                                    when generating coherent beams (e.g. m007,m008,m009). The antennas provided here must
+                                    be a subset of the antennas in the current subarray. If not an exception will be
+                                    raised.
+
+        @param      fscrunch        The number of frequency channels to integrate over when producing coherent beams.
+
+        @param      tscrunch        The number of time samples to integrate over when producing coherent beams.
+        """
+        if not self.idle:
+            raise FbfProductStateError([self.IDLE], self.state)
+        if not self._verify_antennas(parse_csv_antennas(antennas)):
+            raise AntennaValidationError("Requested antennas are not a subset of the current subarray")
+        self._cbc_nbeams_sensor.set_value(nbeams)
+        #need a check here to determine if this is a subset of the subarray antennas
+        self._cbc_fscrunch_sensor.set_value(fscrunch)
+        self._cbc_tscrunch_sensor.set_value(tscrunch)
+        self._cbc_antennas_sensor.set_value(antennas)
+
+    def configure_incoherent_beam(self, antennas, fscrunch, tscrunch):
+        """
+        @brief      Set the configuration for incoherent beams produced by this instance
+
+        @param      antennas        A comma separated list of physical antenna names. Only these antennas will be used
+                                    when generating incoherent beams (e.g. m007,m008,m009). The antennas provided here must
+                                    be a subset of the antennas in the current subarray. If not an exception will be
+                                    raised.
+
+        @param      fscrunch        The number of frequency channels to integrate over when producing incoherent beams.
+
+        @param      tscrunch        The number of time samples to integrate over when producing incoherent beams.
+        """
+        if not self.idle:
+            raise FbfProductStateError([self.IDLE], self.state)
+        if not self._verify_antennas(parse_csv_antennas(antennas)):
+            raise AntennaValidationError("Requested antennas are not a subset of the current subarray")
+        #need a check here to determine if this is a subset of the subarray antennas
+        self._ibc_fscrunch_sensor.set_value(fscrunch)
+        self._ibc_tscrunch_sensor.set_value(tscrunch)
+        self._ibc_antennas_sensor.set_value(antennas)
+
+    def _beam_to_sensor_string(self, beam):
+        return beam.target.format_katcp()
+
+    @coroutine
+    def target_start(self, target):
+        if self._ca_client:
+            yield self.get_ca_target_configuration(target)
+        else:
+            log.warning("No configuration authority is set, using default beam configuration")
+
+    @coroutine
+    def target_stop(self):
+        if self._ca_client:
+            sensor_name = "{}_beam_position_configuration".format(self._proxy_name)
+            self._ca_client.set_sampling_strategy(sensor_name, "none")
+
+    @coroutine
+    def prepare(self):
+        """
+        @brief      Prepare the beamformer for streaming
+
+        @detail     This method evaluates the current configuration, creates a new DelayEngine
+                    and passes a prepare call to all allocated servers.
+        """
+        if not self.idle:
+            raise FbfProductStateError([self.IDLE], self.state)
+        self._state_sensor.set_value(self.PREPARING)
+
+        # Here we need to parse the streams and assign beams to streams:
+        #mcast_addrs, mcast_port = parse_stream(self._streams['cbf.antenna_channelised_voltage']['i0.antenna-channelised-voltage'])
+
+        if not self._ca_client:
+            log.warning("No configuration authority found, using default configuration parameters")
+        else:
+            #TODO: get the schedule block ID into this call from somewhere (configure?)
+            yield self.get_ca_sb_configuration("default_subarray")
+
+
+        cbc_antennas_names = parse_csv_antennas(self._cbc_antennas_sensor.value())
+        cbc_antennas = [self._antenna_map[name] for name in cbc_antennas_names]
+        self._beam_manager = BeamManager(self._cbc_nbeams_sensor.value(), cbc_antennas)
+        self._delay_engine = DelayEngine("127.0.0.1", 0, self._beam_manager)
+        self._delay_engine.start()
+
+        for server in self._servers:
+            # each server will take 4 consecutive multicast groups
+            pass
+
+        # set up delay engine
+        # compile kernels
+        # start streaming
+        self._delay_engine_sensor.set_value(self._delay_engine.bind_address)
+
+
+        # Need to tear down the beam sensors here
+        self._beam_sensors = []
+        for beam in self._beam_manager.get_beams():
+            sensor = Sensor.string(
+                "coherent-beam-{}".format(beam.idx),
+                description="R.A. (deg), declination (deg) and source name for coherent beam with ID {}".format(beam.idx),
+                default=self._beam_to_sensor_string(beam),
+                initial_status=Sensor.UNKNOWN)
+            beam.register_observer(lambda beam, sensor=sensor:
+                sensor.set_value(self._beam_to_sensor_string(beam)))
+            self._beam_sensors.append(sensor)
+            self.add_sensor(sensor)
+        self._state_sensor.set_value(self.READY)
+
+        # Only make this call if the number of beams has changed
+        self._parent.mass_inform(Message.inform('interface-changed'))
+
+    def start_capture(self):
+        if not self.ready:
+            raise FbfProductStateError([self.READY], self.state)
+        self._state_sensor.set_value(self.STARTING)
+        """
+        futures = []
+        for server in self._servers:
+            futures.append(server.req.start_capture())
+        for future in futures:
+            try:
+                response = yield future
+            except:
+                pass
+        """
+        self._state_sensor.set_value(self.CAPTURING)
+
+    def stop_beams(self):
+        """
+        @brief      Stops the beamformer servers streaming.
+        """
+        if not self.capturing:
+            return
+        self._state_sensor.set_value(self.STOPPING)
+        for server in self._servers:
+            #yield server.req.deconfigure()
+            pass
+        self._state_sensor.set_value(self.IDLE)
+
+    def add_beam(self, target):
+        """
+        @brief      Specify the parameters of one managed beam
+
+        @param      target      A KATPOINT target object
+
+        @return     Returns the allocated Beam object
+        """
+        valid_states = [self.READY, self.CAPTURING, self.STARTING]
+        if self.state not in valid_states:
+            raise FbfProductStateError(valid_states, self.state)
+        return self._beam_manager.add_beam(target)
+
+    def add_tiling(self, target, number_of_beams, reference_frequency, overlap, epoch):
+        """
+        @brief   Add a tiling to be managed
+
+        @param      target      A KATPOINT target object
+
+        @param      number_of_beams     The number of beams in this tiling pattern
+
+        @param      reference_frequency     The reference frequency at which to calculate the synthesised beam shape,
+                                            and thus the tiling pattern. Typically this would be chosen to be the
+                                            centre frequency of the current observation.
+
+        @param      overlap         The desired overlap point between beams in the pattern. The overlap defines
+                                    at what power point neighbouring beams in the tiling pattern will meet. For
+                                    example an overlap point of 0.1 corresponds to beams overlapping only at their
+                                    10%-power points. Similarly, an overlap of 0.5 corresponds to beams overlapping
+                                    at their half-power points. [Note: This is currently a tricky parameter to use
+                                    when values are close to zero. In future this may be defined in sigma units or
+                                    in multiples of the FWHM of the beam.]
+
+        @param      epoch       The desired epoch for the tiling pattern as a unix time
+
+        @returns    The created Tiling object
+        """
+        valid_states = [self.READY, self.CAPTURING, self.STARTING]
+        if self.state not in valid_states:
+            raise FbfProductStateError(valid_states, self.state)
+        tiling = self._beam_manager.add_tiling(target, number_of_beams, reference_frequency, overlap)
+        tiling.generate(self._katpoint_antennas, epoch)
+        return tiling
+
+    def reset_beams(self):
+        """
+        @brief  Reset and deallocate all beams and tilings managed by this instance
+
+        @note   All tilings will be lost on this call and must be remade for subsequent observations
+        """
+        valid_states = [self.READY, self.CAPTURING, self.STARTING]
+        if self.state not in valid_states:
+            raise FbfProductStateError(valid_states, self.state)
+        self._beam_manager.reset()
\ No newline at end of file
diff --git a/mpikat/fbf_worker_server.py b/mpikat/fbfuse_worker_server.py
similarity index 99%
rename from mpikat/fbf_worker_server.py
rename to mpikat/fbfuse_worker_server.py
index 6ed20a2fb827ff2b1cdc2cd1b67c04114eed4606..4e4e65f90eb2a08b873213bb02110186a8cf58f6 100644
--- a/mpikat/fbf_worker_server.py
+++ b/mpikat/fbfuse_worker_server.py
@@ -10,7 +10,7 @@ from optparse import OptionParser
 from katcp import Sensor, AsyncDeviceServer
 from katcp.kattypes import request, return_reply, Int, Str, Discrete
 
-log = logging.getLogger("psrdada_cpp.meerkat.fbfuse.delay_engine_client")
+log = logging.getLogger("mpikat.fbfuse_worker_server")
 
 lock = Lock()
 
diff --git a/mpikat/fbfuse_worker_wrapper.py b/mpikat/fbfuse_worker_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a91af139ae65a759e6def9374b87d511abd160
--- /dev/null
+++ b/mpikat/fbfuse_worker_wrapper.py
@@ -0,0 +1,83 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+from katcp import KATCPClientResource
+from mpikat.worker_pool import WorkerPool
+
+log = logging.getLogger("mpikat.fbfuse_worker_wrapper")
+
+class FbfWorkerWrapper(object):
+    """Wrapper around a client to an FbfWorkerServer
+    instance.
+    """
+    def __init__(self, hostname, port):
+        """
+        @brief  Create a new wrapper around a client to a worker server
+
+        @param hostname The hostname for the worker server
+        @param port     The port number that the worker server serves on
+        """
+        log.debug("Building client to FbfWorkerServer at {}:{}".format(hostname, port))
+        self._client = KATCPClientResource(dict(
+            name="worker-server-client",
+            address=(hostname, port),
+            controlled=True))
+        self.hostname = hostname
+        self.port = port
+        self.priority = 0 # Currently no priority mechanism is implemented
+        self._started = False
+
+    def start(self):
+        """
+        @brief  Start the client to the worker server
+        """
+        log.debug("Starting client to FbfWorkerServer at {}:{}".format(self.hostname, self.port))
+        self._client.start()
+        self._started = True
+
+    def __repr__(self):
+        return "<{} for {}:{}>".format(self.__class__, self.hostname, self.port)
+
+    def __hash__(self):
+        # This hash override is required to allow these wrappers
+        # to be used with set() objects. The implication is that
+        # the combination of hostname and port is unique for a
+        # worker server
+        return hash((self.hostname, self.port))
+
+    def __eq__(self, other):
+        # Also implemented to help with hashing
+        # for sets
+        return self.__hash__() == hash(other)
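+
+    # A usage sketch: the __hash__/__eq__ pair above lets sets (and the
+    # WorkerPool) deduplicate wrappers by endpoint, e.g.
+    #   set([FbfWorkerWrapper('host0', 5000), FbfWorkerWrapper('host0', 5000)])
+    # collapses to a single element.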
+
+    def __del__(self):
+        if self._started:
+            try:
+                self._client.stop()
+            except Exception as error:
+                log.exception(str(error))
+
+
+class FbfWorkerPool(WorkerPool):
+    def make_wrapper(self, hostname, port):
+        return FbfWorkerWrapper(hostname, port)
\ No newline at end of file
diff --git a/mpikat/ip_manager.py b/mpikat/ip_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..463d10ce70950cd90c82110d9f7ad400b294ada0
--- /dev/null
+++ b/mpikat/ip_manager.py
@@ -0,0 +1,179 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import logging
+import ipaddress
+
+log = logging.getLogger('mpikat.ip_manager')
+
+class IpRangeAllocationError(Exception):
+    pass
+
+class ContiguousIpRange(object):
+    def __init__(self, base_ip, port, count):
+        """
+        @brief      Wrapper for a contiguous range of IPs
+
+        @param      base_ip    The first IP address in the range as a string, e.g. '239.11.1.150'
+        @param      port       A port number associated with this range of IPs
+        @param      count      The number of IPs in the range
+
+        @note       No checks are made to determine whether a given IP is valid or whether
+                    the range crosses subnet boundaries.
+
+        @note       This class is intended for managing SPEAD stream IPs, hence the associated
+                    port number and the 'spead://' prefix used in the format_katcp method.
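+
+        @code
+            # e.g. four multicast addresses starting at 239.11.1.150:
+            r = ContiguousIpRange('239.11.1.150', 7147, 4)
+            r.format_katcp()  # -> 'spead://239.11.1.150+4:7147'
+        @endcode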
+        """
+        self._base_ip = ipaddress.ip_address(unicode(base_ip))
+        self._ips = [self._base_ip+ii for ii in range(count)]
+        self._port = port
+        self._count = count
+
+    @property
+    def count(self):
+        return self._count
+
+    @property
+    def port(self):
+        return self._port
+
+    @property
+    def base_ip(self):
+        return self._base_ip
+
+    def index(self, ip):
+        return self._ips.index(ip)
+
+    def __hash__(self):
+        return hash(self.format_katcp())
+
+    def __iter__(self):
+        return self._ips.__iter__()
+
+    def __repr__(self):
+        return "<{} {}>".format(self.__class__.__name__, self.format_katcp())
+
+    def format_katcp(self):
+        """
+        @brief  Return a description of this IP range in a KATCP friendly format,
+                e.g. 'spead://239.11.1.150+15:7147'
+        """
+        return "spead://{}+{}:{}".format(str(self._base_ip), self._count, self._port)
+
+
+class IpRangeManager(object):
+    def __init__(self, ip_range):
+        """
+        @brief  Class for managing allocation of sub-ranges from
+                a ContiguousIpRange instance
+
+        @param  ip_range   A ContiguousIpRange instance to be managed
+        """
+        self._ip_range = ip_range
+        self._allocated = [False for _ in ip_range]
+        self._allocated_ranges = set()
+
+    def __repr__(self):
+        return "<{} {}>".format(self.__class__.__name__, self._ip_range.format_katcp())
+
+    def format_katcp(self):
+        """
+        @brief      Return a description of full managed (allocated and free) IP range in
+                    a KATCP friendly format, e.g. 'spead://239.11.1.150+15:7147'
+        """
+        return self._ip_range.format_katcp()
+
+    def _free_ranges(self):
+        state_ranges = {True:[], False:[]}
+        def find_state_range(idx, state):
+            start_idx = idx
+            while idx < len(self._allocated):
+                if self._allocated[idx] == state:
+                    idx += 1
+                else:
+                    state_ranges[state].append((start_idx, idx-start_idx))
+                    return find_state_range(idx, not state)
+            else:
+                state_ranges[state].append((start_idx, idx-start_idx))
+        find_state_range(0, self._allocated[0])
+        return state_ranges[False]
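+
+    # Note: an equivalent non-recursive formulation of the scan above (a
+    # sketch, not used here; requires 'import itertools') could use
+    # itertools.groupby to find runs of free slots:
+    #
+    #   def _free_ranges(self):
+    #       ranges, idx = [], 0
+    #       for state, run in itertools.groupby(self._allocated):
+    #           span = len(list(run))
+    #           if not state:
+    #               ranges.append((idx, span))
+    #           idx += span
+    #       return ranges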
+
+    def largest_free_range(self):
+        return max(self._free_ranges(), key=lambda r: r[1])
+
+    def allocate(self, n):
+        """
+        @brief      Allocate a range of contiguous IPs
+
+        @param      n   The number of IPs to allocate
+
+        @return     A ContiguousIpRange object describing the allocated range
+        """
+        ranges = self._free_ranges()
+        best_fit = None
+        for start,span in ranges:
+            if span<n:
+                continue
+            elif best_fit is None:
+                best_fit = (start, span)
+            elif (span-n) < (best_fit[1]-n):
+                best_fit = (start, span)
+        if best_fit is None:
+            raise IpRangeAllocationError("Could not allocate contiguous range of {} addresses".format(n))
+        else:
+            start,span = best_fit
+            for ii in range(n):
+                offset = start+ii
+                self._allocated[offset] = True
+            allocated_range = ContiguousIpRange(str(self._ip_range.base_ip + start), self._ip_range.port, n)
+            self._allocated_ranges.add(allocated_range)
+            return allocated_range
+
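+    # A best-fit allocation sketch (addresses illustrative):
+    #
+    #     >>> pool = IpRangeManager(ContiguousIpRange('239.11.1.150', 7147, 8))
+    #     >>> pool.allocate(3).format_katcp()
+    #     'spead://239.11.1.150+3:7147'
+    #     >>> pool.allocate(2).format_katcp()
+    #     'spead://239.11.1.153+2:7147'
+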
+    def free(self, ip_range):
+        """
+        @brief      Free an allocated IP range
+
+        @param      ip_range  A ContiguousIpRange object allocated through a call to the
+                              'allocate' method.
+        """
+        self._allocated_ranges.remove(ip_range)
+        for ip in ip_range:
+            self._allocated[self._ip_range.index(ip)] = False
+
+
+def ip_range_from_stream(stream):
+    """
+    @brief      Generate a ContiguousIpRange object from a KATCP-style
+                stream definition, e.g. 'spead://239.11.1.150+15:7147'
+
+    @param      stream  A KATCP stream string
+
+    @return     A ContiguousIpRange object
+    """
+    # str.lstrip strips a set of characters, not a prefix, so remove the
+    # scheme explicitly to avoid mangling the address.
+    if stream.startswith("spead://"):
+        stream = stream[len("spead://"):]
+    ip_range, port = stream.split(":")
+    port = int(port)
+    try:
+        base_ip, ip_count = ip_range.split("+")
+        ip_count = int(ip_count)
+    except ValueError:
+        base_ip, ip_count = ip_range, 1
+    return ContiguousIpRange(base_ip, port, ip_count)
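+
+# For example (stream string illustrative):
+#
+#     >>> ip_range_from_stream('spead://239.11.1.150+15:7147')
+#     <ContiguousIpRange spead://239.11.1.150+15:7147>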
diff --git a/mpikat/katportalclient_wrapper.py b/mpikat/katportalclient_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..f563b96d84266290553ae7fed35c302375fd505d
--- /dev/null
+++ b/mpikat/katportalclient_wrapper.py
@@ -0,0 +1,83 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import ast
+import logging
+from tornado.gen import coroutine, Return
+from katportalclient import KATPortalClient
+
+log = logging.getLogger('mpikat.katportalclient_wrapper')
+
+class KatportalClientWrapper(object):
+    """Thin coroutine-based wrapper around KATPortalClient for looking up
+    subarray sensor values by component and sensor name.
+    """
+    def __init__(self, host, sub_nr=1):
+        self._client = KATPortalClient('http://{host}/api/client/{sub_nr}'.format(
+            host=host, sub_nr=sub_nr),
+            on_update_callback=None, logger=logging.getLogger('katcp'))
+
+    @coroutine
+    def _query(self, component, sensor):
+        sensor_name = yield self._client.sensor_subarray_lookup(
+            component=component, sensor=sensor, return_katcp_name=False)
+        sensor_sample = yield self._client.sensor_value(sensor_name,
+            include_value_ts=False)
+        raise Return(sensor_sample)
+
+    @coroutine
+    def get_observer_string(self, antenna):
+        sensor_sample = yield self._query(antenna, "observer")
+        raise Return(sensor_sample.value)
+
+    @coroutine
+    def get_antenna_feng_id_map(self, instrument_name, antennas):
+        sensor_sample = yield self._query('cbf', '{}.input-labelling'.format(instrument_name))
+        # literal_eval is safer than eval for parsing the list of input
+        # label tuples published by the sensor.
+        labels = ast.literal_eval(sensor_sample.value)
+        mapping = {}
+        for input_label, input_index, _, _ in labels:
+            antenna_name = input_label.strip("vh").lower()
+            if antenna_name.startswith("m") and antenna_name in antennas:
+                # The two polarisation inputs (v and h) share one F-engine ID
+                mapping[antenna_name] = input_index//2
+        log.debug("Antenna to F-engine ID map: {}".format(mapping))
+        raise Return(mapping)
+
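+    # For illustration, an input-labelling value such as
+    #     [('m007v', 0, ...), ('m007h', 1, ...), ('m008v', 2, ...), ('m008h', 3, ...)]
+    # maps both polarisations of each antenna to a single F-engine ID:
+    #     {'m007': 0, 'm008': 1}
+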
+    @coroutine
+    def get_bandwidth(self, stream):
+        sensor_sample = yield self._query('sub', 'streams.{}.bandwidth'.format(stream))
+        raise Return(sensor_sample.value)
+
+    @coroutine
+    def get_cfreq(self, stream):
+        sensor_sample = yield self._query('sub', 'streams.{}.centre-frequency'.format(stream))
+        raise Return(sensor_sample.value)
+
+    @coroutine
+    def get_sideband(self, stream):
+        sensor_sample = yield self._query('sub', 'streams.{}.sideband'.format(stream))
+        raise Return(sensor_sample.value)
+
+    @coroutine
+    def get_sync_epoch(self):
+        sensor_sample = yield self._query('sub', 'synchronisation-epoch')
+        raise Return(sensor_sample.value)
+
+    @coroutine
+    def get_itrf_reference(self):
+        sensor_sample = yield self._query('sub', 'array-position-itrf')
+        x, y, z = [float(i) for i in sensor_sample.value.split(",")]
+        raise Return((x, y, z))
\ No newline at end of file
diff --git a/mpikat/test/data/default_antenna.csv b/mpikat/test/data/default_antenna.csv
new file mode 100644
index 0000000000000000000000000000000000000000..6d5008e18bc5d2c238fd74704eb129cbfba72eeb
--- /dev/null
+++ b/mpikat/test/data/default_antenna.csv
@@ -0,0 +1,10 @@
+m007, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -89.5835 -402.7315 2.3675 5864.851 5864.965, 0:21:15.7 0 -0:00:41.8 0:01:56.1 0:00:30.5 -0:00:19.9 -0:23:44.9 -0:00:31.4, 1.22
+m008, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -93.523 -535.0255 3.0425 5875.701 5876.213, -0:03:22.5 0 -0:00:43.0 -0:01:01.6 0:00:32.9 -0:00:12.9 -0:12:18.4 0:01:03.5, 1.22
+m009, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 32.357 -371.054 2.7315 5851.041 5851.051, 2:46:15.8 0 -0:04:14.1 -0:09:28.6 -0:00:22.6 -0:00:17.7 -0:02:33.5 -0:01:07.4, 1.22
+m010, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 88.1005 -511.8735 3.7765 5880.976 5881.857, 0:26:59.5 0 0:01:26.1 -0:00:54.8 0:00:34.2 -0:00:35.0 -0:02:48.1 0:00:38.0, 1.22
+m011, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 84.0175 -352.08 2.7535 5859.067 5859.093, -1:57:25.9 0 0:00:03.9 0:02:53.3 0:00:28.1 -0:00:15.1 -0:06:51.8 0:01:50.1, 1.22
+m012, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 140.0245 -368.268 3.0505 5864.229 5864.229, -0:17:21.2 0 -0:01:49.7 -0:00:38.2 0:00:15.2 -0:00:08.5 -0:01:11.4 0:01:57.3, 1.22
+m013, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 236.7985 -393.4625 3.719 5863.826 5864.44, 0:40:27.9 0 -0:02:35.2 -0:04:58.5 0:00:13.0 0:00:19.2 -0:05:55.6 0:01:09.3, 1.22
+m014, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 280.676 -285.792 3.144 5868.151 5868.376, 0:51:40.6 0 -0:01:27.5 0:00:58.0 0:00:11.9 0:00:03.8 -0:02:31.1 0:01:55.5, 1.22
+m015, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 210.6505 -219.1425 2.342 5919.036 5919.155, -0:10:22.6 0 0:01:16.7 -0:00:51.5 0:00:28.6 -0:00:38.2 -0:10:57.2 -0:00:43.5, 1.22
+m016, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 288.168 -185.868 2.43 5808.71 5808.856, 0:13:42.0 0 -0:02:17.7 0:00:02.0 0:00:04.9 -0:00:12.4 -0:08:00.4 0:02:07.9, 1.22
\ No newline at end of file
diff --git a/mpikat/test/test_fbfuse_beam_manager.py b/mpikat/test/test_fbfuse_beam_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b3b45134f8d145f015427acb677f2c633ad6f4
--- /dev/null
+++ b/mpikat/test/test_fbfuse_beam_manager.py
@@ -0,0 +1,26 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+
+root_logger = logging.getLogger('')
+root_logger.setLevel(logging.CRITICAL)
\ No newline at end of file
diff --git a/mpikat/test/test_fbfuse_ca_server.py b/mpikat/test/test_fbfuse_ca_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b3b45134f8d145f015427acb677f2c633ad6f4
--- /dev/null
+++ b/mpikat/test/test_fbfuse_ca_server.py
@@ -0,0 +1,26 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+
+root_logger = logging.getLogger('')
+root_logger.setLevel(logging.CRITICAL)
\ No newline at end of file
diff --git a/mpikat/test/test_fbfuse_config.py b/mpikat/test/test_fbfuse_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..32116b5b2e81a114b7f9fcb033e53fced009ac24
--- /dev/null
+++ b/mpikat/test/test_fbfuse_config.py
@@ -0,0 +1,86 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import unittest
+import mock
+from mpikat.fbfuse_config import FbfConfigurationManager, MIN_NBEAMS, FbfConfigurationError
+
+NBEAMS_OVERFLOW_TOLERANCE = 0.05 # 5%
+
+def make_ip_pool_mock(nips):
+    ip_pool = mock.Mock()
+    ip_pool.largest_free_range.return_value = nips
+    return ip_pool
+
+def make_worker_pool_mock(nworkers):
+    worker_pool = mock.Mock()
+    worker_pool.navailable.return_value = nworkers
+    return worker_pool
+
+class TestFbfConfigurationManager(unittest.TestCase):
+    def _verify_configuration(self, cm, tscrunch, fscrunch, bandwidth, nbeams, nantennas, granularity):
+        max_allowable_nbeams = nbeams+NBEAMS_OVERFLOW_TOLERANCE*nbeams
+        if max_allowable_nbeams < MIN_NBEAMS:
+            max_allowable_nbeams = MIN_NBEAMS
+        min_allowable_nbeams = MIN_NBEAMS
+        config = cm.get_configuration(tscrunch, fscrunch, nbeams, nantennas, bandwidth, granularity)
+        self.assertTrue(config['num_beams'] <= max_allowable_nbeams, "Actual number of beams {}".format(config['num_beams']))
+        self.assertTrue(config['num_beams'] >= min_allowable_nbeams)
+        self.assertTrue(config['num_mcast_groups'] <= cm.nips)
+        self.assertTrue(config['num_workers_total'] <= cm.nworkers)
+        nbpmg = config['num_beams_per_mcast_group']
+        self.assertTrue((nbpmg%granularity==0) or (granularity%nbpmg==0))
+        nchans = cm._sanitise_user_nchans(int(bandwidth / cm.total_bandwidth * cm.total_nchans))
+        self.assertEqual(config['num_chans'], nchans)
+
+    def test_full_ranges(self):
+        cm = FbfConfigurationManager(64, 856e6, 4096, 64, 128)
+        bandwidths = [10e6,100e6,1000e6]
+        antennas = [1, 4, 13, 16, 26, 32, 33, 56, 64]
+        granularities = [1,2,5,6]
+        nbeams = [1,3,40,100,2000,100000]
+        for bandwidth in bandwidths:
+            for antenna in antennas:
+                for granularity in granularities:
+                    for nbeam in nbeams:
+                        self._verify_configuration(cm, 16, 1, bandwidth, nbeam, antenna, granularity)
+
+    def test_invalid_nantennas(self):
+        cm = FbfConfigurationManager(64, 856e6, 4096, 64, 128)
+        with self.assertRaises(FbfConfigurationError):
+            self._verify_configuration(cm, 16, 1, 856e6, 400, 32, 0)
+
+    def test_no_remaining_workers(self):
+        cm = FbfConfigurationManager(64, 856e6, 4096, 0, 128)
+        with self.assertRaises(FbfConfigurationError):
+            self._verify_configuration(cm, 16, 1, 856e6, 400, 32, 1)
+
+    def test_no_remaining_groups(self):
+        cm = FbfConfigurationManager(64, 856e6, 4096, 64, 0)
+        with self.assertRaises(FbfConfigurationError):
+            self._verify_configuration(cm, 16, 1, 856e6, 400, 32, 1)
+
+if __name__ == "__main__":
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    log = logging.getLogger('')
+    log.setLevel(logging.WARN)
+    unittest.main(buffer=True)
\ No newline at end of file
diff --git a/mpikat/test/test_fbfuse_delay_engine.py b/mpikat/test/test_fbfuse_delay_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..dde72b063868c845c839fe3b8b9606853a49900b
--- /dev/null
+++ b/mpikat/test/test_fbfuse_delay_engine.py
@@ -0,0 +1,57 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import logging
+import os
+import unittest
+from tornado.testing import AsyncTestCase, gen_test
+from katpoint import Antenna, Target
+from mpikat import DelayEngine, BeamManager
+
+root_logger = logging.getLogger('')
+root_logger.setLevel(logging.CRITICAL)
+
+DEFAULT_ANTENNAS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'default_antenna.csv')
+with open(DEFAULT_ANTENNAS_FILE, "r") as f:
+    DEFAULT_ANTENNAS = f.read().strip().splitlines()
+KATPOINT_ANTENNAS = [Antenna(i) for i in DEFAULT_ANTENNAS]
+
+class TestDelayEngine(AsyncTestCase):
+    def setUp(self):
+        super(TestDelayEngine, self).setUp()
+
+    def tearDown(self):
+        super(TestDelayEngine, self).tearDown()
+
+    @gen_test
+    def test_delay_engine_startup(self):
+        bm = BeamManager(4, KATPOINT_ANTENNAS)
+        de = DelayEngine("127.0.0.1", 0, bm)
+        de.start()
+        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
+        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
+        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
+        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
+        de.update_delays()
+
+if __name__ == '__main__':
+    unittest.main(buffer=True)
diff --git a/mpikat/test/test_fbfuse.py b/mpikat/test/test_fbfuse_master_controller.py
similarity index 76%
rename from mpikat/test/test_fbfuse.py
rename to mpikat/test/test_fbfuse_master_controller.py
index e05b33bab759bb9f441186a5782a373bc54c6217..6d5dd887681e028fa348893d07ea1badb1d82fdd 100644
--- a/mpikat/test/test_fbfuse.py
+++ b/mpikat/test/test_fbfuse_master_controller.py
@@ -1,3 +1,25 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
 import unittest
 import mock
 import signal
@@ -6,6 +28,7 @@ import time
 import sys
 import importlib
 import re
+import ipaddress
 from urllib2 import urlopen, URLError
 from StringIO import StringIO
 from tornado.ioloop import IOLoop
@@ -14,21 +37,26 @@ from tornado.testing import AsyncTestCase, gen_test
 from katpoint import Antenna, Target
 from katcp import AsyncReply
 from katcp.testutils import mock_req, handle_mock_req
-from katportalclient import SensorNotFoundError, SensorLookupError
-from mpikat import fbfuse
-from mpikat.fbfuse import (FbfMasterController,
-                           FbfProductController,
-                           ProductLookupError,
-                           KatportalClientWrapper,
-                           FbfWorkerWrapper,
-                           BeamManager,
-                           DelayEngine)
+import mpikat
+from mpikat import (
+    FbfMasterController,
+    FbfProductController,
+    FbfWorkerWrapper
+    )
+from mpikat.katportalclient_wrapper import KatportalClientWrapper
 from mpikat.test.utils import MockFbfConfigurationAuthority
+from mpikat.ip_manager import ContiguousIpRange, ip_range_from_stream
 
 root_logger = logging.getLogger('')
 root_logger.setLevel(logging.CRITICAL)
 
-PORTAL = "monctl.devnmk.camlab.kat.ac.za"
+
+def type_converter(value):
+    """Best-effort conversion of a sensor string to int or float."""
+    try:
+        return int(value)
+    except ValueError:
+        pass
+    try:
+        return float(value)
+    except ValueError:
+        pass
+    return value
 
 class MockKatportalClientWrapper(mock.Mock):
     @coroutine
@@ -64,49 +92,6 @@ class MockKatportalClientWrapper(mock.Mock):
         raise Return((5109318.841, 2006836.367, -3238921.775))
 
 
-class TestKatPortalClientWrapper(AsyncTestCase):
-    PORTAL = "monctl.devnmk.camlab.kat.ac.za"
-    def setUp(self):
-        super(TestKatPortalClientWrapper, self).setUp()
-        try:
-            urlopen("http://{}".format(PORTAL))
-        except URLError:
-            raise unittest.SkipTest("No route to {}".format(PORTAL))
-        self.kpc = KatportalClientWrapper(PORTAL, sub_nr=1)
-
-    def tearDown(self):
-        super(TestKatPortalClientWrapper, self).tearDown()
-
-    @gen_test(timeout=10)
-    def test_katportalclient_wrapper(self):
-        value = yield self.kpc.get_observer_string('m001')
-        try:
-            Antenna(value)
-        except Exception as error:
-            self.fail("Could not convert antenna string to katpoint Antenna instance,"
-                " failed with error {}".format(str(error)))
-
-    @gen_test(timeout=10)
-    def test_katportalclient_wrapper_invalid_antenna(self):
-        try:
-            value = yield self.kpc.get_observer_string('IAmNotAValidAntennaName')
-        except SensorLookupError:
-            pass
-
-    @gen_test(timeout=10)
-    def test_katportalclient_wrapper_get_bandwidth(self):
-        value = yield self.kpc.get_bandwidth('i0.antenna-channelised-voltage')
-
-    @gen_test(timeout=10)
-    def test_katportalclient_wrapper_get_cfreq(self):
-        value = yield self.kpc.get_cfreq('i0.antenna-channelised-voltage')
-
-    @gen_test(timeout=10)
-    def test_katportalclient_wrapper_get_sideband(self):
-        value = yield self.kpc.get_sideband('i0.antenna-channelised-voltage')
-        self.assertIn(value, ['upper','lower'])
-
-
 class TestFbfMasterController(AsyncTestCase):
     DEFAULT_STREAMS = ('{"cam.http": {"camdata": "http://10.8.67.235/api/client/1"}, '
         '"cbf.antenna_channelised_voltage": {"i0.antenna-channelised-voltage": '
@@ -117,11 +102,17 @@ class TestFbfMasterController(AsyncTestCase):
     def setUp(self):
         super(TestFbfMasterController, self).setUp()
         self.server = FbfMasterController('127.0.0.1', 0, dummy=True)
+        self.server._katportal_wrapper_type = MockKatportalClientWrapper
         self.server.start()
 
     def tearDown(self):
         super(TestFbfMasterController, self).tearDown()
 
+    def _add_n_servers(self, n):
+        base_ip = ipaddress.ip_address(u'192.168.1.150')
+        for ii in range(n):
+            self.server._server_pool.add(str(base_ip+ii), 5000)
+
     @coroutine
     def _configure_helper(self, product_name, antennas, nchans, streams_json, proxy_name):
         #Patching isn't working here for some reason (maybe pathing?), the
@@ -129,21 +120,33 @@ class TestFbfMasterController(AsyncTestCase):
         #client. TODO: Fix the structure of the code so that this can be
         #patched properly
         #Test that a valid configure call goes through
-        fbfuse.KatportalClientWrapper = MockKatportalClientWrapper
+        #mpikat.KatportalClientWrapper = MockKatportalClientWrapper
         req = mock_req('configure', product_name, antennas, nchans, streams_json, proxy_name)
         reply,informs = yield handle_mock_req(self.server, req)
-        fbfuse.KatportalClientWrapper = KatportalClientWrapper
+        #mpikat.KatportalClientWrapper = KatportalClientWrapper
         raise Return((reply, informs))
 
     @coroutine
-    def _check_sensor_value(self, sensor_name, expected_value, expected_status='nominal'):
-        #Test that the products sensor has been updated
+    def _get_sensor_reading(self, sensor_name):
         req = mock_req('sensor-value', sensor_name)
         reply,informs = yield handle_mock_req(self.server, req)
         self.assertTrue(reply.reply_ok(), msg=reply)
         status, value = informs[0].arguments[-2:]
+        value = type_converter(value)
+        raise Return((status, value))
+
+    @coroutine
+    def _check_sensor_value(self, sensor_name, expected_value, expected_status='nominal', tolerance=None):
+        # Test that the product's sensor has been updated. The value from
+        # _get_sensor_reading has already been type converted, so no
+        # further conversion is needed here.
+        status, value = yield self._get_sensor_reading(sensor_name)
         self.assertEqual(status, expected_status)
-        self.assertEqual(value, expected_value)
+        if tolerance is None:
+            self.assertEqual(value, expected_value)
+        else:
+            # The bounds must be derived from the expected value; deriving
+            # them from the reading itself would make the check pass trivially.
+            max_value = expected_value + expected_value*tolerance
+            min_value = expected_value - expected_value*tolerance
+            self.assertTrue((value <= max_value) and (value >= min_value))
 
     @coroutine
     def _check_sensor_exists(self, sensor_name):
@@ -258,7 +261,7 @@ class TestFbfMasterController(AsyncTestCase):
         hostname = '127.0.0.1'
         port = 10000
         yield self._send_request_expect_ok('register-worker-server', hostname, port)
-        server = self.server._server_pool.available()[0]
+        server = self.server._server_pool.available()[-1]
         self.assertEqual(server.hostname, hostname)
         self.assertEqual(server.port, port)
         other = FbfWorkerWrapper(hostname, port)
@@ -281,7 +284,7 @@ class TestFbfMasterController(AsyncTestCase):
 
     @gen_test
     def test_deregister_nonexistant_worker_server(self):
-        hostname, port = '127.0.0.1', 60000
+        hostname, port = '192.168.1.150', 60000
         yield self._send_request_expect_ok('deregister-worker-server', hostname, port)
 
     @gen_test
@@ -295,9 +298,9 @@ class TestFbfMasterController(AsyncTestCase):
             self.DEFAULT_NCHANS, self.DEFAULT_STREAMS, proxy_name)
         yield self._send_request_expect_ok('configure-coherent-beams', product_name, nbeams,
             self.DEFAULT_ANTENNAS, fscrunch, tscrunch)
-        yield self._check_sensor_value("{}.coherent-beam-count".format(product_name), str(nbeams))
-        yield self._check_sensor_value("{}.coherent-beam-tscrunch".format(product_name), str(tscrunch))
-        yield self._check_sensor_value("{}.coherent-beam-fscrunch".format(product_name), str(fscrunch))
+        yield self._check_sensor_value("{}.coherent-beam-count".format(product_name), nbeams)
+        yield self._check_sensor_value("{}.coherent-beam-tscrunch".format(product_name), tscrunch)
+        yield self._check_sensor_value("{}.coherent-beam-fscrunch".format(product_name), fscrunch)
         yield self._check_sensor_value("{}.coherent-beam-antennas".format(product_name), self.DEFAULT_ANTENNAS)
 
     @gen_test
@@ -310,8 +313,8 @@ class TestFbfMasterController(AsyncTestCase):
             self.DEFAULT_NCHANS, self.DEFAULT_STREAMS, proxy_name)
         yield self._send_request_expect_ok('configure-incoherent-beam', product_name,
             self.DEFAULT_ANTENNAS, fscrunch, tscrunch)
-        yield self._check_sensor_value("{}.incoherent-beam-tscrunch".format(product_name), str(tscrunch))
-        yield self._check_sensor_value("{}.incoherent-beam-fscrunch".format(product_name), str(fscrunch))
+        yield self._check_sensor_value("{}.incoherent-beam-tscrunch".format(product_name), tscrunch)
+        yield self._check_sensor_value("{}.incoherent-beam-fscrunch".format(product_name), fscrunch)
         yield self._check_sensor_value("{}.incoherent-beam-antennas".format(product_name), self.DEFAULT_ANTENNAS)
 
     @gen_test
@@ -367,16 +370,20 @@ class TestFbfMasterController(AsyncTestCase):
         hostname = "127.0.0.1"
         sb_id = "default_subarray"
         target = 'test_target,radec,12:00:00,01:00:00'
-        sb_config = {u'coherent-beams':
-                    {u'fscrunch': 2,
-                     u'nbeams': 100,
-                     u'tscrunch': 22},
-                  u'incoherent-beam':
-                    {u'fscrunch': 32,
-                     u'tscrunch': 4}}
+        sb_config = {
+            u'coherent-beams-nbeams':100,
+            u'coherent-beams-tscrunch':22,
+            u'coherent-beams-fscrunch':2,
+            u'coherent-beams-antennas':'m007',
+            u'coherent-beams-granularity':6,
+            u'incoherent-beam-tscrunch':16,
+            u'incoherent-beam-fscrunch':1,
+            u'incoherent-beam-antennas':'m008'
+            }
         ca_server = MockFbfConfigurationAuthority(hostname, 0)
         ca_server.start()
         ca_server.set_sb_config_return_value(proxy_name, sb_id, sb_config)
+        self._add_n_servers(64)
         port = ca_server.bind_address[1]
         yield self._send_request_expect_ok('configure', product_name, self.DEFAULT_ANTENNAS,
             self.DEFAULT_NCHANS, self.DEFAULT_STREAMS, proxy_name)
@@ -387,13 +394,22 @@ class TestFbfMasterController(AsyncTestCase):
             yield sleep(0.5)
             if product.ready: break
         # Here we need to check if the proxy sensors have been updated
-        yield self._check_sensor_value("{}.coherent-beam-count".format(product_name), str(sb_config['coherent-beams']['nbeams']))
-        yield self._check_sensor_value("{}.coherent-beam-tscrunch".format(product_name), str(sb_config['coherent-beams']['tscrunch']))
-        yield self._check_sensor_value("{}.coherent-beam-fscrunch".format(product_name), str(sb_config['coherent-beams']['fscrunch']))
-        yield self._check_sensor_value("{}.coherent-beam-antennas".format(product_name), self.DEFAULT_ANTENNAS)
-        yield self._check_sensor_value("{}.incoherent-beam-tscrunch".format(product_name), str(sb_config['incoherent-beam']['tscrunch']))
-        yield self._check_sensor_value("{}.incoherent-beam-fscrunch".format(product_name), str(sb_config['incoherent-beam']['fscrunch']))
-        yield self._check_sensor_value("{}.incoherent-beam-antennas".format(product_name), self.DEFAULT_ANTENNAS)
+        yield self._check_sensor_value("{}.coherent-beam-count".format(product_name), sb_config['coherent-beams-nbeams'], tolerance=0.05)
+        yield self._check_sensor_value("{}.coherent-beam-tscrunch".format(product_name), sb_config['coherent-beams-tscrunch'])
+        yield self._check_sensor_value("{}.coherent-beam-fscrunch".format(product_name), sb_config['coherent-beams-fscrunch'])
+        yield self._check_sensor_value("{}.coherent-beam-antennas".format(product_name), 'm007')
+        yield self._check_sensor_value("{}.incoherent-beam-tscrunch".format(product_name), sb_config['incoherent-beam-tscrunch'])
+        yield self._check_sensor_value("{}.incoherent-beam-fscrunch".format(product_name), sb_config['incoherent-beam-fscrunch'])
+        yield self._check_sensor_value("{}.incoherent-beam-antennas".format(product_name), 'm008')
+        expected_ibc_mcast_group = ContiguousIpRange(str(self.server._ip_pool._ip_range.base_ip),
+            self.server._ip_pool._ip_range.port, 1)
+        yield self._check_sensor_value("{}.incoherent-beam-multicast-group".format(product_name),
+            expected_ibc_mcast_group.format_katcp())
+        _, ngroups = yield self._get_sensor_reading("{}.coherent-beam-ngroups".format(product_name))
+        expected_cbc_mcast_groups = ContiguousIpRange(str(self.server._ip_pool._ip_range.base_ip+1),
+            self.server._ip_pool._ip_range.port, ngroups)
+        yield self._check_sensor_value("{}.coherent-beam-multicast-groups".format(product_name),
+            expected_cbc_mcast_groups.format_katcp())
         yield self._send_request_expect_ok('capture-start', product_name)
 
     @gen_test
@@ -409,6 +425,7 @@ class TestFbfMasterController(AsyncTestCase):
         ca_server.set_sb_config_return_value(proxy_name, sb_id, {})
         ca_server.set_target_config_return_value(proxy_name, targets[0], {'beams':targets})
         port = ca_server.bind_address[1]
+        self._add_n_servers(64)
         yield self._send_request_expect_ok('configure', product_name, self.DEFAULT_ANTENNAS,
             self.DEFAULT_NCHANS, self.DEFAULT_STREAMS, proxy_name)
         yield self._send_request_expect_ok('set-configuration-authority', product_name, hostname, port)
@@ -450,35 +467,6 @@ class TestFbfMasterController(AsyncTestCase):
             Target(targets[1]).format_katcp())
 
 
-class TestFbfDelayEngine(AsyncTestCase):
-    DEFAULT_ANTENNAS = ['m007, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -89.5835 -402.7315 2.3675 5864.851 5864.965, 0:21:15.7 0 -0:00:41.8 0:01:56.1 0:00:30.5 -0:00:19.9 -0:23:44.9 -0:00:31.4, 1.22',
-                        'm008, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -93.523 -535.0255 3.0425 5875.701 5876.213, -0:03:22.5 0 -0:00:43.0 -0:01:01.6 0:00:32.9 -0:00:12.9 -0:12:18.4 0:01:03.5, 1.22',
-                        'm009, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 32.357 -371.054 2.7315 5851.041 5851.051, 2:46:15.8 0 -0:04:14.1 -0:09:28.6 -0:00:22.6 -0:00:17.7 -0:02:33.5 -0:01:07.4, 1.22',
-                        'm010, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 88.1005 -511.8735 3.7765 5880.976 5881.857, 0:26:59.5 0 0:01:26.1 -0:00:54.8 0:00:34.2 -0:00:35.0 -0:02:48.1 0:00:38.0, 1.22',
-                        'm011, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 84.0175 -352.08 2.7535 5859.067 5859.093, -1:57:25.9 0 0:00:03.9 0:02:53.3 0:00:28.1 -0:00:15.1 -0:06:51.8 0:01:50.1, 1.22',
-                        'm012, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 140.0245 -368.268 3.0505 5864.229 5864.229, -0:17:21.2 0 -0:01:49.7 -0:00:38.2 0:00:15.2 -0:00:08.5 -0:01:11.4 0:01:57.3, 1.22',
-                        'm013, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 236.7985 -393.4625 3.719 5863.826 5864.44, 0:40:27.9 0 -0:02:35.2 -0:04:58.5 0:00:13.0 0:00:19.2 -0:05:55.6 0:01:09.3, 1.22',
-                        'm014, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 280.676 -285.792 3.144 5868.151 5868.376, 0:51:40.6 0 -0:01:27.5 0:00:58.0 0:00:11.9 0:00:03.8 -0:02:31.1 0:01:55.5, 1.22',
-                        'm015, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 210.6505 -219.1425 2.342 5919.036 5919.155, -0:10:22.6 0 0:01:16.7 -0:00:51.5 0:00:28.6 -0:00:38.2 -0:10:57.2 -0:00:43.5, 1.22',
-                        'm016, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 288.168 -185.868 2.43 5808.71 5808.856, 0:13:42.0 0 -0:02:17.7 0:00:02.0 0:00:04.9 -0:00:12.4 -0:08:00.4 0:02:07.9, 1.22']
-    KATPOINT_ANTENNAS = [Antenna(i) for i in DEFAULT_ANTENNAS]
-    def setUp(self):
-        super(TestFbfDelayEngine, self).setUp()
-
-    def tearDown(self):
-        super(TestFbfDelayEngine, self).tearDown()
-
-    @gen_test
-    def test_delay_engine_startup(self):
-        bm = BeamManager(4, self.KATPOINT_ANTENNAS)
-        de = DelayEngine("127.0.0.1", 0, bm)
-        de.start()
-        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
-        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
-        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
-        bm.add_beam(Target('test_target0,radec,12:00:00,01:00:00'))
-        de.update_delays()
-
 if __name__ == '__main__':
     unittest.main(buffer=True)
 
diff --git a/mpikat/test/test_katportalclient_wrapper.py b/mpikat/test/test_katportalclient_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc84d3959ba62ec1ec0af261bc8dcf04afa2c850
--- /dev/null
+++ b/mpikat/test/test_katportalclient_wrapper.py
@@ -0,0 +1,91 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import unittest
+import mock
+import logging
+import re
+from urllib2 import urlopen, URLError
+from StringIO import StringIO
+from tornado.gen import coroutine, Return
+from tornado.testing import AsyncTestCase, gen_test
+from katpoint import Antenna
+from katportalclient import SensorNotFoundError, SensorLookupError
+from mpikat.katportalclient_wrapper import KatportalClientWrapper
+
+root_logger = logging.getLogger('')
+root_logger.setLevel(logging.CRITICAL)
+
+PORTAL = "monctl.devnmk.camlab.kat.ac.za"
+
+class TestKatPortalClientWrapper(AsyncTestCase):
+    PORTAL = "monctl.devnmk.camlab.kat.ac.za"
+    def setUp(self):
+        super(TestKatPortalClientWrapper, self).setUp()
+        try:
+            urlopen("http://{}".format(PORTAL))
+        except URLError:
+            raise unittest.SkipTest("No route to {}".format(PORTAL))
+        self.kpc = KatportalClientWrapper(PORTAL, sub_nr=1)
+
+    def tearDown(self):
+        super(TestKatPortalClientWrapper, self).tearDown()
+
+    @gen_test(timeout=10)
+    def test_get_observer_string(self):
+        value = yield self.kpc.get_observer_string('m001')
+        try:
+            Antenna(value)
+        except Exception as error:
+            self.fail("Could not convert antenna string to katpoint Antenna instance,"
+                " failed with error {}".format(str(error)))
+
+    @gen_test(timeout=10)
+    def test_get_observer_string_invalid_antenna(self):
+        # The lookup must fail; without this assertion the test would pass
+        # silently even if no exception were raised.
+        with self.assertRaises(SensorLookupError):
+            yield self.kpc.get_observer_string('IAmNotAValidAntennaName')
+
+    @gen_test(timeout=10)
+    def test_get_bandwidth(self):
+        value = yield self.kpc.get_bandwidth('i0.antenna-channelised-voltage')
+
+    @gen_test(timeout=10)
+    def test_get_cfreq(self):
+        value = yield self.kpc.get_cfreq('i0.antenna-channelised-voltage')
+
+    @gen_test(timeout=10)
+    def test_get_sideband(self):
+        value = yield self.kpc.get_sideband('i0.antenna-channelised-voltage')
+        self.assertIn(value, ['upper','lower'])
+
+    @gen_test(timeout=10)
+    def test_get_reference_itrf(self):
+        value = yield self.kpc.get_itrf_reference()
+
+    @gen_test(timeout=10)
+    def test_get_sync_epoch(self):
+        value = yield self.kpc.get_sync_epoch()
+
+if __name__ == '__main__':
+    unittest.main(buffer=True)
\ No newline at end of file
diff --git a/mpikat/test/utils.py b/mpikat/test/utils.py
index da51caa4d2e133b065177d14f7cb369b236015a1..a84e9b307a81909364850b5c1bd266b8ed8d0aa6 100644
--- a/mpikat/test/utils.py
+++ b/mpikat/test/utils.py
@@ -22,4 +22,5 @@ class MockFbfConfigurationAuthority(BaseFbfConfigurationAuthority):
 
     @coroutine
     def get_sb_config(self, proxy_id, sb_id):
-        raise Return(self.sb_return_values[(proxy_id, sb_id)])
\ No newline at end of file
+        raise Return(self.sb_return_values[(proxy_id, sb_id)])
+
diff --git a/mpikat/utils.py b/mpikat/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3db47547c55c7a39a9e927d7fedae7d862159498
--- /dev/null
+++ b/mpikat/utils.py
@@ -0,0 +1,47 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+class AntennaValidationError(Exception):
+    pass
+
+def is_power_of_two(n):
+    """
+    @brief  Test if number is a power of two
+
+    @return True|False
+    """
+    return n != 0 and ((n & (n - 1)) == 0)
+
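+# For example: is_power_of_two(64) -> True, is_power_of_two(48) -> False.
+# The check works because a power of two has exactly one set bit, so
+# n & (n - 1) clears that bit and yields zero.
+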
+def next_power_of_two(n):
+    """
+    @brief  Round a number up to the next power of two
+    """
+    return 2**(n-1).bit_length()
+
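+# For example: next_power_of_two(5) -> 8 and next_power_of_two(8) -> 8,
+# since (n - 1).bit_length() is the number of bits needed to represent n - 1.
+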
+def parse_csv_antennas(antennas_csv):
+    """
+    @brief  Parse a comma-separated list of antenna names into a list,
+            validating that it is non-empty and free of duplicates.
+    """
+    names = [antenna.strip() for antenna in antennas_csv.split(",")]
+    if len(names) == 1 and names[0] == '':
+        raise AntennaValidationError("Provided antenna list was empty")
+    if len(names) != len(set(names)):
+        raise AntennaValidationError("Not all provided antennas were unique")
+    return names
\ No newline at end of file
diff --git a/mpikat/worker_pool.py b/mpikat/worker_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..575e56cdde3e117dfe025744c919f8e87be585cb
--- /dev/null
+++ b/mpikat/worker_pool.py
@@ -0,0 +1,135 @@
+"""
+Copyright (c) 2018 Ewan Barr <ebarr@mpifr-bonn.mpg.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+import logging
+from threading import Lock
+
+log = logging.getLogger('mpikat.worker_pool')
+lock = Lock()
+
+class WorkerAllocationError(Exception):
+    pass
+
+class WorkerDeallocationError(Exception):
+    pass
+
+class WorkerPool(object):
+    """Wrapper class for managing server
+    allocation and deallocation to subarray/products
+    """
+    def __init__(self):
+        """
+        @brief   Construct a new instance
+        """
+        self._servers = set()
+        self._allocated = set()
+
+    def make_wrapper(self, hostname, port):
+        """
+        @brief  Construct a wrapper object for a worker server;
+                must be implemented by subclasses.
+        """
+        # NotImplemented is a value, not an exception; NotImplementedError
+        # is the correct exception to raise here.
+        raise NotImplementedError
+
+    def add(self, hostname, port):
+        """
+        @brief  Add a new FbfWorkerServer to the server pool
+
+        @param hostname  The hostname for the worker server
+        @param port      The port number that the worker server serves on
+        """
+        wrapper = self.make_wrapper(hostname,port)
+        if wrapper not in self._servers:
+            wrapper.start()
+            log.debug("Adding {} to server set".format(wrapper))
+            self._servers.add(wrapper)
+
+    def remove(self, hostname, port):
+        """
+        @brief  Remove a worker server from the server pool
+
+        @param hostname  The hostname for the worker server
+        @param port      The port number that the worker server serves on
+        """
+        wrapper = self.make_wrapper(hostname,port)
+        if wrapper in self._allocated:
+            raise WorkerDeallocationError("Cannot remove allocated server from pool")
+        try:
+            self._servers.remove(wrapper)
+        except KeyError:
+            log.warning("Could not find {}:{} in server pool".format(hostname, port))
+
+    def allocate(self, count):
+        """
+        @brief    Allocate a number of servers from the pool.
+
+        @note     Free servers will be allocated by priority order
+                  with 0 being highest priority
+
+        @return   A list of FbfWorkerWrapper objects
+        """
+        with lock:
+            log.debug("Request to allocate {} servers".format(count))
+            available_servers = list(self._servers.difference(self._allocated))
+            log.debug("{} servers available".format(len(available_servers)))
+            available_servers.sort(key=lambda server: server.priority, reverse=True)
+            if len(available_servers) < count:
+                raise WorkerAllocationError("Cannot allocate {0} servers, only {1} available".format(
+                    count, len(available_servers)))
+            allocated_servers = []
+            for _ in range(count):
+                server = available_servers.pop()
+                log.debug("Allocating server: {}".format(server))
+                allocated_servers.append(server)
+                self._allocated.add(server)
+            return allocated_servers
+
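+    # A minimal usage sketch, assuming a concrete subclass that implements
+    # make_wrapper (names illustrative):
+    #
+    #     pool = SomeWorkerPool()
+    #     pool.add('192.168.1.150', 5000)
+    #     servers = pool.allocate(1)
+    #     ...  # use the allocated workers
+    #     pool.deallocate(servers)
+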
+    def deallocate(self, servers):
+        """
+        @brief    Deallocate servers and return them to the pool.
+
+        @param    servers  A list of FbfWorkerWrapper objects
+        """
+        for server in servers:
+            log.debug("Deallocating server: {}".format(server))
+            self._allocated.remove(server)
+
+    def reset(self):
+        """
+        @brief   Deallocate all servers
+        """
+        log.debug("Reseting server pool allocations")
+        self._allocated = set()
+
+    def available(self):
+        """
+        @brief   Return list of available servers
+        """
+        return list(self._servers.difference(self._allocated))
+
+    def navailable(self):
+        return len(self.available())
+
+    def used(self):
+        """
+        @brief   Return list of allocated servers
+        """
+        return list(self._allocated)
+
+    def nused(self):
+        return len(self.used())
+