...
 
Commits (2)
00129207 <feng.shengqin@zte.com.cn>
Andreas Jaeger <aj@suse.com>
Arundhati Surpur <arundhati@nectechnologies.in>
Biwei Guo <biwei.guo@intel.com>
Coco <419546439@qq.com>
Doug Hellmann <doug@doughellmann.com>
Duong Ha-Quang <duonghq@vn.fujitsu.com>
Eric Fried <efried@us.ibm.com>
Ian Wienand <iwienand@redhat.com>
James E. Blair <jeblair@redhat.com>
Jinghan Sun <jinghan.sun@intel.com>
Li Liu <liliueecg@gmail.com>
Nguyen Hai <nguyentrihai93@gmail.com>
Nguyen Quang Huy <huynq@vn.fujitsu.com>
Nguyen Van Trung <trungnv@vn.fujitsu.com>
OpenStack Release Bot <infra-root@openstack.org>
Rushil Chugh <rushil.chugh@gmail.com>
Sean McGinnis <sean.mcginnis@gmail.com>
Shaohe Feng <shaohe.feng@intel.com>
Sumit Jamgade <sjamgade@suse.com>
Sundar Nadathur <ns1.sundar@gmail.com>
Sundar Nadathur <sundar.nadathur@intel.com>
Sunil <sunil.kumar.veerappa@huawei.com>
Toby Huang <tobyxdd@gmail.com>
Ubuntu <xin-ran.wang@intel.com>
Vu Cong Tuan <tuanvc@vn.fujitsu.com>
Xinran WANG <wxr930217@gmail.com>
Xinran WANG <xin-ran.wang@intel.com>
Yaguo Zhou <zhouyaguo@gmail.com>
Yongjun Bai <bai.yongjun@99cloud.net>
Yumeng Bao <yumeng_bao@yahoo.com>
Zhao Yi <zhaoyi@cmss.chinamobile.com>
Zuul <zuul@review.openstack.org>
chenke <chen.ke14@zte.com.cn>
coco-Gao <419546439@qq.com>
heluwei <heluwei@huawei.com>
jkilpatr <jkilpatr@redhat.com>
melissaml <ma.lei@99cloud.net>
nizam <abdul.nizamuddin@nectechnologies.in>
shangxiaobj <shangxiaobj@inspur.com>
wangxu <wangxu17@lenovo.com>
wangzh21 <wangzh21@lenovo.com>
whoami-rajat <rajatdhasmana@gmail.com>
wingwj <wingwj@gmail.com>
zhipengh <huangzhipeng@huawei.com>
zhouxinyong <zhouxinyong@inspur.com>
zhuli <zhuli27@huawei.com>
......@@ -11,7 +11,3 @@ submitted for review via the Gerrit tool:
https://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/openstack-cyborg
......@@ -7,10 +7,11 @@ Before you commit your code run tox against your patch using the command.
tox .
If any of the tests fail correct the error and try again. If your code is valid Python
but not valid pep8 you may find autopep8 from pip useful.
If any of the tests fail correct the error and try again. If your code is valid
Python but not valid pep8 you may find autopep8 from pip useful.
Once you submit a patch integration tests will run and those may fail, -1'ing your patch
you can make a gerrit comment 'recheck ci' if you have reviewed the logs from the jobs
by clicking on the job name in gerrit and concluded that the failure was spurious or otherwise
not related to your patch. If problems persist contact people on #openstack-cyborg or #openstack-infra.
Once you submit a patch integration tests will run and those may fail,
-1'ing your patch you can make a gerrit comment 'recheck ci' if you have
reviewed the logs from the jobs by clicking on the job name in gerrit and
concluded that the failure was spurious or otherwise not related to your patch.
If problems persist contact people on #openstack-cyborg or #openstack-infra.
......@@ -19,7 +19,6 @@ Cyborg FPGA driver implementation.
from cyborg.accelerator.drivers.fpga import utils
#Added m2dc vendor in VENDOR_MAPS
VENDOR_MAPS = {"0x8086": "intel","0xXXXX": "m2dc"}
......@@ -27,7 +26,7 @@ class FPGADriver(object):
"""Base class for FPGA drivers.
This is just a virtual FPGA drivers interface.
Vedor should implement their specific drivers.
Vendor should implement their specific drivers.
"""
@classmethod
......
......@@ -46,7 +46,7 @@ class IntelFPGADriver(FPGADriver):
else:
bdf = sysinfo.get_bdf_by_path(path)
bdfs = sysinfo.split_bdf(bdf)
cmd = ["sudo", "fpgaconf"]
cmd = ["sudo", "/usr/bin/fpgaconf"]
for i in zip(["-b", "-d", "-f"], bdfs):
cmd.extend(i)
cmd.append(image)
......
......@@ -22,7 +22,10 @@ Cyborg Intel FPGA driver implementation.
import glob
import os
import re
from cyborg import objects
from cyborg.objects.driver_objects import driver_deployable, driver_device,\
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
SYS_FPGA = "/sys/class/fpga"
DEVICE = "device"
......@@ -32,11 +35,10 @@ BDF_PATTERN = re.compile(
"^[a-fA-F\d]{4}:[a-fA-F\d]{2}:[a-fA-F\d]{2}\.[a-fA-F\d]$")
DEVICE_FILE_MAP = {"vendor": "vendor_id",
"device": "product_id",
"sriov_numvfs": "pr_num"}
DEVICE_FILE_MAP = {"vendor": "vendor",
"device": "model"}
DEVICE_FILE_HANDLER = {}
DEVICE_EXPOSED = ["vendor", "device", "sriov_numvfs"]
DEVICE_EXPOSED = ["vendor", "device"]
def all_fpgas():
......@@ -49,7 +51,7 @@ def all_vf_fpgas():
glob.glob(os.path.join(SYS_FPGA, "*/device/physfn"))]
def all_pure_pf_fpgas():
def all_pfs_have_vf():
return [dev.rsplit("/", 2)[0] for dev in
glob.glob(os.path.join(SYS_FPGA, "*/device/virtfn0"))]
......@@ -131,32 +133,72 @@ def fpga_device(path):
def fpga_tree():
def gen_fpga_infos(path, vf=True):
name = os.path.basename(path)
dpath = os.path.realpath(os.path.join(path, DEVICE))
bdf = os.path.basename(dpath)
func = "vf" if vf else "pf"
pf_bdf = os.path.basename(
os.path.realpath(os.path.join(dpath, PF))) if vf else ""
fpga = {"path": path, "function": func,
"devices": bdf, "assignable": True,
"parent_devices": pf_bdf,
fpga = {"type": constants.DEVICE_FPGA,
"devices": bdf,
"name": name}
d_info = fpga_device(dpath)
fpga.update(d_info)
return fpga
devs = []
pure_pfs = all_pure_pf_fpgas()
pf_has_vf = all_pfs_have_vf()
for pf in all_pf_fpgas():
fpga = gen_fpga_infos(pf, False)
if pf in pure_pfs:
fpga["assignable"] = False
if pf in pf_has_vf:
fpga["regions"] = []
vfs = all_vfs_in_pf_fpgas(pf)
for vf in vfs:
vf_fpga = gen_fpga_infos(vf, True)
fpga["regions"].append(vf_fpga)
devs.append(fpga)
devs.append(_generate_driver_device(fpga, pf in pf_has_vf))
return devs
def _generate_driver_device(fpga, pf_has_vf):
    """Convert an FPGA info dict into a DriverDevice object.

    :param fpga: dict carrying "vendor", "model" and "type" keys.
    :param pf_has_vf: True when this PF has SR-IOV VFs enabled; forwarded
        to the deployable-list builder.
    :returns: populated DriverDevice.
    """
    device = driver_device.DriverDevice()
    device.vendor = fpga["vendor"]
    device.model = fpga["model"]
    device.type = fpga["type"]
    device.controlpath_id = _generate_controlpath_id(fpga)
    device.deployable_list = _generate_dep_list(fpga, pf_has_vf)
    return device
def _generate_controlpath_id(fpga):
    """Build the PCI control-path identifier for *fpga*.

    :param fpga: dict whose "devices" key holds the PCI BDF string.
    :returns: DriverControlPathID with cpid_type "pci".
    """
    cpid = driver_controlpath_id.DriverControlPathID()
    cpid.cpid_type = "pci"
    cpid.cpid_info = fpga["devices"]
    return cpid
def _generate_dep_list(fpga, pf_has_vf):
    """Build the single-element DriverDeployable list for one FPGA.

    :param fpga: FPGA info dict; when pf_has_vf is True it must also
        carry a "regions" list of VF info dicts.
    :param pf_has_vf: True when the PF has SR-IOV VFs enabled.
    :returns: list containing exactly one DriverDeployable.
    """
    dep_list = []
    driver_dep = driver_deployable.DriverDeployable()
    driver_dep.attach_handle_list = []
    # pf without sriov enabled.
    if not pf_has_vf:
        driver_dep.num_accelerators = 1
        driver_dep.attach_handle_list = \
            [_generate_attach_handle(fpga, pf_has_vf)]
        driver_dep.name = fpga["name"]
    # pf with sriov enabled, may have several regions and several vfs.
    # For now, there is only region, this maybe improve in next release.
    else:
        driver_dep.num_accelerators = len(fpga["regions"])
        for vf in fpga["regions"]:
            driver_dep.attach_handle_list.append(
                _generate_attach_handle(vf, False))
            # NOTE(review): reassigned every iteration, so the deployable
            # is named after the *last* VF; if "regions" is empty the name
            # is never set at all — confirm both are intended.
            driver_dep.name = vf["name"]
    dep_list.append(driver_dep)
    return dep_list
def _generate_attach_handle(fpga, pf_has_vf):
    """Build the PCI attach handle for *fpga*.

    :param fpga: dict whose "devices" key holds the PCI BDF string.
    :param pf_has_vf: accepted for signature symmetry; not used here.
    :returns: DriverAttachHandle marked as not in use.
    """
    handle = driver_attach_handle.DriverAttachHandle()
    handle.attach_type = "pci"
    handle.attach_info = fpga["devices"]
    handle.in_use = False
    return handle
from oslo_log import log as logging
import glob
import os
import re
import fcntl, socket, struct
import hashlib
from cyborg.objects.driver_objects import driver_deployable, driver_device,\
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
from oslo_versionedobjects import base as object_base
from cyborg.objects import fields as object_fields
from cyborg.objects import base
LOG = logging.getLogger(__name__)
DEVICE_TYPE = ["GPU", "FPGA"]
#Added by Rahul for m2dc data generation
@base.CyborgObjectRegistry.register
class m2dcDataGenerator(base.CyborgObject, object_base.VersionedObjectDictCompat):
    """Versioned object describing one m2dc device record.

    NOTE(review): this appears to flatten device, attach-handle and
    control-path attributes into a single object — confirm against the
    separate driver_* objects used elsewhere in this tree. The class name
    also breaks PascalCase convention; kept as-is because the registry
    stores it by name.
    """
    fields = {
        #'id': object_fields.IntegerField(nullable=False),
        #'uuid': object_fields.UUIDField(nullable=False),
        # Device kind, restricted to the DEVICE_TYPE list ("GPU"/"FPGA").
        'type': object_fields.EnumField(valid_values=DEVICE_TYPE,
                                        nullable=False),
        'vendor': object_fields.StringField(nullable=False),
        'model': object_fields.StringField(nullable=False),
        'std_board_info': object_fields.StringField(nullable=True),
        'vendor_board_info': object_fields.StringField(nullable=True),
        'hostname': object_fields.StringField(nullable=False),
        'attach_type': object_fields.StringField(nullable=False),
        # PCI BDF or mediated device ID...
        'attach_info': object_fields.StringField(nullable=False),
        # The status of attach_handle, is in use or not.
        'in_use': object_fields.BooleanField(nullable=False, default=False),
        'cpid_type': object_fields.StringField(nullable=False),
        # PCI BDF, PowerVM device, etc.
        'cpid_info': object_fields.StringField(nullable=False),
        'dep_name': object_fields.StringField(nullable=False),
    }
\ No newline at end of file
......@@ -22,33 +22,56 @@ import os
import ast
from collections import Counter
from oslo_log import log as logging
import paramiko
import sys
from cyborg import objects
from cyborg.objects.device import Device
from cyborg.accelerator.drivers.fpga.base import FPGADriver
from cyborg.accelerator.drivers.fpga.m2dc import sysinfo
from cyborg import objects
LOG = logging.getLogger(__name__)
class M2DCFPGADriver(FPGADriver):
    """m2dc vendor implementation of the virtual FPGA driver interface.

    Vendor should implement their specific drivers.
    """
    VENDOR = "m2dc"

    def __init__(self, *args, **kwargs):
        pass

    def discover(self):
        """Return the m2dc boards found by sysinfo.discover_boards()."""
        return sysinfo.discover_boards()

    # NOTE(review): this definition is shadowed by the second program()
    # below (diff residue — both versions were kept). It is dead code, and
    # `subprocess` is not imported in this module view anyway.
    def program(self, device_path, image):
        cmd = ["sudo", "fpga"] #fpga should be replaced with Reflex CES fpga reprogramming API
        cmd.append(image)
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        # FIXME Should log p.communicate(), p.stderr
        p.wait()
        return p.returncode

    def program(self, device_id, image):
        """Program *image* onto the board identified by *device_id*.

        Looks up the board's IP from the Device record, then runs a
        placeholder command over SSH. NOTE(review): LOG.error is used for
        plain tracing; the SSH username and command are hard-coded; and
        Device.get_by_id is called with `self` where a request context is
        presumably expected — confirm against the Device API.
        """
        LOG.error("Device id is " + str(device_id))
        LOG.error("Image location is " + str(image))
        device_details = Device.get_by_id(self, device_id)
        LOG.error("board ip is " + str(device_details.std_board_info))
        SSH_USERNAME = "rahul"
        SSH_COMMAND = "cat " + image
        #paramiko.Transport._preferred_ciphers = ('aes256-ctr',)
        ssh = paramiko.SSHClient()
        # Auto-accepting unknown host keys disables MITM protection.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_stdin = ssh_stdout = ssh_stderr = None
        try:
            ssh.connect(device_details.std_board_info, username=SSH_USERNAME)
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(SSH_COMMAND)
        except Exception as e:
            sys.stderr.write("SSH connection error: {0}".format(e))
        if ssh_stdout:
            sys.stdout.write(ssh_stdout.read())
        if ssh_stderr:
            sys.stderr.write(ssh_stderr.read())
\ No newline at end of file
......@@ -16,64 +16,186 @@
"""
Cyborg M2DC FPGA driver implementation.
"""
from oslo_log import log as logging
import glob
import os
import os,sys
import json
import ast
import flask
import requests
import subprocess
import re
import fcntl, socket, struct
import hashlib
from requests import auth
import paramiko
from cyborg.objects.driver_objects import driver_deployable, driver_device,\
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
#Added for sending request to RECSMaster
RECS_URL="http://admin:admin@localhost:8080/redfish/v1/"
headers = {'Content-Type': 'application/json'}
list_fpga_id=[]
list_fpga_id1=["fpga1","fpga2"]
list_fpga_ip1 =["192.168.2.159","192.168.2.159"]
list_fpga_mac=[]
list_fpga_ip=[]
LOG = logging.getLogger(__name__)
def available_Systems():
    """Return the system ids advertised by the RECSMaster Redfish API.

    NOTE(review): the nested str()/split() slicing below carves the id out
    of each member's repr'd '@odata.id' dict entry — extremely brittle;
    presumably item['@odata.id'] holds a URL ending in the id. Verify
    against an actual RECSMaster response. The per-item GET result
    (sys_res) is never used.
    """
    listed_systems = []
    r = requests.get(RECS_URL+"Systems", headers=headers)
    json_data = json.loads(r.text)
    j = ast.literal_eval(str(json_data['Members']))
    for item in j:
        sys_res = requests.get(RECS_URL+"Systems" + str(
            str(str(str(item).replace("'", "")).split('u@odata.id: u'))[26:])[:-3], headers=headers)
        listed_systems.append(str(str(str(str(item).replace("'", "")).split('u@odata.id: u'))[27:])[:-3])
    return listed_systems
def checkFPGA(system_id):
    """Return True when RECSMaster system *system_id* is an FPGA node.

    Side effect: records the node's permanent MAC address in the
    module-level list_fpga_mac the first time it is seen.
    """
    response = requests.get(RECS_URL+'Systems/' + system_id, headers=headers,
                            auth=('admin', 'admin'))
    json_data = json.loads(response.text)
    # Only the Stratix 10 SX 2800 processor model counts as an FPGA here.
    if json_data["ProcessorSummary"]["Model"] == "Stratix 10 SX 2800":
        response_eth = requests.get(RECS_URL + 'Systems/' + system_id + "/EthernetInterfaces/" + system_id + "_IF_Eth_0",
                                    headers=headers, auth=('admin', 'admin'))
        json_data_eth = json.loads(response_eth.text)
        if json_data_eth["PermanentMACAddress"] not in list_fpga_mac:
            list_fpga_mac.append(json_data_eth["PermanentMACAddress"])
        # NOTE(review): indentation was lost in this view; True is assumed
        # to be returned for every matching model, not only for new MACs —
        # confirm against the original file.
        return True
    return False
def list_fpga():
    """Record every FPGA system id reported by RECSMaster.

    Queries RECSMaster for all systems and appends each one that
    checkFPGA() identifies as an FPGA to the module-level list_fpga_id,
    skipping ids already present.
    """
    # First list down all the systems from RECS master, then keep only
    # the ones that are FPGAs.
    for system in available_Systems():
        if checkFPGA(system) and system not in list_fpga_id:
            list_fpga_id.append(system)
def _generate_dep_list(item):
    """Build the single-deployable list for m2dc FPGA *item*.

    :param item: FPGA system id string.
    :returns: one-element list of DriverDeployable with two accelerators
        and one attach handle.
    """
    deployable = driver_deployable.DriverDeployable()
    deployable.num_accelerators = 2
    deployable.attach_handle_list = [_generate_attach_handle(item)]
    deployable.name = "test2" + item
    return [deployable]
def _generate_attach_handle(item):
    """Build a placeholder PCI attach handle for m2dc FPGA *item*."""
    handle = driver_attach_handle.DriverAttachHandle()
    handle.attach_type = "PCI"
    handle.attach_info = "test2" + item
    handle.in_use = False
    return handle
def _generate_controlpath_id(item):
    """Build a placeholder PCI control-path id for m2dc FPGA *item*."""
    cpid = driver_controlpath_id.DriverControlPathID()
    cpid.cpid_type = "PCI"
    cpid.cpid_info = "test_cyborg" + item
    return cpid
def _generate_driver_device(item, device_data):
    """Convert an m2dc board description into a DriverDevice.

    :param item: FPGA system id string.
    :param device_data: dict with "deviceName", "vendorName" and
        "physicalName" keys (output of extractData()).
    :returns: populated DriverDevice of type DEVICE_FPGA.
    """
    device = driver_device.DriverDevice()
    device.vendor = "stratix 10 " + device_data["vendorName"]
    device.model = device_data["physicalName"]
    device.std_board_info = device_data["deviceName"]
    device.vendor_board_info = device_data["vendorName"]
    device.type = constants.DEVICE_FPGA
    device.controlpath_id = _generate_controlpath_id(item)
    device.deployable_list = _generate_dep_list(item)
    return device
def extractData(Data):
    """Parse `aocl diagnose`-style board output into a device-info dict.

    :param Data: multi-line command output; line 3 holds the device name,
        line 8 holds the vendor name in columns 9-17, and line 12 holds
        the physical name in columns 1-19.
    :returns: dict with keys ``deviceName``, ``vendorName`` and
        ``physicalName``.
    :raises IndexError: if *Data* has fewer than 12 lines.
    """
    # BUG FIX: the previous implementation sliced the repr() of
    # one-element list slices (str(Data.split("\n")[2:3])...), which
    # leaked trailing "']" fragments for short lines and broke entirely on
    # lines containing quote characters. Index the split lines directly.
    lines = Data.split("\n")
    return {
        "deviceName": lines[2],
        "vendorName": lines[7][8:17],
        "physicalName": lines[11][:19],
    }
def discover_boards():
    """Discover m2dc FPGA boards via RECSMaster and SSH.

    NOTE(review): this body is diff residue merging two incompatible
    versions of the function. The nested defs extractMac/hashMac/addHashMac
    are missing their trailing colons (syntax errors), everything after the
    first `return devs` is unreachable, and the tail uses Python 2 `print`
    statements. This needs a deliberate rewrite choosing one version; only
    annotated here, not repaired.
    """
    deviceDict = {}

    def getHwAddr(ifname):
        # SIOCGIFHWADDR ioctl (0x8927) to read the interface MAC address.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
        return ':'.join(['%02x' % ord(char) for char in info[18:24]])

    # NOTE(review): missing ':' — syntax error as written.
    def extractMac()
        hwMac = getHwAddr('eth0')
        return hwMac

    # NOTE(review): missing ':' — syntax error as written.
    def hashMac(fpgaMac)
        hash_object = hashlib.md5(fpgaMac)
        hshMac=hash_object.hexdigest()
        return hshMac

    # NOTE(review): missing ':' — syntax error; also returns the
    # misspelled name `devicedict` (NameError) instead of deviceDict.
    def addHashMac(fpgaMac)
        deviceDict["identifier"]=hashMac(fpgaMac)
        return devicedict

    # NOTE(review): a nested def taking `self` — looks copied from a
    # method; shadows the module-level extractData and is never called.
    def extractData(self, out):
        device_name=out.split("\n")[2:3]
        vendor_name=str(out.split("\n")[7:8])[10:19]
        phy_name=str(out.split("\n")[11:12])[2:21]
        deviceDict["deviceName"]= device_name
        deviceDict["vendorName"] = vendor_name
        deviceDict["physicalName"] = phy_name

    devs = []
    device_data={}
    #Added mechanism to extract IP addresses of the FPGA available in RECS board
    list_fpga()
    LOG.error("List of FPGA are "+ str(list_fpga_id))
    LOG.error("List of FPGA mac address are " + str(list_fpga_mac))
    #extract ip from the mac address
    for mac_record in list_fpga_mac:
        # NOTE(review): grep uses a hard-coded MAC, not mac_record —
        # every iteration resolves the same address; confirm intent.
        ip_fpga = subprocess.check_output(str("arp -n | grep -w -i \'88:a2:d7:1f:49:45\' | awk \'{print $1}\'"),
            stdin=subprocess.PIPE, shell=True)
        if ip_fpga.rstrip() not in list_fpga_ip:
            list_fpga_ip.append(ip_fpga.rstrip())
    LOG.error("List of FPGA IP address are " + str(list_fpga_ip))
    #feed to the agent service ussing the extracted IP
    for item, ip_item in zip(list_fpga_id1, list_fpga_ip1):
        # This step is for test as connection to fpga is not established
        Data = subprocess.check_output(str("cat /home/sunil/Desktop/Data.txt"),
            stdin=subprocess.PIPE, shell=True)
        #This line will be removed and put SSH connection to the IP as well as aocl diagnose command execution
        #Following line should get added
        #login = ["ssh -oCiphers=aes256-ctr root@192.168.1.155"] #Put IP here
        #p_login = subprocess.Popen(login, stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
        #out, err = p_login.communicate()
        #cmd = ["aocl diagonse"]
        #p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        # out, err = p.communicate()
        # for ssh using ip
        SSH_USERNAME = "rahul"
        SSH_COMMAND = "cat board.txt"
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_stdin = ssh_stdout = ssh_stderr = None
        ip = "192.168.2.159"
        try:
            ssh.connect(ip, username=SSH_USERNAME)
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(SSH_COMMAND)
        except Exception as e:
            sys.stderr.write("SSH connection error: {0}".format(e))
        if ssh_stdout:
            data_1 = str(ssh_stdout.read())
            print("I am here : " + data_1)
        ssh.close()
        device_data = extractData(Data)
        print("Hello ")
        devs.append(_generate_driver_device(item,device_data))
    return devs
    # NOTE(review): everything below is unreachable — leftover from the
    # pre-change version of this function.
    return deviceDict
    try:
        #Login to the FPGA HPS and execute command to detect device
        login = ["ssh -oCiphers=aes256-ctr root@192.168.1.155"]
        p_login = subprocess.Popen(login, stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
        out, err = p_login.communicate()
        cmd = ["aocl diagonse"]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        p_mac=subprocess.Popen(extractMac(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out_mac, err = p_mac.communicate()
    except SystemError as e:
        # NOTE(review): Python 2 print statements below.
        print "Error: can\'t able to connect FPGA"
        LOG.error(e)
    else:
        print "Command executed in FPGA successfully"
        dict_fpga=extractData(out)
        dict_fpga=addHashMac(out_mac)
        return dict_fpga
......
......@@ -20,7 +20,6 @@ Utils for FPGA driver.
import glob
import re
#Added m2dc vendor in VENDORS
VENDORS = ["intel","m2dc"] # can extend, such as ["intel", "xilinx"]
SYS_FPGA_PATH = "/sys/class/fpga"
......
......@@ -17,7 +17,7 @@
Cyborg Generic driver implementation.
"""
from modules import generic
from cyborg.accelerator.drivers.modules import generic
from oslo_config import cfg
from oslo_log import log
......
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyborg.accelerator.drivers.gpu.nvidia.driver import NVIDIAGPUDriver
import os
import glob
from oslo_log import log as logging
__import__('pkg_resources').declare_namespace(__name__)
__import__(".".join([__package__, 'base']))
LOG = logging.getLogger(__name__)
def load_gpu_vendor_driver():
    """Import every GPU vendor driver module found under this package.

    Globs for */driver* files next to this package, converts each path to
    a dotted "<vendor>.<module>" name, and imports it relative to the
    package; import failures are logged and skipped.
    """
    driver_files = glob.glob(
        os.path.join(os.path.dirname(__file__), "*/driver*"))
    module_names = {".".join(path.rsplit(".")[0].rsplit("/", 2)[-2:])
                    for path in driver_files}
    for module_name in module_names:
        try:
            __import__(".".join([__package__, module_name]))
            LOG.debug("Successfully loaded GPU vendor driver: %s." % module_name)
        except ImportError as e:
            LOG.error("Failed to load GPU vendor driver: %s. Details: %s"
                      % (module_name, e))


load_gpu_vendor_driver()
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cyborg GPU driver implementation.
"""
from oslo_log import log as logging
from cyborg.accelerator.drivers.gpu import utils
LOG = logging.getLogger(__name__)
VENDOR_MAPS = {"10de": "nvidia", "102b": "matrox"}
class GPUDriver(object):
    """Base class for GPU drivers.

    This is just a virtual GPU drivers interface.
    Vendor should implement their specific drivers.
    """

    @classmethod
    def create(cls, vendor, *args, **kwargs):
        """Instantiate the subclass driver registered for *vendor*.

        :param vendor: vendor id or name; translated via VENDOR_MAPS.
        :raises LookupError: when no subclass declares that vendor.
        """
        for subclass in cls.__subclasses__():
            mapped_vendor = VENDOR_MAPS.get(vendor, vendor)
            if subclass.VENDOR == mapped_vendor:
                return subclass(*args, **kwargs)
        raise LookupError("Not find the GPU driver for vendor %s" % vendor)

    def discover(self):
        """
        Discover GPU information of current vendor(Identified by class).
        :return: List of GPU information dict.
        """
        raise NotImplementedError()

    @classmethod
    def discover_vendors(cls):
        """
        Discover GPU vendors of current node.
        :return: GPU vendor ID list.
        """
        return utils.discover_vendors()
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cyborg NVIDIA GPU driver implementation.
"""
from cyborg.accelerator.drivers.gpu.base import GPUDriver
from cyborg.accelerator.drivers.gpu.nvidia import sysinfo
class NVIDIAGPUDriver(GPUDriver):
    """NVIDIA implementation of the virtual GPUDriver interface.

    Vendor should implement their specific drivers.
    """
    VENDOR = "nvidia"

    def discover(self):
        """Return the NVIDIA GPU device tree built by sysinfo."""
        return sysinfo.gpu_tree()
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cyborg NVIDIA GPU driver implementation.
"""
from cyborg.accelerator.drivers.gpu import utils
VENDOR_ID = "10de"
def gpu_tree():
    """Return DriverDevice objects for every NVIDIA GPU on this host."""
    return utils.discover_gpus(VENDOR_ID)
# Copyright 2018 Beijing Lenovo Software Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for GPU driver.
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import re
import subprocess
from cyborg.objects.driver_objects import driver_deployable, driver_device, \
driver_attach_handle, driver_controlpath_id
from cyborg.common import constants
LOG = logging.getLogger(__name__)
GPU_FLAGS = ["VGA compatible controller", "3D controller"]
GPU_INFO_PATTERN = re.compile("(?P<devices>[0-9]{4}:[0-9]{2}:[0-9]{2}\.[0-9]) "
"(?P<controller>.*) [\[].*]: (?P<name>.*) .*"
"[\[](?P<vendor_id>[0-9a-fA-F]"
"{4}):(?P<product_id>[0-9a-fA-F]{4})].*")
# NOTE(wangzhh): The implementation of current release doesn't support virtual
# GPU.
def discover_vendors():
    """Return the set of GPU vendor IDs present on this node.

    Runs lspci (filtered to GPU device classes) and collects the vendor id
    of every line that matches GPU_INFO_PATTERN.
    """
    cmd = "sudo lspci -nnn -D | grep -E '%s'" % "|".join(GPU_FLAGS)
    # FIXME(wangzhh): Use oslo.privsep instead of subprocess here to prevent
    # shell injection attacks.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    p.wait()
    vendors = set()
    for line in p.stdout.readlines():
        matched = GPU_INFO_PATTERN.match(line)
        if matched:
            vendors.add(matched.groupdict().get("vendor_id"))
    return vendors
def discover_gpus(vender_id=None):
    """Return DriverDevice objects for the GPUs found via lspci.

    :param vender_id: optional vendor id; when given, only that vendor's
        devices are returned.
    """
    cmd = "sudo lspci -nnn -D| grep -E '%s'" % "|".join(GPU_FLAGS)
    if vender_id:
        cmd += "| grep " + vender_id
    # FIXME(wangzhh): Use oslo.privsep instead of subprocess here to prevent
    # shell injection attacks.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    p.wait()
    matches = (GPU_INFO_PATTERN.match(line) for line in p.stdout.readlines())
    return [_generate_driver_device(m.groupdict()) for m in matches if m]
def _generate_driver_device(gpu):
    """Convert one lspci match dict into a DriverDevice object.

    :param gpu: groupdict() of GPU_INFO_PATTERN — keys are 'devices',
        'controller', 'name', 'vendor_id' and 'product_id'.
    :returns: populated DriverDevice of type DEVICE_GPU.
    """
    driver_device_obj = driver_device.DriverDevice()
    driver_device_obj.vendor = gpu["vendor_id"]
    # BUG FIX: GPU_INFO_PATTERN defines no 'model' group, so the previous
    # gpu.get('model', ...) always fell back to 'miss model info'; use the
    # matched device 'name' instead.
    driver_device_obj.model = gpu.get('name', 'miss model info')
    std_board_info = {'product_id': gpu.get('product_id', None),
                      'controller': gpu.get('controller', None)}
    driver_device_obj.std_board_info = jsonutils.dumps(std_board_info)
    driver_device_obj.type = constants.DEVICE_GPU
    driver_device_obj.controlpath_id = _generate_controlpath_id(gpu)
    driver_device_obj.deployable_list = _generate_dep_list(gpu)
    return driver_device_obj
def _generate_controlpath_id(gpu):
    """Build the PCI control-path identifier for *gpu*."""
    cpid = driver_controlpath_id.DriverControlPathID()
    cpid.cpid_type = "PCI"
    cpid.cpid_info = gpu["devices"]
    return cpid
def _generate_dep_list(gpu):
    """Build the single-deployable list for one GPU.

    :param gpu: lspci match dict with 'name' and 'devices' keys.
    :returns: one-element list of DriverDeployable.
    """
    deployable = driver_deployable.DriverDeployable()
    # NOTE(wangzhh): The name of deployable should be unique, its format is
    # under disscussion, may looks like
    # <ComputeNodeName>_<NumaNodeName>_<CyborgName>_<NumInHost>, now simply
    # named <Device_name>_<Device_address>
    deployable.name = gpu.get('name', '') + '_' + gpu["devices"]
    deployable.num_accelerators = 1
    deployable.attach_handle_list = [_generate_attach_handle(gpu)]
    return [deployable]
def _generate_attach_handle(gpu):
    """Build the PCI attach handle for *gpu*, marked not in use."""
    handle = driver_attach_handle.DriverAttachHandle()
    handle.attach_type = "PCI"
    handle.in_use = False
    handle.attach_info = gpu["devices"]
    return handle
......@@ -13,8 +13,8 @@ from cyborg.accelerator import configuration
from cyborg.accelerator.common import exception
from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK
from cyborg.common.i18n import _
from pyspdk.nvmf_client import NvmfTgt
from pyspdk.vhost_client import VhostTgt
from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
from cyborg.accelerator.drivers.spdk.util.pyspdk.vhost_client import VhostTgt
LOG = logging.getLogger(__name__)
......
import json
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class NvmfTgt(object):
......@@ -20,12 +24,12 @@ class NvmfTgt(object):
def delete_bdev(self, name):
sub_args = [name]
res = self.py.exec_rpc('delete_bdev', '10.0.2.15', sub_args=sub_args)
print res
LOG.info(res)
def kill_instance(self, sig_name):
sub_args = [sig_name]
res = self.py.exec_rpc('kill_instance', '10.0.2.15', sub_args=sub_args)
print res
LOG.info(res)
def construct_aio_bdev(self, filename, name, block_size):
sub_args = [filename, name, str(block_size)]
......@@ -33,7 +37,7 @@ class NvmfTgt(object):
'construct_aio_bdev',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def construct_error_bdev(self, basename):
sub_args = [basename]
......@@ -41,7 +45,7 @@ class NvmfTgt(object):
'construct_error_bdev',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def construct_nvme_bdev(
self,
......@@ -84,7 +88,7 @@ class NvmfTgt(object):
'construct_malloc_bdev',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def delete_nvmf_subsystem(self, nqn):
sub_args = [nqn]
......@@ -92,7 +96,7 @@ class NvmfTgt(object):
'delete_nvmf_subsystem',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def construct_nvmf_subsystem(
self,
......@@ -106,7 +110,7 @@ class NvmfTgt(object):
'construct_nvmf_subsystem',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def get_nvmf_subsystems(self):
subsystems = self._get_json_objs(
......
......@@ -3,6 +3,10 @@ import re
import os
import subprocess
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class PySPDK(object):
......@@ -16,7 +20,7 @@ class PySPDK(object):
self.init_hugepages(spdk_dir)
server_dir = os.path.join(spdk_dir, 'app/')
file_dir = self._search_file(server_dir, server_name)
print file_dir
LOG.info(file_dir)
os.chdir(file_dir)
p = subprocess.Popen(
'sudo ./%s' % server_name,
......@@ -28,7 +32,7 @@ class PySPDK(object):
def init_hugepages(self, spdk_dir):
huge_dir = os.path.join(spdk_dir, 'scripts/')
file_dir = self._search_file(huge_dir, 'setup.sh')
print file_dir
LOG.info(file_dir)
os.chdir(file_dir)
p = subprocess.Popen(
'sudo ./setup.sh',
......@@ -52,8 +56,8 @@ class PySPDK(object):
self.pid = pinfo.get('pid')
return self.pid
except psutil.NoSuchProcess:
print "NoSuchProcess:%s" % self.pname
print "NoSuchProcess:%s" % self.pname
LOG.info("NoSuchProcess:%s" % self.pname)
LOG.info("NoSuchProcess:%s" % self.pname)
return self.pid
def is_alive(self):
......
import json
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class VhostTgt(object):
......@@ -49,12 +53,12 @@ class VhostTgt(object):
def delete_bdev(self, name):
sub_args = [name]
res = self.py.exec_rpc('delete_bdev', '127.0.0.1', sub_args=sub_args)
print res
LOG.info(res)
def kill_instance(self, sig_name):
sub_args = [sig_name]
res = self.py.exec_rpc('kill_instance', '127.0.0.1', sub_args=sub_args)
print res
LOG.info(res)
def construct_aio_bdev(self, filename, name, block_size):
sub_args = [filename, name, str(block_size)]
......@@ -62,7 +66,7 @@ class VhostTgt(object):
'construct_aio_bdev',
'127.0.0.1',
sub_args=sub_args)
print res
LOG.info(res)
def construct_error_bdev(self, basename):
sub_args = [basename]
......@@ -70,7 +74,7 @@ class VhostTgt(object):
'construct_error_bdev',
'127.0.0.1',
sub_args=sub_args)
print res
LOG.info(res)
def construct_nvme_bdev(
self,
......@@ -113,7 +117,7 @@ class VhostTgt(object):
'construct_malloc_bdev',
'10.0.2.15',
sub_args=sub_args)
print res
LOG.info(res)
def _get_json_objs(self, method, server_ip):
res = self.py.exec_rpc(method, server_ip)
......
......@@ -23,8 +23,13 @@ from oslo_log import log as logging
from oslo_messaging.rpc.client import RemoteError
from oslo_utils import uuidutils
from stevedore import driver
from stevedore.extension import ExtensionManager
from cyborg.common import exception
from cyborg.conf import CONF
from cyborg.accelerator.drivers.fpga.base import FPGADriver
from cyborg.accelerator.drivers.fpga.m2dc import M2DCFPGADriver
from cyborg.accelerator.drivers.fpga.m2dc.driver import M2DCFPGADriver
from cyborg.common import utils
from cyborg import objects
......@@ -53,18 +58,41 @@ class AcceleratorTracker(object):
self.fpgas = None
self.host = host
self.conductor_api = cond_api
self.fpga_driver = M2DCFPGADriver()
self.acc_drivers = []
#self._initialize_drivers()
'''def _initialize_drivers(self, enabled_drivers_m2dc=[]):
"""
Load accelerator drivers.
:return: [nvidia_gpu_driver_obj, intel_fpga_driver_obj]
"""
LOG.error("control is here in acc tracker")
acc_drivers = []
if not enabled_drivers_m2dc:
enabled_drivers_m2dc = CONF.agent.enabled_drivers_m2dc
LOG.error(" Driver name from conf" + str(enabled_drivers_m2dc))
valid_drivers = ExtensionManager(
namespace='cyborg.accelerator.driver').names()
for d in enabled_drivers_m2dc:
if d not in valid_drivers:
raise exception.InvalidDriver(name=d)
acc_driver = driver.DriverManager(
namespace='cyborg.accelerator.driver', name=d,
invoke_on_load=True).driver
acc_drivers.append(acc_driver)
LOG.error("acc driver in resource " + str(acc_drivers))
self.acc_drivers = acc_drivers'''
    @utils.synchronized(AGENT_RESOURCE_SEMAPHORE)
    def claim(self, context):
        """Claim accelerator resources for an instance.

        TODO: not implemented yet. Serialized on AGENT_RESOURCE_SEMAPHORE so
        it cannot race other synchronized agent-resource operations.
        """
        pass
def _fpga_compare_and_update(self, host_dev, acclerator):
def _fpga_compare_and_update(self, host_dev, accelerator):
need_updated = False
for k, v in ACCELERATOR_HOST_MAPS.items():
if acclerator[k] != host_dev[v]:
if accelerator[k] != host_dev[v]:
need_updated = True
acclerator[k] = host_dev[v]
accelerator[k] = host_dev[v]
return need_updated
def _gen_accelerator_from_host_dev(self, host_dev):
......@@ -85,6 +113,21 @@ class AcceleratorTracker(object):
"""Update the resource usage and stats after a change in an
instance
"""
LOG.error("Hello in update usage")
acc_list = []
for acc_driver in self.acc_drivers:
acc_list.extend(acc_driver.discover())
# Call conductor_api here to diff and report acc data. Now, we actually
# do not have the method report_data.
if acc_list:
self.conductor_api.report_data(context, self.host, acc_list)
"""
def create_accelerator(fpgas,bdf):
fpga = fpgas[bdf]
dep = self._gen_accelerator_from_host_dev(fpga)
......@@ -119,10 +162,11 @@ class AcceleratorTracker(object):
except RemoteError as e:
LOG.error(e)
del accls[obsolete]
"""
def _get_fpga_devices(self):
available_fpgas=[]
available_fpgas = self.fpga_driver.discover()
return available_fpgas
......@@ -16,12 +16,23 @@
import oslo_messaging as messaging
from oslo_service import periodic_task
from stevedore import driver
from stevedore.extension import ExtensionManager
from cyborg.accelerator.drivers.fpga.base import FPGADriver
from cyborg.agent.resource_tracker import ResourceTracker
from cyborg.agent.accelerator_tracker import AcceleratorTracker
from cyborg.agent.rpcapi import AgentAPI
from cyborg.image.api import API as ImageAPI
from cyborg.conductor import rpcapi as cond_api
from cyborg.conf import CONF
from cyborg.common import exception
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class AgentManager(periodic_task.PeriodicTasks):
"""Cyborg Agent manager main class."""
......@@ -35,8 +46,9 @@ class AgentManager(periodic_task.PeriodicTasks):
self.host = host or CONF.host
self.fpga_driver = FPGADriver()
self.cond_api = cond_api.ConductorAPI()
self.agent_api = AgentAPI()
self.image_api = ImageAPI()
self._rt = ResourceTracker(host, self.cond_api)
self._at = AcceleratorTracker(host, self.cond_api)
    def periodic_tasks(self, context, raise_on_error=False):
        """Run this manager's periodic tasks (delegates to oslo.service)."""
        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
......@@ -45,15 +57,40 @@ class AgentManager(periodic_task.PeriodicTasks):
"""List installed hardware."""
pass
def fpga_program(self, context, accelerator, image):
def fpga_program(self, context, deployable_uuid, image_uuid):
""" Program a FPGA regoin, image can be a url or local file"""
# TODO (Shaohe Feng) Get image from glance.
# And add claim and rollback logical.
raise NotImplementedError()
path = self._download_bitstream(context, image_uuid)
dep = self.cond_api.deployable_get(context, deployable_uuid)
enabled_drivers = CONF.agent.enabled_drivers
valid_drivers = ExtensionManager(
namespace='cyborg.accelerator.driver').names()
for d in enabled_drivers:
if d not in valid_drivers:
raise exception.InvalidDriver(name=d)
acc_driver = driver.DriverManager(
namespace='cyborg.accelerator.driver', name=d,
invoke_on_load=True).driver
acc_driver.program(dep.device_id, path)
    def _download_bitstream(self, context, bitstream_uuid):
        """Download an FPGA bitstream image to local disk.

        :param context: the request context
        :param bitstream_uuid: v4 uuid of the bitstream to download
        :returns: the local path of the downloaded bitstream file.
            NOTE(review): despite the original docstring's claim, this never
            returns None on failure — any error from image_api.download
            propagates to the caller.
        """
        download_path = "/tmp/" + bitstream_uuid + ".bin"
        self.image_api.download(context,
                                bitstream_uuid,
                                dest_path=download_path)
        return download_path
    @periodic_task.periodic_task(run_immediately=True)
    def update_available_resource(self, context, startup=True):
        """Update all kinds of accelerator resources from their drivers."""
        self._rt.update_usage(context)
        # Also refresh the accelerator tracker each period so the accelerator
        # table stays in sync with what the drivers report.
        self._at.update_usage(context)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Standard Resource Class Fields."""
# NOTE(cdent): This is kept as its own independent file as it is used by
# both the placement and nova sides of the placement interaction. On the
# placement side we don't want to import all the nova fields, nor all the
# nova objects (which are automatically loaded and registered if the
# nova.objects package is imported).
import re
from oslo_versionedobjects import fields
class ResourceClass(fields.StringField):
    """Classes of resources provided to consumers."""

    # Every non-standard resource class name must begin with this prefix.
    CUSTOM_NAMESPACE = 'CUSTOM_'

    VCPU = 'VCPU'
    MEMORY_MB = 'MEMORY_MB'
    DISK_GB = 'DISK_GB'
    PCI_DEVICE = 'PCI_DEVICE'
    SRIOV_NET_VF = 'SRIOV_NET_VF'
    NUMA_SOCKET = 'NUMA_SOCKET'
    NUMA_CORE = 'NUMA_CORE'
    NUMA_THREAD = 'NUMA_THREAD'
    NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
    IPV4_ADDRESS = 'IPV4_ADDRESS'
    VGPU = 'VGPU'
    VGPU_DISPLAY_HEAD = 'VGPU_DISPLAY_HEAD'

    # The ordering is significant: new values may only be appended at the
    # end, never inserted or reordered.
    STANDARD = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF,
                NUMA_SOCKET, NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB,
                IPV4_ADDRESS, VGPU, VGPU_DISPLAY_HEAD)

    # The standard resource classes that existed before custom resource
    # classes were opened up in version 1.1 of the objects in
    # nova/objects/resource_provider.py.
    V1_0 = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF, NUMA_SOCKET,
            NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB, IPV4_ADDRESS)

    @classmethod
    def normalize_name(cls, rc_name):
        """Normalize *rc_name* into a custom resource class name.

        Runs of non-alphanumeric characters are collapsed into single
        underscores, the result is upper-cased, and the CUSTOM_ prefix is
        prepended. Returns None when *rc_name* is None.
        """
        if rc_name is None:
            return None
        # Bug #1762789: upper-case only after replacing non-alphanumerics.
        sanitized = re.sub('[^0-9A-Za-z]+', '_', rc_name).upper()
        return cls.CUSTOM_NAMESPACE + sanitized
class ResourceClassField(fields.AutoTypedField):
    """Versioned-object field whose values are ResourceClass strings."""
    AUTO_TYPE = ResourceClass()
......@@ -20,28 +20,18 @@ model.
"""
from oslo_log import log as logging
from oslo_messaging.rpc.client import RemoteError
from oslo_utils import uuidutils
from stevedore import driver
from stevedore.extension import ExtensionManager
from cyborg.accelerator.drivers.fpga.base import FPGADriver
from cyborg.common import exception
from cyborg.common import utils
from cyborg import objects
from cyborg.conf import CONF
LOG = logging.getLogger(__name__)
AGENT_RESOURCE_SEMAPHORE = "agent_resources"
DEPLOYABLE_VERSION = "1.0"
# Maps Deployable object field names (keys) to the corresponding keys in the
# device dicts reported by the FPGA driver (values).
# TODO: rename the driver-side field names for clarity.
DEPLOYABLE_HOST_MAPS = {"assignable": "assignable",
                        "pcie_address": "devices",
                        "board": "product_id",
                        "type": "function",
                        "vendor": "vendor_id",
                        "name": "name"}
class ResourceTracker(object):
"""Agent helper class for keeping track of resource usage as instances
......@@ -49,105 +39,38 @@ class ResourceTracker(object):
"""
def __init__(self, host, cond_api):
# FIXME (Shaohe) local cache for Accelerator.
# Will fix it in next release.
self.fpgas = None
self.host = host
self.conductor_api = cond_api
self.fpga_driver = FPGADriver()
@utils.synchronized(AGENT_RESOURCE_SEMAPHORE)
def claim(self, context):
pass
self.acc_drivers = []
self._initialize_drivers()
def _fpga_compare_and_update(self, host_dev, acclerator):
need_updated = False
for k, v in DEPLOYABLE_HOST_MAPS.items():
if acclerator[k] != host_dev[v]:
need_updated = True
acclerator[k] = host_dev[v]
return need_updated
def _gen_deployable_from_host_dev(self, host_dev):
dep = {}
for k, v in DEPLOYABLE_HOST_MAPS.items():
dep[k] = host_dev[v]
dep["host"] = self.host
dep["version"] = DEPLOYABLE_VERSION
dep["availability"] = "free"
dep["uuid"] = uuidutils.generate_uuid()
return dep
def _initialize_drivers(self, enabled_drivers=[]):
"""
Load accelerator drivers.
:return: [nvidia_gpu_driver_obj, intel_fpga_driver_obj]
"""
acc_drivers = []
if not enabled_drivers:
enabled_drivers = CONF.agent.enabled_drivers
valid_drivers = ExtensionManager(
namespace='cyborg.accelerator.driver').names()
for d in enabled_drivers:
if d not in valid_drivers:
raise exception.InvalidDriver(name=d)
acc_driver = driver.DriverManager(
namespace='cyborg.accelerator.driver', name=d,
invoke_on_load=True).driver
acc_drivers.append(acc_driver)
self.acc_drivers = acc_drivers
@utils.synchronized(AGENT_RESOURCE_SEMAPHORE)
def update_usage(self, context):
"""Update the resource usage and stats after a change in an
instance
"""Update the resource usage periodically.
"""
def create_deployable(fpgas, bdf, parent_uuid=None):
fpga = fpgas[bdf]
dep = self._gen_deployable_from_host_dev(fpga)
# if parent_uuid:
dep["parent_uuid"] = parent_uuid
obj_dep = objects.Deployable(context, **dep)
new_dep = self.conductor_api.deployable_create(context, obj_dep)
return new_dep
# NOTE(Shaohe Feng) need more agreement on how to keep consistency.
fpgas = self._get_fpga_devices()
bdfs = set(fpgas.keys())
deployables = self.conductor_api.deployable_get_by_host(
context, self.host)
# NOTE(Shaohe Feng) when no "pcie_address" in deployable?
accls = dict([(v["pcie_address"], v) for v in deployables])
accl_bdfs = set(accls.keys())
# Firstly update
for mutual in accl_bdfs & bdfs:
accl = accls[mutual]
if self._fpga_compare_and_update(fpgas[mutual], accl):
try:
self.conductor_api.deployable_update(context, accl)
except RemoteError as e:
LOG.error(e)
# Add
new = bdfs - accl_bdfs
new_pf = set([n for n in new if fpgas[n]["function"] == "pf"])
for n in new_pf:
new_dep = create_deployable(fpgas, n)
accls[n] = new_dep
sub_vf = set()
if "regions" in n:
sub_vf = set([sub["devices"] for sub in fpgas[n]["regions"]])
for vf in sub_vf & new:
new_dep = create_deployable(fpgas, vf, new_dep["uuid"])
accls[vf] = new_dep
new.remove(vf)
for n in new - new_pf:
p_bdf = fpgas[n]["parent_devices"]
p_accl = accls[p_bdf]
p_uuid = p_accl["uuid"]
new_dep = create_deployable(fpgas, n, p_uuid)
# Delete
for obsolete in accl_bdfs - bdfs:
try:
self.conductor_api.deployable_delete(context, accls[obsolete])
except RemoteError as e:
LOG.error(e)
del accls[obsolete]
def _get_fpga_devices(self):
def form_dict(devices, fpgas):
for v in devices:
fpgas[v["devices"]] = v
if "regions" in v:
form_dict(v["regions"], fpgas)
fpgas = {}
vendors = self.fpga_driver.discover_vendors()
for v in vendors:
driver = self.fpga_driver.create(v)
form_dict(driver.discover(), fpgas)
return fpgas
acc_list = []
for acc_driver in self.acc_drivers:
acc_list.extend(acc_driver.discover())
# Call conductor_api here to diff and report acc data. Now, we actually
# do not have the method report_data.