From 2e2ac6014c12eeafb77e1cc57d01597f8b35b2b3 Mon Sep 17 00:00:00 2001
From: zhangshengping
Date: Tue, 24 Sep 2019 07:17:59 -0700
Subject: [PATCH] Create and delete SNAT pools per neutron provider

---
 .../services/f5/f5-openstack-agent.ini        |  14 +
 .../lbaasv2/drivers/bigip/agent_manager.py    |  12 +-
 .../lbaasv2/drivers/bigip/icontrol_driver.py  |  17 +-
 .../lbaasv2/drivers/bigip/lbaas_builder.py    |  50 ++-
 .../lbaasv2/drivers/bigip/network_service.py  |  20 +-
 .../lbaasv2/drivers/bigip/service_adapter.py  |  24 +-
 .../lbaasv2/drivers/bigip/snats.py            | 334 +++++++++++++++++-
 .../drivers/bigip/test/test_lbaas_builder.py  |  36 ++
 .../bigip/test/test_service_adapter.py        |   7 +-
 9 files changed, 484 insertions(+), 30 deletions(-)

diff --git a/etc/neutron/services/f5/f5-openstack-agent.ini b/etc/neutron/services/f5/f5-openstack-agent.ini
index 5c8e02321..8f3a9e57b 100644
--- a/etc/neutron/services/f5/f5-openstack-agent.ini
+++ b/etc/neutron/services/f5/f5-openstack-agent.ini
@@ -433,6 +433,20 @@ f5_snat_mode = True
 # f5_global_routed_mode = True.
 #
 f5_snat_addresses_per_subnet = 1
+
+# Set f5_snat_per_provider to True to create one SNAT pool
+# on the BIG-IP for each neutron provider.
+# Set it to False to create one SNAT pool for each
+# neutron subnet (the default).
+f5_snat_per_provider = False
+
+# f5_snat_addresses_per_provider sets the number of
+# members (IP addresses) in the SNAT pool of each provider.
+# It only takes effect when f5_snat_per_provider is enabled.
+# This setting will be forced to 0 (zero) if
+# f5_global_routed_mode = True.
+f5_snat_addresses_per_provider = 1
+
 #
 # This setting will cause all networks to be
 # defined under the common partition on the
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py
index 636453a24..8f6b70f9a 100644
--- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py
+++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py
@@ -78,6 +78,16 @@
         default=True,
         help=('use SNATs, not direct routed mode')
     ),
+    cfg.BoolOpt(
+        'f5_snat_per_provider',
+        default=False,
+        help=('create one SNAT pool per neutron provider, not per subnet')
+    ),
+    cfg.IntOpt(
+        'f5_snat_addresses_per_provider',
+        default=1,
+        help=('number of SNAT addresses to allocate for each provider')
+    ),
     cfg.IntOpt(
         'f5_snat_addresses_per_subnet',
         default=1,
@@ -142,7 +152,7 @@
     )
 ]
 
-PERIODIC_TASK_INTERVAL = 10
+PERIODIC_TASK_INTERVAL = 30
 
 
 class LogicalServiceCache(object):
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py
index b06bd76f2..d9ed72d8c 100644
--- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py
+++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py
@@ -499,21 +499,24 @@ def _init_bigip_managers(self):
         self.tenant_manager = BigipTenantManager(self.conf, self)
         self.cluster_manager = ClusterManager()
         self.system_helper = SystemHelper()
-        self.lbaas_builder = LBaaSBuilder(self.conf, self)
-
-        # Set esd_processor object as soon as ServiceModelAdapter and
-        # LBaaSBuilder class instantiated, otherwise manager RPC exception
-        # will break setting esd_porcessor procedure.
-        self.init_esd()
 
         if self.conf.f5_global_routed_mode:
             self.network_builder = None
+            self.lbaas_builder = LBaaSBuilder(self.conf, self)
         else:
             self.network_builder = NetworkServiceBuilder(
                 self.conf.f5_global_routed_mode,
                 self.conf,
                 self, self.l3_binding)
+            snat_manager = self.network_builder.bigip_snat_manager
+            self.lbaas_builder = LBaaSBuilder(self.conf, self,
+                                              snat_manager=snat_manager)
+
+        # Set esd_processor object as soon as ServiceModelAdapter and
+        # LBaaSBuilder are instantiated, otherwise a manager RPC exception
+        # will break the esd_processor setup.
+        self.init_esd()
 
     def _init_bigip_hostnames(self):
         # Validate and parse bigip credentials
@@ -771,6 +774,7 @@ def _init_bigip(self, bigip, hostname, check_group_name=None):
             self.system_helper.get_interface_macaddresses_dict(bigip)
         bigip.assured_networks = {}
         bigip.assured_tenant_snat_subnets = {}
+        bigip.assured_tenant_snat_providers = {}
         bigip.assured_gateway_subnets = []
 
         if self.conf.f5_ha_type != 'standalone':
@@ -1104,6 +1108,7 @@ def flush_cache(self):
         for bigip in self.get_all_bigips():
             bigip.assured_networks = {}
             bigip.assured_tenant_snat_subnets = {}
+            bigip.assured_tenant_snat_providers = {}
             bigip.assured_gateway_subnets = []
 
     @serialized('get_all_deployed_loadbalancers')
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py
index 1a21845c0..7251f1fa8 100644
--- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py
+++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py
@@ -19,12 +19,14 @@
 from oslo_log import log as logging
 
 from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2
+from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
 from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service
 from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_service import \
     LbaasServiceObject
 from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service
 from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service
 from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address
+
 from requests import HTTPError
 
 LOG = logging.getLogger(__name__)
@@ -32,12 +34,13 @@
 
 class LBaaSBuilder(object):
     # F5 LBaaS Driver using iControl for BIG-IP to
-    # create objects (vips, pools) - not using an iApp."""
+    # create objects (vips, pools) - not using an iApp.
 
-    def __init__(self, conf, driver, l2_service=None):
+    def __init__(self, conf, driver, l2_service=None, snat_manager=None):
         self.conf = conf
         self.driver = driver
         self.l2_service = l2_service
+        self.bigip_snat_manager = snat_manager
         self.service_adapter = driver.service_adapter
         self.listener_builder = listener_service.ListenerServiceBuilder(
             self.service_adapter,
@@ -182,6 +185,15 @@ def _assure_listeners_created(self, service):
         for listener in listeners:
             error = False
             if self._is_not_pending_delete(listener):
+                # Create the SNAT pool for the neutron provider here,
+                # because the provider name is only available from the
+                # loadbalancer dict of the service.
+                if (self.conf.f5_snat_mode and
+                        self.conf.f5_snat_per_provider and
+                        self.conf.f5_snat_addresses_per_provider > 0):
+                    self._assure_provider_snats(bigips,
+                                                service,
+                                                loadbalancer)
 
                 svc = {"loadbalancer": loadbalancer,
                        "listener": listener,
@@ -204,6 +216,40 @@ def _assure_listeners_created(self, service):
 
             if listener['admin_state_up']:
                 listener['operating_status'] = constants_v2.F5_ONLINE
 
+    def _assure_provider_snats(self, bigips, service, loadbalancer):
+        # Number of SNAT addresses to allocate for each provider pool.
+        snats_per_provider = self.conf.f5_snat_addresses_per_provider
+        provider_name = loadbalancer["provider"]
+        lb_id = loadbalancer["id"]
+        tenant_id = loadbalancer["tenant_id"]
+        subnet_id = loadbalancer["vip_subnet_id"]
+        subnet = service["subnets"][subnet_id]
+        network_id = loadbalancer["network_id"]
+        network = service["networks"][network_id]
+
+        bigips = [bigip for bigip in bigips
+                  if tenant_id not in bigip.assured_tenant_snat_providers or
+                  subnet_id not in bigip.assured_tenant_snat_providers[
+                      tenant_id] or provider_name not in
+                  bigip.assured_tenant_snat_providers[tenant_id][subnet_id]]
+
+        if bigips and self.bigip_snat_manager:
+            snat_addrs = self.bigip_snat_manager.get_provider_snat_addrs(
+                provider_name, lb_id, tenant_id, subnet_id, snats_per_provider
+            )
+
+            if len(snat_addrs) != snats_per_provider:
+                raise f5_ex.SNATCreationException(
+                    "Unable to satisfy request to allocate %d "
+                    "snats. Actual SNAT count: %d SNATs" %
+                    (snats_per_provider, len(snat_addrs)))
+
+            for bigip in bigips:
+                self.bigip_snat_manager.assure_provider_bigip_snats(
+                    bigip, provider_name, snat_addrs, tenant_id, network,
+                    subnet
+                )
+
     def _assure_pools_created(self, service):
         if "pools" not in service:
             return
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py
index 5a0f6e9b9..2289c43ad 100644
--- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py
+++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py
@@ -44,6 +44,7 @@ def __init__(self, f5_global_routed_mode, conf, driver, l3_binding=None):
         self.bigip_selfip_manager = BigipSelfIpManager(
             self.driver, self.l2_service, self.driver.l3_binding)
+
         self.bigip_snat_manager = BigipSnatManager(
             self.driver, self.l2_service, self.driver.l3_binding)
 
@@ -55,7 +56,7 @@ def __init__(self, f5_global_routed_mode, conf, driver, l3_binding=None):
         self.service_adapter = self.driver.service_adapter
 
     def post_init(self):
-        # Run and Post Initialization Tasks """
+        # Run and Post Initialization Tasks
         # run any post initialized tasks, now that the agent
         # is fully connected
         self.l2_service.post_init()
@@ -64,11 +65,11 @@ def tunnel_sync(self, tunnel_ips):
         self.l2_service.tunnel_sync(tunnel_ips)
 
     def set_tunnel_rpc(self, tunnel_rpc):
-        # Provide FDB Connector with ML2 RPC access """
+        # Provide FDB Connector with ML2 RPC access
         self.l2_service.set_tunnel_rpc(tunnel_rpc)
 
     def set_l2pop_rpc(self, l2pop_rpc):
-        # Provide FDB Connector with ML2 RPC access """
+        # Provide FDB Connector with ML2 RPC access
         self.l2_service.set_l2pop_rpc(l2pop_rpc)
 
     def initialize_vcmp(self):
@@ -207,7 +208,8 @@ def prep_service_networking(self, service, traffic_group):
         LOG.debug("Getting subnetinfo for ...")
         LOG.debug(assure_bigips)
         for subnetinfo in subnetsinfo:
-            if self.conf.f5_snat_addresses_per_subnet > 0:
+            if (self.conf.f5_snat_addresses_per_subnet > 0 and
+                    not self.conf.f5_snat_per_provider):
                 self._assure_subnet_snats(assure_bigips, service, subnetinfo)
 
             if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode:
@@ -602,6 +604,7 @@ def _assure_subnet_snats(self, assure_bigips, service, subnetinfo):
                     "Unable to satisfy request to allocate %d "
                     "snats. Actual SNAT count: %d SNATs" %
                     (snats_per_subnet, len(snat_addrs)))
+
             for assure_bigip in assure_bigips:
                 self.bigip_snat_manager.assure_bigip_snats(
                     assure_bigip, subnetinfo, snat_addrs, tenant_id)
@@ -735,6 +738,7 @@ def update_bigip_l2(self, service):
 
     def _assure_delete_nets_shared(self, bigip, service, subnet_hints):
         # Assure shared configuration (which syncs) is deleted
+
         deleted_names = set()
         tenant_id = service['loadbalancer']['tenant_id']
 
@@ -746,6 +750,8 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints):
                 if not self.conf.f5_snat_mode:
                     gw_name = delete_gateway(bigip, subnetinfo)
                     deleted_names.add(gw_name)
+                # If the subnet has no related resources left on the BIG-IP,
+                # its SNAT pool will be deleted as well.
                 my_deleted_names, my_in_use_subnets = \
                     self.bigip_snat_manager.delete_bigip_snats(
                         bigip, subnetinfo, tenant_id)
@@ -863,6 +869,10 @@ def _get_subnets_to_delete(self, bigip, service, subnet_hints):
 
     def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain):
         # Does the big-ip have any IP addresses on this subnet?
+        # When a loadbalancer is in PENDING_DELETE, the agent checks
+        # whether the subnet it used can be removed. If any VIP or
+        # member address still exists on the subnet, this function
+        # returns True and the subnet is kept.
         LOG.debug("_ips_exist_on_subnet entry %s rd %s" %
                   (str(subnet['cidr']), route_domain))
         route_domain = str(route_domain)
@@ -892,7 +902,7 @@ def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain):
                 return True
 
         # If there aren't any virtual addresses, are there
-        # node addresses on this subnet?
+        # node (member) addresses on this subnet?
nodes = self.network_helper.get_node_addresses( bigip, partition=folder diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 408345c36..8ebaea242 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -75,9 +75,6 @@ def get_pool(self, service): def snat_mode(self): return self.conf.f5_snat_mode - def snat_count(self): - return self.conf.f5_snat_addresses_per_subnet - def vip_on_common_network(self, service): loadbalancer = service.get('loadbalancer', {}) network_id = loadbalancer.get('network_id', "") @@ -112,11 +109,22 @@ def get_virtual(self, service): listener = service["listener"] loadbalancer = service["loadbalancer"] - listener["use_snat"] = self.snat_mode() and not listener.get( + # set snat pool for listener + # when use transparent for CMCC, the use_snat must be True in + # in f5-openstack-agent.ini configuration file + listener["use_snat"] = self.conf.f5_snat_mode and not listener.get( "transparent") - if listener["use_snat"] and self.snat_count() > 0: - listener["snat_pool_name"] = self.get_folder_name( - loadbalancer["tenant_id"]) + if listener["use_snat"]: + if self.conf.f5_snat_per_provider: + if self.conf.f5_snat_addresses_per_subnet > 0: + folder_name = self.get_folder_name( + loadbalancer["tenant_id"]) + prvd_name = loadbalancer["provider"] + listener["snat_pool_name"] = folder_name + '_' + prvd_name + else: + if self.conf.f5_snat_addresses_per_subnet > 0: + listener["snat_pool_name"] = self.get_folder_name( + loadbalancer["tenant_id"]) pool = self.get_vip_default_pool(service) @@ -595,7 +603,7 @@ def get_vlan(self, vip, bigip, network_id): def _add_vlan_and_snat(self, listener, vip): - # snat + # set snat mode (automap/sant pool) to listener if "use_snat" in listener and listener["use_snat"]: vip['sourceAddressTranslation'] = {} if "snat_pool_name" in listener: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index e4ecf300c..1098c22a1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -52,8 +52,24 @@ def _get_snat_name(self, subnet, tenant_id): LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' + def _get_provider_snat_name(self, subnet_id, + tenant_id, provider): + # Get the snat name based on HA type + if self.driver.conf.f5_ha_type == 'standalone': + return 'snat-traffic-group-local-only-' + subnet_id + '_' + \ + provider + elif self.driver.conf.f5_ha_type == 'pair': + return 'snat-traffic-group-1-' + subnet_id + '_' + provider + elif self.driver.conf.f5_ha_type == 'scalen': + traffic_group = self.driver.tenant_to_traffic_group(tenant_id) + base_traffic_group = os.path.basename(traffic_group) + return 'snat-' + base_traffic_group + '-' + subnet_id + '_' + \ + provider + LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) + return '' + def _get_snat_traffic_group(self, tenant_id): - # Get the snat name based on HA type """ + # Get the snat name based on HA type if self.driver.conf.f5_ha_type == 'standalone': return 'traffic-group-local-only' elif self.driver.conf.f5_ha_type == 'pair': @@ -65,8 +81,50 @@ def _get_snat_traffic_group(self, tenant_id): LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' + def get_provider_snat_addrs(self, provider_name, lb_id, + tenant_id, subnet_id, snats_per_provider): + 
if self.driver.conf.unlegacy_setting_placeholder: + LOG.debug('setting vnic_type to normal instead of baremetal') + vnic_type = "normal" + else: + vnic_type = "baremetal" + + snat_name = self._get_provider_snat_name(subnet_id, tenant_id, + provider_name) + snat_addrs = [] + + for i in range(snats_per_provider): + ip_address = "" + index_snat_name = snat_name + "_" + str(i) + ports = self.driver.plugin_rpc.get_port_by_name( + port_name=index_snat_name) + if len(ports) > 0: + first_port = ports[0] + first_fixed_ip = first_port['fixed_ips'][0] + ip_address = first_fixed_ip['ip_address'] + else: + new_port = self.driver.plugin_rpc.create_port_on_subnet( + subnet_id=subnet_id, + mac_address=None, + name=index_snat_name, + fixed_address_count=1, device_id=lb_id, + vnic_type=vnic_type + ) + if new_port is not None: + ip_address = new_port['fixed_ips'][0]['ip_address'] + + # Push the IP address on the list if the port was acquired. + if len(ip_address) > 0: + snat_addrs.append(ip_address) + else: + LOG.error("get_provider_snat_addrs: " + "failed to allocate port for " + "SNAT address.") + + return snat_addrs + def get_snat_addrs(self, subnetinfo, tenant_id, snat_count, lb_id): - # Get the ip addresses for snat """ + # Get the ip addresses for snat if self.driver.conf.unlegacy_setting_placeholder: LOG.debug('setting vnic_type to normal instead of baremetal') vnic_type = "normal" @@ -106,10 +164,42 @@ def get_snat_addrs(self, subnetinfo, tenant_id, snat_count, lb_id): return snat_addrs + def assure_provider_bigip_snats(self, bigip, provider_name, + snat_addrs, tenant_id, network, + subnet): + # Ensure Snat Addresses are configured on a bigip. + # Called for every bigip only in replication mode. + # otherwise called once and synced.(with provider + # name) + + snat_info = {} + if self.l2_service.is_common_network(network): + snat_info['network_folder'] = 'Common' + else: + snat_info['network_folder'] = ( + self.driver.service_adapter.get_folder_name(tenant_id) + ) + + # configure listener with this pool name + pool_name = self.driver.service_adapter.get_folder_name( + tenant_id) + '_' + provider_name + snat_info['pool_name'] = pool_name + + snat_info['pool_folder'] = self.driver.service_adapter.get_folder_name( + tenant_id + ) + + snat_info['addrs'] = snat_addrs + + self._assure_provider_bigip_snats(bigip, provider_name, network, + subnet, snat_info, tenant_id) + def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id): # Ensure Snat Addresses are configured on a bigip. # Called for every bigip only in replication mode. - # otherwise called once and synced. + # otherwise called once and synced. 
(with subnet + # name) + network = subnetinfo['network'] snat_info = {} @@ -130,6 +220,90 @@ def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id): self._assure_bigip_snats(bigip, subnetinfo, snat_info, tenant_id) + def _assure_provider_bigip_snats(self, bigip, provider_name, network, + subnet, snat_info, tenant_id): + # Configure the ip addresses for snat + + # make sure cached again + subnet_id = subnet.get('id') + if tenant_id not in bigip.assured_tenant_snat_providers or \ + subnet_id not in bigip.assured_tenant_snat_providers[ + tenant_id]: + bigip.assured_tenant_snat_providers[tenant_id] = {subnet_id: []} + if provider_name in bigip.assured_tenant_snat_providers[ + tenant_id][subnet_id]: + return + + snat_name = self._get_provider_snat_name(subnet.get('id'), tenant_id, + provider_name) + for i, snat_address in enumerate(snat_info['addrs']): + ip_address = snat_address + \ + '%' + str(network['route_domain_id']) + index_snat_name = snat_name + '_' + str(i) + + snat_traffic_group = self._get_snat_traffic_group(tenant_id) + # snat.create() did the following in LBaaSv1 + # Creates the SNAT + # * if the traffic_group is empty it uses a const + # but this seems like it should be an error see message + # in this file about this + # Create a SNAT Pool if a name was passed in + # * Add the snat to the list of members + snat_translation_model = { + "name": index_snat_name, + "partition": snat_info['network_folder'], + "address": ip_address, + "trafficGroup": snat_traffic_group + } + try: + if not self.snat_translation_manager.exists( + bigip, + name=index_snat_name, + partition=snat_info['network_folder']): + self.snat_translation_manager.create( + bigip, snat_translation_model) + except Exception as err: + LOG.exception(err) + raise f5_ex.SNATCreationException( + "Error creating snat translation manager %s" % + index_snat_name) + + snat_pool_model = { + "name": snat_info['pool_name'], + "partition": snat_info['pool_folder'], + } + snat_pool_member = ( + '/' + snat_info['network_folder'] + '/' + index_snat_name) + snat_pool_model["members"] = [snat_pool_member] + try: + if not self.snatpool_manager.exists( + bigip, + name=snat_pool_model['name'], + partition=snat_pool_model['partition']): + LOG.debug("Creating SNAT pool: %s" % snat_pool_model) + self.snatpool_manager.create(bigip, snat_pool_model) + else: + LOG.debug("Updating SNAT pool") + snatpool = self.snatpool_manager.load( + bigip, + name=snat_pool_model["name"], + partition=snat_pool_model["partition"] + ) + snatpool.members.append(snat_pool_member) + snatpool.modify(members=snatpool.members) + + except Exception as err: + LOG.error("Create SNAT pool failed %s" % err.message) + raise f5_ex.SNATCreationException( + "Failed to create SNAT pool") + + if self.l3_binding: + self.l3_binding.bind_address(subnet_id, + ip_address=ip_address) + + bigip.assured_tenant_snat_providers[tenant_id][subnet_id].append( + provider_name) + def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id): # Configure the ip addresses for snat network = subnetinfo['network'] @@ -216,10 +390,36 @@ def delete_bigip_snats(self, bigip, subnetinfo, tenant_id): 'for missing network ... 
skipping.')
             return set()
 
-        return self._delete_bigip_snats(bigip, subnetinfo, tenant_id)
+        if self.driver.conf.f5_snat_per_provider:
+            return self._delete_bigip_provider_snats(bigip,
+                                                     subnetinfo, tenant_id)
+        else:
+            return self._delete_bigip_snats(bigip, subnetinfo, tenant_id)
+
+    def _remove_assured_tenant_snat_provider(self, bigip, tenant_id, subnet):
+        # Remove the cache entry for this subnet of this tenant; all the
+        # provider names cached under that subnet are removed with it.
+        if tenant_id in bigip.assured_tenant_snat_providers:
+            tenant_snat_subnets = \
+                bigip.assured_tenant_snat_providers[tenant_id]
+            if tenant_snat_subnets and subnet['id'] in tenant_snat_subnets:
+                LOG.debug(
+                    'Remove subnet id %s from '
+                    'bigip.assured_tenant_snat_providers for tenant %s' %
+                    (subnet['id'], tenant_id))
+                tenant_snat_subnets.pop(subnet['id'])
+            else:
+                LOG.debug(
+                    'Subnet id %s does not exist in '
+                    'bigip.assured_tenant_snat_providers for tenant %s' %
+                    (subnet['id'], tenant_id))
+        else:
+            LOG.debug(
+                'Tenant id %s does not exist in '
+                'bigip.assured_tenant_snat_providers' % tenant_id)
 
     def _remove_assured_tenant_snat_subnet(self, bigip, tenant_id, subnet):
-        # Remove ref for the subnet for this tenant"""
+        # Remove ref for the subnet for this tenant
         if tenant_id in bigip.assured_tenant_snat_subnets:
             tenant_snat_subnets = \
                 bigip.assured_tenant_snat_subnets[tenant_id]
@@ -239,10 +439,131 @@
                 'Tenant id %s does not exist in '
                 'bigip.assured_tenant_snat_subnets' % tenant_id)
 
+    def _delete_bigip_provider_snats(self, bigip, subnetinfo, tenant_id):
+        # Assure provider snat pools are deleted from this bigip.
+
+        subnet = subnetinfo['subnet']
+        subnet_id = subnet.get('id')
+        provider_names = []
+        network = subnetinfo['network']
+
+        if self.l2_service.is_common_network(network):
+            partition = 'Common'
+        else:
+            partition = self.driver.service_adapter.get_folder_name(tenant_id)
+
+        deleted_names = set()
+        in_use_subnets = set()
+
+        # If the agent is restarted while only one loadbalancer is left, the
+        # cache starts out empty, so no provider names are filtered out here.
+        # TODO: is the last provider SNAT pool still deleted in that case?
+ if bigip.assured_tenant_snat_providers.get(tenant_id) and subnet_id: + if bigip.assured_tenant_snat_providers[tenant_id].get(subnet_id): + provider_names = bigip.assured_tenant_snat_providers[ + tenant_id].get(subnet_id) + + for i in range(self.driver.conf.f5_snat_addresses_per_subnet): + for provider in provider_names: + snat_name = self._get_provider_snat_name(subnet_id, + tenant_id, provider) + index_snat_name = snat_name + "_" + str(i) + tmos_snat_name = index_snat_name + + snat_pool_folder = self.driver.service_adapter.get_folder_name( + tenant_id) + snat_pool_name = snat_pool_folder + '_' + provider + + if self.l3_binding: + try: + snat_xlate = self.snat_translation_manager.load( + bigip, name=index_snat_name, partition=partition) + except HTTPError as err: + LOG.error("Load SNAT xlate failed %s" % err.message) + except Exception: + LOG.error("Unknown error occurred loading " + + "SNAT for unbind") + else: + self.l3_binding.unbind_address( + subnet_id=subnet['id'], + ip_address=snat_xlate.address) + + LOG.debug('Remove translation address from tenant SNAT pool') + try: + snatpool = self.snatpool_manager.load(bigip, + snat_pool_name, + snat_pool_folder) + + snatpool.members = [ + member for member in snatpool.members + if os.path.basename(member) != tmos_snat_name + ] + + LOG.debug('Check if snat pool is empty') + if not snatpool.members: + LOG.debug('Snat pool is empty - delete snatpool') + try: + snatpool.delete() + except HTTPError as err: + LOG.error("Delete SNAT pool failed %s" % + err.message) + else: + LOG.debug('Snat pool is not empty - update snatpool') + try: + snatpool.modify(members=snatpool.members) + except HTTPError as err: + LOG.error("Update SNAT pool failed %s" % + err.message) + except HTTPError as err: + LOG.error("Failed to load SNAT pool %s" % err.message) + + self._remove_assured_tenant_snat_provider( + bigip, tenant_id, subnet) + LOG.debug( + 'Check cache for subnet %s in use by other tenant' % + subnet['id']) + in_use_count = 0 + for other_tenant_id in bigip.assured_tenant_snat_providers: + subnet_providers_dict = \ + bigip.assured_tenant_snat_providers[other_tenant_id] + if subnet['id'] in subnet_providers_dict.keys(): + LOG.debug( + 'Subnet %s in use (tenant %s)' % + (subnet['id'], other_tenant_id)) + in_use_count += 1 + + if in_use_count: + in_use_subnets.add(subnet['id']) + else: + LOG.debug('Check subnet in use by any tenant') + member_use_count = \ + self.get_snatpool_member_use_count( + bigip, subnet['id']) + if member_use_count: + LOG.debug('Subnet in use - do not delete') + in_use_subnets.add(subnet['id']) + else: + LOG.debug('Subnet not in use - delete') + + # Check if trans addr in use by any snatpool. If not in use, + # okay to delete associated neutron port. + LOG.debug('Check trans addr %s in use.' 
% tmos_snat_name) + in_use_count = \ + self.get_snatpool_member_use_count( + bigip, tmos_snat_name) + if not in_use_count: + LOG.debug('Trans addr not in use - delete') + deleted_names.add(index_snat_name) + else: + LOG.debug('Trans addr in use - do not delete') + + return deleted_names, in_use_subnets + def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): - # Assure snats deleted in standalone mode """ + # Assure snats deleted in standalone mode subnet = subnetinfo['subnet'] network = subnetinfo['network'] + if self.l2_service.is_common_network(network): partition = 'Common' else: @@ -255,6 +576,7 @@ def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): # Delete SNATs on traffic-group-local-only snat_name = self._get_snat_name(subnet, tenant_id) + for i in range(self.driver.conf.f5_snat_addresses_per_subnet): index_snat_name = snat_name + "_" + str(i) tmos_snat_name = index_snat_name diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_lbaas_builder.py index 071502822..b59fa1c35 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_lbaas_builder.py @@ -414,6 +414,35 @@ def service(): u'session_persistence': None, u'sessionpersistence': None, u'tenant_id': u'd9ed216f67f04a84bf8fd97c155855cd'}], + u'subnets': { + u'81f42a8a-fc98-4281-8de4-2b946e931457': { + u'updated_at': u'2019-09-10T09:11:06Z', + u'ipv6_ra_mode': None, + u'allocation_pools': [{ + u'start': u'10.250.26.2', + u'end': u'10.250.26.254' + }], + u'host_routes': [], + u'revision_number': 0, + u'ipv6_address_mode': None, + u'id': u'81f42a8a-fc98-4281-8de4-2b946e931457', + u'dns_nameservers': [], + u'gateway_ip': u'10.250.26.1', + u'shared': False, + u'project_id': u'ff3500317f2249109620cd5f2a019adb', + u'description': u'', + u'tags': [], + u'cidr': u'10.250.26.0/24', + u'subnetpool_id': None, + u'service_types': [], + u'name': u'pef-mb-subnet', + u'enable_dhcp': True, + u'network_id': u'cdf1eb6d-9b17-424a-a054-778f3d3a5490', + u'tenant_id': u'd9ed216f67f04a84bf8fd97c155855cd', + u'created_at': u'2019-09-10T09:11:06Z', + u'ip_version': 4 + } + } } @@ -1121,11 +1150,13 @@ def test_assure_listeners_created_update(self, service, create_self): listener = service.get('listeners')[0] target = self.builder service['listener'] = listener + service['subnets'] = service.get('subnets') loadbalancer = service['loadbalancer'] # Test UPDATE case target.listener_builder = Mock() target.listener_builder.create_listener.return_value = None + target.driver.get_config_bigips.return_value = [] expected_bigips = target.driver.get_config_bigips() listener['provisioning_status'] = \ @@ -1147,11 +1178,13 @@ def test_assure_listeners_created_create(self, service, create_self): listener = service.get('listeners')[0] target = self.builder service['listener'] = listener + service['subnets'] = service.get('subnets') loadbalancer = service['loadbalancer'] # Test CREATE case target.listener_builder = Mock() target.listener_builder.create_listener.return_value = None + target.driver.get_config_bigips.return_value = [] expected_bigips = target.driver.get_config_bigips() listener['provisioning_status'] = \ @@ -1172,11 +1205,13 @@ def test_assure_listeners_created_create_error(self, service, create_self): listener = service.get('listeners')[0] target = self.builder service['listener'] = listener + service['subnets'] = service.get('subnets') loadbalancer = service['loadbalancer'] # Test 
CREATE case target.listener_builder = Mock() target.listener_builder.create_listener.return_value = "error" + target.driver.get_config_bigips.return_value = [] expected_bigips = target.driver.get_config_bigips() listener['provisioning_status'] = \ @@ -1203,6 +1238,7 @@ def test_assure_listeners_created_error_to_active( # Test CREATE case target.listener_builder = Mock() target.listener_builder.create_listener.return_value = None + target.driver.get_config_bigips.return_value = [] expected_bigips = target.driver.get_config_bigips() listener['provisioning_status'] = \ diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_service_adapter.py index 0aa5dc13c..e95141dd7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/test/test_service_adapter.py @@ -114,7 +114,8 @@ def basic_service(): monitor_id = str(uuid.uuid4()) return {'loadbalancer': dict(id=str(uuid.uuid4()), tenant_id=tenant_id, - vip_address='192.168.1.1%0'), + vip_address='192.168.1.1%0', + provider='f5netowrks'), 'pools': [dict(id=default_pool_id, session_persistence=True)], 'healthmonitors': [dict(id=str(monitor_id))], @@ -276,6 +277,7 @@ def test_get_virtual(self, target, basic_service): target.get_folder_name = Mock(return_value=tenant_id) target.snat_mode = Mock(return_value=True) basic_service['pool'] = basic_service['pools'][0] + basic_service['loadbalancer'] = basic_service['loadbalancer'] vip = 'vip' target._map_virtual = Mock(return_value=vip) target._add_bigip_items = Mock() @@ -283,7 +285,8 @@ def test_get_virtual(self, target, basic_service): assert target.get_virtual(basic_service) == vip assert basic_service['pool']['session_persistence'] == \ basic_service['listener']['session_persistence'] - assert basic_service['listener']['snat_pool_name'] == tenant_id + assert basic_service['listener']['snat_pool_name'] == tenant_id + \ + '_' + basic_service['loadbalancer']['provider'] target._map_virtual.assert_called_once_with( basic_service['loadbalancer'], basic_service['listener'], pool=basic_service['pool'], policies=list(), irules=list())