diff -Nru neutron-20.4.0/AUTHORS neutron-20.5.0/AUTHORS --- neutron-20.4.0/AUTHORS 2023-07-28 09:15:51.000000000 +0000 +++ neutron-20.5.0/AUTHORS 2023-11-23 09:44:54.000000000 +0000 @@ -1164,6 +1164,7 @@ karimb kedar kulkarni kpdev +labedz labedz lawrancejing leegy @@ -1186,6 +1187,7 @@ liushy liuyulong liyingjun +liyou01 lizheming lizheng lizhixin3016 @@ -1202,6 +1204,7 @@ mark mcclain mat mathieu-rohon +maximkorezkij melissaml miaoyuliang mid_one diff -Nru neutron-20.4.0/ChangeLog neutron-20.5.0/ChangeLog --- neutron-20.4.0/ChangeLog 2023-07-28 09:15:50.000000000 +0000 +++ neutron-20.5.0/ChangeLog 2023-11-23 09:44:52.000000000 +0000 @@ -1,6 +1,44 @@ CHANGES ======= +20.5.0 +------ + +* Ensure ovn loadbalancer FIPs are centralized upon neutron restarts +* [Stable Only] Fix parent for nftables job +* [DHCP agent] Add route to OVN metadata port if exists +* "ebtables-nft" MAC rule deletion failing +* Remove any IPAM allocation if port bulk creation fails +* Add dhcpagentscheduler API extension to the ML2/OVN extensions +* [stable-only] Replace cirros image versions not cached in the CI +* Parameter filters may be None, which cannot be called with \*\* +* Use safer methods to get security groups on security group logging +* [OVN] Fix rate and burst for stateless security groups +* [OVN] Add the default condition check in \`\`PortBindingChassisEvent\`\` +* Revert "[OVN][Trunk] Add port binding info on subport when parent is bound" +* Reduce lock contention on subnets +* [PostgreSQL] Subnet entity with ServiceType grouped by both tables +* Add 3 secs to wait for keepalived state change +* Use HasStandardAttributes as parent class for Tags DB model +* Revert "[OVN][Trunk] Set the subports correct host during live migration" +* Call the "tc qdisc" command for ingress qdisc without parent +* [FT] Make explicit the "publish" call check in "test\_port\_forwarding" +* [OVN] Cleanup old Hash Ring node entries +* [OVN] Add the 'uplink-status-propagation' extension to ML2/OVN +* [OVN][Trunk] Set the subports correct host during live migration +* [OVN] Skip the port status UP update during a live migration +* Fix ovn-metadata agent sync of unused namespaces +* Send ovn heatbeat more often +* Spread OVN metadata agent heartbeat response in time +* [OVN] Disable the mcast\_flood\_reports option for LSPs +* [OVN] ovn-db-sync check for router port differences +* hash-ring: Retry all DB operations if inactive +* [OVN] Retry retrieving LSP hosting information +* [UT] Create network to make lazy loading in the models\_v2 possible +* dvr: Avoid installing non-dvr openflow rule on startup +* [OVN] Hash Ring: Better handle Neutron worker failures +* [neutron-api] remove leader\_only for sb connection + 20.4.0 ------ @@ -11,6 +49,7 @@ * Don't allow deletion of the router ports without IP addresses * Delete sg rule which remote is the deleted sg * [OVN] Expose chassis hosting information in LSP +* [OVN] Prevent Trunk creation/deletion with parent port bound * Load FIP information during initialize not init * [OVN] Hash Ring: Set nodes as offline upon exit * [OVN] Improve Hash Ring logs diff -Nru neutron-20.4.0/PKG-INFO neutron-20.5.0/PKG-INFO --- neutron-20.4.0/PKG-INFO 2023-07-28 09:15:51.864969000 +0000 +++ neutron-20.5.0/PKG-INFO 2023-11-23 09:44:55.188718000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.2 Name: neutron -Version: 20.4.0 +Version: 20.5.0 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack diff -Nru neutron-20.4.0/debian/changelog 
neutron-20.5.0/debian/changelog --- neutron-20.4.0/debian/changelog 2023-08-07 16:29:27.000000000 +0000 +++ neutron-20.5.0/debian/changelog 2023-12-13 19:22:29.000000000 +0000 @@ -1,3 +1,9 @@ +neutron (2:20.5.0-0ubuntu1) jammy; urgency=medium + + * New stable point release for OpenStack Yoga (LP: #2046376). + + -- Corey Bryant Wed, 13 Dec 2023 14:22:29 -0500 + neutron (2:20.4.0-0ubuntu1) jammy; urgency=medium * New stable point release for OpenStack Yoga (LP: #2030526). diff -Nru neutron-20.4.0/neutron/agent/linux/dhcp.py neutron-20.5.0/neutron/agent/linux/dhcp.py --- neutron-20.4.0/neutron/agent/linux/dhcp.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/agent/linux/dhcp.py 2023-11-23 09:44:10.000000000 +0000 @@ -1140,6 +1140,14 @@ file_utils.replace_file(name, '\n'.join(options)) return name + def _get_ovn_metadata_port_ip(self, subnet): + m_ports = [port for port in self.network.ports if + self._is_ovn_metadata_port(port, self.network.id)] + if m_ports: + for fixed_ip in m_ports[0].fixed_ips: + if fixed_ip.subnet_id == subnet.id: + return fixed_ip.ip_address + def _generate_opts_per_subnet(self): options = [] subnets_without_nameservers = set() @@ -1193,23 +1201,33 @@ else: host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) - # Add host routes for isolated network segments + # Determine metadata port route + if subnet.ip_version == constants.IP_VERSION_4: + metadata_route_ip = None + # NOTE: OVN metadata port IP is used in a case when the DHCP + # agent is deployed in the ML2/OVN environment where the native + # ovn-controller dhcp is disabled. The ovn metadata route + # takes precedence over native force_metadata and + # enable_isolated_metadata route settings. + ovn_metadata_port_ip = self._get_ovn_metadata_port_ip(subnet) + if ovn_metadata_port_ip: + metadata_route_ip = ovn_metadata_port_ip + + elif (self.conf.force_metadata or + (isolated_subnets[subnet.id] and + self.conf.enable_isolated_metadata)): + subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) + if subnet_dhcp_ip: + metadata_route_ip = subnet_dhcp_ip + + if not isolated_subnets[subnet.id] and gateway: + metadata_route_ip = gateway - if ((self.conf.force_metadata or - (isolated_subnets[subnet.id] and - self.conf.enable_isolated_metadata)) and - subnet.ip_version == 4): - subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) - if subnet_dhcp_ip: + if metadata_route_ip: host_routes.append( - '%s,%s' % (constants.METADATA_CIDR, subnet_dhcp_ip) + '%s,%s' % (constants.METADATA_CIDR, metadata_route_ip) ) - elif not isolated_subnets[subnet.id] and gateway: - host_routes.append( - '%s,%s' % (constants.METADATA_CIDR, gateway) - ) - if subnet.ip_version == 4: for s in self._get_all_subnets(self.network): sub_segment_id = getattr(s, 'segment_id', None) if (s.ip_version == 4 and @@ -1374,13 +1392,21 @@ return True return False + @staticmethod + def _is_ovn_metadata_port(port, network_id): + return (port.device_id == 'ovnmeta-' + network_id and + port.device_owner == constants.DEVICE_OWNER_DISTRIBUTED) + @classmethod def should_enable_metadata(cls, conf, network): """Determine whether the metadata proxy is needed for a network - This method returns True for truly isolated networks (ie: not attached - to a router) when enable_isolated_metadata is True, or for all the - networks when the force_metadata flags is True. + If the given network contains an OVN metadata port then this method + assumes that the OVN metadata service is in use and this metadata + service is not required, so the method returns False.
For other cases this + method returns True for truly isolated networks (i.e. not attached to a + router) when enable_isolated_metadata is True, or for all the networks + when the force_metadata flag is True. This method also returns True when enable_metadata_network is True, and the network passed as a parameter has a subnet in the link-local @@ -1389,6 +1415,10 @@ providing access to the metadata service via logical routers built with 3rd party backends. """ + for port in network.ports: + if cls._is_ovn_metadata_port(port, network.id): + return False + all_subnets = cls._get_all_subnets(network) dhcp_subnets = [s for s in all_subnets if s.enable_dhcp] if not dhcp_subnets: diff -Nru neutron-20.4.0/neutron/agent/ovn/metadata/agent.py neutron-20.5.0/neutron/agent/ovn/metadata/agent.py --- neutron-20.4.0/neutron/agent/ovn/metadata/agent.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/agent/ovn/metadata/agent.py 2023-11-23 09:44:10.000000000 +0000 @@ -14,6 +14,7 @@ import collections import functools +from random import randint import re import threading import uuid @@ -21,6 +22,7 @@ import netaddr from neutron_lib import constants as n_const from oslo_concurrency import lockutils +from oslo_config import cfg from oslo_log import log from oslo_utils import netutils from ovsdbapp.backend.ovs_idl import event as row_event @@ -35,10 +37,12 @@ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils from neutron.common import utils +from neutron.conf.agent.database import agents_db from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf as config LOG = log.getLogger(__name__) +agents_db.register_db_agents_opts() _SYNC_STATE_LOCK = lockutils.ReaderWriterLock() CHASSIS_METADATA_LOCK = 'chassis_metadata_lock' @@ -186,14 +190,34 @@ events = (self.ROW_UPDATE,) super(SbGlobalUpdateEvent, self).__init__(events, table, None) self.event_name = self.__class__.__name__ + self.first_run = True def run(self, event, row, old): - table = ('Chassis_Private' if self.agent.has_chassis_private - else 'Chassis') - self.agent.sb_idl.db_set( - table, self.agent.chassis, ('external_ids', { - ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: - str(row.nb_cfg)})).execute() + + def _update_chassis(self, row): + table = ('Chassis_Private' if self.agent.has_chassis_private + else 'Chassis') + self.agent.sb_idl.db_set( + table, self.agent.chassis, ('external_ids', { + ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: + str(row.nb_cfg)})).execute() + + delay = 0 + if self.first_run: + self.first_run = False + else: + # We occasionally see port binding failed errors due to + # the ml2 driver refusing to bind the port to a dead agent. + # If all agents heartbeat at the same time, they will all + # cause a load spike on the server. To mitigate that we + # need to spread out the load by introducing a random delay. + # Clamp the max delay between 3 and 10 seconds.
+ max_delay = max(min(cfg.CONF.agent_down_time // 3, 10), 3) + delay = randint(0, max_delay) + + LOG.debug("Delaying updating chassis table for %s seconds", delay) + timer = threading.Timer(delay, _update_chassis, [self, row]) + timer.start() class MetadataAgent(object): @@ -339,7 +363,8 @@ for ns in ip_lib.list_network_namespaces()) net_datapaths = self.get_networks_datapaths() metadata_namespaces = [ - self._get_namespace_name(str(datapath.uuid)) + self._get_namespace_name( + ovn_utils.get_network_name_from_datapath(datapath)) for datapath in net_datapaths ] unused_namespaces = [ns for ns in system_namespaces if @@ -348,8 +373,9 @@ for ns in unused_namespaces: self.teardown_datapath(self._get_datapath_name(ns)) - # now that all obsolete namespaces are cleaned up, deploy required - # networks + # Resync all network namespaces based on the associated datapaths, + # even those that are already running. This is to make sure + # everything within each namespace is up to date. for datapath in net_datapaths: self.provision_datapath(datapath) diff -Nru neutron-20.4.0/neutron/cmd/ovn/neutron_ovn_db_sync_util.py neutron-20.5.0/neutron/cmd/ovn/neutron_ovn_db_sync_util.py --- neutron-20.4.0/neutron/cmd/ovn/neutron_ovn_db_sync_util.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/cmd/ovn/neutron_ovn_db_sync_util.py 2023-11-23 09:44:10.000000000 +0000 @@ -57,12 +57,12 @@ def ovn_client(self): return self._ovn_client - def _set_hash_ring_nodes_offline(self): - """Don't set hash ring nodes as offline. + def _remove_node_from_hash_ring(self): + """Don't remove the node from the Hash Ring. If this method was not overridden, cleanup would be performed when - calling the db sync and running neutron server would mark all the - nodes from the ring as offline. + calling the db sync, and the running neutron server would remove the + nodes from the Hash Ring.
""" # Since we are not using the ovn mechanism driver while syncing, diff -Nru neutron-20.4.0/neutron/common/ovn/extensions.py neutron-20.5.0/neutron/common/ovn/extensions.py --- neutron-20.4.0/neutron/common/ovn/extensions.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/common/ovn/extensions.py 2023-11-23 09:44:10.000000000 +0000 @@ -18,6 +18,7 @@ from neutron_lib.api.definitions import auto_allocated_topology from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import default_subnetpools +from neutron_lib.api.definitions import dhcpagentscheduler from neutron_lib.api.definitions import dns from neutron_lib.api.definitions import dns_domain_keywords from neutron_lib.api.definitions import dns_domain_ports @@ -65,6 +66,7 @@ from neutron_lib.api.definitions import subnet_dns_publish_fixed_ip from neutron_lib.api.definitions import subnet_service_types from neutron_lib.api.definitions import trunk +from neutron_lib.api.definitions import uplink_status_propagation from neutron_lib.api.definitions import vlantransparent from neutron_lib import constants @@ -105,6 +107,7 @@ portbindings.ALIAS, pbe_ext.ALIAS, default_subnetpools.ALIAS, + dhcpagentscheduler.ALIAS, dns.ALIAS, external_net.ALIAS, extra_dhcp_opt.ALIAS, @@ -149,4 +152,5 @@ floating_ip_port_forwarding.ALIAS, vlantransparent.ALIAS, logging.ALIAS, + uplink_status_propagation.ALIAS, ] diff -Nru neutron-20.4.0/neutron/common/ovn/utils.py neutron-20.5.0/neutron/common/ovn/utils.py --- neutron-20.4.0/neutron/common/ovn/utils.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/common/ovn/utils.py 2023-11-23 09:44:10.000000000 +0000 @@ -828,3 +828,10 @@ return chassis.other_config except AttributeError: return chassis.external_ids + + +def get_requested_chassis(requested_chassis): + """Returns a list with the items in the LSP.options:requested-chassis""" + if isinstance(requested_chassis, str): + return requested_chassis.split(',') + return [] diff -Nru neutron-20.4.0/neutron/common/utils.py neutron-20.5.0/neutron/common/utils.py --- neutron-20.4.0/neutron/common/utils.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/common/utils.py 2023-11-23 09:44:10.000000000 +0000 @@ -35,9 +35,12 @@ from eventlet.green import subprocess import netaddr from neutron_lib.api.definitions import availability_zone as az_def +from neutron_lib.api.definitions import portbindings +from neutron_lib.api.definitions import portbindings_extended from neutron_lib import constants as n_const from neutron_lib import context as n_context from neutron_lib.db import api as db_api +from neutron_lib.plugins import utils as plugin_utils from neutron_lib.services.trunk import constants as trunk_constants from neutron_lib.utils import helpers from oslo_config import cfg @@ -1046,3 +1049,16 @@ if not (session.dirty or session.deleted or session.new): return False return True + + +# TODO(slaweq): this should be moved to neutron_lib.plugins.utils module +def is_port_bound(port, log_message=True): + active_binding = plugin_utils.get_port_binding_by_status_and_host( + port.get('port_bindings', []), n_const.ACTIVE) + if not active_binding: + if log_message: + LOG.warning('Binding for port %s was not found.', port) + return False + return active_binding[portbindings_extended.VIF_TYPE] not in ( + portbindings.VIF_TYPE_UNBOUND, + portbindings.VIF_TYPE_BINDING_FAILED) diff -Nru neutron-20.4.0/neutron/db/address_group_db.py neutron-20.5.0/neutron/db/address_group_db.py --- 
neutron-20.4.0/neutron/db/address_group_db.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/address_group_db.py 2023-11-23 09:44:09.000000000 +0000 @@ -168,6 +168,7 @@ def get_address_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): + filters = filters or {} pager = base_obj.Pager(sorts, limit, page_reverse, marker) address_groups = ag_obj.AddressGroup.get_objects( context, _pager=pager, **filters) diff -Nru neutron-20.4.0/neutron/db/db_base_plugin_v2.py neutron-20.5.0/neutron/db/db_base_plugin_v2.py --- neutron-20.4.0/neutron/db/db_base_plugin_v2.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/db_base_plugin_v2.py 2023-11-23 09:44:10.000000000 +0000 @@ -75,7 +75,7 @@ def _ensure_subnet_not_used(context, subnet_id): - models_v2.Subnet.lock_register( + models_v2.Subnet.write_lock_register( context, exc.SubnetInUse(subnet_id=subnet_id), id=subnet_id) try: registry.publish( diff -Nru neutron-20.4.0/neutron/db/ipam_backend_mixin.py neutron-20.5.0/neutron/db/ipam_backend_mixin.py --- neutron-20.4.0/neutron/db/ipam_backend_mixin.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/ipam_backend_mixin.py 2023-11-23 09:44:10.000000000 +0000 @@ -688,7 +688,7 @@ msg = ('This subnet is being modified by another concurrent ' 'operation') for subnet in subnets: - subnet.lock_register( + subnet.read_lock_register( context, exc.SubnetInUse(subnet_id=subnet.id, reason=msg), id=subnet.id) subnet_dicts = [self._make_subnet_dict(subnet, context=context) diff -Nru neutron-20.4.0/neutron/db/l3_dvr_db.py neutron-20.5.0/neutron/db/l3_dvr_db.py --- neutron-20.4.0/neutron/db/l3_dvr_db.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/l3_dvr_db.py 2023-11-23 09:44:10.000000000 +0000 @@ -17,7 +17,6 @@ from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import portbindings -from neutron_lib.api.definitions import portbindings_extended from neutron_lib.api.definitions import router_admin_state_down_before_update from neutron_lib.api import validators from neutron_lib.callbacks import events @@ -70,18 +69,6 @@ return _IS_ADMIN_STATE_DOWN_NECESSARY -# TODO(slaweq): this should be moved to neutron_lib.plugins.utils module -def is_port_bound(port): - active_binding = plugin_utils.get_port_binding_by_status_and_host( - port.get("port_bindings", []), const.ACTIVE) - if not active_binding: - LOG.warning("Binding for port %s was not found.", port) - return False - return active_binding[portbindings_extended.VIF_TYPE] not in [ - portbindings.VIF_TYPE_UNBOUND, - portbindings.VIF_TYPE_BINDING_FAILED] - - @registry.has_registry_receivers class DVRResourceOperationHandler(object): """Contains callbacks for DVR operations. 
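The is_port_bound() helper relocated to neutron/common/utils.py above treats a port as bound only when it carries an ACTIVE binding whose vif_type is neither "unbound" nor "binding_failed". A minimal standalone sketch of that check, using plain dicts in place of Neutron DB objects and inlining the relevant constants (names and values here are illustrative, not the neutron-lib API):

    # Inlined stand-ins for the neutron_lib portbindings constants.
    VIF_TYPE_UNBOUND = 'unbound'
    VIF_TYPE_BINDING_FAILED = 'binding_failed'

    def is_port_bound(port, log_message=True):
        # Pick the ACTIVE binding, mirroring what
        # plugin_utils.get_port_binding_by_status_and_host() is used for.
        active = next((b for b in port.get('port_bindings', [])
                       if b.get('status') == 'ACTIVE'), None)
        if active is None:
            if log_message:
                print('Binding for port %s was not found.' % port.get('id'))
            return False
        return active['vif_type'] not in (VIF_TYPE_UNBOUND,
                                          VIF_TYPE_BINDING_FAILED)

    # A port whose ACTIVE binding failed is not considered bound.
    port = {'id': 'p1',
            'port_bindings': [{'status': 'ACTIVE',
                               'vif_type': 'binding_failed'}]}
    assert not is_port_bound(port, log_message=False)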
@@ -1426,7 +1413,7 @@ def get_ports_under_dvr_connected_subnet(self, context, subnet_id): query = dvr_mac_db.get_ports_query_by_subnet_and_ip(context, subnet_id) - ports = [p for p in query.all() if is_port_bound(p)] + ports = [p for p in query.all() if n_utils.is_port_bound(p)] # TODO(slaweq): if there would be way to pass to neutron-lib only # list of extensions which actually should be processed, than setting # process_extensions=True below could avoid that second loop and diff -Nru neutron-20.4.0/neutron/db/models/tag.py neutron-20.5.0/neutron/db/models/tag.py --- neutron-20.4.0/neutron/db/models/tag.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/models/tag.py 2023-11-23 09:44:09.000000000 +0000 @@ -26,6 +26,6 @@ tag = sa.Column(sa.String(255), nullable=False, primary_key=True) standard_attr = orm.relationship( 'StandardAttribute', load_on_pending=True, - backref=orm.backref('tags', lazy='subquery', viewonly=True), + backref=orm.backref('tags', lazy='joined', viewonly=True), sync_backref=False) revises_on_change = ('standard_attr', ) diff -Nru neutron-20.4.0/neutron/db/models_v2.py neutron-20.5.0/neutron/db/models_v2.py --- neutron-20.4.0/neutron/db/models_v2.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/models_v2.py 2023-11-23 09:44:10.000000000 +0000 @@ -33,25 +33,51 @@ class HasInUse(object): """NeutronBaseV2 mixin, to add the flag "in_use" to a DB model. - The content of this flag (boolean) parameter is not relevant. The goal of - this field is to be used in a write transaction to mark a DB register as - "in_use". Writing any value on this DB parameter will lock the container - register. At the end of the DB transaction, the DB engine will check if - this register was modified or deleted. In such case, the transaction will - fail and won't be commited. - - "lock_register" is the method to write the register "in_use" column. - Because the lifespan of this DB lock is the DB transaction, there isn't an - unlock method. The lock will finish once the transaction ends. + The goal of this class is to allow users to lock specific database rows + with a shared or exclusive lock (without necessarily introducing a change + in the table itself). Having these locks allows the DB engine to prevent + concurrent modifications (e.g. the deletion of a resource while we are + currently adding a new dependency on the resource). + + "read_lock_register" takes a shared DB lock on the row specified by the + filters. The lock is automatically released once the transaction ends. + You can have any number of parallel read locks on the same DB row. But + you cannot have any write lock in parallel. + + "write_lock_register" takes an exclusive DB lock on the row specified by + the filters. The lock is automatically released on transaction commit. + You may only have one write lock on each row at a time. It therefore + blocks all other read and write locks to this row. """ + # keep this column so there is no need to update the database schema + # in this backport in_use = sa.Column(sa.Boolean(), nullable=False, server_default=sql.false(), default=False) @classmethod - def lock_register(cls, context, exception, **filters): + def write_lock_register(cls, context, exception, **filters): + # we use `with_for_update()` to include `FOR UPDATE` in the sql + # statement. + # we need to set `enable_eagerloads(False)` so that we do not try to + # load attached resources (e.g. standardattributes) as this breaks the + # `FOR UPDATE` statement.
+ num_reg = context.session.query( + cls).filter_by(**filters).enable_eagerloads( + False).with_for_update().first() + if num_reg is None: + raise exception + + @classmethod + def read_lock_register(cls, context, exception, **filters): + # we use `with_for_update(read=True)` to include `LOCK IN SHARE MODE` + # in the sql statement. + # we need to set `enable_eagerloads(False)` so that we do not try to + # load attached resources (e.g. standardattributes) as this breaks the + # `LOCK IN SHARE MODE` statement. num_reg = context.session.query( - cls).filter_by(**filters).update({'in_use': True}) - if num_reg != 1: + cls).filter_by(**filters).enable_eagerloads( + False).with_for_update(read=True).first() + if num_reg is None: raise exception diff -Nru neutron-20.4.0/neutron/db/ovn_hash_ring_db.py neutron-20.5.0/neutron/db/ovn_hash_ring_db.py --- neutron-20.4.0/neutron/db/ovn_hash_ring_db.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/db/ovn_hash_ring_db.py 2023-11-23 09:44:09.000000000 +0000 @@ -29,6 +29,7 @@ # NOTE(ralonsoh): this was migrated from networking-ovn to neutron and should # be refactored to be integrated in a OVO. +@db_api.retry_if_session_inactive() def add_node(context, group_name, node_uuid=None): if node_uuid is None: node_uuid = uuidutils.generate_uuid() @@ -41,6 +42,7 @@ return node_uuid +@db_api.retry_if_session_inactive() def remove_nodes_from_host(context, group_name): with db_api.CONTEXT_WRITER.using(context): context.session.query(ovn_models.OVNHashRing).filter( @@ -50,6 +52,24 @@ CONF.host, group_name) +@db_api.retry_if_session_inactive() +def remove_node_by_uuid(context, node_uuid): + with db_api.CONTEXT_WRITER.using(context): + context.session.query(ovn_models.OVNHashRing).filter( + ovn_models.OVNHashRing.node_uuid == node_uuid).delete() + LOG.info('Node "%s" removed from the Hash Ring', node_uuid) + + +@db_api.retry_if_session_inactive() +def cleanup_old_nodes(context, days): + age = timeutils.utcnow() - datetime.timedelta(days=days) + with db_api.CONTEXT_WRITER.using(context): + context.session.query(ovn_models.OVNHashRing).filter( + ovn_models.OVNHashRing.updated_at < age).delete() + LOG.info('Cleaned up Hash Ring nodes older than %d days', days) + + +@db_api.retry_if_session_inactive() def _touch(context, updated_at=None, **filter_args): if updated_at is None: updated_at = timeutils.utcnow() @@ -83,6 +103,7 @@ return query +@db_api.retry_if_session_inactive() @db_api.CONTEXT_READER def get_active_nodes(context, interval, group_name, from_host=False): query = _get_nodes_query(context, interval, group_name, @@ -90,13 +111,17 @@ return query.all() +@db_api.retry_if_session_inactive() @db_api.CONTEXT_READER def count_offline_nodes(context, interval, group_name): query = _get_nodes_query(context, interval, group_name, offline=True) return query.count() -def set_nodes_from_host_as_offline(context, group_name): - timestamp = datetime.datetime(day=26, month=10, year=1985, hour=9) - _touch(context, updated_at=timestamp, hostname=CONF.host, - group_name=group_name) +@db_api.retry_if_session_inactive() +@db_api.CONTEXT_READER +def count_nodes_from_host(context, group_name): + query = context.session.query(ovn_models.OVNHashRing).filter( + ovn_models.OVNHashRing.group_name == group_name, + ovn_models.OVNHashRing.hostname == CONF.host) + return query.count() diff -Nru neutron-20.4.0/neutron/objects/subnet.py neutron-20.5.0/neutron/objects/subnet.py --- neutron-20.4.0/neutron/objects/subnet.py 2023-07-28 09:15:22.000000000 +0000 +++ 
neutron-20.5.0/neutron/objects/subnet.py 2023-11-23 09:44:10.000000000 +0000 @@ -178,6 +178,15 @@ # service type when DHCP is enabled on the subnet. and_(Subnet.enable_dhcp.is_(True), service_type == const.DEVICE_OWNER_DHCP))) + + if query._group_by_clauses: + # If the "Subnet" query has a "GROUP BY" clause (that happens when + # a non-admin user has executed the query, which requires the join + # of the RBAC registers), the "SubnetServiceType" fields need to be + # added to this clause too. + query = query.group_by(ServiceType.subnet_id, + ServiceType.service_type) + return query.from_self(Subnet) diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py neutron-20.5.0/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py 2023-11-23 09:44:09.000000000 +0000 @@ -201,7 +201,9 @@ chain = _mac_chain_name(vif) for rule in current_rules: if '-i %s' % vif in rule and '--among-src' in rule: - ebtables(['-D', chain] + rule.split()) + # Flush the chain and recreate the default DROP rule. + ebtables(['-F', chain]) + ebtables(['-A', chain, '-j', 'DROP']) def _delete_mac_spoofing_protection(vifs, current_rules, table, chain): diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py 2023-11-23 09:44:10.000000000 +0000 @@ -33,13 +33,19 @@ dvr_process_next_table_id = constants.PATCH_LV_TO_TUN of_tables = constants.TUN_BR_ALL_TABLES - def setup_default_table(self, patch_int_ofport, arp_responder_enabled): + def setup_default_table( + self, patch_int_ofport, arp_responder_enabled, dvr_enabled): (dp, ofp, ofpp) = self._get_dp() - # Table 0 (default) will sort incoming traffic depending on in_port - self.install_goto(dest_table_id=constants.PATCH_LV_TO_TUN, - priority=1, - in_port=patch_int_ofport) + if not dvr_enabled: + # Table 0 (default) will sort incoming traffic depending on in_port + # This flow is needed only in a non-dvr environment because + # OVSDVRProcessMixin overwrites this flow in its + # install_dvr_process() method.
+ self.install_goto(dest_table_id=constants.PATCH_LV_TO_TUN, + priority=1, + in_port=patch_int_ofport) + self.install_drop() # default drop if arp_responder_enabled: diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py 2023-11-23 09:44:10.000000000 +0000 @@ -263,16 +263,20 @@ if not self.enable_tunneling: return - self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, - priority=1, - in_port=self.patch_int_ofport) + self._setup_dvr_flows_on_tun_br(self.tun_br, self.patch_int_ofport) + + @staticmethod + def _setup_dvr_flows_on_tun_br(tun_br, patch_int_ofport): + tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, + priority=1, + in_port=patch_int_ofport) # table-miss should be sent to learning table - self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN, - dest_table_id=constants.LEARN_FROM_TUN) + tun_br.install_goto(table_id=constants.DVR_NOT_LEARN, + dest_table_id=constants.LEARN_FROM_TUN) - self.tun_br.install_goto(table_id=constants.DVR_PROCESS, - dest_table_id=constants.PATCH_LV_TO_TUN) + tun_br.install_goto(table_id=constants.DVR_PROCESS, + dest_table_id=constants.PATCH_LV_TO_TUN) def setup_dvr_flows_on_phys_br(self, bridge_mappings=None): '''Setup up initial dvr flows into br-phys''' diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2023-11-23 09:44:10.000000000 +0000 @@ -1475,7 +1475,8 @@ Add all flows to the tunnel bridge. 
''' self.tun_br.setup_default_table(self.patch_int_ofport, - self.arp_responder_enabled) + self.arp_responder_enabled, + self.enable_distributed_routing) def _reconfigure_physical_bridges(self, bridges): try: diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -270,17 +270,17 @@ resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE) - def _set_hash_ring_nodes_offline(self, *args, **kwargs): + def _remove_node_from_hash_ring(self, *args, **kwargs): + # The node_uuid attribute will be empty for worker types + # that are not added to the Hash Ring and can be skipped + if self.node_uuid is None: + return admin_context = n_context.get_admin_context() - ovn_hash_ring_db.set_nodes_from_host_as_offline( - admin_context, self.hash_ring_group) - LOG.info('Hash Ring nodes from host "%s" marked as offline', - cfg.CONF.host) + ovn_hash_ring_db.remove_node_by_uuid( + admin_context, self.node_uuid) def pre_fork_initialize(self, resource, event, trigger, payload=None): """Pre-initialize the ML2/OVN driver.""" - atexit.register(self._set_hash_ring_nodes_offline) - signal.signal(signal.SIGTERM, self._set_hash_ring_nodes_offline) ovn_utils.create_neutron_pg_drop() @staticmethod @@ -298,6 +298,10 @@ thread for this host. Subsequently workers just need to register themselves to the hash ring. """ + # Attempt to remove the node from the ring when the worker stops + atexit.register(self._remove_node_from_hash_ring) + signal.signal(signal.SIGTERM, self._remove_node_from_hash_ring) + admin_context = n_context.get_admin_context() if not self._hash_ring_probe_event.is_set(): # Clear existing entries @@ -1044,7 +1048,7 @@ # See doc/source/design/ovn_worker.rst for more details. return [worker.MaintenanceWorker()] - def _update_dnat_entry_if_needed(self, port_id): + def _update_dnat_entry_if_needed(self, port_id, up=True): """Update DNAT entry if using distributed floating ips.""" if not self.nb_ovn: self.nb_ovn = self._ovn_client._nb_idl @@ -1066,13 +1070,14 @@ nat['external_mac']})).execute() if ovn_conf.is_ovn_distributed_floating_ip(): - mac = nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY) - if mac and nat['external_mac'] != mac: - LOG.debug("Setting external_mac of port %s to %s", - port_id, mac) - self.nb_ovn.db_set( - 'NAT', nat['_uuid'], ('external_mac', mac)).execute( - check_error=True) + if up: + mac = nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY) + if mac and nat['external_mac'] != mac: + LOG.debug("Setting external_mac of port %s to %s", + port_id, mac) + self.nb_ovn.db_set( + 'NAT', nat['_uuid'], ('external_mac', mac)).execute( + check_error=True) else: if nat['external_mac']: LOG.debug("Clearing up external_mac of port %s", port_id) @@ -1135,7 +1140,7 @@ # to prevent another entity from bypassing the block with its own # port status update. 
LOG.info("OVN reports status down for port: %s", port_id) - self._update_dnat_entry_if_needed(port_id) + self._update_dnat_entry_if_needed(port_id, False) admin_context = n_context.get_admin_context() try: db_port = ml2_db.get_port(admin_context, port_id) diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py 2023-11-23 09:44:10.000000000 +0000 @@ -161,6 +161,10 @@ return cls._schema_helper @classmethod + def get_schema_version(cls): + return cls.schema_helper.schema_json['version'] + + @classmethod def schema_has_table(cls, table_name): return table_name in cls.schema_helper.schema_json['tables'] diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py 2023-11-23 09:44:10.000000000 +0000 @@ -42,6 +42,7 @@ from neutron.db import segments_db from neutron.objects import router as router_obj from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync +from neutron import service from neutron.services.logapi.drivers.ovn import driver as log_driver @@ -724,7 +725,7 @@ txn.add(cmd) raise periodics.NeverAgain() - # TODO(lucasagomes): Remove this in the Z cycle + # TODO(lucasagomes): Remove this in the B+3 cycle # A static spacing value is used here, but this method will only run # once per lock due to the use of periodics.NeverAgain(). 
@periodics.periodic(spacing=600, run_immediately=True) @@ -735,21 +736,36 @@ cmds = [] for port in self._nb_idl.lsp_list().execute(check_error=True): port_type = port.type.strip() - if port_type in ("vtep", ovn_const.LSP_TYPE_LOCALPORT, "router"): - continue - options = port.options - if port_type == ovn_const.LSP_TYPE_LOCALNET: - mcast_flood_value = options.get( + mcast_flood_reports_value = options.get( ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS) - if mcast_flood_value == 'false': + + if self._ovn_client.is_mcast_flood_broken: + if port_type in ("vtep", ovn_const.LSP_TYPE_LOCALPORT, + "router"): + continue + + if port_type == ovn_const.LSP_TYPE_LOCALNET: + mcast_flood_value = options.pop( + ovn_const.LSP_OPTIONS_MCAST_FLOOD, None) + if mcast_flood_value: + cmds.append(self._nb_idl.db_remove( + 'Logical_Switch_Port', port.name, 'options', + ovn_const.LSP_OPTIONS_MCAST_FLOOD, + if_exists=True)) + + if mcast_flood_reports_value == 'true': continue - options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'}) - elif ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS in options: - continue - options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'}) - cmds.append(self._nb_idl.lsp_set_options(port.name, **options)) + options.update( + {ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'}) + cmds.append(self._nb_idl.lsp_set_options(port.name, **options)) + + elif (mcast_flood_reports_value and port_type != + ovn_const.LSP_TYPE_LOCALNET): + cmds.append(self._nb_idl.db_remove( + 'Logical_Switch_Port', port.name, 'options', + ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS, if_exists=True)) if cmds: with self._nb_idl.transaction(check_error=True) as txn: @@ -1054,6 +1070,20 @@ for table in ('Chassis_Private', 'Chassis'): txn.add(self._sb_idl.db_destroy(table, ch.name)) + @periodics.periodic(spacing=86400, run_immediately=True) + def cleanup_old_hash_ring_nodes(self): + """Daily task to clean up old stale Hash Ring node entries. + + Runs once a day and cleans up Hash Ring entries that haven't + been updated in more than 5 days. See LP #2033281 for more + information. + + """ + if not self.has_lock: + return + context = n_context.get_admin_context() + hash_ring_db.cleanup_old_nodes(context, days=5) + class HashRingHealthCheckPeriodics(object): @@ -1067,3 +1097,20 @@ # here because we want the maintenance tasks from each instance to # execute this task. hash_ring_db.touch_nodes_from_host(self.ctx, self._group) + + # Check the number of nodes in the ring and log a message in + # case they are out of sync. See LP #2024205 for more information + # on this issue. + api_workers = service._get_api_workers() + num_nodes = hash_ring_db.count_nodes_from_host(self.ctx, self._group) + + if num_nodes > api_workers: + LOG.critical( + 'The number of nodes in the Hash Ring (%d) is higher than ' 'the number of API workers (%d) for host "%s". Something is ' 'not right and OVSDB events could be missed because of this. ' 'Please check the status of the Neutron processes, this can ' 'happen when the API workers are killed and restarted. 
' + 'Restarting the service should fix the issue, see LP ' + '#2024205 for more information.', + num_nodes, api_workers, cfg.CONF.host) diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py 2023-11-23 09:44:10.000000000 +0000 @@ -39,9 +39,11 @@ from oslo_log import log from oslo_utils import excutils from oslo_utils import timeutils +from oslo_utils import versionutils from ovsdbapp.backend.ovs_idl import idlutils import tenacity +from neutron._i18n import _ from neutron.common.ovn import acl as ovn_acl from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils @@ -49,6 +51,7 @@ from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf from neutron.db import ovn_revision_numbers_db as db_rev from neutron.db import segments_db +from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \ import placement as placement_extension from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \ @@ -92,6 +95,7 @@ self._plugin_property = None self._l3_plugin_property = None + self._is_mcast_flood_broken = None # TODO(ralonsoh): handle the OVN client extensions with an ext. manager self._qos_driver = qos_extension.OVNClientQosExtension(driver=self) @@ -272,6 +276,29 @@ ovn_const.VIF_DETAILS_CARD_SERIAL_NUMBER]).hostname return '' + @tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError), + wait=tenacity.wait_random(min=2, max=3), + stop=tenacity.stop_after_attempt(3), + reraise=True) + def _wait_for_port_bindings_host(self, context, port_id): + db_port = ml2_db.get_port(context, port_id) + # This is already checked previously but, just to stay on + # the safe side in case the port is deleted mid-operation + if not db_port: + raise RuntimeError( + _('No port found with ID %s') % port_id) + + if not db_port.port_bindings: + raise RuntimeError( + _('No port bindings information found for ' + 'port %s') % port_id) + + if not db_port.port_bindings[0].host: + raise RuntimeError( + _('No hosting information found for port %s') % port_id) + + return db_port + def update_lsp_host_info(self, context, db_port, up=True): """Update the binding hosting information for the LSP. @@ -287,8 +314,19 @@ if up: if not db_port.port_bindings: return - host = db_port.port_bindings[0].host + if not db_port.port_bindings[0].host: + # NOTE(lucasgomes): There might be a sync issue between + # the moment that this port was fetched from the database + # and the hosting information being set, retry a few times + try: + db_port = self._wait_for_port_bindings_host( + context, db_port.id) + except RuntimeError as e: + LOG.warning(e) + return + + host = db_port.port_bindings[0].host ext_ids = ('external_ids', {ovn_const.OVN_HOST_ID_EXT_ID_KEY: host}) cmd.append( @@ -302,6 +340,20 @@ self._transaction(cmd) + # TODO(lucasagomes): Remove this method and the logic around the broken + # mcast_flood_reports configuration option on any other port that is not + # type "localnet" when the fixed version of OVN becomes the norm. 
+ # The commit in core OVN fixing this issue is the + # https://github.com/ovn-org/ovn/commit/6aeeccdf272bc60630581e46aa42d97f4f56d4fa + @property + def is_mcast_flood_broken(self): + if self._is_mcast_flood_broken is None: + schema_version = self._nb_idl.get_schema_version() + self._is_mcast_flood_broken = ( + versionutils.convert_version_to_tuple(schema_version) < + (6, 3, 0)) + return self._is_mcast_flood_broken + def _get_port_options(self, port): context = n_context.get_admin_context() binding_prof = utils.validate_and_get_data_from_binding_profile(port) @@ -464,12 +516,8 @@ if port_type != ovn_const.LSP_TYPE_VIRTUAL: options[ovn_const.LSP_OPTIONS_REQUESTED_CHASSIS_KEY] = chassis - # TODO(lucasagomes): Enable the mcast_flood_reports by default, - # according to core OVN developers it shouldn't cause any harm - # and will be ignored when mcast_snoop is False. We can revise - # this once https://bugzilla.redhat.com/show_bug.cgi?id=1933990 - # (see comment #3) is fixed in Core OVN. - if port_type not in ('vtep', ovn_const.LSP_TYPE_LOCALPORT, 'router'): + if self.is_mcast_flood_broken and port_type not in ( + 'vtep', ovn_const.LSP_TYPE_LOCALPORT, 'router'): options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'}) device_owner = port.get('device_owner', '') @@ -2682,7 +2730,8 @@ txn.add(self._nb_idl.dns_remove_record( ls_dns_record.uuid, ptr_record, if_exists=True)) - def create_ovn_fair_meter(self, meter_name, from_reload=False, txn=None): + def _create_ovn_fair_meter(self, meter_name, from_reload=False, txn=None, + stateless=False): """Create row in Meter table with fair attribute set to True. Create a row in OVN's NB Meter table based on well-known name. This @@ -2697,11 +2746,26 @@ """ meter = self._nb_idl.db_find_rows( "Meter", ("name", "=", meter_name)).execute(check_error=True) - # The meter is created when a log object is created, not by default. - # This condition avoids creating the meter if it wasn't there already + # The meters are created when a log object is created, not by default. + # This condition avoids creating the meter if it wasn't there already. commands = [] if from_reload and not meter: return + + burst_limit = cfg.CONF.network_log.burst_limit + rate_limit = cfg.CONF.network_log.rate_limit + if stateless: + meter_name = meter_name + "_stateless" + burst_limit = int(burst_limit / 2) + rate_limit = int(rate_limit / 2) + # The stateless meter is only created once the stateful meter was + # successfully created. + # The treatment of limits is not equal for stateful and stateless + # traffic at a kernel level according to: + # https://bugzilla.redhat.com/show_bug.cgi?id=2212952 + # The stateless meter is created to adjust this issue. + meter = self._nb_idl.db_find_rows( + "Meter", ("name", "=", meter_name)).execute(check_error=True) if meter: meter = meter[0] meter_band = self._nb_idl.lookup("Meter_Band", @@ -2709,9 +2773,8 @@ if meter_band: if all((meter.unit == "pktps", meter.fair[0], - meter_band.rate == cfg.CONF.network_log.rate_limit, - meter_band.burst_size == - cfg.CONF.network_log.burst_limit)): + meter_band.rate == rate_limit, + meter_band.burst_size == burst_limit)): # Meter (and its meter-band) unchanged: noop. return # Re-create meter (and its meter-band) with the new attributes. 
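The _create_ovn_fair_meter() change above derives a companion "_stateless" meter from the configured network_log limits, halving both rate and burst because stateful and stateless traffic are not treated equally at the kernel level (see the Bugzilla link in the code). A small sketch of that derivation (meter name and numbers are illustrative):

    def stateless_meter_params(meter_name, rate_limit, burst_limit):
        # Mirrors the halving applied when creating the stateless meter.
        return (meter_name + '_stateless',
                int(rate_limit / 2),
                int(burst_limit / 2))

    # For example, rate_limit=100 and burst_limit=25 yield:
    assert stateless_meter_params('acl_log_meter', 100, 25) == \
        ('acl_log_meter_stateless', 50, 12)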
@@ -2725,10 +2788,15 @@ commands.append(self._nb_idl.meter_add( name=meter_name, unit="pktps", - rate=cfg.CONF.network_log.rate_limit, + rate=rate_limit, fair=True, - burst_size=cfg.CONF.network_log.burst_limit, + burst_size=burst_limit, may_exist=False, external_ids={ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY: log_const.LOGGING_PLUGIN})) self._transaction(commands, txn=txn) + + def create_ovn_fair_meter(self, meter_name, from_reload=False, txn=None): + self._create_ovn_fair_meter(meter_name, from_reload, txn) + self._create_ovn_fair_meter(meter_name, from_reload, txn, + stateless=True) diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py 2023-11-23 09:44:10.000000000 +0000 @@ -446,6 +446,36 @@ self.l3_plugin.port_forwarding.db_sync_delete( context, fip_id, txn) + def _is_router_port_changed(self, db_router_port, lrport_nets): + """Check if the router port needs to be updated. + + This method checks for networks and ipv6_ra_configs (if supported) + changes on a given router port. + """ + db_lrport_nets = db_router_port['networks'] + if db_lrport_nets != lrport_nets: + return True + + # Check for ipv6_ra_configs changes + db_lrport_ra = db_router_port['ipv6_ra_configs'] + lrport_ra = {} + ipv6_ra_supported = self.ovn_api.is_col_present( + 'Logical_Router_Port', 'ipv6_ra_configs') + if ipv6_ra_supported: + lrp_name = utils.ovn_lrouter_port_name(db_router_port['id']) + try: + ovn_lrport = self.ovn_api.lrp_get( + lrp_name).execute(check_error=True) + except idlutils.RowNotFound: + # If the port is not found in the OVN database the + # ovn-db-sync script will recreate this port later + # and it will have the latest information. No need + # to update it. + return False + lrport_ra = ovn_lrport.ipv6_ra_configs + + return db_lrport_ra != lrport_ra + def sync_routers_and_rports(self, ctx): """Sync Routers between neutron and NB. @@ -525,6 +555,12 @@ constants.DEVICE_OWNER_HA_REPLICATED_INT]) for interface in interfaces: db_router_ports[interface['id']] = interface + networks, ipv6_ra_configs = ( + self._ovn_client._get_nets_and_ipv6_ra_confs_for_router_port( + ctx, interface)) + db_router_ports[interface['id']]['networks'] = networks + db_router_ports[interface['id']][ + 'ipv6_ra_configs'] = ipv6_ra_configs lrouters = self.ovn_api.get_all_logical_routers_with_rports() @@ -541,11 +577,9 @@ if lrouter['name'] in db_routers: for lrport, lrport_nets in lrouter['ports'].items(): if lrport in db_router_ports: - # We dont have to check for the networks and - # ipv6_ra_configs values. Lets add it to the - # update_lrport_list. If they are in sync, then - # update_router_port will be a no-op. 
- update_lrport_list.append(db_router_ports[lrport]) + if self._is_router_port_changed( + db_router_ports[lrport], lrport_nets): + update_lrport_list.append(db_router_ports[lrport]) del db_router_ports[lrport] else: del_lrouter_ports_list.append( diff -Nru neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py --- neutron-20.4.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py 2023-11-23 09:44:10.000000000 +0000 @@ -275,6 +275,15 @@ {'port': row.logical_port, 'binding': row.uuid}) return False + req_chassis = utils.get_requested_chassis( + row.options.get(ovn_const.LSP_OPTIONS_REQUESTED_CHASSIS_KEY, '')) + if len(req_chassis) > 1: + # This event has been issued during a LSP migration. During this + # process, the LSP will change the port binding but the port status + # will be handled by the ``LogicalSwitchPortUpdateDownEvent`` and + # ``LogicalSwitchPortUpdateUpEvent`` events. + return False + return bool(lsp.up) def run(self, event, row, old=None): @@ -415,11 +424,13 @@ self.l3_plugin = directory.get_plugin(constants.L3) table = 'Port_Binding' events = (self.ROW_UPDATE,) - super(PortBindingChassisEvent, self).__init__( - events, table, (('type', '=', ovn_const.OVN_CHASSIS_REDIRECT),)) + super().__init__(events, table, None) self.event_name = 'PortBindingChassisEvent' def match_fn(self, event, row, old): + if row.type != ovn_const.OVN_CHASSIS_REDIRECT: + return False + if len(old._data) == 1 and 'external_ids' in old._data: # NOTE: since [1], the NB logical_router_port.external_ids are # copied into the SB port_binding.external_ids. 
If only the @@ -659,9 +670,8 @@ helper.register_table('Encap') helper.register_table('Port_Binding') helper.register_table('Datapath_Binding') - # Used by MaintenanceWorker which can use ovsdb locking try: - return cls(connection_string, helper, leader_only=True) + return cls(connection_string, helper, leader_only=False) except TypeError: # TODO(twilson) We can remove this when we require ovs>=2.12.0 return cls(connection_string, helper) diff -Nru neutron-20.4.0/neutron/plugins/ml2/plugin.py neutron-20.5.0/neutron/plugins/ml2/plugin.py --- neutron-20.4.0/neutron/plugins/ml2/plugin.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/plugins/ml2/plugin.py 2023-11-23 09:44:10.000000000 +0000 @@ -1619,10 +1619,9 @@ for port in port_list: self._before_create_port(context, port) - port_list, net_cache = self.allocate_macs_and_ips_for_ports( - context, port_list) - try: + port_list, net_cache = self.allocate_macs_and_ips_for_ports( + context, port_list) return self._create_port_bulk(context, port_list, net_cache) except Exception: with excutils.save_and_reraise_exception(): @@ -1630,7 +1629,7 @@ # deallocated now for port in port_list: self.ipam.deallocate_ips_from_port( - context, port, port['ipams']) + context, port, port.get('ipams')) @db_api.retry_if_session_inactive() def _create_port_bulk(self, context, port_list, network_cache): diff -Nru neutron-20.4.0/neutron/services/logapi/drivers/ovn/driver.py neutron-20.5.0/neutron/services/logapi/drivers/ovn/driver.py --- neutron-20.4.0/neutron/services/logapi/drivers/ovn/driver.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/services/logapi/drivers/ovn/driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -28,6 +28,7 @@ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils from neutron.conf.services import logging as log_cfg +from neutron.objects import securitygroup as sg_obj from neutron.services.logapi.common import db_api from neutron.services.logapi.common import sg_callback from neutron.services.logapi.drivers import base @@ -152,9 +153,21 @@ msg += " for network log {}".format(log_name) LOG.info(msg, acl_changes, acl_absents, acl_visits) - def _set_acls_log(self, pgs, ovn_txn, actions_enabled, log_name): + def _set_acls_log(self, pgs, context, ovn_txn, actions_enabled, log_name): acl_changes, acl_visits = 0, 0 for pg in pgs: + meter_name = self.meter_name + if pg["name"] != ovn_const.OVN_DROP_PORT_GROUP_NAME: + sg = sg_obj.SecurityGroup.get_sg_by_id( + context, + pg["external_ids"][ovn_const.OVN_SG_EXT_ID_KEY]) + if not sg: + LOG.warning("Port Group %s is missing a corresponding " + "security group, skipping its network log " + "setting...", pg["name"]) + continue + if not sg.stateful: + meter_name = meter_name + ("_stateless") for acl_uuid in pg["acls"]: acl_visits += 1 acl = self.ovn_nb.lookup("ACL", acl_uuid) @@ -163,7 +176,7 @@ continue columns = { 'log': acl.action in actions_enabled, - 'meter': self.meter_name, + 'meter': meter_name, 'name': log_name, 'severity': "info" } @@ -183,12 +196,13 @@ for log_obj in log_objs: pgs = self._pgs_from_log_obj(context, log_obj) actions_enabled = self._acl_actions_enabled(log_obj) - self._set_acls_log(pgs, ovn_txn, actions_enabled, + self._set_acls_log(pgs, context, ovn_txn, actions_enabled, utils.ovn_name(log_obj.id)) def _pgs_all(self): return self.ovn_nb.db_list( - "Port_Group", columns=["name", "acls"]).execute(check_error=True) + "Port_Group", + columns=["name", "external_ids", "acls"]).execute(check_error=True) def 
_pgs_from_log_obj(self, context, log_obj): """Map Neutron log_obj into affected port groups in OVN. @@ -207,11 +221,13 @@ # No sg, no port, DROP: return DROP pg if log_obj.event == log_const.DROP_EVENT: return [{"name": pg_drop.name, - "acls": [r.uuid for r in pg_drop.acls]}] + "external_ids": pg_drop.external_ids, + "acls": [r.uuid for r in pg_drop.acls]}] # No sg, no port, ACCEPT: return all except DROP pg pgs = self._pgs_all() pgs.remove({"name": pg_drop.name, - "acls": [r.uuid for r in pg_drop.acls]}) + "external_ids": pg_drop.external_ids, + "acls": [r.uuid for r in pg_drop.acls]}) return pgs except idlutils.RowNotFound: pass @@ -223,6 +239,7 @@ pg = self.ovn_nb.lookup("Port_Group", ovn_const.OVN_DROP_PORT_GROUP_NAME) pgs.append({"name": pg.name, + "external_ids": pg.external_ids, "acls": [r.uuid for r in pg.acls]}) except idlutils.RowNotFound: pass @@ -235,6 +252,7 @@ utils.ovn_port_group_name( log_obj.resource_id)) pgs.append({"name": pg.name, + "external_ids": pg.external_ids, "acls": [r.uuid for r in pg.acls]}) except idlutils.RowNotFound: pass @@ -248,6 +266,7 @@ pg = self.ovn_nb.lookup("Port_Group", utils.ovn_port_group_name(sg_id)) pgs.append({"name": pg.name, + "external_ids": pg.external_ids, "acls": [r.uuid for r in pg.acls]}) except idlutils.RowNotFound: pass @@ -266,7 +285,7 @@ with self.ovn_nb.transaction(check_error=True) as ovn_txn: self._ovn_client.create_ovn_fair_meter(self.meter_name, txn=ovn_txn) - self._set_acls_log(pgs, ovn_txn, actions_enabled, + self._set_acls_log(pgs, context, ovn_txn, actions_enabled, utils.ovn_name(log_obj.id)) def create_log_precommit(self, context, log_obj): @@ -334,7 +353,7 @@ if not self._unset_disabled_acls(context, log_obj, ovn_txn): pgs = self._pgs_from_log_obj(context, log_obj) actions_enabled = self._acl_actions_enabled(log_obj) - self._set_acls_log(pgs, ovn_txn, actions_enabled, + self._set_acls_log(pgs, context, ovn_txn, actions_enabled, utils.ovn_name(log_obj.id)) def delete_log(self, context, log_obj): @@ -356,6 +375,8 @@ self._remove_acls_log(pgs, ovn_txn) ovn_txn.add(self.ovn_nb.meter_del(self.meter_name, if_exists=True)) + ovn_txn.add(self.ovn_nb.meter_del( + self.meter_name + "_stateless", if_exists=True)) LOG.info("All ACL logs cleared after deletion of log_obj %s", log_obj.id) return diff -Nru neutron-20.4.0/neutron/services/trunk/drivers/ovn/trunk_driver.py neutron-20.5.0/neutron/services/trunk/drivers/ovn/trunk_driver.py --- neutron-20.4.0/neutron/services/trunk/drivers/ovn/trunk_driver.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/services/trunk/drivers/ovn/trunk_driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -22,10 +22,12 @@ from oslo_log import log from neutron.common.ovn import constants as ovn_const +from neutron.common import utils as n_utils from neutron.db import db_base_plugin_common from neutron.db import ovn_revision_numbers_db as db_rev from neutron.objects import ports as port_obj from neutron.services.trunk.drivers import base as trunk_base +from neutron.services.trunk import exceptions as trunk_exc SUPPORTED_INTERFACES = ( @@ -49,13 +51,11 @@ context = n_context.get_admin_context() db_parent_port = port_obj.Port.get_object(context, id=parent_port) parent_port_status = db_parent_port.status - parent_port_bindings = db_parent_port.bindings[0] for subport in subports: with db_api.CONTEXT_WRITER.using(context), ( txn(check_error=True)) as ovn_txn: port = self._set_binding_profile(context, subport, parent_port, - parent_port_status, - parent_port_bindings, ovn_txn) + parent_port_status, 
ovn_txn) db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS) def _unset_sub_ports(self, subports): @@ -69,8 +69,7 @@ @db_base_plugin_common.convert_result_to_dict def _set_binding_profile(self, context, subport, parent_port, - parent_port_status, - parent_port_bindings, ovn_txn): + parent_port_status, ovn_txn): LOG.debug("Setting parent %s for subport %s", parent_port, subport.port_id) db_port = port_obj.Port.get_object(context, id=subport.port_id) @@ -82,9 +81,6 @@ check_rev_cmd = self.plugin_driver.nb_ovn.check_revision_number( db_port.id, db_port, ovn_const.TYPE_PORTS) ovn_txn.add(check_rev_cmd) - parent_binding_host = '' - if parent_port_bindings.host: - parent_binding_host = parent_port_bindings.host try: # NOTE(flaviof): We expect binding's host to be set. Otherwise, # sub-port will not transition from DOWN to ACTIVE. @@ -100,7 +96,6 @@ port_obj.PortBinding.update_object( context, {'profile': binding.profile, - 'host': parent_binding_host, 'vif_type': portbindings.VIF_TYPE_OVS}, port_id=subport.port_id, host=binding.host) @@ -162,13 +157,9 @@ LOG.debug("Done unsetting parent for subport %s", subport.port_id) return db_port - def trunk_updated(self, trunk): - # Check if parent port is handled by OVN. - if not self.plugin_driver.nb_ovn.lookup('Logical_Switch_Port', - trunk.port_id, default=None): - return - if trunk.sub_ports: - self._set_sub_ports(trunk.port_id, trunk.sub_ports) + @staticmethod + def _is_port_bound(port): + return n_utils.is_port_bound(port, log_message=False) def trunk_created(self, trunk): # Check if parent port is handled by OVN. @@ -204,10 +195,18 @@ def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.trunk_created(payload.states[0]) - elif event == events.AFTER_UPDATE: - self.trunk_updated(payload.states[0]) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.states[0]) + elif event == events.PRECOMMIT_CREATE: + trunk = payload.desired_state + parent_port = trunk.db_obj.port + if self._is_port_bound(parent_port): + raise trunk_exc.ParentPortInUse(port_id=parent_port.id) + elif event == events.PRECOMMIT_DELETE: + trunk = payload.states[0] + parent_port = payload.states[1] + if self._is_port_bound(parent_port): + raise trunk_exc.TrunkInUse(trunk_id=trunk.id) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: @@ -232,8 +231,8 @@ super(OVNTrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = OVNTrunkHandler(self.plugin_driver) - for _event in (events.AFTER_CREATE, events.AFTER_UPDATE, - events.AFTER_DELETE): + for _event in (events.AFTER_CREATE, events.AFTER_DELETE, + events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, _event) diff -Nru neutron-20.4.0/neutron/services/trunk/plugin.py neutron-20.5.0/neutron/services/trunk/plugin.py --- neutron-20.4.0/neutron/services/trunk/plugin.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/services/trunk/plugin.py 2023-11-23 09:44:10.000000000 +0000 @@ -294,6 +294,7 @@ trunk = self._get_trunk(context, trunk_id) rules.trunk_can_be_managed(context, trunk) trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) + parent_port = trunk.db_obj.port if trunk_port_validator.can_be_trunked_or_untrunked(context): # NOTE(status_police): when a trunk is deleted, the logical # object disappears from the datastore, therefore there is no @@ -307,7 +308,7 @@ 'deleting trunk port %s: %s', trunk_id, str(e)) payload = 
events.DBEventPayload(context, resource_id=trunk_id, - states=(trunk,)) + states=(trunk, parent_port)) registry.publish(resources.TRUNK, events.PRECOMMIT_DELETE, self, payload=payload) else: @@ -317,7 +318,7 @@ registry.publish(resources.TRUNK, events.AFTER_DELETE, self, payload=events.DBEventPayload( context, resource_id=trunk_id, - states=(trunk,))) + states=(trunk, parent_port))) @db_base_plugin_common.convert_result_to_dict def add_subports(self, context, trunk_id, subports): diff -Nru neutron-20.4.0/neutron/tests/functional/agent/l3/test_keepalived_state_change.py neutron-20.5.0/neutron/tests/functional/agent/l3/test_keepalived_state_change.py --- neutron-20.4.0/neutron/tests/functional/agent/l3/test_keepalived_state_change.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/agent/l3/test_keepalived_state_change.py 2023-11-23 09:44:10.000000000 +0000 @@ -96,7 +96,7 @@ except FileNotFoundError: return False try: - utils.wait_until_true(text_in_file, timeout=15) + utils.wait_until_true(text_in_file, timeout=18) except utils.WaitTimeout: devices = {} for dev in ip_lib.IPWrapper( diff -Nru neutron-20.4.0/neutron/tests/functional/agent/test_ovs_flows.py neutron-20.5.0/neutron/tests/functional/agent/test_ovs_flows.py --- neutron-20.4.0/neutron/tests/functional/agent/test_ovs_flows.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/agent/test_ovs_flows.py 2023-11-23 09:44:10.000000000 +0000 @@ -25,6 +25,8 @@ from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ + import ovs_dvr_neutron_agent as ovsdvragt +from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_neutron_agent as ovsagt from neutron.tests.common import base as common_base from neutron.tests.common import helpers @@ -299,8 +301,9 @@ """ def setUp(self): + dvr_enabled = True cfg.CONF.set_override('enable_distributed_routing', - True, + dvr_enabled, group='AGENT') super(OVSFlowTestCase, self).setUp() self.phys_br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge @@ -322,7 +325,9 @@ prefix=cfg.CONF.OVS.tun_peer_patch_port), common_utils.get_rand_device_name( prefix=cfg.CONF.OVS.int_peer_patch_port)) - self.br_tun.setup_default_table(self.tun_p, True) + self.br_tun.setup_default_table(self.tun_p, True, dvr_enabled) + ovsdvragt.OVSDVRNeutronAgent._setup_dvr_flows_on_tun_br(self.br_tun, + self.tun_p) def test_provision_local_vlan(self): kwargs = {'port': 123, 'lvid': 888, 'segmentation_id': 777} diff -Nru neutron-20.4.0/neutron/tests/functional/base.py neutron-20.5.0/neutron/tests/functional/base.py --- neutron-20.4.0/neutron/tests/functional/base.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/base.py 2023-11-23 09:44:10.000000000 +0000 @@ -363,7 +363,7 @@ # NOTE(ralonsoh): do not access to the DB at exit when the SQL # connection is already closed, to avoid useless exception messages. 
mock.patch.object( - self.mech_driver, '_set_hash_ring_nodes_offline').start() + self.mech_driver, '_remove_node_from_hash_ring').start() self.mech_driver.pre_fork_initialize( mock.ANY, mock.ANY, trigger_cls.trigger) diff -Nru neutron-20.4.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py neutron-20.5.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py --- neutron-20.4.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py 2023-11-23 09:44:10.000000000 +0000 @@ -1038,7 +1038,9 @@ m_publish.reset_mock() self.pf_plugin.delete_floatingip_port_forwarding( self.context, pf_obj['id'], fip_id) - m_publish.assert_called_once() + call = mock.call('port_forwarding', 'after_delete', self.pf_plugin, + payload=mock.ANY) + m_publish.assert_has_calls([call]) # Assert load balancer for port forwarding is stale _verify_lb(self, 'udp', 5353, 53) @@ -1123,10 +1125,9 @@ # Check a meter and fair meter exist self.assertTrue(self.nb_api._tables['Meter'].rows) self.assertTrue(self.nb_api._tables['Meter_Band'].rows) - self.assertEqual(cfg.CONF.network_log.burst_limit, - [*self.nb_api._tables['Meter_Band'].rows.values()][0].burst_size) - self.assertEqual(cfg.CONF.network_log.rate_limit, - [*self.nb_api._tables['Meter_Band'].rows.values()][0].rate) + self.assertEqual(len([*self.nb_api._tables['Meter'].rows.values()]), + len([*self.nb_api._tables['Meter_Band'].rows.values()])) + self._check_meters_consistency() # Update burst and rate limit values on the configuration ovn_config.cfg.CONF.set_override('burst_limit', CFG_NEW_BURST, group='network_log') @@ -1136,7 +1137,16 @@ self.assertRaises(periodics.NeverAgain, self.maint.check_fair_meter_consistency) # Check meter band was effectively changed after the maintenance call - self.assertEqual(CFG_NEW_BURST, - [*self.nb_api._tables['Meter_Band'].rows.values()][0].burst_size) - self.assertEqual(CFG_NEW_RATE, - [*self.nb_api._tables['Meter_Band'].rows.values()][0].rate) + self._check_meters_consistency(CFG_NEW_BURST, CFG_NEW_RATE) + + def _check_meters_consistency(self, new_burst=None, new_rate=None): + burst, rate = (new_burst, new_rate) if new_burst else ( + cfg.CONF.network_log.burst_limit, cfg.CONF.network_log.rate_limit) + for meter in [*self.nb_api._tables['Meter'].rows.values()]: + meter_band = self.nb_api.lookup('Meter_Band', meter.bands[0].uuid) + if "_stateless" in meter.name: + self.assertEqual(int(burst / 2), meter_band.burst_size) + self.assertEqual(int(rate / 2), meter_band.rate) + else: + self.assertEqual(burst, meter_band.burst_size) + self.assertEqual(rate, meter_band.rate) diff -Nru neutron-20.4.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py neutron-20.5.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py --- neutron-20.4.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 2023-11-23 09:44:10.000000000 +0000 @@ -577,3 +577,45 @@ interval = ovn_conf.get_ovn_ovsdb_probe_interval() for idl in idls: self.assertEqual(interval, idl._session.reconnect.probe_interval) + + +class TestPortBindingChassisEvent(base.TestOVNFunctionalBase, + 
test_l3.L3NatTestCaseMixin): + + def setUp(self, **kwargs): + super().setUp(**kwargs) + self.chassis = self.add_fake_chassis('ovs-host1') + self.l3_plugin = directory.get_plugin(plugin_constants.L3) + kwargs = {'arg_list': (external_net.EXTERNAL,), + external_net.EXTERNAL: True} + self.net = self._make_network( + self.fmt, 'ext_net', True, as_admin=True, **kwargs) + self._make_subnet(self.fmt, self.net, '20.0.10.1', '20.0.10.0/24') + port_res = self._create_port(self.fmt, self.net['network']['id']) + self.port = self.deserialize(self.fmt, port_res)['port'] + + self.ext_api = test_extensions.setup_extensions_middleware( + test_l3.L3TestExtensionManager()) + self.pb_event_match = mock.patch.object( + self.sb_api.idl._portbinding_event, 'match_fn').start() + + def _check_pb_type(self, _type): + def check_pb_type(_type): + if len(self.pb_event_match.call_args_list) < 1: + return False + + pb_row = self.pb_event_match.call_args_list[0].args[1] + return _type == pb_row.type + + n_utils.wait_until_true(lambda: check_pb_type(_type), timeout=5) + + def test_pb_type_patch(self): + router = self._make_router(self.fmt, self._tenant_id) + self._add_external_gateway_to_router(router['router']['id'], + self.net['network']['id']) + self._check_pb_type('patch') + + def test_pb_type_empty(self): + self.sb_api.lsp_bind(self.port['id'], self.chassis, + may_exist=True).execute(check_error=True) + self._check_pb_type('') diff -Nru neutron-20.4.0/neutron/tests/functional/privileged/agent/linux/test_tc_lib.py neutron-20.5.0/neutron/tests/functional/privileged/agent/linux/test_tc_lib.py --- neutron-20.4.0/neutron/tests/functional/privileged/agent/linux/test_tc_lib.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/privileged/agent/linux/test_tc_lib.py 2023-11-23 09:44:10.000000000 +0000 @@ -33,6 +33,7 @@ self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface(self.device, self.namespace, 'dummy') + priv_ip_lib.set_link_attribute(self.device, self.namespace, state='up') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) @@ -52,7 +53,8 @@ namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(0, len(qdiscs)) + self.assertEqual(1, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) def test_add_tc_qdisc_htb_no_handle(self): priv_tc_lib.add_tc_qdisc( @@ -69,7 +71,8 @@ namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(0, len(qdiscs)) + self.assertEqual(1, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) def test_add_tc_qdisc_tbf(self): burst = 192000 @@ -95,23 +98,26 @@ namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(0, len(qdiscs)) + self.assertEqual(1, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) def test_add_tc_qdisc_ingress(self): priv_tc_lib.add_tc_qdisc(self.device, kind='ingress', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(1, len(qdiscs)) - self.assertEqual('ingress', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) - self.assertEqual(rtnl.TC_H_INGRESS, qdiscs[0]['parent']) - self.assertEqual(0xffff0000, qdiscs[0]['handle']) + self.assertEqual(2, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) + 
self.assertEqual('ingress', tc_lib._get_attr(qdiscs[1], 'TCA_KIND')) + self.assertEqual(rtnl.TC_H_INGRESS, qdiscs[1]['parent']) + self.assertEqual(0xffff0000, qdiscs[1]['handle']) priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(0, len(qdiscs)) + self.assertEqual(1, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) def test_delete_tc_qdisc_no_device(self): self.assertRaises( @@ -138,14 +144,16 @@ namespace=self.namespace) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(1, len(qdiscs)) - self.assertEqual('ingress', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) + self.assertEqual(2, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) + self.assertEqual('ingress', tc_lib._get_attr(qdiscs[1], 'TCA_KIND')) self.assertIsNone( priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', namespace=self.namespace)) qdiscs = priv_tc_lib.list_tc_qdiscs(self.device, namespace=self.namespace) - self.assertEqual(0, len(qdiscs)) + self.assertEqual(1, len(qdiscs)) + self.assertEqual('noqueue', tc_lib._get_attr(qdiscs[0], 'TCA_KIND')) self.assertEqual( errno.EINVAL, priv_tc_lib.delete_tc_qdisc(self.device, kind='ingress', @@ -167,6 +175,7 @@ self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface('int_dummy', self.namespace, 'dummy') + priv_ip_lib.set_link_attribute(self.device, self.namespace, state='up') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) @@ -248,6 +257,7 @@ self.addCleanup(self._remove_ns, self.namespace) self.device = 'int_dummy' priv_ip_lib.create_interface('int_dummy', self.namespace, 'dummy') + priv_ip_lib.set_link_attribute(self.device, self.namespace, state='up') def _remove_ns(self, namespace): priv_ip_lib.remove_netns(namespace) @@ -274,9 +284,8 @@ self.assertEqual(value, filter_keys[index]) def test_add_tc_filter_policy(self): - priv_tc_lib.add_tc_qdisc( - self.device, parent=rtnl.TC_H_ROOT, kind='ingress', - namespace=self.namespace) + priv_tc_lib.add_tc_qdisc(self.device, kind='ingress', + namespace=self.namespace) # NOTE(ralonsoh): # - rate: 320000 bytes/sec (pyroute2 units) = 2560 kbits/sec (OS units) diff -Nru neutron-20.4.0/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py neutron-20.5.0/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py --- neutron-20.4.0/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/services/logapi/drivers/ovn/test_driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -30,6 +30,12 @@ self._check_is_supported() self.ctxt = context.Context('admin', 'fake_tenant') + # Since these tests use the _create_network() from the unit test suite + # but _create_security_group() is from the functional tests, two + # different tenant_ids will be used unless we specify the following + # line in the code: + self._tenant_id = self.ctxt.project_id + def _check_is_supported(self): if not self.log_driver.network_logging_supported(self.nb_api): self.skipTest("The current OVN version does not offer support " diff -Nru neutron-20.4.0/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py neutron-20.5.0/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py --- 
neutron-20.4.0/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/functional/services/trunk/drivers/ovn/test_trunk_driver.py 2023-11-23 09:44:09.000000000 +0000 @@ -15,8 +15,9 @@ import contextlib from neutron_lib.api.definitions import portbindings +from neutron_lib.callbacks import exceptions as n_exc from neutron_lib import constants as n_consts -from neutron_lib.db import api as db_api +from neutron_lib.objects import registry as obj_reg from neutron_lib.plugins import utils from neutron_lib.services.trunk import constants as trunk_consts from oslo_utils import uuidutils @@ -29,8 +30,8 @@ class TestOVNTrunkDriver(base.TestOVNFunctionalBase): - def setUp(self, **kwargs): - super().setUp(**kwargs) + def setUp(self): + super(TestOVNTrunkDriver, self).setUp() self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type( trunk_consts.SEGMENTATION_TYPE_VLAN, @@ -41,8 +42,7 @@ sub_ports = sub_ports or [] with self.network() as network: with self.subnet(network=network) as subnet: - with self.port(subnet=subnet, - device_owner='compute:nova') as parent_port: + with self.port(subnet=subnet) as parent_port: tenant_id = uuidutils.generate_uuid() trunk = {'trunk': { 'port_id': parent_port['port']['id'], @@ -67,14 +67,17 @@ if row.parent_name and row.tag: device_owner = row.external_ids[ ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY] + revision_number = row.external_ids[ + ovn_const.OVN_REV_NUM_EXT_ID_KEY] ovn_trunk_info.append({'port_id': row.name, 'parent_port_id': row.parent_name, 'tag': row.tag, 'device_owner': device_owner, + 'revision_number': revision_number, }) return ovn_trunk_info - def _verify_trunk_info(self, trunk, has_items, host=''): + def _verify_trunk_info(self, trunk, has_items): ovn_subports_info = self._get_ovn_trunk_info() neutron_subports_info = [] for subport in trunk.get('sub_ports', []): @@ -83,12 +86,12 @@ 'parent_port_id': [trunk['port_id']], 'tag': [subport['segmentation_id']], 'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER, + 'revision_number': '2', }) - # Check the subport binding. - pb = port_obj.PortBinding.get_object( - self.context, port_id=subport['port_id'], host=host) - self.assertEqual(n_consts.PORT_STATUS_ACTIVE, pb.status) - self.assertEqual(host, pb.host) + # Check that the subport has the binding is active. 
+ binding = obj_reg.load_class('PortBinding').get_object( + self.context, port_id=subport['port_id'], host='') + self.assertEqual(n_consts.PORT_STATUS_ACTIVE, binding['status']) self.assertCountEqual(ovn_subports_info, neutron_subports_info) self.assertEqual(has_items, len(neutron_subports_info) != 0) @@ -96,14 +99,6 @@ if trunk.get('status'): self.assertEqual(trunk_consts.TRUNK_ACTIVE_STATUS, trunk['status']) - def _bind_port(self, port_id, host): - with db_api.CONTEXT_WRITER.using(self.context): - pb = port_obj.PortBinding.get_object(self.context, - port_id=port_id, host='') - pb.delete() - port_obj.PortBinding(self.context, port_id=port_id, host=host, - vif_type=portbindings.VIF_TYPE_OVS).create() - def test_trunk_create(self): with self.trunk() as trunk: self._verify_trunk_info(trunk, has_items=False) @@ -113,6 +108,25 @@ with self.trunk([subport]) as trunk: self._verify_trunk_info(trunk, has_items=True) + def test_trunk_create_parent_port_bound(self): + with self.network() as network: + with self.subnet(network=network) as subnet: + with self.port(subnet=subnet) as parent_port: + pb = port_obj.PortBinding.get_objects( + self.context, port_id=parent_port['port']['id']) + port_obj.PortBinding.update_object( + self.context, {'vif_type': portbindings.VIF_TYPE_OVS}, + port_id=pb[0].port_id, host=pb[0].host) + tenant_id = uuidutils.generate_uuid() + trunk = {'trunk': { + 'port_id': parent_port['port']['id'], + 'tenant_id': tenant_id, 'project_id': tenant_id, + 'admin_state_up': True, + 'name': 'trunk', 'sub_ports': []}} + self.assertRaises(n_exc.CallbackFailure, + self.trunk_plugin.create_trunk, + self.context, trunk) + def test_subport_add(self): with self.subport() as subport: with self.trunk() as trunk: @@ -121,22 +135,10 @@ new_trunk = self.trunk_plugin.get_trunk(self.context, trunk['id']) self._verify_trunk_info(new_trunk, has_items=True) - # Bind parent port. That will trigger the binding of the - # trunk subports too, using the same host ID. - self._bind_port(trunk['port_id'], 'host1') - self.mech_driver.set_port_status_up(trunk['port_id']) - self._verify_trunk_info(new_trunk, has_items=True, - host='host1') def test_subport_delete(self): with self.subport() as subport: with self.trunk([subport]) as trunk: - # Bind parent port. 
- self._bind_port(trunk['port_id'], 'host1') - self.mech_driver.set_port_status_up(trunk['port_id']) - self._verify_trunk_info(trunk, has_items=True, - host='host1') - self.trunk_plugin.remove_subports(self.context, trunk['id'], {'sub_ports': [subport]}) new_trunk = self.trunk_plugin.get_trunk(self.context, @@ -147,3 +149,14 @@ with self.trunk() as trunk: self.trunk_plugin.delete_trunk(self.context, trunk['id']) self._verify_trunk_info({}, has_items=False) + + def test_trunk_delete_parent_port_bound(self): + with self.trunk() as trunk: + bp = port_obj.PortBinding.get_objects( + self.context, port_id=trunk['port_id']) + port_obj.PortBinding.update_object( + self.context, {'vif_type': portbindings.VIF_TYPE_OVS}, + port_id=bp[0].port_id, host=bp[0].host) + self.assertRaises(n_exc.CallbackFailure, + self.trunk_plugin.delete_trunk, + self.context, trunk['id']) diff -Nru neutron-20.4.0/neutron/tests/unit/agent/dhcp/test_agent.py neutron-20.5.0/neutron/tests/unit/agent/dhcp/test_agent.py --- neutron-20.4.0/neutron/tests/unit/agent/dhcp/test_agent.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/agent/dhcp/test_agent.py 2023-11-23 09:44:10.000000000 +0000 @@ -152,10 +152,27 @@ fake_ipv6_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', device_owner='', + device_id='', mac_address='aa:bb:cc:dd:ee:99', network_id=FAKE_NETWORK_UUID, fixed_ips=[fake_fixed_ipv6]) +fake_ovn_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', + device_owner='', + device_id='', + mac_address='aa:bb:cc:dd:ee:98', + network_id=FAKE_NETWORK_UUID, + fixed_ips=[fake_fixed_ip2]) + +fake_ovn_metadata_port = dhcp.DictModel(id='12345678-1234-aaaa-123456789000', + device_owner=const. + DEVICE_OWNER_DISTRIBUTED, + device_id='ovnmeta-{}'.format( + FAKE_NETWORK_UUID), + mac_address='aa:bb:cc:dd:ee:99', + network_id=FAKE_NETWORK_UUID, + fixed_ips=[fake_fixed_ip1]) + fake_meta_port = dhcp.DictModel(id='12345678-1234-aaaa-1234567890ab', mac_address='aa:bb:cc:dd:ee:ff', network_id=FAKE_NETWORK_UUID, @@ -191,6 +208,12 @@ subnets=[fake_ipv6_subnet], ports=[fake_ipv6_port]) +fake_ovn_network = dhcp.NetModel(id=FAKE_NETWORK_UUID, + project_id=FAKE_PROJECT_ID, + admin_state_up=True, + subnets=[fake_ipv6_subnet], + ports=[fake_ovn_metadata_port, fake_ovn_port]) + fake_network_ipv6_ipv4 = dhcp.NetModel( id=FAKE_NETWORK_UUID, project_id=FAKE_PROJECT_ID, @@ -850,7 +873,7 @@ default_cmd_callback=mock.ANY) def _enable_dhcp_helper(self, network, enable_isolated_metadata=False, - is_isolated_network=False): + is_isolated_network=False, is_ovn_network=False): self.dhcp._process_monitor = mock.Mock() if enable_isolated_metadata: cfg.CONF.set_override('enable_isolated_metadata', True) @@ -860,7 +883,8 @@ mock.call.get_network_info(network.id)]) self.call_driver.assert_called_once_with('enable', network) self.cache.assert_has_calls([mock.call.put(network)]) - if is_isolated_network and enable_isolated_metadata: + if (is_isolated_network and enable_isolated_metadata and not + is_ovn_network): self.external_process.assert_has_calls([ self._process_manager_constructor_call(), mock.call().enable()], any_order=True) @@ -905,6 +929,21 @@ enable_isolated_metadata=True, is_isolated_network=False) + def test_enable_dhcp_helper_enable_metadata_ovn_network(self): + # Metadata should not be enabled when the dhcp agent is used + # in ML2/OVN where the ovn metadata agent is responsible for the + # metadata service. 
+ self._enable_dhcp_helper(fake_ovn_network, is_ovn_network=True) + + def test_enable_dhcp_helper_ovn_network_with_enable_isolated_metadata( + self): + # Metadata should not be enabled when the dhcp agent is used + # in ML2/OVN where the ovn metadata agent is responsible for the + # metadata service. Even if the enable_isolated_metadata is enabled + self._enable_dhcp_helper(fake_ovn_network, + enable_isolated_metadata=True, + is_ovn_network=True) + def test_enable_dhcp_helper_enable_metadata_empty_network(self): self._enable_dhcp_helper(empty_network, enable_isolated_metadata=True, diff -Nru neutron-20.4.0/neutron/tests/unit/agent/linux/test_dhcp.py neutron-20.5.0/neutron/tests/unit/agent/linux/test_dhcp.py --- neutron-20.4.0/neutron/tests/unit/agent/linux/test_dhcp.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/agent/linux/test_dhcp.py 2023-11-23 09:44:10.000000000 +0000 @@ -88,6 +88,19 @@ self.extra_dhcp_opts = [] +class FakeOvnMetadataPort(object): + def __init__(self): + self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' + self.admin_state_up = True + self.device_owner = constants.DEVICE_OWNER_DISTRIBUTED + self.fixed_ips = [ + FakeIPAllocation('192.168.0.10', + 'dddddddd-dddd-dddd-dddd-dddddddddddd')] + self.mac_address = '00:00:80:aa:bb:ee' + self.device_id = 'ovnmeta-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.extra_dhcp_opts = [] + + class FakeReservedPort(object): def __init__(self, id='reserved-aaaa-aaaa-aaaa-aaaaaaaaaaa'): self.admin_state_up = True @@ -755,6 +768,14 @@ self.namespace = 'qdhcp-ns' +class FakeNetworkDhcpandOvnMetadataPort(object): + def __init__(self): + self.id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.subnets = [FakeV4Subnet()] + self.ports = [FakePort1(), FakeDhcpPort(), FakeOvnMetadataPort()] + self.namespace = 'qdhcp-ns' + + class FakeDualNetworkGatewayRoute(object): def __init__(self): self.id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' @@ -3050,6 +3071,10 @@ self.assertFalse(dhcp.Dnsmasq.has_metadata_subnet( [FakeV4Subnet()])) + def test_should_enable_metadata_ovn_metadata_port_returns_false(self): + self.assertFalse(dhcp.Dnsmasq.should_enable_metadata( + self.conf, FakeNetworkDhcpandOvnMetadataPort())) + def test_should_enable_metadata_isolated_network_returns_true(self): self.assertTrue(dhcp.Dnsmasq.should_enable_metadata( self.conf, FakeV4NetworkNoRouter())) @@ -3098,6 +3123,12 @@ 'force_metadata': False} self._test__generate_opts_per_subnet_helper(config, False) + def test__generate_opts_per_subnet_with_metadata_port(self): + config = {'enable_isolated_metadata': False, + 'force_metadata': False} + self._test__generate_opts_per_subnet_helper(config, True, + network_class=FakeNetworkDhcpandOvnMetadataPort) + def test__generate_opts_per_subnet_isolated_metadata_with_router(self): config = {'enable_isolated_metadata': True, 'force_metadata': False} diff -Nru neutron-20.4.0/neutron/tests/unit/agent/ovn/metadata/test_agent.py neutron-20.5.0/neutron/tests/unit/agent/ovn/metadata/test_agent.py --- neutron-20.4.0/neutron/tests/unit/agent/ovn/metadata/test_agent.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/agent/ovn/metadata/test_agent.py 2023-11-23 09:44:10.000000000 +0000 @@ -83,7 +83,8 @@ self.ports = [] for i in range(0, 3): - self.ports.append(makePort(datapath=DatapathInfo(uuid=str(i), + self.ports.append(makePort( + datapath=DatapathInfo(uuid=str(uuid.uuid4()), external_ids={'name': 'neutron-%d' % i}))) self.agent.sb_idl.get_ports_on_chassis.return_value = self.ports diff -Nru 
neutron-20.4.0/neutron/tests/unit/db/test_l3_dvr_db.py neutron-20.5.0/neutron/tests/unit/db/test_l3_dvr_db.py --- neutron-20.4.0/neutron/tests/unit/db/test_l3_dvr_db.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/db/test_l3_dvr_db.py 2023-11-23 09:44:09.000000000 +0000 @@ -30,6 +30,7 @@ from neutron_lib.plugins import utils as plugin_utils from oslo_utils import uuidutils +from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import l3_dvr_db from neutron.db import l3_dvrscheduler_db @@ -1521,7 +1522,7 @@ self.assertTrue( self.mixin.is_router_distributed(self.ctx, router_id)) - @mock.patch.object(l3_dvr_db, "is_port_bound") + @mock.patch.object(n_utils, 'is_port_bound') def test_get_ports_under_dvr_connected_subnet(self, is_port_bound_mock): router_dict = {'name': 'test_router', 'admin_state_up': True, 'distributed': True} diff -Nru neutron-20.4.0/neutron/tests/unit/db/test_ovn_hash_ring_db.py neutron-20.5.0/neutron/tests/unit/db/test_ovn_hash_ring_db.py --- neutron-20.4.0/neutron/tests/unit/db/test_ovn_hash_ring_db.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/db/test_ovn_hash_ring_db.py 2023-11-23 09:44:10.000000000 +0000 @@ -270,16 +270,44 @@ self.assertEqual(0, ovn_hash_ring_db.count_offline_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP)) - def test_set_nodes_from_host_as_offline(self): + def test_remove_node_by_uuid(self): self._add_nodes_and_assert_exists(count=3) active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP) self.assertEqual(3, len(active_nodes)) - ovn_hash_ring_db.set_nodes_from_host_as_offline( - self.admin_ctx, HASH_RING_TEST_GROUP) + node_to_remove = active_nodes[0].node_uuid + ovn_hash_ring_db.remove_node_by_uuid( + self.admin_ctx, node_to_remove) active_nodes = ovn_hash_ring_db.get_active_nodes( self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP) - self.assertEqual(0, len(active_nodes)) + self.assertEqual(2, len(active_nodes)) + self.assertNotIn(node_to_remove, [n.node_uuid for n in active_nodes]) + + def test_cleanup_old_nodes(self): + # Add 2 new nodes + self._add_nodes_and_assert_exists(count=2) + + # Subtract 5 days from utcnow() and touch the nodes to make + # them to appear stale + fake_utcnow = timeutils.utcnow() - datetime.timedelta(days=5) + with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: + mock_utcnow.return_value = fake_utcnow + ovn_hash_ring_db.touch_nodes_from_host(self.admin_ctx, + HASH_RING_TEST_GROUP) + + # Add 3 new nodes + self._add_nodes_and_assert_exists(count=3) + + # Assert we have 5 nodes in the hash ring + self.assertEqual(5, ovn_hash_ring_db.count_nodes_from_host( + self.admin_ctx, HASH_RING_TEST_GROUP)) + + # Clean up the 2 stale nodes + ovn_hash_ring_db.cleanup_old_nodes(self.admin_ctx, days=5) + + # Assert we only have 3 node entries after the clean up + self.assertEqual(3, ovn_hash_ring_db.count_nodes_from_host( + self.admin_ctx, HASH_RING_TEST_GROUP)) diff -Nru neutron-20.4.0/neutron/tests/unit/extensions/test_l3.py neutron-20.5.0/neutron/tests/unit/extensions/test_l3.py --- neutron-20.4.0/neutron/tests/unit/extensions/test_l3.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/extensions/test_l3.py 2023-11-23 09:44:10.000000000 +0000 @@ -4462,9 +4462,15 @@ def test_port_deletion_prevention_handles_missing_port(self): pl = directory.get_plugin(plugin_constants.L3) - self.assertIsNone( - 
pl.prevent_l3_port_deletion(context.get_admin_context(), 'fakeid') - ) + # NOTE(slaweq): it's needed to make at least one API call to the + # application to initialize all models which are using lazy loading of + # some attributes, + # check https://bugs.launchpad.net/neutron/+bug/2028285 for details + with self.network(): + self.assertIsNone( + pl.prevent_l3_port_deletion(context.get_admin_context(), + 'fakeid') + ) class L3TestExtensionManagerWithDNS(L3TestExtensionManager): diff -Nru neutron-20.4.0/neutron/tests/unit/fake_resources.py neutron-20.5.0/neutron/tests/unit/fake_resources.py --- neutron-20.4.0/neutron/tests/unit/fake_resources.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/fake_resources.py 2023-11-23 09:44:10.000000000 +0000 @@ -159,6 +159,8 @@ self.ha_chassis_group_del = mock.Mock() self.ha_chassis_group_add_chassis = mock.Mock() self.ha_chassis_group_del_chassis = mock.Mock() + self.lrp_get = mock.Mock() + self.get_schema_version = mock.Mock(return_value='3.6.0') class FakeOvsdbSbOvnIdl(object): @@ -181,6 +183,7 @@ self.is_table_present = mock.Mock() self.is_table_present.return_value = False self.get_chassis_by_card_serial_from_cms_options = mock.Mock() + self.get_schema_version = mock.Mock(return_value='3.6.0') class FakeOvsdbTransaction(object): diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py 2023-11-23 09:44:10.000000000 +0000 @@ -52,7 +52,8 @@ patch_int_ofport = 5555 arp_responder_enabled = False self.br.setup_default_table(patch_int_ofport=patch_int_ofport, - arp_responder_enabled=arp_responder_enabled) + arp_responder_enabled=arp_responder_enabled, + dvr_enabled=False) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, @@ -160,7 +161,8 @@ patch_int_ofport = 5555 arp_responder_enabled = True self.br.setup_default_table(patch_int_ofport=patch_int_ofport, - arp_responder_enabled=arp_responder_enabled) + arp_responder_enabled=arp_responder_enabled, + dvr_enabled=False) (dp, ofp, ofpp) = self._get_dp() expected = [ call._send_msg(ofpp.OFPFlowMod(dp, @@ -280,6 +282,33 @@ ] self.assertEqual(expected, self.mock.mock_calls) + def _test_setup_default_table_dvr_helper(self, dvr_enabled): + patch_int_ofport = 5555 + arp_responder_enabled = True + self.br.setup_default_table(patch_int_ofport=patch_int_ofport, + arp_responder_enabled=arp_responder_enabled, + dvr_enabled=dvr_enabled) + (dp, ofp, ofpp) = self._get_dp() + non_dvr_specific_call = call._send_msg( + ofpp.OFPFlowMod( + dp, + cookie=self.stamp, + instructions=[ofpp.OFPInstructionGotoTable(table_id=2)], + match=ofpp.OFPMatch(in_port=patch_int_ofport), + priority=1, table_id=0), + active_bundle=None) + + if dvr_enabled: + self.assertNotIn(non_dvr_specific_call, self.mock.mock_calls) + else: + self.assertIn(non_dvr_specific_call, self.mock.mock_calls) + + def test_setup_default_table_dvr_enabled(self): + self._test_setup_default_table_dvr_helper(dvr_enabled=True) + + def test_setup_default_table_dvr_disabled(self): + self._test_setup_default_table_dvr_helper(dvr_enabled=False) + def test_provision_local_vlan(self): network_type = 'vxlan' lvid = 888 diff 
-Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py 2023-11-23 09:44:10.000000000 +0000 @@ -188,7 +188,8 @@ '_check_bridge_datapath_id').start() self._define_expected_calls() - def _define_expected_calls(self, arp_responder=False, igmp_snooping=False): + def _define_expected_calls( + self, arp_responder=False, igmp_snooping=False): self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE, datapath_type=mock.ANY), @@ -268,7 +269,11 @@ ] self.mock_tun_bridge_expected += [ - mock.call.setup_default_table(self.INT_OFPORT, arp_responder), + # NOTE: Parameters passed to setup_default_table() method are named + # in the production code. That's why we can't use keyword parameter + # here. The last parameter passed below is dvr_enabled set to False + mock.call.setup_default_table( + self.INT_OFPORT, arp_responder, False), ] self.ipdevice_expected = [] diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py 2023-11-23 09:44:10.000000000 +0000 @@ -522,7 +522,8 @@ "lsp1", external_ids=external_ids ) - def test_check_for_mcast_flood_reports(self): + def test_check_for_mcast_flood_reports_broken(self): + self.fake_ovn_client.is_mcast_flood_broken = True nb_idl = self.fake_ovn_client._nb_idl lsp0 = fakes.FakeOvsdbRow.create_one_ovsdb_row( attrs={'name': 'lsp0', @@ -563,14 +564,86 @@ self.assertRaises(periodics.NeverAgain, self.periodic.check_for_mcast_flood_reports) - # Assert only lsp1, lsp5 and lsp6 were called because they are the - # only ones meeting the criteria + # Assert only lsp1 and lsp5 were called because they are the + # only ones meeting to set mcast_flood_reports to 'true' expected_calls = [ mock.call('lsp1', mcast_flood_reports='true'), - mock.call('lsp5', mcast_flood_reports='true', mcast_flood='false'), - mock.call('lsp6', mcast_flood_reports='true', mcast_flood='false')] + mock.call('lsp5', mcast_flood_reports='true')] nb_idl.lsp_set_options.assert_has_calls(expected_calls) + self.assertEqual(2, nb_idl.lsp_set_options.call_count) + + # Assert only lsp6 and lsp7 were called because they are the + # only ones meeting to remove mcast_flood + expected_calls = [ + mock.call('Logical_Switch_Port', 'lsp6', 'options', + constants.LSP_OPTIONS_MCAST_FLOOD, + if_exists=True), + mock.call('Logical_Switch_Port', 'lsp7', 'options', + constants.LSP_OPTIONS_MCAST_FLOOD, + if_exists=True)] + + nb_idl.db_remove.assert_has_calls(expected_calls) + self.assertEqual(2, nb_idl.db_remove.call_count) + + def test_check_for_mcast_flood_reports(self): + self.fake_ovn_client.is_mcast_flood_broken = False + nb_idl = self.fake_ovn_client._nb_idl + + lsp0 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp0', + 'options': { + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'}, + 'type': ""}) + lsp1 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp1', 
'options': {}, 'type': ""}) + lsp2 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp2', + 'options': { + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'}, + 'type': "vtep"}) + lsp3 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp3', 'options': {}, + 'type': constants.LSP_TYPE_LOCALPORT}) + lsp4 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp4', 'options': {}, + 'type': "router"}) + lsp5 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp5', 'options': {}, + 'type': constants.LSP_TYPE_LOCALNET}) + lsp6 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp6', + 'options': { + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true', + constants.LSP_OPTIONS_MCAST_FLOOD: 'true'}, + 'type': constants.LSP_TYPE_LOCALNET}) + lsp7 = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': 'lsp7', + 'options': { + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true', + constants.LSP_OPTIONS_MCAST_FLOOD: 'false'}, + 'type': constants.LSP_TYPE_LOCALNET}) + + nb_idl.lsp_list.return_value.execute.return_value = [ + lsp0, lsp1, lsp2, lsp3, lsp4, lsp5, lsp6, lsp7] + + # Invoke the periodic method, it meant to run only once at startup + # so NeverAgain will be raised at the end + self.assertRaises(periodics.NeverAgain, + self.periodic.check_for_mcast_flood_reports) + + # Assert only lsp0 and lsp2 were called because they are the + # only ones meeting the criteria + expected_calls = [ + mock.call('Logical_Switch_Port', 'lsp0', 'options', + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS, + if_exists=True), + mock.call('Logical_Switch_Port', 'lsp2', 'options', + constants.LSP_OPTIONS_MCAST_FLOOD_REPORTS, + if_exists=True)] + + nb_idl.db_remove.assert_has_calls(expected_calls) + self.assertEqual(2, nb_idl.db_remove.call_count) def test_check_router_mac_binding_options(self): nb_idl = self.fake_ovn_client._nb_idl diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_client.py 2023-11-23 09:44:10.000000000 +0000 @@ -17,6 +17,7 @@ from neutron.common.ovn import constants from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.plugins.ml2 import db as ml2_db from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.tests import base from neutron.tests.unit import fake_resources as fakes @@ -25,6 +26,8 @@ from neutron_lib.api.definitions import portbindings from neutron_lib.services.logapi import constants as log_const +from tenacity import wait_none + class TestOVNClientBase(base.BaseTestCase): @@ -47,6 +50,9 @@ fakes.FakeChassis.create( attrs={'hostname': self.fake_smartnic_hostname})) + # Disable tenacity wait for UT + self.ovn_client._wait_for_port_bindings_host.retry.wait = wait_none() + def test_vnic_normal_unbound_port(self): self.assertEqual( '', @@ -139,6 +145,45 @@ 'Logical_Switch_Port', port_id, ('external_ids', {constants.OVN_HOST_ID_EXT_ID_KEY: host_id})) + def test_update_lsp_host_info_up_retry(self): + context = mock.MagicMock() + host_id = 'fake-binding-host-id' + port_id = 'fake-port-id' + db_port_no_host = mock.Mock( + id=port_id, port_bindings=[mock.Mock(host="")]) + db_port = mock.Mock( + id=port_id, 
port_bindings=[mock.Mock(host=host_id)]) + + with mock.patch.object( + self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: + mock_wait.return_value = db_port + self.ovn_client.update_lsp_host_info(context, db_port_no_host) + + # Assert _wait_for_port_bindings_host was called + mock_wait.assert_called_once_with(context, port_id) + + # Assert host_id was set + self.nb_idl.db_set.assert_called_once_with( + 'Logical_Switch_Port', port_id, + ('external_ids', {constants.OVN_HOST_ID_EXT_ID_KEY: host_id})) + + def test_update_lsp_host_info_up_retry_fail(self): + context = mock.MagicMock() + port_id = 'fake-port-id' + db_port_no_host = mock.Mock( + id=port_id, port_bindings=[mock.Mock(host="")]) + + with mock.patch.object( + self.ovn_client, '_wait_for_port_bindings_host') as mock_wait: + mock_wait.side_effect = RuntimeError("boom") + self.ovn_client.update_lsp_host_info(context, db_port_no_host) + + # Assert _wait_for_port_bindings_host was called + mock_wait.assert_called_once_with(context, port_id) + + # Assert host_id was NOT set + self.assertFalse(self.nb_idl.db_set.called) + def test_update_lsp_host_info_down(self): context = mock.MagicMock() port_id = 'fake-port-id' @@ -150,6 +195,47 @@ 'Logical_Switch_Port', port_id, 'external_ids', constants.OVN_HOST_ID_EXT_ID_KEY, if_exists=True) + @mock.patch.object(ml2_db, 'get_port') + def test__wait_for_port_bindings_host(self, mock_get_port): + context = mock.MagicMock() + host_id = 'fake-binding-host-id' + port_id = 'fake-port-id' + db_port_no_host = mock.Mock( + id=port_id, port_bindings=[mock.Mock(host="")]) + db_port = mock.Mock( + id=port_id, port_bindings=[mock.Mock(host=host_id)]) + + mock_get_port.side_effect = (db_port_no_host, db_port) + + ret = self.ovn_client._wait_for_port_bindings_host( + context, port_id) + + self.assertEqual(ret, db_port) + + expected_calls = [mock.call(context, port_id), + mock.call(context, port_id)] + mock_get_port.assert_has_calls(expected_calls) + + @mock.patch.object(ml2_db, 'get_port') + def test__wait_for_port_bindings_host_fail(self, mock_get_port): + context = mock.MagicMock() + port_id = 'fake-port-id' + db_port_no_pb = mock.Mock(id=port_id, port_bindings=[]) + db_port_no_host = mock.Mock( + id=port_id, port_bindings=[mock.Mock(host="")]) + + mock_get_port.side_effect = ( + db_port_no_pb, db_port_no_host, db_port_no_host) + + self.assertRaises( + RuntimeError, self.ovn_client._wait_for_port_bindings_host, + context, port_id) + + expected_calls = [mock.call(context, port_id), + mock.call(context, port_id), + mock.call(context, port_id)] + mock_get_port.assert_has_calls(expected_calls) + class TestOVNClientFairMeter(TestOVNClientBase, test_log_driver.TestOVNDriverBase): @@ -161,7 +247,16 @@ self.ovn_client.create_ovn_fair_meter(self._log_driver.meter_name) self.assertFalse(self.nb_idl.meter_del.called) self.assertTrue(self.nb_idl.meter_add.called) - self.nb_idl.meter_add.assert_called_once_with( + self.nb_idl.meter_add.assert_any_call( + name=self._log_driver.meter_name + "_stateless", + unit="pktps", + rate=int(self.fake_cfg_network_log.rate_limit / 2), + fair=True, + burst_size=int(self.fake_cfg_network_log.burst_limit / 2), + may_exist=False, + external_ids={constants.OVN_DEVICE_OWNER_EXT_ID_KEY: + log_const.LOGGING_PLUGIN}) + self.nb_idl.meter_add.assert_any_call( name=self._log_driver.meter_name, unit="pktps", rate=self.fake_cfg_network_log.rate_limit, @@ -173,10 +268,17 @@ def test_create_ovn_fair_meter_unchanged(self): mock_find_rows = mock.Mock() - mock_find_rows.execute.return_value = 
[self._fake_meter()] + fake_meter1 = [self._fake_meter()] + fake_meter2 = [self._fake_meter( + name=self._log_driver.meter_name + "_stateless", + bands=[mock.Mock(uuid='tb_stateless')])] + mock_find_rows.execute.side_effect = [fake_meter1, fake_meter1, + fake_meter2, fake_meter2] self.nb_idl.db_find_rows.return_value = mock_find_rows self.nb_idl.lookup.side_effect = lambda table, key, default: ( - self._fake_meter_band() if key == "test_band" else default) + self._fake_meter_band() if key == "test_band" else + self._fake_meter_band_stateless() if key == "tb_stateless" else + default) self.ovn_client.create_ovn_fair_meter(self._log_driver.meter_name) self.assertFalse(self.nb_idl.meter_del.called) self.assertFalse(self.nb_idl.meter_add.called) diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py 2023-11-23 09:44:10.000000000 +0000 @@ -23,6 +23,7 @@ from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync from neutron.services.ovn_l3 import plugin as ovn_plugin +from neutron.tests.unit import fake_resources as fakes from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import \ test_mech_driver @@ -1109,6 +1110,47 @@ expected_deleted) +class TestIsRouterPortChanged(test_mech_driver.OVNMechanismDriverTestCase): + + def setUp(self): + super(TestIsRouterPortChanged, self).setUp() + self.ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( + self.plugin, self.mech_driver.nb_ovn, self.mech_driver.sb_ovn, + 'log', self.mech_driver) + + self.db_router_port = { + 'id': 'aa076509-915d-4b1c-8d9d-3db53d9c5faf', + 'networks': ['fdf9:ad62:3a04::1/64'], + 'ipv6_ra_configs': {'address_mode': 'slaac', + 'send_periodic': 'true', + 'mtu': '1442'} + } + self.lrport_nets = ['fdf9:ad62:3a04::1/64'] + self.ovn_lrport = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'ipv6_ra_configs': {'address_mode': 'slaac', + 'send_periodic': 'true', + 'mtu': '1442'}}) + + self.ovn_nb_synchronizer.ovn_api.is_col_present.return_value = True + self.ovn_nb_synchronizer.ovn_api.lrp_get().execute.return_value = ( + self.ovn_lrport) + + def test__is_router_port_changed_not_changed(self): + self.assertFalse(self.ovn_nb_synchronizer._is_router_port_changed( + self.db_router_port, self.lrport_nets)) + + def test__is_router_port_changed_network_changed(self): + self.db_router_port['networks'] = ['172.24.4.26/24', + '2001:db8::206/64'] + self.assertTrue(self.ovn_nb_synchronizer._is_router_port_changed( + self.db_router_port, self.lrport_nets)) + + def test__is_router_port_changed_ipv6_ra_configs_changed(self): + self.db_router_port['ipv6_ra_configs']['mtu'] = '1500' + self.assertTrue(self.ovn_nb_synchronizer._is_router_port_changed( + self.db_router_port, self.lrport_nets)) + + class TestOvnSbSyncML2(test_mech_driver.OVNMechanismDriverTestCase): def test_ovn_sb_sync(self): diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py --- 
neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovsdb_monitor.py 2023-11-23 09:44:10.000000000 +0000 @@ -356,6 +356,7 @@ self.driver.set_port_status_up.assert_called() else: self.driver.set_port_status_up.assert_not_called() + self.driver.set_port_status_up.reset_mock() def test_event_matches(self): # NOTE(twilson) This primarily tests implementation details. If a @@ -365,10 +366,24 @@ attrs={'name': 'Port_Binding'}) ovsdb_row = fakes.FakeOvsdbRow.create_one_ovsdb_row self.driver.nb_ovn.lookup.return_value = ovsdb_row(attrs={'up': True}) + + # Port binding change. + self._test_event( + self.event.ROW_UPDATE, + ovsdb_row(attrs={'_table': pbtable, 'chassis': 'one', + 'type': '_fake_', 'logical_port': 'foo', + 'options': {}}), + ovsdb_row(attrs={'_table': pbtable, 'chassis': 'two', + 'type': '_fake_'})) + + # Port binding change because of a live migration in progress. + options = { + ovn_const.LSP_OPTIONS_REQUESTED_CHASSIS_KEY: 'chassis1,chassis2'} self._test_event( self.event.ROW_UPDATE, ovsdb_row(attrs={'_table': pbtable, 'chassis': 'one', - 'type': '_fake_', 'logical_port': 'foo'}), + 'type': '_fake_', 'logical_port': 'foo', + 'options': options}), ovsdb_row(attrs={'_table': pbtable, 'chassis': 'two', 'type': '_fake_'})) diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -1112,7 +1112,7 @@ resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) - ude.assert_called_once_with(port1['port']['id']) + ude.assert_called_once_with(port1['port']['id'], False) # If the port does NOT bellong to compute, do not notify Nova # about it's status changes @@ -1164,7 +1164,7 @@ resources.PORT, provisioning_blocks.L2_AGENT_ENTITY ) - ude.assert_called_once_with(port1['port']['id']) + ude.assert_called_once_with(port1['port']['id'], False) def test_bind_port_unsupported_vnic_type(self): fake_port = fakes.FakePort.create_one_port( @@ -2358,7 +2358,7 @@ self.assertTrue(agent.alive, "Agent of type %s alive=%s" % ( agent.agent_type, agent.alive)) - def _test__update_dnat_entry_if_needed(self, dvr=True): + def _test__update_dnat_entry_if_needed(self, up=True, dvr=True): if dvr: ovn_conf.cfg.CONF.set_override( 'enable_distributed_floating_ip', True, group='ovn') @@ -2374,25 +2374,33 @@ fake_db_find.execute.return_value = [nat_row] self.nb_ovn.db_find.return_value = fake_db_find - self.mech_driver._update_dnat_entry_if_needed(port_id) + self.mech_driver._update_dnat_entry_if_needed(port_id, up=up) - if dvr: + if up and dvr: # Assert that we are setting the external_mac in the NAT table self.nb_ovn.db_set.assert_called_once_with( 'NAT', fake_nat_uuid, ('external_mac', fake_ext_mac_key)) - self.nb_ovn.db_clear.assert_not_called() else: - self.nb_ovn.db_set.assert_not_called() - # Assert that we are cleaning the external_mac from the NAT table - self.nb_ovn.db_clear.assert_called_once_with( - 'NAT', fake_nat_uuid, 'external_mac') + if dvr: + self.nb_ovn.db_set.assert_not_called() + else: + # Assert that we are cleaning the external_mac from the NAT + # 
table + self.nb_ovn.db_clear.assert_called_once_with( + 'NAT', fake_nat_uuid, 'external_mac') - def test__update_dnat_entry_if_needed_dvr(self): + def test__update_dnat_entry_if_needed_up_dvr(self): self._test__update_dnat_entry_if_needed() - def test__update_dnat_entry_if_needed_no_dvr(self): + def test__update_dnat_entry_if_needed_up_no_dvr(self): self._test__update_dnat_entry_if_needed(dvr=False) + def test__update_dnat_entry_if_needed_down_dvr(self): + self._test__update_dnat_entry_if_needed(up=False) + + def test__update_dnat_entry_if_needed_down_no_dvr(self): + self._test__update_dnat_entry_if_needed(up=False, dvr=False) + @mock.patch('neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.' 'ovn_client.OVNClient._get_router_ports') def _test_update_network_fragmentation(self, new_mtu, expected_opts, grps): diff -Nru neutron-20.4.0/neutron/tests/unit/plugins/ml2/test_plugin.py neutron-20.5.0/neutron/tests/unit/plugins/ml2/test_plugin.py --- neutron-20.4.0/neutron/tests/unit/plugins/ml2/test_plugin.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/plugins/ml2/test_plugin.py 2023-11-23 09:44:10.000000000 +0000 @@ -55,6 +55,7 @@ from neutron.db import provisioning_blocks from neutron.db import securitygroups_db as sg_db from neutron.db import segments_db +from neutron.ipam import driver from neutron.objects import base as base_obj from neutron.objects import ports as port_obj from neutron.objects import router as l3_obj @@ -1737,6 +1738,39 @@ ports_out = self.plugin.create_port_bulk(ctx, ports_in) self.assertEqual(edo, ports_out[0]['extra_dhcp_opts']) + def test_create_ports_bulk_with_wrong_fixed_ips(self): + cidr = '10.0.10.0/24' + with self.network() as net: + with self.subnet(net, cidr=cidr) as snet: + net_id = net['network']['id'] + data = [{'network_id': net_id, + 'fixed_ips': [{'subnet_id': snet['subnet']['id'], + 'ip_address': '10.0.10.100'}], + 'tenant_id': snet['subnet']['tenant_id'] + }, + {'network_id': net_id, + 'fixed_ips': [{'subnet_id': snet['subnet']['id'], + 'ip_address': '10.0.20.101'}], + 'tenant_id': snet['subnet']['tenant_id'] + }] + res = self._create_bulk_from_list(self.fmt, 'port', + data, as_admin=True) + self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) + self.assertIn('IP address 10.0.20.101 is not a valid IP for ' + 'the specified subnet.', + res.json['NeutronError']['message']) + + ipam_driver = driver.Pool.get_instance(None, self.context) + ipam_allocator = ipam_driver.get_allocator([cidr]) + with db_api.CONTEXT_READER.using(self.context): + ipam_subnet = ipam_allocator._driver.get_subnet( + snet['subnet']['id']) + allocations = ipam_subnet.subnet_manager.list_allocations( + self.context) + # There are no leftovers (e.g.: 10.0.10.100) in the + # "IpamAllocation" registers + self.assertEqual([], allocations) + def test_delete_port_no_notify_in_disassociate_floatingips(self): ctx = context.get_admin_context() plugin = directory.get_plugin() diff -Nru neutron-20.4.0/neutron/tests/unit/services/logapi/drivers/ovn/test_driver.py neutron-20.5.0/neutron/tests/unit/services/logapi/drivers/ovn/test_driver.py --- neutron-20.4.0/neutron/tests/unit/services/logapi/drivers/ovn/test_driver.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/services/logapi/drivers/ovn/test_driver.py 2023-11-23 09:44:09.000000000 +0000 @@ -21,6 +21,7 @@ from neutron.common.ovn import constants as ovn_const from neutron.common.ovn import utils as ovn_utils +from neutron.objects import securitygroup as sg_obj from 
neutron.services.logapi.drivers.ovn import driver as ovn_driver from neutron.tests import base from neutron.tests.unit import fake_resources @@ -92,6 +93,15 @@ meter_band_obj_dict = {**meter_band_defaults_dict, **kwargs} return mock.Mock(**meter_band_obj_dict) + def _fake_meter_band_stateless(self, **kwargs): + meter_band_defaults_dict = { + 'uuid': 'tb_stateless', + 'rate': int(self.fake_cfg_network_log.rate_limit / 2), + 'burst_size': int(self.fake_cfg_network_log.burst_limit / 2), + } + meter_band_obj_dict = {**meter_band_defaults_dict, **kwargs} + return mock.Mock(**meter_band_obj_dict) + class TestOVNDriver(TestOVNDriverBase): def test_create(self): @@ -119,18 +129,16 @@ self.__dict__ = {**acl_defaults_dict, **acl_dict} def _fake_pg_dict(self, **kwargs): + uuid = uuidutils.generate_uuid() pg_defaults_dict = { - "name": ovn_utils.ovn_port_group_name(uuidutils.generate_uuid()), + "name": ovn_utils.ovn_port_group_name(uuid), + "external_ids": {ovn_const.OVN_SG_EXT_ID_KEY: uuid}, "acls": [] } return {**pg_defaults_dict, **kwargs} def _fake_pg(self, **kwargs): - pg_defaults_dict = { - "name": ovn_utils.ovn_port_group_name(uuidutils.generate_uuid()), - "acls": [] - } - pg_dict = {**pg_defaults_dict, **kwargs} + pg_dict = self._fake_pg_dict(**kwargs) return mock.Mock(**pg_dict) def _fake_log_obj(self, **kwargs): @@ -180,7 +188,9 @@ pgs = self._log_driver._pgs_from_log_obj(self.context, log_obj) mock_pgs_all.assert_not_called() self.assertEqual(2, self._nb_ovn.lookup.call_count) - self.assertEqual([{'acls': [], 'name': pg.name}], pgs) + self.assertEqual([{'acls': [], + 'external_ids': pg.external_ids, + 'name': pg.name}], pgs) def test__pgs_from_log_obj_pg(self): with mock.patch.object(self._log_driver, '_pgs_all', @@ -194,7 +204,9 @@ mock_pgs_all.assert_not_called() self._nb_ovn.lookup.assert_called_once_with( "Port_Group", ovn_utils.ovn_port_group_name('resource_id')) - self.assertEqual([{'acls': [], 'name': pg.name}], pgs) + self.assertEqual([{'acls': [], + 'external_ids': pg.external_ids, + 'name': pg.name}], pgs) def test__pgs_from_log_obj_port(self): with mock.patch.object(self._log_driver, '_pgs_all', @@ -211,7 +223,9 @@ self._nb_ovn.lookup.assert_called_once_with("Port_Group", pg_name) self.fake_get_sgs_attached_to_port.assert_called_once_with( self.context, 'target_id') - self.assertEqual([{'acls': [], 'name': pg.name}], pgs) + self.assertEqual([{'acls': [], + 'external_ids': pg.external_ids, + 'name': pg.name}], pgs) @mock.patch.object(ovn_driver.LOG, 'info') def test__remove_acls_log(self, m_info): @@ -287,7 +301,8 @@ self._nb_ovn.db_set.call_count) @mock.patch.object(ovn_driver.LOG, 'info') - def test__set_acls_log(self, m_info): + @mock.patch.object(sg_obj.SecurityGroup, 'get_sg_by_id') + def test__set_acls_log(self, get_sg, m_info): pg_dict = self._fake_pg_dict(acls=['acl1', 'acl2', 'acl3', 'acl4']) log_name = 'test_obj_name' used_name = 'test_used_name' @@ -297,10 +312,14 @@ return self._fake_acl() return self._fake_acl(name=used_name) + sg = fake_resources.FakeSecurityGroup.create_one_security_group( + attrs={'stateful': True}) + get_sg.return_value = sg self._nb_ovn.lookup.side_effect = _mock_lookup actions_enabled = self._log_driver._acl_actions_enabled( self._fake_log_obj(event=log_const.ALL_EVENT)) - self._log_driver._set_acls_log([pg_dict], self._nb_ovn.transaction, + self._log_driver._set_acls_log([pg_dict], self.context, + self._nb_ovn.transaction, actions_enabled, log_name) info_args, _info_kwargs = m_info.call_args_list[0] self.assertIn('Set %d (out of %d visited) 
ACLs for network log %s', diff -Nru neutron-20.4.0/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py neutron-20.5.0/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py --- neutron-20.4.0/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py 2023-07-28 09:15:22.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/services/trunk/drivers/ovn/test_trunk_driver.py 2023-11-23 09:44:10.000000000 +0000 @@ -122,7 +122,6 @@ mock.call(mock.ANY, {'profile': {'parent_name': trunk.port_id, 'tag': s_port.segmentation_id}, - 'host': mock.ANY, 'vif_type': portbindings.VIF_TYPE_OVS}, host=mock.ANY, port_id=s_port.port_id) @@ -153,7 +152,6 @@ self.mock_update_pb.assert_called_once_with( mock.ANY, {'profile': {'parent_name': self.sub_port_1.trunk_id, 'tag': self.sub_port_1.segmentation_id}, - 'host': 'foo.com', 'vif_type': portbindings.VIF_TYPE_OVS}, host='foo.com', port_id=self.sub_port_1.port_id) self.mock_port_update.assert_not_called() diff -Nru neutron-20.4.0/neutron/tests/unit/services/trunk/test_plugin.py neutron-20.5.0/neutron/tests/unit/services/trunk/test_plugin.py --- neutron-20.4.0/neutron/tests/unit/services/trunk/test_plugin.py 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/neutron/tests/unit/services/trunk/test_plugin.py 2023-11-23 09:44:09.000000000 +0000 @@ -162,7 +162,8 @@ resources.TRUNK, event, self.trunk_plugin, payload=mock.ANY) payload = callback.mock_calls[0][2]['payload'] self.assertEqual(self.context, payload.context) - self.assertEqual(trunk_obj, payload.latest_state) + self.assertEqual(trunk_obj, payload.states[0]) + self.assertEqual(parent_port['port']['id'], payload.states[1].id) self.assertEqual(trunk['id'], payload.resource_id) def test_delete_trunk_notify_after_delete(self): diff -Nru neutron-20.4.0/neutron.egg-info/PKG-INFO neutron-20.5.0/neutron.egg-info/PKG-INFO --- neutron-20.4.0/neutron.egg-info/PKG-INFO 2023-07-28 09:15:51.000000000 +0000 +++ neutron-20.5.0/neutron.egg-info/PKG-INFO 2023-11-23 09:44:54.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.2 Name: neutron -Version: 20.4.0 +Version: 20.5.0 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack diff -Nru neutron-20.4.0/neutron.egg-info/SOURCES.txt neutron-20.5.0/neutron.egg-info/SOURCES.txt --- neutron-20.4.0/neutron.egg-info/SOURCES.txt 2023-07-28 09:15:51.000000000 +0000 +++ neutron-20.5.0/neutron.egg-info/SOURCES.txt 2023-11-23 09:44:54.000000000 +0000 @@ -2593,6 +2593,7 @@ releasenotes/notes/bug-1953165-6e848ea2c0398f56.yaml releasenotes/notes/bug-1986003-9bf5ca04f9304336.yaml releasenotes/notes/bug-2003455-b502cc637427560e.yaml +releasenotes/notes/bug-2022914-edbf1ea3514596b8.yaml releasenotes/notes/bug-7dc8245da8e0e571.yaml releasenotes/notes/bug-817525-eef68687dafa97fd.yaml releasenotes/notes/bump-default-quotas-810570badb378c50.yaml @@ -2644,6 +2645,7 @@ releasenotes/notes/deprecate_prevent_arp_spoofing_option-a09e673fc8f9fee4.yaml releasenotes/notes/deprecated-driver-e368e0befc9bee4c.yaml releasenotes/notes/designate-driver-keystonev3-8e70d152e84388e0.yaml +releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml releasenotes/notes/dhcp-bulk-updates-0150b764bb1b165f.yaml releasenotes/notes/dhcp-dnsmasq-dhcp-host-addr6-list-support-45d104b3f7ce220e.yaml releasenotes/notes/dhcp-domain-removed-cc5bc6e2129fdf7f.yaml @@ -2706,6 +2708,7 @@ releasenotes/notes/force-arp-responder-true-for-dvr-5aabbfa51945dd5a.yaml releasenotes/notes/gateway-rate-limit-905bee1ed60c6b8e.yaml 
releasenotes/notes/get_standard_device_mappings_for_mechdriver-bc039d478ea0b162.yaml +releasenotes/notes/hash-ring-cleanup-1079d2375082cebe.yaml releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml releasenotes/notes/hyperv-security-group-driver-fdbe0c0c292a1505.yaml releasenotes/notes/ib-dhcp-allocation-fix-a4ebe8b55bb2c065.yaml @@ -2760,6 +2763,7 @@ releasenotes/notes/oslo-reports-166a169037bf64f2.yaml releasenotes/notes/oslo.messaging.notify.drivers-abb0d17b9e1bd470.yaml releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml +releasenotes/notes/ovn-add-extension-uplink-status-propagation-4c232954f8b4f0ef.yaml releasenotes/notes/ovn-chassis-other-config-7db15b9d10bf7f04.yaml releasenotes/notes/ovn-default-dns-servers-ipv6-subnets-f2d525abc70b01b3.yaml releasenotes/notes/ovn-driver-adds-support-for-vlan-transparency-3ac9d965f5093a15.yaml @@ -2767,6 +2771,7 @@ releasenotes/notes/ovn-igmp-snooping-support-1a6ec8e703311fce.yaml releasenotes/notes/ovn-limit-one-physnet-per-bridge-188285955a5ea124.yaml releasenotes/notes/ovn-mcast-flood-reports-80fb529120f2af1c.yaml +releasenotes/notes/ovn-mcast_flood_reports-4eee20856ccfc7d7.yaml releasenotes/notes/ovn-metadata-workers-fa8a2019f34bd572.yaml releasenotes/notes/ovn-metadata_workers-1121334593ce9829.yaml releasenotes/notes/ovn-network-az-c4ee9a4089872818.yaml @@ -2779,6 +2784,7 @@ releasenotes/notes/ovn-support-stateless-sg-mandatory-bdeb1bc626decc51.yaml releasenotes/notes/ovn-support-virtual-ports-3da6dc89937a63c7.yaml releasenotes/notes/ovn-supports-Port_Group-96fc1a89e2da163d.yaml +releasenotes/notes/ovn-trunk-check-parent-port-eeca2eceaca9d158.yaml releasenotes/notes/ovn-update-vlan-id-749d8f17999243f5.yaml releasenotes/notes/ovn-use-stateless-nat-for-fips-e764c4ece4024be1.yaml releasenotes/notes/ovnmeta-namespaces-include-network-name-e6e4e5f6ff69e7ed.yaml @@ -2797,6 +2803,7 @@ releasenotes/notes/port-device-profile-extension-30ffdaf6a89b89dc.yaml releasenotes/notes/port-mac-address-regenerate-312978c834abaa52.yaml releasenotes/notes/port-resource-request-groups-516820eed2fc659b.yaml +releasenotes/notes/port_bulk_creation_no_ipamallocation_leftovers-9d72cc5f616f51e4.yaml releasenotes/notes/pps-config-ovs-036b5940694f786c.yaml releasenotes/notes/precise-agent-state-transfer-67c771cb1ee04dd0.yaml releasenotes/notes/project_id-d5ea7a42be428230.yaml diff -Nru neutron-20.4.0/neutron.egg-info/pbr.json neutron-20.5.0/neutron.egg-info/pbr.json --- neutron-20.4.0/neutron.egg-info/pbr.json 2023-07-28 09:15:51.000000000 +0000 +++ neutron-20.5.0/neutron.egg-info/pbr.json 2023-11-23 09:44:54.000000000 +0000 @@ -1 +1 @@ -{"git_version": "d0c54786fa", "is_release": true} \ No newline at end of file +{"git_version": "bef1f7a486", "is_release": true} \ No newline at end of file diff -Nru neutron-20.4.0/releasenotes/notes/bug-2022914-edbf1ea3514596b8.yaml neutron-20.5.0/releasenotes/notes/bug-2022914-edbf1ea3514596b8.yaml --- neutron-20.4.0/releasenotes/notes/bug-2022914-edbf1ea3514596b8.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/bug-2022914-edbf1ea3514596b8.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + [`bug 2022914 <https://bugs.launchpad.net/neutron/+bug/2022914>`_] + Neutron-API supports using relays as the southbound connection in an + ML2/OVN setup. Previously, the maintenance worker of the API required a + leader_only connection; this requirement has been removed.
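The leader_only change above is deployment-facing: once the maintenance worker no longer insists on reaching the RAFT leader, neutron-server's southbound connection can terminate at OVSDB relays. A minimal neutron.conf sketch of such a setup, assuming an ML2/OVN deployment with two relays (the endpoint addresses below are hypothetical; ``ovn_sb_connection`` and ``ovn_nb_connection`` are the existing [ovn] option names):

    [ovn]
    # Hypothetical relay endpoints; multiple remotes are expressed as a
    # comma-separated OVSDB connection string.
    ovn_sb_connection = tcp:192.0.2.10:6642,tcp:192.0.2.11:6642
    # The northbound connection is unaffected by this fix and still points
    # at the NB database itself.
    ovn_nb_connection = tcp:192.0.2.1:6641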
diff -Nru neutron-20.4.0/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml neutron-20.5.0/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml --- neutron-20.4.0/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/dhcp-agent-ovn-metadata-port-33a654ccb9554c65.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixed the scenario where the DHCP agent is deployed in conjunction with + the OVN metadata agent in order to serve metadata for baremetal nodes. + In this scenario, the DHCP agent would not set the route needed for the + OVN metadata agent service, resulting in baremetal nodes not being able + to query the metadata service. For more information, see + `bug 1982569 <https://bugs.launchpad.net/neutron/+bug/1982569>`_. diff -Nru neutron-20.4.0/releasenotes/notes/hash-ring-cleanup-1079d2375082cebe.yaml neutron-20.5.0/releasenotes/notes/hash-ring-cleanup-1079d2375082cebe.yaml --- neutron-20.4.0/releasenotes/notes/hash-ring-cleanup-1079d2375082cebe.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/hash-ring-cleanup-1079d2375082cebe.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,6 @@ +--- +other: + - | + Adds a maintenance task that runs once a day and is responsible for + cleaning up Hash Ring nodes that haven't been updated in 5 days or + more. See LP #2033281 for more information. \ No newline at end of file diff -Nru neutron-20.4.0/releasenotes/notes/ovn-add-extension-uplink-status-propagation-4c232954f8b4f0ef.yaml neutron-20.5.0/releasenotes/notes/ovn-add-extension-uplink-status-propagation-4c232954f8b4f0ef.yaml --- neutron-20.4.0/releasenotes/notes/ovn-add-extension-uplink-status-propagation-4c232954f8b4f0ef.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/ovn-add-extension-uplink-status-propagation-4c232954f8b4f0ef.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,7 @@ +--- +other: + - | + Added the missing extension ``uplink-status-propagation`` to the ML2/OVN + mechanism driver. This extension is used by the ML2/SR-IOV mechanism + driver, which can be loaded together with ML2/OVN. It is now possible to + create ports with the "uplink-status-propagation" flag defined. diff -Nru neutron-20.4.0/releasenotes/notes/ovn-mcast_flood_reports-4eee20856ccfc7d7.yaml neutron-20.5.0/releasenotes/notes/ovn-mcast_flood_reports-4eee20856ccfc7d7.yaml --- neutron-20.4.0/releasenotes/notes/ovn-mcast_flood_reports-4eee20856ccfc7d7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/ovn-mcast_flood_reports-4eee20856ccfc7d7.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + For OVN versions v22.09.0 and above, the ``mcast_flood_reports`` option + is now set to ``false`` on all ports except "localnet" types. In the past, + this option was set to ``true`` as a workaround for a bug in the core OVN + multicast implementation. diff -Nru neutron-20.4.0/releasenotes/notes/ovn-trunk-check-parent-port-eeca2eceaca9d158.yaml neutron-20.5.0/releasenotes/notes/ovn-trunk-check-parent-port-eeca2eceaca9d158.yaml --- neutron-20.4.0/releasenotes/notes/ovn-trunk-check-parent-port-eeca2eceaca9d158.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/ovn-trunk-check-parent-port-eeca2eceaca9d158.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,6 @@ +--- +fixes: + - | + The ML2/OVN trunk driver now prevents trunk creation if the parent port + is already bound.
Likewise, if a parent port that is used in a trunk + is bound, the trunk cannot be deleted. diff -Nru neutron-20.4.0/releasenotes/notes/port_bulk_creation_no_ipamallocation_leftovers-9d72cc5f616f51e4.yaml neutron-20.5.0/releasenotes/notes/port_bulk_creation_no_ipamallocation_leftovers-9d72cc5f616f51e4.yaml --- neutron-20.4.0/releasenotes/notes/port_bulk_creation_no_ipamallocation_leftovers-9d72cc5f616f51e4.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-20.5.0/releasenotes/notes/port_bulk_creation_no_ipamallocation_leftovers-9d72cc5f616f51e4.yaml 2023-11-23 09:44:09.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + During port bulk creation, if an IPAM allocation fails (for example, if + the requested IP address is outside the subnet CIDR), the IPAM allocations + already created are deleted before the exception is raised. Fixes bug + `2039550 <https://bugs.launchpad.net/neutron/+bug/2039550>`_. diff -Nru neutron-20.4.0/zuul.d/base.yaml neutron-20.5.0/zuul.d/base.yaml --- neutron-20.4.0/zuul.d/base.yaml 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/zuul.d/base.yaml 2023-11-23 09:44:10.000000000 +0000 @@ -151,7 +151,7 @@ - job: name: neutron-linuxbridge-tempest-plugin-scenario-nftables - parent: neutron-tempest-plugin-scenario-linuxbridge + parent: neutron-tempest-plugin-scenario-linuxbridge-yoga pre-run: playbooks/install_nftables.yaml vars: devstack_local_conf: @@ -162,7 +162,7 @@ - job: name: neutron-ovs-tempest-plugin-scenario-iptables_hybrid-nftables - parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid + parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga pre-run: playbooks/install_nftables.yaml vars: devstack_local_conf: diff -Nru neutron-20.4.0/zuul.d/tempest-multinode.yaml neutron-20.5.0/zuul.d/tempest-multinode.yaml --- neutron-20.4.0/zuul.d/tempest-multinode.yaml 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/zuul.d/tempest-multinode.yaml 2023-11-23 09:44:10.000000000 +0000 @@ -39,9 +39,9 @@ vars: tox_envlist: integrated-network devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge Q_AGENT: openvswitch @@ -172,9 +172,9 @@ vars: tox_envlist: integrated-network devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge Q_AGENT: openvswitch @@ -266,9 +266,9 @@ devstack_plugins: neutron: https://opendev.org/openstack/neutron.git devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge Q_AGENT: openvswitch @@ -383,9 +383,9 @@ ovn: enable_distributed_floating_ip: True devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME:
cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz Q_AGENT: ovn ML2_L3_PLUGIN: ovn-router,trunk Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger diff -Nru neutron-20.4.0/zuul.d/tempest-singlenode.yaml neutron-20.5.0/zuul.d/tempest-singlenode.yaml --- neutron-20.4.0/zuul.d/tempest-singlenode.yaml 2023-07-28 09:15:23.000000000 +0000 +++ neutron-20.5.0/zuul.d/tempest-singlenode.yaml 2023-11-23 09:44:10.000000000 +0000 @@ -339,9 +339,9 @@ neutron_plugin_options: is_igmp_snooping_enabled: True devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz Q_AGENT: ovn ML2_L3_PLUGIN: ovn-router,trunk Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger @@ -448,9 +448,9 @@ vars: tox_envlist: integrated-network devstack_localrc: - CIRROS_VERSION: 0.5.1 - DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec - DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz + CIRROS_VERSION: 0.5.2 + DEFAULT_IMAGE_NAME: cirros-0.5.2-x86_64-uec + DEFAULT_IMAGE_FILE_NAME: cirros-0.5.2-x86_64-uec.tar.gz devstack_plugins: neutron: https://opendev.org/openstack/neutron.git devstack_services:
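The port_bulk_creation_no_ipamallocation_leftovers fix above, together with the test_create_ports_bulk_with_wrong_fixed_ips unit test earlier in this diff, amounts to an all-or-nothing allocation pattern. The following self-contained Python sketch illustrates that pattern only; the FakeSubnetAllocator class and its methods are invented for illustration and are not Neutron's actual IPAM driver API:

    class InvalidIpError(Exception):
        """Requested IP is not a valid IP for the specified subnet."""


    class FakeSubnetAllocator:
        """Stand-in allocator; the real IPAM driver is far more involved."""

        def __init__(self, prefix):
            self.prefix = prefix          # e.g. '10.0.10.'
            self.allocations = set()      # stands in for IpamAllocation rows

        def allocate(self, ip):
            if not ip.startswith(self.prefix):
                raise InvalidIpError(
                    'IP address %s is not a valid IP for the specified '
                    'subnet.' % ip)
            self.allocations.add(ip)

        def deallocate(self, ip):
            self.allocations.discard(ip)


    def bulk_allocate(allocator, requested_ips):
        """Allocate every requested IP or none of them."""
        done = []
        try:
            for ip in requested_ips:
                allocator.allocate(ip)
                done.append(ip)
        except InvalidIpError:
            # Undo the allocations already created before re-raising, so a
            # failed bulk request leaves no leftovers (e.g. 10.0.10.100).
            for ip in done:
                allocator.deallocate(ip)
            raise


    if __name__ == '__main__':
        alloc = FakeSubnetAllocator('10.0.10.')
        try:
            bulk_allocate(alloc, ['10.0.10.100', '10.0.20.101'])
        except InvalidIpError as exc:
            print(exc)
        print(sorted(alloc.allocations))  # prints [] -- nothing leaks

This mirrors what the unit test asserts: after the HTTPBadRequest, list_allocations() returns an empty list.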