From 5dc4fe8fd5bd57cae03bfdedd0ed80f8fc84efd5 Mon Sep 17 00:00:00 2001
From: Yuliia Miroshnychenko
Date: Tue, 3 Dec 2024 16:27:16 +0100
Subject: [PATCH] [TEST]: Improvement: Switch: New interaction approach

---
 .../helpers/KildaProperties.groovy            |  37 +
 .../helpers/factory/FlowFactory.groovy        |  19 +
 .../helpers/factory/SwitchFactory.groovy      |  51 ++
 .../helpers/model/PortExtended.groovy         | 130 ++++
 .../helpers/model/SwitchExtended.groovy       | 677 ++++++++++++++++++
 .../helpers/model/SwitchLagPorts.groovy       |  56 ++
 .../helpers/model/SwitchMeters.groovy         |  28 +-
 .../helpers/model/SwitchRules.groovy          |  56 +-
 .../functionaltests/BaseSpecification.groovy  |   3 +
 .../spec/switches/DefaultRulesSpec.groovy     | 134 ++--
 .../DefaultRulesValidationSpec.groovy         |  13 +-
 .../spec/switches/FlowRulesSpec.groovy        | 279 +++-----
 .../spec/switches/LagPortSpec.groovy          | 328 ++++-----
 .../spec/switches/MetersSpec.groovy           | 179 ++---
 .../spec/switches/SwitchesSpec.groovy         |  70 +-
 15 files changed, 1528 insertions(+), 532 deletions(-)
 create mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/KildaProperties.groovy
 create mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/SwitchFactory.groovy
 create mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/PortExtended.groovy
 create mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchExtended.groovy
 create mode 100644 src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchLagPorts.groovy

diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/KildaProperties.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/KildaProperties.groovy
new file mode 100644
index 00000000000..4f860c9a14e
--- /dev/null
+++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/KildaProperties.groovy
@@ -0,0 +1,37 @@
+package org.openkilda.functionaltests.helpers
+
+import org.springframework.beans.factory.annotation.Autowired
+import org.springframework.beans.factory.annotation.Qualifier
+import org.springframework.beans.factory.annotation.Value
+import org.springframework.stereotype.Component
+
+@Component
+class KildaProperties {
+
+    public static int DISCOVERY_EXHAUSTED_INTERVAL
+    public static int ANTIFLAP_MIN
+    public static int ANTIFLAP_COOLDOWN
+    public static int DISCOVERY_TIMEOUT
+    public static double BURST_COEFFICIENT
+    public static String TOPO_DISCO_TOPIC
+    public static Properties PRODUCER_PROPS
+
+    @Autowired
+    KildaProperties(@Value('${discovery.exhausted.interval}') int discoveryExhaustedInterval,
+                    @Value('${antiflap.min}') int antiflapMin,
+                    @Value('${antiflap.cooldown}') int antiflapCooldown,
+                    @Value('${discovery.timeout}') int discoveryTimeout,
+                    @Value('${burst.coefficient}') double burstCoefficient,
+                    @Autowired @Qualifier("kafkaProducerProperties") Properties producerProps,
+                    @Value("#{kafkaTopicsConfig.getTopoDiscoTopic()}") String topoDiscoTopic) {
+
+        DISCOVERY_EXHAUSTED_INTERVAL = discoveryExhaustedInterval
+        ANTIFLAP_MIN = antiflapMin
+        ANTIFLAP_COOLDOWN = antiflapCooldown
+        DISCOVERY_TIMEOUT = discoveryTimeout
+        BURST_COEFFICIENT = burstCoefficient
+        TOPO_DISCO_TOPIC = topoDiscoTopic
+        PRODUCER_PROPS = producerProps
+
+    }
+}
diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/FlowFactory.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/FlowFactory.groovy
index b73ed504322..96b21adc2f8 100644
--- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/FlowFactory.groovy
+++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/FlowFactory.groovy
@@ -7,6 +7,7 @@ import static org.springframework.beans.factory.config.ConfigurableBeanFactory.S
 
 import org.openkilda.functionaltests.helpers.builder.FlowBuilder
 import org.openkilda.functionaltests.helpers.model.FlowExtended
+import org.openkilda.functionaltests.helpers.model.SwitchExtended
 import org.openkilda.functionaltests.helpers.model.SwitchPair
 import org.openkilda.functionaltests.helpers.model.SwitchPortVlan
 import org.openkilda.functionaltests.model.cleanup.CleanupManager
@@ -52,6 +53,14 @@ class FlowFactory {
         return new FlowBuilder(srcSwitch, dstSwitch, northbound, northboundV2, topology,
                 cleanupManager, database, useTraffgenPorts, busyEndpoints)
     }
 
+    FlowBuilder getBuilder(SwitchExtended srcSwitch, SwitchExtended dstSwitch, boolean useTraffgenPorts = true, List busyEndpoints = []) {
+        getBuilder(srcSwitch.sw, dstSwitch.sw, useTraffgenPorts, busyEndpoints)
+    }
+
+    FlowBuilder getSingleSwBuilder(SwitchExtended srcSwitch, boolean useTraffgenPorts = true, List busyEndpoints = []) {
+        getBuilder(srcSwitch.sw, srcSwitch.sw, useTraffgenPorts, busyEndpoints)
+    }
+
     /*
     This method allows random Flow creation on specified switches and waits for it to become UP by default
     or to be in an expected state.
@@ -66,6 +75,16 @@ class FlowFactory {
         return getBuilder(srcSwitch, dstSwitch, useTraffgenPorts, busyEndpoints).build().create(expectedFlowState)
     }
 
+    FlowExtended getRandom(SwitchExtended srcSwitch, SwitchExtended dstSwitch, boolean useTraffgenPorts = true, FlowState expectedFlowState = UP,
+                           List busyEndpoints = []) {
+        getRandom(srcSwitch.sw, dstSwitch.sw, useTraffgenPorts, expectedFlowState, busyEndpoints)
+    }
+
+    FlowExtended getSingleSwRandom(SwitchExtended srcSwitch, boolean useTraffgenPorts = true, FlowState expectedFlowState = UP,
+                                   List busyEndpoints = []) {
+        getRandom(srcSwitch.sw, srcSwitch.sw, useTraffgenPorts, expectedFlowState, busyEndpoints)
+    }
+
     FlowExtended getRandomV1(Switch srcSwitch, Switch dstSwitch, boolean useTraffgenPorts = true, FlowState expectedFlowState = UP,
                              List busyEndpoints = []) {
         return getBuilder(srcSwitch, dstSwitch, useTraffgenPorts, busyEndpoints).build().createV1(expectedFlowState)
diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/SwitchFactory.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/SwitchFactory.groovy
new file mode 100644
index 00000000000..91a37fe7576
--- /dev/null
+++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/factory/SwitchFactory.groovy
@@ -0,0 +1,51 @@
+package org.openkilda.functionaltests.helpers.factory
+
+import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE
+
+import org.openkilda.functionaltests.helpers.model.SwitchExtended
+import org.openkilda.functionaltests.model.cleanup.CleanupManager
+import org.openkilda.model.SwitchId
+import org.openkilda.testing.model.topology.TopologyDefinition
+import org.openkilda.testing.model.topology.TopologyDefinition.Isl
+import org.openkilda.testing.model.topology.TopologyDefinition.Switch
+import org.openkilda.testing.model.topology.TopologyDefinition.TraffGen +import org.openkilda.testing.service.database.Database +import org.openkilda.testing.service.lockkeeper.LockKeeperService +import org.openkilda.testing.service.northbound.NorthboundService +import org.openkilda.testing.service.northbound.NorthboundServiceV2 + +import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Qualifier +import org.springframework.context.annotation.Scope +import org.springframework.stereotype.Component + +@Slf4j +@Component +@Scope(SCOPE_PROTOTYPE) +class SwitchFactory { + + @Autowired @Qualifier("islandNb") + NorthboundService northbound + @Autowired @Qualifier("islandNbV2") + NorthboundServiceV2 northboundV2 + @Autowired + TopologyDefinition topology + @Autowired + Database database + @Autowired + LockKeeperService lockKeeper + @Autowired + CleanupManager cleanupManager + + SwitchExtended get(Switch sw) { + List relatedIsls = topology.getRelatedIsls(sw) + List traffGen = topology.traffGens.findAll{ it.switchConnected.dpId == sw.dpId } + return new SwitchExtended(sw, relatedIsls, traffGen, + northbound, northboundV2, database, lockKeeper, cleanupManager) + } + SwitchExtended get(SwitchId swId) { + def sw = topology.activeSwitches.find { it.dpId == swId } + get(sw) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/PortExtended.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/PortExtended.groovy new file mode 100644 index 00000000000..de23c9a4272 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/PortExtended.groovy @@ -0,0 +1,130 @@ +package org.openkilda.functionaltests.helpers.model + +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.PORT_UP +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_ISL +import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST +import static org.openkilda.testing.Constants.WAIT_OFFSET + +import org.openkilda.functionaltests.helpers.KildaProperties +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.thread.PortBlinker +import org.openkilda.functionaltests.model.cleanup.CleanupAfter +import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.messaging.info.switches.PortDescription +import org.openkilda.model.SwitchId +import org.openkilda.northbound.dto.v2.switches.PortPropertiesDto +import org.openkilda.testing.model.topology.TopologyDefinition.Switch +import org.openkilda.testing.service.northbound.NorthboundService +import org.openkilda.testing.service.northbound.NorthboundServiceV2 + +import com.fasterxml.jackson.annotation.JsonIgnore +import groovy.transform.EqualsAndHashCode +import groovy.transform.ToString +import groovy.util.logging.Slf4j + +@Slf4j +@EqualsAndHashCode(excludes = 'northbound, northboundV2, cleanupManager') +@ToString(includeNames = true, excludes = 'northbound, northboundV2, cleanupManager') +class PortExtended { + + Switch sw + Integer port + + @JsonIgnore + NorthboundService northbound + @JsonIgnore + NorthboundServiceV2 northboundV2 + @JsonIgnore + CleanupManager cleanupManager + + Map, Long> history = [:] + + PortExtended(Switch sw, + Integer portNumber, + NorthboundService northbound, + NorthboundServiceV2 
northboundV2, + CleanupManager cleanupManager) { + this.sw = sw + this.port = portNumber + this.northbound = northbound + this.northboundV2 = northboundV2 + this.cleanupManager = cleanupManager + } + + def portUp() { + def swPort = new Tuple2(sw.dpId, port) + def lastEvent = history.get(swPort) + if (lastEvent) { + Wrappers.silent { //Don't fail hard on this check. In rare cases we may miss the history entry + waitPortIsStable(lastEvent) + } + history.remove(swPort) + } + northbound.portUp(sw.dpId, port) + } + + def safePortUp() { + if (northbound.getPort(sw.dpId, port).getState().first() != "LIVE") { + portUp() + } + Wrappers.wait(WAIT_OFFSET) { + assert northbound.getActiveLinks().findAll { + it.source.switchId == sw.dpId && it.source.portNo == port || + it.destination.switchId == sw.dpId && it.destination.portNo == port + }.size() == 2 + } + } + + def portDown(CleanupAfter cleanupAfter = TEST, boolean isNotInScopeOfIslBreak = true) { + if (isNotInScopeOfIslBreak) { + cleanupManager.addAction(PORT_UP, { safePortUp() }, cleanupAfter) + } + def response = northbound.portDown(sw.dpId, port) + sleep(KildaProperties.ANTIFLAP_MIN * 1000) + history.put(new Tuple2(sw.dpId, port), System.currentTimeMillis()) + response + } + + /** + * Wait till the current port is in a stable state (deactivated antiflap) by analyzing its history. + */ + void waitPortIsStable(Long since = 0) { + // '* 2' it takes more time on a hardware env for link via 'a-switch' + Wrappers.wait(KildaProperties.ANTIFLAP_COOLDOWN + WAIT_OFFSET * 2) { + def history = northboundV2.getPortHistory(sw.dpId, port, since, null) + + if (!history.empty) { + def antiflapEvents = history.collect { PortHistoryEvent.valueOf(it.event) }.findAll { + it in [PortHistoryEvent.ANTI_FLAP_ACTIVATED, PortHistoryEvent.ANTI_FLAP_DEACTIVATED] + } + + if (!antiflapEvents.empty) { + assert antiflapEvents.last() == PortHistoryEvent.ANTI_FLAP_DEACTIVATED + } else { + false + } + } else { + false + } + } + } + + def setPortDiscovery(boolean expectedStatus) { + if (!expectedStatus) { + cleanupManager.addAction(RESTORE_ISL, { setPortDiscovery(true) }) + } + return northboundV2.updatePortProperties(sw.dpId, port, new PortPropertiesDto(discoveryEnabled: expectedStatus)) + } + + PortBlinker getPortBlinker(long interval, Properties producerProps) { + new PortBlinker(KildaProperties.PRODUCER_PROPS, KildaProperties.TOPO_DISCO_TOPIC, sw, port, interval) + } + + static def closePortBlinker(PortBlinker blinker) { + blinker?.isRunning() && blinker.stop(true) + } + + PortDescription retrieveDetails() { + northbound.getPort(sw.dpId, port) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchExtended.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchExtended.groovy new file mode 100644 index 00000000000..358ab0aa474 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchExtended.groovy @@ -0,0 +1,677 @@ +package org.openkilda.functionaltests.helpers.model + +import static groovyx.gpars.GParsPool.withPool +import static org.hamcrest.MatcherAssert.assertThat +import static org.hamcrest.Matchers.hasItem +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_SWITCH_MAINTENANCE +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES 
+import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.REVIVE_SWITCH +import static org.openkilda.model.SwitchFeature.KILDA_OVS_PUSH_POP_MATCH_VXLAN +import static org.openkilda.model.SwitchFeature.NOVIFLOW_PUSH_POP_VXLAN +import static org.openkilda.model.cookie.Cookie.ARP_INGRESS_COOKIE +import static org.openkilda.model.cookie.Cookie.ARP_INPUT_PRE_DROP_COOKIE +import static org.openkilda.model.cookie.Cookie.ARP_POST_INGRESS_COOKIE +import static org.openkilda.model.cookie.Cookie.ARP_POST_INGRESS_ONE_SWITCH_COOKIE +import static org.openkilda.model.cookie.Cookie.ARP_POST_INGRESS_VXLAN_COOKIE +import static org.openkilda.model.cookie.Cookie.ARP_TRANSIT_COOKIE +import static org.openkilda.model.cookie.Cookie.CATCH_BFD_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.DROP_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.DROP_VERIFICATION_LOOP_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_INGRESS_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_INPUT_PRE_DROP_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_POST_INGRESS_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_POST_INGRESS_ONE_SWITCH_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_POST_INGRESS_VXLAN_COOKIE +import static org.openkilda.model.cookie.Cookie.LLDP_TRANSIT_COOKIE +import static org.openkilda.model.cookie.Cookie.MULTITABLE_EGRESS_PASS_THROUGH_COOKIE +import static org.openkilda.model.cookie.Cookie.MULTITABLE_INGRESS_DROP_COOKIE +import static org.openkilda.model.cookie.Cookie.MULTITABLE_POST_INGRESS_DROP_COOKIE +import static org.openkilda.model.cookie.Cookie.MULTITABLE_PRE_INGRESS_PASS_THROUGH_COOKIE +import static org.openkilda.model.cookie.Cookie.MULTITABLE_TRANSIT_DROP_COOKIE +import static org.openkilda.model.cookie.Cookie.ROUND_TRIP_LATENCY_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_TURNING_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_VXLAN_TURNING_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_ISL_RTT_OUTPUT_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_ISL_RTT_TURNING_COOKIE +import static org.openkilda.model.cookie.Cookie.VERIFICATION_BROADCAST_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.VERIFICATION_UNICAST_RULE_COOKIE +import static org.openkilda.model.cookie.Cookie.VERIFICATION_UNICAST_VXLAN_RULE_COOKIE +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + +import org.openkilda.functionaltests.helpers.KildaProperties +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.model.cleanup.CleanupAfter +import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.messaging.info.event.IslChangeType +import org.openkilda.messaging.info.event.SwitchChangeType +import org.openkilda.messaging.payload.flow.FlowPayload +import org.openkilda.model.MeterId +import org.openkilda.model.SwitchFeature +import org.openkilda.model.SwitchId +import org.openkilda.model.cookie.Cookie +import org.openkilda.model.cookie.CookieBase.CookieType +import org.openkilda.model.cookie.PortColourCookie +import org.openkilda.model.cookie.ServiceCookie +import 
org.openkilda.model.cookie.ServiceCookie.ServiceCookieTag +import org.openkilda.northbound.dto.v1.switches.SwitchDto +import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto +import org.openkilda.northbound.dto.v1.switches.SwitchSyncResult +import org.openkilda.northbound.dto.v2.switches.SwitchDtoV2 +import org.openkilda.northbound.dto.v2.switches.SwitchFlowsPerPortResponse +import org.openkilda.northbound.dto.v2.switches.SwitchLocationDtoV2 +import org.openkilda.northbound.dto.v2.switches.SwitchPatchDto +import org.openkilda.testing.model.topology.TopologyDefinition.Isl +import org.openkilda.testing.model.topology.TopologyDefinition.Switch +import org.openkilda.testing.model.topology.TopologyDefinition.SwitchProperties +import org.openkilda.testing.model.topology.TopologyDefinition.TraffGen +import org.openkilda.testing.service.database.Database +import org.openkilda.testing.service.floodlight.model.Floodlight +import org.openkilda.testing.service.floodlight.model.FloodlightConnectMode +import org.openkilda.testing.service.lockkeeper.LockKeeperService +import org.openkilda.testing.service.lockkeeper.model.FloodlightResourceAddress +import org.openkilda.testing.service.northbound.NorthboundService +import org.openkilda.testing.service.northbound.NorthboundServiceV2 +import org.openkilda.testing.service.northbound.payloads.SwitchValidationExtendedResult +import org.openkilda.testing.service.northbound.payloads.SwitchValidationV2ExtendedResult + +import com.fasterxml.jackson.annotation.JsonIdentityInfo +import com.fasterxml.jackson.annotation.JsonIgnore +import com.fasterxml.jackson.annotation.ObjectIdGenerators.PropertyGenerator +import groovy.transform.EqualsAndHashCode +import groovy.transform.Memoized +import groovy.transform.builder.Builder +import groovy.util.logging.Slf4j + +import java.math.RoundingMode + +@Slf4j +@EqualsAndHashCode(excludes = 'northbound, northboundV2, database, lockKeeper, cleanupManager') +@Builder +@JsonIdentityInfo(property = "name", generator = PropertyGenerator.class) +class SwitchExtended { + + //below values are manufacturer-specific and override default Kilda values on firmware level + static NOVIFLOW_BURST_COEFFICIENT = 1.005 // Driven by the Noviflow specification + static CENTEC_MIN_BURST = 1024 // Driven by the Centec specification + static CENTEC_MAX_BURST = 32000 // Driven by the Centec specification + + //Kilda allows user to pass reserved VLAN IDs 1 and 4095 if they want. 
+ static final IntRange KILDA_ALLOWED_VLANS = 1..4095 + + Switch sw + List isls + List traffGens + + @JsonIgnore + NorthboundService northbound + @JsonIgnore + NorthboundServiceV2 northboundV2 + @JsonIgnore + Database database + @JsonIgnore + LockKeeperService lockKeeper + @JsonIgnore + CleanupManager cleanupManager + + SwitchExtended(Switch sw, + List isls, + List traffGens, + NorthboundService northbound, + NorthboundServiceV2 northboundV2, + Database database, + LockKeeperService lockKeeper, + CleanupManager cleanupManager) { + this.sw = sw + + this.isls = isls + this.traffGens = traffGens + this.northbound = northbound + this.northboundV2 = northboundV2 + this.database = database + this.lockKeeper = lockKeeper + this.cleanupManager = cleanupManager + } + + @Override + String toString() { + return String.format("Switch: %s, isls: %s, traffGen(s) on port(s) %s", switchId, isls, traffGens.switchPort) + } + + @JsonIgnore + @Memoized + SwitchRules getRulesManager() { + return new SwitchRules(northbound, database, cleanupManager, sw.dpId) + } + + @JsonIgnore + @Memoized + SwitchMeters getMetersManager() { + return new SwitchMeters(northbound, database, sw.dpId) + } + + @JsonIgnore + @Memoized + SwitchLagPorts getLagManager() { + return new SwitchLagPorts(northboundV2, cleanupManager, sw.dpId) + } + + SwitchId getSwitchId() { + sw.dpId + } + + /** + * + * Get list of switch ports excluding the ports which are busy with ISLs or s42. + */ + @Memoized + List collectAllowedPorts() { + List allPorts = sw.getAllPorts() + allPorts.removeAll(retrieveIslPorts()) + !sw?.prop?.server42Port ?: allPorts.removeAll([sw.prop.server42Port]) + allPorts.unique() + } + /*** + * + * @param useTraffgenPorts allows us to select random TraffGen port for further traffic verification + * @return random port for further interaction + */ + PortExtended retrieveRandomPort(boolean useTraffgenPorts = true) { + List allPorts = useTraffgenPorts ? 
traffGens*.switchPort : collectAllowedPorts() + Integer portNo = allPorts.shuffled().first() + return new PortExtended(sw, portNo, northbound, northboundV2, cleanupManager) + } + + @Memoized + List retrieveIslPorts() { + List islPorts = [] + isls.each { + it?.srcSwitch?.dpId != sw.dpId ?: islPorts.add(it.srcPort) + it?.dstSwitch?.dpId != sw.dpId ?: islPorts.add(it.dstPort) + } + islPorts + } + + @Memoized + PortExtended retrievePort(Integer portNo) { + return new PortExtended(sw, portNo, northbound, northboundV2, cleanupManager) + } + + @Memoized + PortExtended retrieveServer42Port() { + if(sw?.prop?.server42Port) { + assert sw.prop.server42MacAddress && sw.prop.server42Vlan + return new PortExtended(sw, sw.prop.server42Port, northbound, northboundV2, cleanupManager) + } else { + null + } + } + + static SwitchProperties retrieveDummyServer42Props() { + return new SwitchProperties(true, 33, "00:00:00:00:00:00", 1, null) + } + + + @Memoized + SwitchPropertiesDto retrievedCachedSwProps() { + return northboundV2.getAllSwitchProperties().switchProperties.find { it.switchId == sw.dpId } + } + + @Memoized + boolean isVxlanEnabled() { + return retrievedCachedSwProps().supportedTransitEncapsulation + .contains(org.openkilda.model.FlowEncapsulationType.VXLAN.toString().toLowerCase()) + } + + @Memoized + String retrieveDescription() { + northbound.getActiveSwitches().find { it.switchId == sw.dpId }.description + } + + @Memoized + SwitchDto nbFormat() { + northbound.getSwitch(sw.dpId) + } + + String hwSwString() { + "${nbFormat().hardware} ${nbFormat().software}" + } + + String hwSwString(SwitchDtoV2 sw) { + "${sw.hardware} ${sw.software}" + } + + boolean isCentec() { + nbFormat().manufacturer.toLowerCase().contains("centec") + } + + boolean isNoviflow() { + nbFormat().manufacturer.toLowerCase().contains("noviflow") + } + + boolean isVirtual() { + nbFormat().manufacturer.toLowerCase().contains("nicira") + } + + /** + * A hardware with 100GB ports. 
Due to its nature sometimes requires special actions from Kilda + */ + boolean isWb5164() { + nbFormat().hardware =~ "WB5164" + } + + List collectDefaultCookies() { + def swProps = northbound.getSwitchProperties(sw.dpId) + def multiTableRules = [] + def devicesRules = [] + def server42Rules = [] + def vxlanRules = [] + def lacpRules = [] + def toggles = northbound.getFeatureToggles() + multiTableRules = [MULTITABLE_PRE_INGRESS_PASS_THROUGH_COOKIE, MULTITABLE_INGRESS_DROP_COOKIE, + MULTITABLE_POST_INGRESS_DROP_COOKIE, MULTITABLE_EGRESS_PASS_THROUGH_COOKIE, + MULTITABLE_TRANSIT_DROP_COOKIE, LLDP_POST_INGRESS_COOKIE, LLDP_POST_INGRESS_ONE_SWITCH_COOKIE, + ARP_POST_INGRESS_COOKIE, ARP_POST_INGRESS_ONE_SWITCH_COOKIE] + def unifiedCookies = [DROP_RULE_COOKIE, VERIFICATION_BROADCAST_RULE_COOKIE, + VERIFICATION_UNICAST_RULE_COOKIE, DROP_VERIFICATION_LOOP_RULE_COOKIE] + + + boolean isVxlanSupported = sw.features.contains(NOVIFLOW_PUSH_POP_VXLAN) || sw.features.contains(KILDA_OVS_PUSH_POP_MATCH_VXLAN) + + if (isVxlanSupported) { + multiTableRules.addAll([LLDP_POST_INGRESS_VXLAN_COOKIE, ARP_POST_INGRESS_VXLAN_COOKIE]) + vxlanRules << VERIFICATION_UNICAST_VXLAN_RULE_COOKIE + } + + if (swProps.switchLldp) { + devicesRules.addAll([LLDP_INPUT_PRE_DROP_COOKIE, LLDP_TRANSIT_COOKIE, LLDP_INGRESS_COOKIE]) + } + if (swProps.switchArp) { + devicesRules.addAll([ARP_INPUT_PRE_DROP_COOKIE, ARP_TRANSIT_COOKIE, ARP_INGRESS_COOKIE]) + } + + northbound.getSwitchFlows(sw.dpId).each { flow -> + [flow.source, flow.destination].findAll { ep -> ep.datapath == sw.dpId }.each { ep -> + multiTableRules.add(new PortColourCookie(CookieType.MULTI_TABLE_INGRESS_RULES, ep.portNumber).getValue()) + if (swProps.switchLldp || ep.detectConnectedDevices.lldp) { + devicesRules.add(new PortColourCookie(CookieType.LLDP_INPUT_CUSTOMER_TYPE, ep.portNumber).getValue()) + } + if (swProps.switchArp || ep.detectConnectedDevices.arp) { + devicesRules.add(new PortColourCookie(CookieType.ARP_INPUT_CUSTOMER_TYPE, ep.portNumber).getValue()) + } + } + } + + def relatedLinks = northbound.getLinks(sw.dpId, null, null, null) + relatedLinks.each { + if (isVxlanSupported) { + multiTableRules.add(new PortColourCookie(CookieType.MULTI_TABLE_ISL_VXLAN_EGRESS_RULES, it.source.portNo).getValue()) + multiTableRules.add(new PortColourCookie(CookieType.MULTI_TABLE_ISL_VXLAN_TRANSIT_RULES, it.source.portNo).getValue()) + } + multiTableRules.add(new PortColourCookie(CookieType.MULTI_TABLE_ISL_VLAN_EGRESS_RULES, it.source.portNo).getValue()) + multiTableRules.add(new PortColourCookie(CookieType.PING_INPUT, it.source.portNo).getValue()) + } + + boolean doesSwSupportS42 = swProps.server42Port != null && swProps.server42MacAddress != null && swProps.server42Vlan != null + if ((toggles.server42IslRtt && doesSwSupportS42 && + swProps.server42IslRtt in ["ENABLED", "AUTO"] && !sw.features.contains(SwitchFeature.NOVIFLOW_COPY_FIELD))) { + devicesRules.add(SERVER_42_ISL_RTT_TURNING_COOKIE) + devicesRules.add(SERVER_42_ISL_RTT_OUTPUT_COOKIE) + relatedLinks.each { + devicesRules.add(new PortColourCookie(CookieType.SERVER_42_ISL_RTT_INPUT, it.source.portNo).getValue()) + } + } + + if (swProps.server42FlowRtt) { + server42Rules << SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE + if (isVxlanSupported) { + server42Rules << SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE + } + } + if (toggles.server42FlowRtt) { + if (sw.features.contains(SwitchFeature.NOVIFLOW_SWAP_ETH_SRC_ETH_DST) || sw.features.contains(SwitchFeature.KILDA_OVS_SWAP_FIELD)) { + server42Rules << SERVER_42_FLOW_RTT_TURNING_COOKIE + 
server42Rules << SERVER_42_FLOW_RTT_VXLAN_TURNING_COOKIE + } + } + + def lacpPorts = northboundV2.getLagLogicalPort(sw.dpId).findAll { it.lacpReply } + if (!lacpPorts.isEmpty()) { + lacpRules << new ServiceCookie(ServiceCookieTag.DROP_SLOW_PROTOCOLS_LOOP_COOKIE).getValue() + lacpPorts.each { + lacpRules << new PortColourCookie(CookieType.LACP_REPLY_INPUT, it.logicalPortNumber).getValue() + } + } + if (sw.noviflow && !sw.wb5164) { + return ([CATCH_BFD_RULE_COOKIE, ROUND_TRIP_LATENCY_RULE_COOKIE] + + unifiedCookies + vxlanRules + multiTableRules + devicesRules + server42Rules + lacpRules) + } else if ((sw.noviflow || sw.nbFormat().manufacturer == "E") && sw.wb5164) { + return ([CATCH_BFD_RULE_COOKIE] + + unifiedCookies + vxlanRules + multiTableRules + devicesRules + server42Rules + lacpRules) + } else if (sw.ofVersion == "OF_12") { + return [VERIFICATION_BROADCAST_RULE_COOKIE] + } else { + return (unifiedCookies + vxlanRules + multiTableRules + devicesRules + server42Rules + lacpRules) + } + } + + List collectDefaultMeters() { + if (sw.ofVersion == "OF_12") { + return [] + } + def swProps = northbound.getSwitchProperties(sw.dpId) + List result = [] + result << MeterId.createMeterIdForDefaultRule(VERIFICATION_BROADCAST_RULE_COOKIE) //2 + result << MeterId.createMeterIdForDefaultRule(VERIFICATION_UNICAST_RULE_COOKIE) //3 + if (retrieveFeaturesFromDb().contains(NOVIFLOW_PUSH_POP_VXLAN) || retrieveFeaturesFromDb().contains(KILDA_OVS_PUSH_POP_MATCH_VXLAN)) { + result << MeterId.createMeterIdForDefaultRule(VERIFICATION_UNICAST_VXLAN_RULE_COOKIE) //7 + } + result << MeterId.createMeterIdForDefaultRule(LLDP_POST_INGRESS_COOKIE) //16 + result << MeterId.createMeterIdForDefaultRule(LLDP_POST_INGRESS_ONE_SWITCH_COOKIE) //18 + result << MeterId.createMeterIdForDefaultRule(ARP_POST_INGRESS_COOKIE) //22 + result << MeterId.createMeterIdForDefaultRule(ARP_POST_INGRESS_ONE_SWITCH_COOKIE) //24 + if (retrieveFeaturesFromDb().contains(NOVIFLOW_PUSH_POP_VXLAN) || retrieveFeaturesFromDb().contains(KILDA_OVS_PUSH_POP_MATCH_VXLAN)) { + result << MeterId.createMeterIdForDefaultRule(LLDP_POST_INGRESS_VXLAN_COOKIE) //17 + result << MeterId.createMeterIdForDefaultRule(ARP_POST_INGRESS_VXLAN_COOKIE) //23 + } + if (swProps.switchLldp) { + result << MeterId.createMeterIdForDefaultRule(LLDP_INPUT_PRE_DROP_COOKIE) //13 + result << MeterId.createMeterIdForDefaultRule(LLDP_TRANSIT_COOKIE) //14 + result << MeterId.createMeterIdForDefaultRule(LLDP_INGRESS_COOKIE) //15 + } + if (swProps.switchArp) { + result << MeterId.createMeterIdForDefaultRule(ARP_INPUT_PRE_DROP_COOKIE) //19 + result << MeterId.createMeterIdForDefaultRule(ARP_TRANSIT_COOKIE) //20 + result << MeterId.createMeterIdForDefaultRule(ARP_INGRESS_COOKIE) //21 + } + def lacpPorts = northboundV2.getLagLogicalPort(sw.dpId).findAll { + it.lacpReply + } + if (!lacpPorts.isEmpty()) { + result << MeterId.LACP_REPLY_METER_ID //31 + } + + return result*.getValue().sort() + } + + int collectFlowRelatedRulesAmount(FlowExtended flow) { + def swProps = retrievedCachedSwProps() + sw.dpId == flow.source.switchId + def isSwSrcOrDst = (sw.dpId in [flow.source.switchId, flow.destination.switchId]) + def defaultAmountOfFlowRules = 2 // ingress + egress + def amountOfServer42Rules = 0 + if(swProps.server42FlowRtt && isSwSrcOrDst) { + amountOfServer42Rules +=1 + sw.dpId == flow.source.switchId && flow.source.vlanId && ++amountOfServer42Rules + sw.dpId == flow.destination.switchId && flow.destination.vlanId && ++amountOfServer42Rules + } + + defaultAmountOfFlowRules + 
amountOfServer42Rules + (isSwSrcOrDst ? 1 : 0) + } + + + /** + * The same as direct northbound call, but additionally waits that default rules and default meters are indeed + * reinstalled according to config + */ + SwitchPropertiesDto updateProperties(SwitchPropertiesDto switchProperties) { + cleanupManager.addAction(OTHER, { northbound.updateSwitchProperties(sw.dpId, retrievedCachedSwProps()) }) + def response = northbound.updateSwitchProperties(sw.dpId, switchProperties) + Wrappers.wait(RULES_INSTALLATION_TIME) { + def actualHexCookie = [] + for (long cookie : northbound.getSwitchRules(sw.dpId).flowEntries*.cookie) { + actualHexCookie.add(new Cookie(cookie).toString()) + } + def expectedHexCookie = [] + for (long cookie : collectDefaultCookies()) { + expectedHexCookie.add(new Cookie(cookie).toString()) + } + expectedHexCookie.forEach { item -> + assertThat sw.toString(), actualHexCookie, hasItem(item) + } + + + def actualDefaultMetersIds = northbound.getAllMeters(sw.dpId).meterEntries*.meterId.findAll { + MeterId.isMeterIdOfDefaultRule((long) it) + } + assert actualDefaultMetersIds.sort() == collectDefaultMeters().sort() + } + return response + } + + SwitchDto retrieveDetails() { + northbound.getSwitch(sw.dpId) + } + + List retrieveFlows() { + return northbound.getSwitchFlows(sw.dpId) + } + + List retrieveFlows(Integer port) { + return northbound.getSwitchFlows(sw.dpId, port) + } + + SwitchFlowsPerPortResponse retrieveFlowsV2(List portIds = []){ + return northboundV2.getSwitchFlows(new SwitchId(sw.dpId.id), portIds) + } + + List retrievedUsedPorts() { + return northboundV2.getSwitchFlows(sw.dpId, []).flowsByPort.keySet().asList() + } + + SwitchValidationV2ExtendedResult validate(String include = null, String exclude = null) { + return northboundV2.validateSwitch(sw.dpId, include, exclude) + } + + SwitchValidationExtendedResult validateV1() { + return northbound.validateSwitch(sw.dpId) + } + + Optional validateAndCollectFoundDiscrepancies() { + SwitchValidationV2ExtendedResult validationResponse = northboundV2.validateSwitch(switchId) + return validationResponse.asExpected ? + Optional.empty() as Optional : Optional.of(validationResponse) + } + + SwitchSyncResult synchronize(boolean removeExcess = true) { + return northbound.synchronizeSwitch(sw.dpId, removeExcess) + } + + /** + * Synchronizes the switch and returns an optional SwitchSyncResult if the switch was in an unexpected state + * before the synchronization. + * @return optional SwitchSyncResult if the switch was in an unexpected state + * before the synchronization + */ + Optional synchronizeAndCollectFixedDiscrepancies() { + def syncResponse = synchronize(true) + boolean isAnyDiscrepancyFound = [syncResponse.rules.missing, + syncResponse.rules.misconfigured, + syncResponse.rules.excess, + syncResponse.meters.missing, + syncResponse.meters.misconfigured, + syncResponse.meters.excess].any { !it.isEmpty() } + return isAnyDiscrepancyFound ? 
Optional.of(syncResponse) : Optional.empty() as Optional + } + + + SwitchDto setSwitchMaintenance(boolean maintenance, boolean evacuate) { + cleanupManager.addAction(RESET_SWITCH_MAINTENANCE, { northbound.setSwitchMaintenance(sw.dpId, false, false) }) + northbound.setSwitchMaintenance(sw.dpId, maintenance, evacuate) + } + + def partialUpdate(SwitchPatchDto updateDto) { + def initialSettings = northbound.getSwitch(sw.dpId) + cleanupManager.addAction(RESTORE_SWITCH_PROPERTIES, { northboundV2.partialSwitchUpdate(sw.dpId, convertToUpdateRequest(initialSettings)) }) + return northboundV2.partialSwitchUpdate(sw.dpId, updateDto) + } + + def deleteSwitch(Boolean force = false) { + return northbound.deleteSwitch(sw.dpId, force) + } + + + /*** + * Floodlight interaction + */ + + /** + * Waits for certain switch to appear/disappear from switch list in certain floodlights. + * Useful when knocking out switches + * + * @deprecated use 'northboundV2.getSwitchConnections(switchId)' instead + */ + @Deprecated + void waitForSwitchFlConnection(boolean shouldBePresent, List floodlights) { + Wrappers.wait(WAIT_OFFSET) { + withPool { + floodlights.eachParallel { + assert it.getFloodlightService().getSwitches()*.switchId.contains(sw.dpId) == shouldBePresent + } + } + } + } + + /** + * Disconnect a switch from controller either removing controller settings inside an OVS switch + * or blocking access to floodlight via iptables for a hardware switch. + * + * @param FL mode + * @param waitForRelatedLinks make sure that all switch related ISLs are FAILED + */ + List knockoutSwitch(FloodlightConnectMode mode, boolean waitForRelatedLinks, double timeout = WAIT_OFFSET) { + def blockData = lockKeeper.knockoutSwitch(sw, mode) + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(blockData, true) }, CleanupAfter.TEST) + Wrappers.wait(timeout) { + assert northbound.getSwitch(sw.dpId).state == SwitchChangeType.DEACTIVATED + } + if (waitForRelatedLinks) { + Wrappers.wait(KildaProperties.DISCOVERY_TIMEOUT + timeout * 2) { + def allIsls = northbound.getAllLinks() + isls.each { isl -> + assert allIsls.find { + (it.source.switchId == isl.srcSwitch.dpId && it.source.portNo == isl.srcPort + && it.destination.switchId == isl.dstSwitch.dpId && it.destination.portNo == isl.dstPort) + }.state == IslChangeType.FAILED + } + } + } + + return blockData + } + + List knockoutSwitch(FloodlightConnectMode mode) { + knockoutSwitch(mode, false) + } + + List knockoutSwitch(List regions) { + def blockData = lockKeeper.knockoutSwitch(sw, regions) + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(blockData, true) }, CleanupAfter.TEST) + return blockData + } + + List knockoutSwitchFromStatsController(){ + def blockData = lockKeeper.knockoutSwitch(sw, FloodlightConnectMode.RO) + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(blockData, true) }, CleanupAfter.TEST) + return blockData + } + + /** + * Connect a switch to controller either adding controller settings inside an OVS switch + * or setting proper iptables to allow access to floodlight for a hardware switch. 
+ * + * @param flResourceAddress to register sw in the specific FL regions + * @param waitForRelatedLinks make sure that all switch related ISLs are DISCOVERED + */ + void reviveSwitch(List flResourceAddress, boolean waitForRelatedLinks) { + lockKeeper.reviveSwitch(sw, flResourceAddress) + Wrappers.wait(WAIT_OFFSET) { + assert northbound.getSwitch(sw.dpId).state == SwitchChangeType.ACTIVATED + } + if (waitForRelatedLinks) { + Wrappers.wait(KildaProperties.DISCOVERY_TIMEOUT + WAIT_OFFSET * 2) { + def allIsls = northbound.getAllLinks() + isls.collectMany { [it, it.reversed] }.each { isl -> + assert allIsls.find { + (it.source.switchId == isl.srcSwitch.dpId && it.source.portNo == isl.srcPort + && it.destination.switchId == isl.dstSwitch.dpId && it.destination.portNo == isl.dstPort) + }.state == IslChangeType.DISCOVERED + } + } + } + } + + void reviveSwitch(List flResourceAddress) { + reviveSwitch(flResourceAddress, false) + } + + + /** + * This method calculates expected burst for different types of switches. The common burst equals to + * `rate * BURST_COEFFICIENT`. There are couple exceptions though:
* Noviflow: Does not use our common burst coefficient and overrides it with its own coefficient (see static
+     * variables at the top of the class).
+     * Centec: Follows our burst coefficient policy, except for restrictions on the minimum and maximum burst.
+     * When the calculated burst is higher or lower than the Centec max/min, the max/min burst value is used
+     * instead.
+     *
+     * @param rate meter rate which is used to calculate burst
+     * Needed to get the switch manufacturer and apply special calculations if required
+     * @return the expected burst value for given switch and rate
+     */
+    def retrieveExpectedBurst(long rate) {
+        def burstCoefficient = KildaProperties.BURST_COEFFICIENT
+        if (isNoviflow() || isWb5164()) {
+            return (rate * NOVIFLOW_BURST_COEFFICIENT - 1).setScale(0, RoundingMode.CEILING)
+        } else if (isCentec()) {
+            def burst = (rate * burstCoefficient).toBigDecimal().setScale(0, RoundingMode.FLOOR)
+            if (burst <= CENTEC_MIN_BURST) {
+                return CENTEC_MIN_BURST
+            } else if (burst > CENTEC_MIN_BURST && burst <= CENTEC_MAX_BURST) {
+                return burst
+            } else {
+                return CENTEC_MAX_BURST
+            }
+        } else {
+            return (rate * burstCoefficient).round(0)
+        }
+    }
+
+    /***
+     * Database interaction
+     */
+
+    @Memoized
+    Set retrieveFeaturesFromDb() {
+        database.getSwitch(sw.dpId).features
+    }
+
+    List dumpAllSwitches() {
+        database.dumpAllSwitches().collect { new SwitchDbData((it.data)) }
+    }
+
+    static int randomVlan() {
+        return randomVlan([])
+    }
+
+    static int randomVlan(List exclusions) {
+        return (KILDA_ALLOWED_VLANS - exclusions).shuffled().first()
+    }
+
+    static List availableVlanList(List exclusions) {
+        return (KILDA_ALLOWED_VLANS - exclusions).shuffled()
+    }
+
+    static SwitchPatchDto convertToUpdateRequest(SwitchDto swDetails) {
+        def pop = swDetails.pop ? swDetails.pop : ""
+        def location = new SwitchLocationDtoV2(swDetails.location.latitude, swDetails.location.longitude, "", "", "")
+        !swDetails.location.street ?: location.setStreet(swDetails.location.street)
+        !swDetails.location.city ?: location.setCity(swDetails.location.city)
+        !swDetails.location.country ?: location.setCountry(swDetails.location.country)
+        return new SwitchPatchDto(pop, location)
+    }
+}
diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchLagPorts.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchLagPorts.groovy
new file mode 100644
index 00000000000..052ae887fe0
--- /dev/null
+++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchLagPorts.groovy
@@ -0,0 +1,56 @@
+package org.openkilda.functionaltests.helpers.model
+
+import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_LAG_LOGICAL_PORT
+
+import org.openkilda.functionaltests.model.cleanup.CleanupManager
+import org.openkilda.model.SwitchId
+import org.openkilda.northbound.dto.v2.switches.LagPortRequest
+import org.openkilda.northbound.dto.v2.switches.LagPortResponse
+import org.openkilda.testing.service.northbound.NorthboundServiceV2
+
+import com.fasterxml.jackson.annotation.JsonIgnore
+import groovy.transform.EqualsAndHashCode
+import groovy.transform.ToString
+import groovy.util.logging.Slf4j
+
+@Slf4j
+@EqualsAndHashCode(excludes = 'northboundV2, cleanupManager')
+@ToString(includeNames = true, excludes = 'northboundV2, cleanupManager')
+class SwitchLagPorts {
+
+    SwitchId switchId
+
+    @JsonIgnore
+    NorthboundServiceV2 northboundV2
+    @JsonIgnore
+    CleanupManager cleanupManager
+
+    SwitchLagPorts(NorthboundServiceV2 northboundV2, CleanupManager cleanupManager, SwitchId switchId) {
+        this.switchId
= switchId + this.northboundV2 = northboundV2 + this.cleanupManager = cleanupManager + } + + LagPortResponse createLogicalPort(Set portNumbers, boolean lacpReply = null) { + cleanupManager.addAction(DELETE_LAG_LOGICAL_PORT, { safeDeleteAllLogicalPorts() }) + northboundV2.createLagLogicalPort(switchId, new LagPortRequest(portNumbers , lacpReply)) + } + + List retrieveLogicalPorts() { + northboundV2.getLagLogicalPort(switchId) + } + + LagPortResponse updateLogicalPort(int lagPort, LagPortRequest updateRequest) { + northboundV2.updateLagLogicalPort(switchId, lagPort, updateRequest) + } + + def safeDeleteAllLogicalPorts() { + return northboundV2.getLagLogicalPort(switchId).each { + deleteLogicalPort(it.getLogicalPortNumber()) + } + } + + LagPortResponse deleteLogicalPort(int lagPort) { + northboundV2.deleteLagLogicalPort(switchId, lagPort) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchMeters.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchMeters.groovy index 53b31e42ed4..2fdd5465973 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchMeters.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchMeters.groovy @@ -1,12 +1,18 @@ package org.openkilda.functionaltests.helpers.model - +import org.openkilda.messaging.info.meter.MeterEntry import org.openkilda.model.FlowMeter +import org.openkilda.model.MeterId import org.openkilda.model.SwitchId -import org.openkilda.northbound.dto.v2.haflows.HaFlow +import org.openkilda.northbound.dto.v1.switches.DeleteMeterResult +import org.openkilda.northbound.dto.v1.switches.MeterInfoDto +import org.openkilda.northbound.dto.v2.switches.MeterInfoDtoV2 import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService +import groovy.transform.ToString + +@ToString(includeNames = true, excludes = 'northboundService, database') class SwitchMeters { NorthboundService northboundService Database database @@ -25,7 +31,23 @@ class SwitchMeters { .findAll {it.getSwitchId() == switchId} } + List getMeters() { + northboundService.getAllMeters(switchId).meterEntries + } + void delete(FlowMeter flowMeter) { - northboundService.deleteMeter(switchId, flowMeter.getMeterId().getValue()) + delete(flowMeter.getMeterId().getValue()) + } + + DeleteMeterResult delete(Long meterId) { + northboundService.deleteMeter(switchId, meterId) + } + + boolean isDefaultMeter(MeterInfoDto dto) { + MeterId.isMeterIdOfDefaultRule(dto.getMeterId()) + } + + boolean isDefaultMeter(MeterInfoDtoV2 dto) { + MeterId.isMeterIdOfDefaultRule(dto.getMeterId()) } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy index 711012e695d..a6dffceadc1 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy @@ -1,17 +1,27 @@ package org.openkilda.functionaltests.helpers.model +import static org.openkilda.model.cookie.Cookie.* +import static org.openkilda.model.cookie.CookieBase.CookieType.* + import 
org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.messaging.command.switches.DeleteRulesAction +import org.openkilda.messaging.command.switches.InstallRulesAction import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.FlowMeter import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v1.flows.PathDiscrepancyDto +import org.openkilda.northbound.dto.v1.switches.RulesSyncResult +import org.openkilda.northbound.dto.v1.switches.RulesValidationResult import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH +import groovy.transform.ToString + +@ToString(includeNames = true, excludes = 'northboundService, database, cleanupManager') class SwitchRules { NorthboundService northboundService Database database @@ -40,15 +50,43 @@ class SwitchRules { return getRules().findAll {it.getInstructions().getGoToMeter() == flowMeter.getMeterId().getValue()} } + List installRules(InstallRulesAction installAction) { + northboundService.installSwitchRules(switchId, installAction) + } + + RulesValidationResult validateRules() { + northboundService.validateSwitchRules(switchId) + } + + RulesSyncResult synchronizeRules() { + northboundService.synchronizeSwitchRules(switchId) + } + void delete(FlowRuleEntity flowEntry) { delete(flowEntry.getCookie()) } - void delete(long cookie) { + List delete(long cookie) { cleanupManager.addAction(SYNCHRONIZE_SWITCH, {northboundService.synchronizeSwitch(switchId, true)}) northboundService.deleteSwitchRules(switchId, cookie) } + List delete(DeleteRulesAction deleteAction) { + cleanupManager.addAction(SYNCHRONIZE_SWITCH, {northboundService.synchronizeSwitch(switchId, true)}) + return northboundService.deleteSwitchRules(switchId, deleteAction) + } + + List delete(Integer inPort, Integer inVlan, String encapsulationType, + Integer outPort) { + cleanupManager.addAction(SYNCHRONIZE_SWITCH, {northboundService.synchronizeSwitch(switchId, true)}) + return northboundService.deleteSwitchRules(switchId, inPort, inVlan, encapsulationType, outPort) + } + + List delete(int priority) { + cleanupManager.addAction(SYNCHRONIZE_SWITCH, {northboundService.synchronizeSwitch(switchId, true)}) + return northboundService.deleteSwitchRules(switchId, priority) + } + static Set missingRuleCookieIds(Collection missingRules) { return missingRules.collect {new Long((it.getRule() =~ COOKIE_ID_IN_RULE_DISCREPANCY_STRING_REGEX)[0])} } @@ -60,9 +98,9 @@ class SwitchRules { static long getPingRuleCookie(String encapsulationType) { if (FlowEncapsulationType.TRANSIT_VLAN.toString().equalsIgnoreCase(encapsulationType)) { - return Cookie.VERIFICATION_UNICAST_RULE_COOKIE + return VERIFICATION_UNICAST_RULE_COOKIE } else if (FlowEncapsulationType.VXLAN.toString().equalsIgnoreCase(encapsulationType)) { - return Cookie.VERIFICATION_UNICAST_VXLAN_RULE_COOKIE + return VERIFICATION_UNICAST_VXLAN_RULE_COOKIE } else { throw new IllegalArgumentException("Unknown encapsulation " + encapsulationType) } @@ -73,13 +111,17 @@ class SwitchRules { } List getServer42FlowRules() { - getRules().findAll { new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, - CookieType.SERVER_42_FLOW_RTT_INGRESS] } + getRules().findAll { new Cookie(it.cookie).getType() in [SERVER_42_FLOW_RTT_INPUT, 
SERVER_42_FLOW_RTT_INGRESS] } } List getServer42ISLRules() { - getRules().findAll { (new Cookie(it.cookie).getType() in [CookieType.SERVER_42_ISL_RTT_INPUT] || - it.cookie in [Cookie.SERVER_42_ISL_RTT_TURNING_COOKIE, Cookie.SERVER_42_ISL_RTT_OUTPUT_COOKIE]) } + getRules().findAll { (new Cookie(it.cookie).getType() in [SERVER_42_ISL_RTT_INPUT] || + it.cookie in [SERVER_42_ISL_RTT_TURNING_COOKIE, SERVER_42_ISL_RTT_OUTPUT_COOKIE]) } + } + + List getServer42SwitchRules() { + getRules().findAll { it.cookie in [SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE, + SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE] } } List getRules() { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy index 0dfd063f47c..759ae682fce 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/BaseSpecification.groovy @@ -1,6 +1,7 @@ package org.openkilda.functionaltests import org.openkilda.functionaltests.helpers.IslHelper +import org.openkilda.functionaltests.helpers.factory.SwitchFactory import org.openkilda.functionaltests.helpers.model.ASwitchFlows import org.openkilda.functionaltests.helpers.model.ASwitchPorts import org.openkilda.functionaltests.helpers.model.KildaConfiguration @@ -63,6 +64,8 @@ class BaseSpecification extends Specification { @Autowired @Shared SwitchHelper switchHelper @Autowired @Shared + SwitchFactory switchFactory + @Autowired @Shared PortAntiflapHelper antiflap //component overrides getting existing flows per topology lab(flow, y-flow, ha_flow) @Autowired @Shared @Qualifier("islandNbV2") diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesSpec.groovy index 5334bab26a3..d623c5e04e6 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesSpec.groovy @@ -17,7 +17,6 @@ import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMo import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.command.switches.InstallRulesAction import org.openkilda.messaging.model.SwitchPropertiesDto.RttState @@ -26,15 +25,9 @@ import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.testing.model.topology.TopologyDefinition.Switch -import org.springframework.beans.factory.annotation.Autowired -import spock.lang.Shared class DefaultRulesSpec extends HealthCheckSpecification { - @Autowired - @Shared - SwitchRulesFactory switchRulesFactory - def setupSpec() { deleteAnyFlowsLeftoversIssue5480() } @@ -42,8 +35,9 @@ class DefaultRulesSpec extends HealthCheckSpecification { @Tags([TOPOLOGY_DEPENDENT, SMOKE, SMOKE_SWITCHES]) def "Default rules are installed on switches #sw.hwSwString"() { expect: "Default rules are installed on the switch" - def cookies = 
switchRulesFactory.get(sw.dpId).getRules().cookie - cookies.sort() == sw.defaultCookies.sort() + def swToInteract = switchFactory.get(sw) + def cookies = swToInteract.rulesManager.getRules().cookie + cookies.sort() == swToInteract.collectDefaultCookies().sort() where: sw << getTopology().getActiveSwitches().unique { sw -> sw.description } @@ -52,17 +46,17 @@ class DefaultRulesSpec extends HealthCheckSpecification { @Tags([SMOKE, SWITCH_RECOVER_ON_FAIL]) def "Default rules are installed when a new switch is connected"() { given: "A switch with no rules installed and not connected to the controller" - def sw = topology.activeSwitches.first() - switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.DROP_ALL) - wait(RULES_DELETION_TIME) { assert switchRulesFactory.get(sw.dpId).getRules().isEmpty() } - def blockData = switchHelper.knockoutSwitch(sw, RW) + def swToInteract = switchFactory.get(topology.activeSwitches.first()) + swToInteract.rulesManager.delete(DeleteRulesAction.DROP_ALL) + wait(RULES_DELETION_TIME) { assert swToInteract.rulesManager.getRules().isEmpty() } + def blockData = swToInteract.knockoutSwitch(RW) when: "Connect the switch to the controller" - switchHelper.reviveSwitch(sw, blockData) + swToInteract.reviveSwitch(blockData) then: "Default rules are installed on the switch" wait(RULES_INSTALLATION_TIME) { - assert switchRulesFactory.get(sw.dpId).getRules().cookie.sort() == sw.defaultCookies.sort() + assert swToInteract.rulesManager.getRules().cookie.sort() == swToInteract.collectDefaultCookies().sort() } } @@ -70,21 +64,22 @@ class DefaultRulesSpec extends HealthCheckSpecification { def "Able to install default rule on #sw.hwSwString [install-action=#data.installRulesAction]"( Map data, Switch sw) { given: "A switch without any rules" - def defaultRules = switchRulesFactory.get(sw.dpId).getRules() - assertThat(defaultRules*.cookie.sort()).containsExactlyInAnyOrder(*sw.defaultCookies.sort()) + def swToInteract = switchFactory.get(sw) + def defaultRules = swToInteract.rulesManager.getRules() + assertThat(defaultRules*.cookie.sort()).containsExactlyInAnyOrder(*swToInteract.collectDefaultCookies().sort()) - switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.DROP_ALL) - wait(RULES_DELETION_TIME) { assert switchRulesFactory.get(sw.dpId).getRules().empty } + swToInteract.rulesManager.delete(DeleteRulesAction.DROP_ALL) + wait(RULES_DELETION_TIME) { assert swToInteract.rulesManager.getRules().empty } when: "Install rules on the switch" - def installedRules = northbound.installSwitchRules(sw.dpId, data.installRulesAction) + def installedRules = swToInteract.rulesManager.installRules(data.installRulesAction) then: "The corresponding rules are really installed" installedRules.size() == 1 def expectedRules = defaultRules.findAll { it.cookie == data.cookie } wait(RULES_INSTALLATION_TIME) { - def actualRules = switchRulesFactory.get(sw.dpId).getRules() + def actualRules = swToInteract.rulesManager.getRules() .findAll { new Cookie(it.cookie).getType() != CookieType.MULTI_TABLE_ISL_VLAN_EGRESS_RULES } assertThat(actualRules).containsExactlyInAnyOrder(*expectedRules) } @@ -135,21 +130,22 @@ class DefaultRulesSpec extends HealthCheckSpecification { def "Able to install default rule on switch: #sw.hwSwString [install-action=#data.installRulesAction]"( Map data, Switch sw) { given: "A switch without rules" - def defaultRules = switchRulesFactory.get(sw.dpId).getRules() - assert defaultRules*.cookie.sort() == sw.defaultCookies.sort() + def swToInteract = switchFactory.get(sw) + def 
defaultRules = swToInteract.rulesManager.getRules() + assert defaultRules*.cookie.sort() == swToInteract.collectDefaultCookies().sort() - switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.DROP_ALL) - wait(RULES_DELETION_TIME) { assert switchRulesFactory.get(sw.dpId).getRules().empty } + swToInteract.rulesManager.delete(DeleteRulesAction.DROP_ALL) + wait(RULES_DELETION_TIME) { assert swToInteract.rulesManager.getRules().empty } when: "Install rules on the switch" - def installedRules = northbound.installSwitchRules(sw.dpId, data.installRulesAction) + def installedRules = swToInteract.rulesManager.installRules(data.installRulesAction) then: "The corresponding rules are really installed" installedRules.size() == 1 def expectedRules = defaultRules.findAll { it.cookie == data.cookie } wait(RULES_INSTALLATION_TIME) { - def actualRules = switchRulesFactory.get(sw.dpId).getRules() + def actualRules = swToInteract.rulesManager.getRules() assert actualRules.cookie == installedRules assertThat(actualRules).containsExactlyInAnyOrder(*expectedRules) } @@ -187,19 +183,20 @@ class DefaultRulesSpec extends HealthCheckSpecification { @Tags([TOPOLOGY_DEPENDENT, SMOKE, SMOKE_SWITCHES]) def "Able to install default rules on #sw.hwSwString [install-action=INSTALL_DEFAULTS]"() { given: "A switch without any rules" - def defaultRules = switchRulesFactory.get(sw.dpId).getRules() - assert defaultRules*.cookie.sort() == sw.defaultCookies.sort() + def swToInteract = switchFactory.get(sw) + def defaultRules = swToInteract.rulesManager.getRules() + assert defaultRules*.cookie.sort() == swToInteract.collectDefaultCookies().sort() - switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.DROP_ALL) - wait(RULES_DELETION_TIME) { assert switchRulesFactory.get(sw.dpId).getRules().empty } + swToInteract.rulesManager.delete(DeleteRulesAction.DROP_ALL) + wait(RULES_DELETION_TIME) { assert swToInteract.rulesManager.getRules().empty } when: "Install rules on the switch" - def installedRules = northbound.installSwitchRules(sw.dpId, InstallRulesAction.INSTALL_DEFAULTS) + def installedRules = swToInteract.rulesManager.installRules(InstallRulesAction.INSTALL_DEFAULTS) then: "The corresponding rules are really installed" installedRules.size() == defaultRules.size() wait(RULES_INSTALLATION_TIME) { - def actualRules = switchRulesFactory.get(sw.dpId).getRules() + def actualRules = swToInteract.rulesManager.getRules() assertThat(actualRules).containsExactlyInAnyOrder(*defaultRules) } @@ -211,25 +208,27 @@ class DefaultRulesSpec extends HealthCheckSpecification { def "Able to delete default rule from #sw.hwSwString[delete-action=#data.deleteRulesAction]"( Map data, Switch sw) { when: "Delete rules from the switch" - def defaultRules = switchRulesFactory.get(sw.dpId).getRules() - def expectedDefaultCookies = sw.defaultCookies + def swToInteract = switchFactory.get(sw) + def defaultRules = swToInteract.rulesManager.getRules() + def expectedDefaultCookies = swToInteract.collectDefaultCookies() assert defaultRules*.cookie.sort() == expectedDefaultCookies.sort() - def deletedRules = switchHelper.deleteSwitchRules(sw.dpId, data.deleteRulesAction) + def deletedRules = swToInteract.rulesManager.delete(data.deleteRulesAction) then: "The corresponding rules are really deleted" deletedRules.size() == 1 wait(RULES_DELETION_TIME) { - def actualRules = switchRulesFactory.get(sw.dpId).getRules() + def actualRules = swToInteract.rulesManager.getRules() assertThat(actualRules).containsExactlyInAnyOrder(*defaultRules.findAll { it.cookie != 
data.cookie }) } and: "Switch and rules validation shows that corresponding default rule is missing" - verifyAll(northbound.validateSwitchRules(sw.dpId)) { + verifyAll(swToInteract.rulesManager.validateRules()) { missingRules == deletedRules excessRules.empty properRules.sort() == expectedDefaultCookies.findAll { it != data.cookie }.sort() } - verifyAll(switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get()) { + + verifyAll(swToInteract.validate()) { rules.missing*.getCookie() == deletedRules rules.misconfigured.empty rules.excess.empty @@ -268,28 +267,30 @@ class DefaultRulesSpec extends HealthCheckSpecification { @Tags([TOPOLOGY_DEPENDENT, SMOKE_SWITCHES]) def "Able to delete default rule from #sw.hwSwString [delete-action=#data.deleteRulesAction]"(Map data, Switch sw) { when: "Delete rule from the switch" + def swToInteract = switchFactory.get(sw) def defaultRules - def expectedDefaultCookies = sw.defaultCookies + def expectedDefaultCookies = swToInteract.collectDefaultCookies() wait(RULES_INSTALLATION_TIME) { - defaultRules = switchRulesFactory.get(sw.dpId).getRules() + defaultRules = swToInteract.rulesManager.getRules() assert defaultRules*.cookie.sort() == expectedDefaultCookies.sort() } - def deletedRules = switchHelper.deleteSwitchRules(sw.dpId, data.deleteRulesAction) + def deletedRules = swToInteract.rulesManager.delete(data.deleteRulesAction) then: "The corresponding rule is really deleted" deletedRules.size() == 1 wait(RULES_DELETION_TIME) { - def actualRules = switchRulesFactory.get(sw.dpId).getRules() + def actualRules = swToInteract.rulesManager.getRules() assertThat(actualRules).containsExactlyInAnyOrder(*defaultRules.findAll { it.cookie != data.cookie }) } and: "Switch and rules validation shows that corresponding default rule is missing" - verifyAll(northbound.validateSwitchRules(sw.dpId)) { + verifyAll(swToInteract.rulesManager.validateRules()) { missingRules == deletedRules excessRules.empty properRules.sort() == expectedDefaultCookies.findAll { it != data.cookie }.sort() } - verifyAll(switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get()) { + + verifyAll(swToInteract.validate()) { rules.missing*.getCookie() == deletedRules rules.misconfigured.empty rules.excess.empty @@ -331,12 +332,13 @@ class DefaultRulesSpec extends HealthCheckSpecification { setup: "Select a switch which support server42 turning rule" def sw = topology.activeSwitches.find { it.features.contains(SwitchFeature.NOVIFLOW_SWAP_ETH_SRC_ETH_DST) } ?: assumeTrue(false, "No suiting switch found") + def swToInteract = switchFactory.get(sw) and: "Server42 is enabled in feature toggle" assumeTrue(featureToggles.getFeatureToggles().server42FlowRtt) when: "Delete the server42 turning rule from the switch" - def deleteResponse = switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.REMOVE_SERVER_42_TURNING) + def deleteResponse = swToInteract.rulesManager.delete(DeleteRulesAction.REMOVE_SERVER_42_TURNING) then: "The delete rule response contains the server42 turning cookie only" deleteResponse.size() == 1 @@ -344,17 +346,18 @@ class DefaultRulesSpec extends HealthCheckSpecification { and: "The corresponding rule is really deleted" wait(RULES_DELETION_TIME) { - assert switchRulesFactory.get(sw.dpId).getRules().findAll { it.cookie == SERVER_42_FLOW_RTT_TURNING_COOKIE }.empty + assert swToInteract.rulesManager.getRules().findAll { it.cookie == SERVER_42_FLOW_RTT_TURNING_COOKIE }.empty } and: "Switch and rules validation shows that corresponding rule is missing" - def 
defaultCookiesWithoutMissingS42Rule = sw.defaultCookies.findAll { it != SERVER_42_FLOW_RTT_TURNING_COOKIE }.sort() - verifyAll(northbound.validateSwitchRules(sw.dpId)) { + def defaultCookiesWithoutMissingS42Rule = swToInteract.collectDefaultCookies().findAll { it != SERVER_42_FLOW_RTT_TURNING_COOKIE }.sort() + verifyAll(swToInteract.rulesManager.validateRules()) { missingRules == [SERVER_42_FLOW_RTT_TURNING_COOKIE] excessRules.empty properRules.sort() == defaultCookiesWithoutMissingS42Rule } - verifyAll(switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get()) { + + verifyAll(swToInteract.validate()) { rules.missing*.getCookie() == [SERVER_42_FLOW_RTT_TURNING_COOKIE] rules.misconfigured.empty rules.excess.empty @@ -362,7 +365,7 @@ class DefaultRulesSpec extends HealthCheckSpecification { } when: "Install the server42 turning rule" - def installResponse = northbound.installSwitchRules(sw.dpId, InstallRulesAction.INSTALL_SERVER_42_TURNING) + def installResponse = swToInteract.rulesManager.installRules(InstallRulesAction.INSTALL_SERVER_42_TURNING) then: "The install rule response contains the server42 turning cookie only" installResponse.size() == 1 @@ -370,33 +373,35 @@ class DefaultRulesSpec extends HealthCheckSpecification { and: "The corresponding rule is really installed" wait(RULES_INSTALLATION_TIME) { - assert !switchRulesFactory.get(sw.dpId).getRules().findAll { it.cookie == SERVER_42_FLOW_RTT_TURNING_COOKIE }.empty + assert !swToInteract.rulesManager.getRules().findAll { it.cookie == SERVER_42_FLOW_RTT_TURNING_COOKIE }.empty } } @Tags([TOPOLOGY_DEPENDENT, SMOKE_SWITCHES]) def "Able to delete/install the server42 ISL RTT turning rule on a switch"() { setup: "Select a switch which support server42 turning rule" - def sw = topology.getActiveServer42Switches().find(s -> switchHelper.getCachedSwProps(s.dpId).server42IslRtt != "DISABLED"); + def sw = topology.getActiveServer42Switches().find{ s -> + northboundV2.getAllSwitchProperties().switchProperties.find { it.switchId == s.dpId }.server42IslRtt != "DISABLED"} assumeTrue(sw != null, "No suiting switch found") + def swToInteract = switchFactory.get(sw) and: "Server42 is enabled in feature toggle" assumeTrue(featureToggles.getFeatureToggles().server42IslRtt) and: "server42IslRtt is enabled on the switch" - def originSwProps = switchHelper.getCachedSwProps(sw.dpId) - switchHelper.updateSwitchProperties(sw, originSwProps.jacksonCopy().tap({ + def originSwProps = swToInteract.retrievedCachedSwProps() + swToInteract.updateProperties(originSwProps.jacksonCopy().tap({ it.server42IslRtt = RttState.ENABLED.toString() })) wait(RULES_INSTALLATION_TIME) { - assert switchRulesFactory.get(sw.dpId).getRules().findAll { + assert swToInteract.rulesManager.getRules().findAll { (it.cookie in [SERVER_42_ISL_RTT_TURNING_COOKIE, SERVER_42_ISL_RTT_OUTPUT_COOKIE]) || (new Cookie(it.cookie).getType() in [CookieType.SERVER_42_ISL_RTT_INPUT]) }.size() == northbound.getLinks(sw.dpId, null, null, null).size() + 2 } when: "Delete the server42 ISL RTT turning rule from the switch" - def deleteResponse = switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.REMOVE_SERVER_42_ISL_RTT_TURNING) + def deleteResponse = swToInteract.rulesManager.delete(DeleteRulesAction.REMOVE_SERVER_42_ISL_RTT_TURNING) then: "The delete rule response contains the server42 ISL RTT turning cookie only" deleteResponse.size() == 1 @@ -404,24 +409,25 @@ class DefaultRulesSpec extends HealthCheckSpecification { and: "The corresponding rule is really deleted" wait(RULES_DELETION_TIME) { 
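// Illustrative sketch (not part of the patch): the hunks above and below show the new interaction
// approach — a per-switch SwitchExtended helper obtained from SwitchFactory replaces the dpId-keyed
// calls to northbound/switchRulesFactory. Minimal fragment, assuming the spec's injected
// switchFactory bean and the SwitchExtended API exactly as used in this patch:
//     def swToInteract = switchFactory.get(sw)                      // wrap a TopologyDefinition.Switch
//     swToInteract.rulesManager.delete(DeleteRulesAction.DROP_ALL)  // delete via the helper, not switchHelper
//     assert swToInteract.rulesManager.getRules().empty
//     assert swToInteract.rulesManager.validateRules().missingRules.size() > 0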
- assert switchRulesFactory.get(sw.dpId).getRules().findAll { it.cookie == SERVER_42_ISL_RTT_TURNING_COOKIE }.empty + assert swToInteract.rulesManager.getRules().findAll { it.cookie == SERVER_42_ISL_RTT_TURNING_COOKIE }.empty } and: "Switch and rules validation shows that corresponding rule is missing" - verifyAll(northbound.validateSwitchRules(sw.dpId)) { + verifyAll(swToInteract.rulesManager.validateRules()) { missingRules == [SERVER_42_ISL_RTT_TURNING_COOKIE] excessRules.empty - properRules.sort() == sw.defaultCookies.findAll { it != SERVER_42_ISL_RTT_TURNING_COOKIE }.sort() + properRules.sort() == swToInteract.collectDefaultCookies().findAll { it != SERVER_42_ISL_RTT_TURNING_COOKIE }.sort() } - verifyAll(switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get()) { + + verifyAll(swToInteract.validate()) { rules.missing*.getCookie() == [SERVER_42_ISL_RTT_TURNING_COOKIE] rules.misconfigured.empty rules.excess.empty - rules.proper*.getCookie().sort() == sw.defaultCookies.findAll { it != SERVER_42_ISL_RTT_TURNING_COOKIE }.sort() + rules.proper*.getCookie().sort() == swToInteract.collectDefaultCookies().findAll { it != SERVER_42_ISL_RTT_TURNING_COOKIE }.sort() } when: "Install the server42 ISL RTT turning rule" - def installResponse = northbound.installSwitchRules(sw.dpId, InstallRulesAction.INSTALL_SERVER_42_ISL_RTT_TURNING) + def installResponse = swToInteract.rulesManager.installRules(InstallRulesAction.INSTALL_SERVER_42_ISL_RTT_TURNING) then: "The install rule response contains the server42 ISL RTT turning cookie only" installResponse.size() == 1 @@ -429,7 +435,7 @@ class DefaultRulesSpec extends HealthCheckSpecification { and: "The corresponding rule is really installed" wait(RULES_INSTALLATION_TIME) { - assert !switchRulesFactory.get(sw.dpId).getRules().findAll { it.cookie == SERVER_42_ISL_RTT_TURNING_COOKIE }.empty + assert !swToInteract.rulesManager.getRules().findAll { it.cookie == SERVER_42_ISL_RTT_TURNING_COOKIE }.empty } } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesValidationSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesValidationSpec.groovy index 2a2a5624321..ca82b23ec7e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesValidationSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/DefaultRulesValidationSpec.groovy @@ -31,27 +31,28 @@ class DefaultRulesValidationSpec extends HealthCheckSpecification { def "Switch and rule validation can properly detect default rules to 'proper' section (#sw.hwSwString #propsDescr)"( Map swProps, Switch sw, String propsDescr) { given: "Clean switch without customer flows and with the given switchProps" - def originalProps = switchHelper.getCachedSwProps(sw.dpId) - switchHelper.updateSwitchProperties(sw, originalProps.jacksonCopy().tap({ + def swToInteract = switchFactory.get(sw) + def originalProps = swToInteract.retrievedCachedSwProps() + swToInteract.updateProperties(originalProps.jacksonCopy().tap({ it.switchLldp = swProps.switchLldp it.switchArp = swProps.switchArp })) expect: "Switch validation shows all expected default rules in 'proper' section" Wrappers.wait(Constants.RULES_INSTALLATION_TIME) { - verifyAll(northbound.validateSwitchRules(sw.dpId)) { + verifyAll(swToInteract.rulesManager.validateRules()) { missingRules.empty excessRules.empty - 
properRules.sort() == sw.defaultCookies.sort() + properRules.sort() == swToInteract.collectDefaultCookies().sort() } } and: "Rule validation shows all expected default rules in 'proper' section" - verifyAll(switchHelper.validate(sw.dpId)) { + verifyAll(swToInteract.validate()) { rules.missing.empty rules.misconfigured.empty rules.excess.empty - assertThat sw.toString(), rules.proper*.cookie, containsInAnyOrder(sw.defaultCookies.toArray()) + assertThat sw.toString(), rules.proper*.cookie, containsInAnyOrder(swToInteract.collectDefaultCookies().toArray()) } where: "Run for all combinations of unique switches and switch modes" diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy index 9597875b328..f88c3a91ce5 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy @@ -25,7 +25,7 @@ import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.helpers.model.FlowRuleEntity -import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.helpers.model.SwitchExtended import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.info.rule.FlowEntry @@ -33,7 +33,6 @@ import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType -import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService import org.springframework.beans.factory.annotation.Autowired @@ -53,11 +52,8 @@ class FlowRulesSpec extends HealthCheckSpecification { @Autowired @Shared FlowFactory flowFactory - @Autowired - @Shared - SwitchRulesFactory switchRulesFactory @Shared - Switch srcSwitch, dstSwitch + SwitchExtended srcSwitch, dstSwitch @Shared List srcSwDefaultRules @Shared @@ -79,10 +75,10 @@ class FlowRulesSpec extends HealthCheckSpecification { int s42QinqOuterVlanCount = 1 def setupSpec() { - (srcSwitch, dstSwitch) = topology.getActiveSwitches()[0..1] - s42IsEnabledOnSrcSw = switchHelper.getCachedSwProps(srcSwitch.dpId).server42FlowRtt - srcSwDefaultRules = switchRulesFactory.get(srcSwitch.dpId).getRules() - dstSwDefaultRules = switchRulesFactory.get(dstSwitch.dpId).getRules() + (srcSwitch, dstSwitch) = topology.getActiveSwitches()[0..1].collect { switchFactory.get(it) } + s42IsEnabledOnSrcSw = srcSwitch.retrievedCachedSwProps().server42FlowRtt + srcSwDefaultRules = srcSwitch.rulesManager.getRules() + dstSwDefaultRules = dstSwitch.rulesManager.getRules() } @Tags([VIRTUAL, SMOKE, SWITCH_RECOVER_ON_FAIL]) @@ -92,18 +88,18 @@ class FlowRulesSpec extends HealthCheckSpecification { def defaultPlusFlowRules = [] Wrappers.wait(RULES_INSTALLATION_TIME) { - defaultPlusFlowRules = switchRulesFactory.get(srcSwitch.dpId).getRules() + defaultPlusFlowRules = srcSwitch.rulesManager.getRules() def multiTableFlowRules = multiTableFlowRulesCount + sharedRulesCount 
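// Illustrative sketch (not part of the patch): with SwitchExtended the expected default-rule set
// comes from the helper itself (collectDefaultCookies()) instead of the topology Switch
// (sw.defaultCookies), so rule checks can be expressed against a single object. Fragment assuming
// the same injected beans and API as used throughout this spec:
//     def srcSwitch = switchFactory.get(topology.getActiveSwitches().first())
//     def defaultCookies = srcSwitch.collectDefaultCookies().sort()
//     assert srcSwitch.rulesManager.getRules()*.cookie.sort().containsAll(defaultCookies)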
assert defaultPlusFlowRules.size() == srcSwDefaultRules.size() + flowRulesCount + multiTableFlowRules } - def blockData = switchHelper.knockoutSwitch(srcSwitch, RW) + def blockData = srcSwitch.knockoutSwitch(RW) when: "Connect the switch to the controller" - switchHelper.reviveSwitch(srcSwitch, blockData) + srcSwitch.reviveSwitch(blockData) then: "Previously installed rules are not deleted from the switch" - def actualRules = switchRulesFactory.get(srcSwitch.dpId).getRules() + def actualRules = srcSwitch.rulesManager.getRules() assertThat(actualRules).containsExactlyInAnyOrder(*defaultPlusFlowRules) } @@ -115,12 +111,12 @@ class FlowRulesSpec extends HealthCheckSpecification { when: "Delete rules from the switch" List expectedRules = data.getExpectedRules(srcSwitch, srcSwDefaultRules) - def deletedRules = switchHelper.deleteSwitchRules(srcSwitch.dpId, data.deleteRulesAction) + def deletedRules = srcSwitch.rulesManager.delete(data.deleteRulesAction) then: "The corresponding rules are really deleted" deletedRules.size() == data.rulesDeleted Wrappers.wait(RULES_DELETION_TIME) { - def actualRules = switchRulesFactory.get(srcSwitch.dpId).getRules() + def actualRules = srcSwitch.rulesManager.getRules() assertThat(actualRules).containsExactlyInAnyOrder(*expectedRules) } @@ -139,7 +135,7 @@ class FlowRulesSpec extends HealthCheckSpecification { sharedRulesCount + (s42IsEnabledOnSrcSw ? s42FlowRttInput + s42QinqOuterVlanCount + s42FlowRttIngressForwardCount : 0), getExpectedRules : { sw, defaultRules -> - List noDefaultSwRules = switchRulesFactory.get(srcSwitch.dpId).getRules() - defaultRules + List noDefaultSwRules = srcSwitch.rulesManager.getRules() - defaultRules defaultRules + noDefaultSwRules.findAll { Cookie.isIngressRulePassThrough(it.cookie) } + (s42IsEnabledOnSrcSw ? noDefaultSwRules.findAll { new Cookie(it.cookie).getType() == CookieType.SERVER_42_FLOW_RTT_INPUT } : []) @@ -150,7 +146,7 @@ class FlowRulesSpec extends HealthCheckSpecification { rulesDeleted : flowRulesCount + sharedRulesCount + (s42IsEnabledOnSrcSw ? s42QinqOuterVlanCount + s42FlowRttIngressForwardCount : 0), getExpectedRules : { sw, defaultRules -> - List noDefaultSwRules = switchRulesFactory.get(srcSwitch.dpId).getRules() - defaultRules + List noDefaultSwRules = srcSwitch.rulesManager.getRules() - defaultRules defaultRules + noDefaultSwRules.findAll { Cookie.isIngressRulePassThrough(it.cookie) } + (s42IsEnabledOnSrcSw ? noDefaultSwRules.findAll { new Cookie(it.cookie).getType() == CookieType.SERVER_42_FLOW_RTT_INPUT } : []) @@ -161,7 +157,7 @@ class FlowRulesSpec extends HealthCheckSpecification { rulesDeleted : srcSwDefaultRules.size() + multiTableFlowRulesCount + (s42IsEnabledOnSrcSw ? s42FlowRttInput : 0), getExpectedRules : { sw, defaultRules -> defaultRules + getFlowRules(sw) + - switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { + srcSwitch.rulesManager.getRules().findAll { Cookie.isIngressRulePassThrough(it.cookie) } } @@ -171,7 +167,7 @@ class FlowRulesSpec extends HealthCheckSpecification { rulesDeleted : srcSwDefaultRules.size() + multiTableFlowRulesCount + (s42IsEnabledOnSrcSw ? s42FlowRttInput : 0), getExpectedRules : { sw, defaultRules -> getFlowRules(sw) - - (s42IsEnabledOnSrcSw ? switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { + (s42IsEnabledOnSrcSw ? 
srcSwitch.rulesManager.getRules().findAll { new Cookie(it.cookie).getType() == CookieType.SERVER_42_FLOW_RTT_INPUT } : []) } ], @@ -180,7 +176,7 @@ class FlowRulesSpec extends HealthCheckSpecification { rulesDeleted : srcSwDefaultRules.size() + multiTableFlowRulesCount + (s42IsEnabledOnSrcSw ? s42FlowRttInput : 0), getExpectedRules : { sw, defaultRules -> defaultRules + getFlowRules(sw) + - switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { + srcSwitch.rulesManager.getRules().findAll { Cookie.isIngressRulePassThrough(it.cookie) } } @@ -196,15 +192,15 @@ class FlowRulesSpec extends HealthCheckSpecification { when: "Delete switch rules by #data.identifier" //exclude the "SERVER_42_INPUT" rule, this rule has less priority than usual flow rule def ruleToDelete = getFlowRules(data.switch).find { !new Cookie(it.cookie).serviceFlag } - def expectedDeletedRules = switchRulesFactory.get(data.switch.dpId).getRules() + def expectedDeletedRules = data.switch.rulesManager.getRules() .findAll { it."$data.identifier" == ruleToDelete."$data.identifier" && !new Cookie(it.cookie).serviceFlag } - def deletedRules = switchHelper.deleteSwitchRules(data.switch.dpId, ruleToDelete."$data.identifier") + def deletedRules = data.switch.rulesManager.delete(ruleToDelete."$data.identifier") then: "The requested rules are really deleted" deletedRules.size() == expectedDeletedRules.size() Wrappers.wait(RULES_DELETION_TIME) { - def actualRules = switchRulesFactory.get(data.switch.dpId).getRules() + def actualRules = data.switch.rulesManager.getRules() assert actualRules.findAll { it.cookie in expectedDeletedRules*.cookie }.empty } @@ -224,7 +220,7 @@ class FlowRulesSpec extends HealthCheckSpecification { assumeTrue(data.description != "priority", "https://github.com/telstra/open-kilda/issues/1701") flowFactory.getRandom(srcSwitch, dstSwitch) - def ingressRule = (switchRulesFactory.get(srcSwitch.dpId).getRules() - data.defaultRules).find { + def ingressRule = (srcSwitch.rulesManager.getRules() - data.defaultRules).find { new Cookie(it.cookie).serviceFlag } if (ingressRule) { @@ -232,11 +228,11 @@ class FlowRulesSpec extends HealthCheckSpecification { } when: "Delete switch rules by non-existing #data.description" - def deletedRules = switchHelper.deleteSwitchRules(data.switch.dpId, data.value) + def deletedRules = data.switch.rulesManager.delete(data.value) then: "All rules are kept intact" deletedRules.size() == 0 - switchRulesFactory.get(data.switch.dpId).getRules().size() == data.defaultRules.size() + flowRulesCount + data.switch.rulesManager.getRules().size() == data.defaultRules.size() + flowRulesCount where: data << [[description : "cookie", @@ -257,12 +253,11 @@ class FlowRulesSpec extends HealthCheckSpecification { def "Able to delete switch rules by #data.description"() { given: "A switch with some flow rules installed" flow.create() - def cookiesBefore = switchRulesFactory.get(data.switch.dpId).getRules().cookie.sort() - def s42IsEnabled = switchHelper.getCachedSwProps(data.switch.dpId).server42FlowRtt + def cookiesBefore = sw.rulesManager.getRules().cookie.sort() + def s42IsEnabled = sw.retrievedCachedSwProps().server42FlowRtt when: "Delete switch rules by #data.description" - def deletedRules = switchHelper.deleteSwitchRules(data.switch.dpId, data.inPort, data.inVlan, - data.encapsulationType, data.outPort) + def deletedRules = sw.rulesManager.delete(data.inPort, data.inVlan, data.encapsulationType, data.outPort) then: "The requested rules are really deleted" def amountOfDeletedRules = 
data.removedRules @@ -271,7 +266,7 @@ class FlowRulesSpec extends HealthCheckSpecification { } deletedRules.size() == amountOfDeletedRules Wrappers.wait(RULES_DELETION_TIME) { - def actualRules = switchRulesFactory.get(data.switch.dpId).getRules() + def actualRules = sw.rulesManager.getRules() assert actualRules*.cookie.sort() == cookiesBefore - deletedRules assert filterRules(actualRules, data.inPort, data.inVlan, data.outPort).empty } @@ -310,21 +305,21 @@ class FlowRulesSpec extends HealthCheckSpecification { ].tap { outPort = flow.destination.portNumber }, ] flow = data.flow as FlowExtended + sw = data.switch as SwitchExtended } @IterationTag(tags = [SMOKE], iterationNameRegex = /inVlan/) def "Attempt to delete switch rules by supplying non-existing #data.description keeps all rules intact"() { given: "A switch with some flow rules installed" flowFactory.getRandom(srcSwitch, dstSwitch) - def originalRules = switchRulesFactory.get(data.switch.dpId).getRules().cookie.sort() + def originalRules = sw.rulesManager.getRules().cookie.sort() when: "Delete switch rules by non-existing #data.description" - def deletedRules = switchHelper.deleteSwitchRules(data.switch.dpId, data.inPort, data.inVlan, - data.encapsulationType, data.outPort) + def deletedRules = sw.rulesManager.delete(data.inPort, data.inVlan, data.encapsulationType, data.outPort) then: "All rules are kept intact" deletedRules.size() == 0 - switchRulesFactory.get(data.switch.dpId).getRules().cookie.sort() == originalRules + sw.rulesManager.getRules().cookie.sort() == originalRules where: data << [[description : "inPort", @@ -360,6 +355,7 @@ class FlowRulesSpec extends HealthCheckSpecification { outPort : Integer.MAX_VALUE - 1 ] ] + sw = data.switch as SwitchExtended } @Tags([TOPOLOGY_DEPENDENT]) @@ -378,50 +374,39 @@ class FlowRulesSpec extends HealthCheckSpecification { and: "Remove flow rules so that they become 'missing'" def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() - def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { switchId -> - [switchId, switchRulesFactory.get(switchId).getRules()] - } - - def amountOfRulesMap = involvedSwitches.collectEntries { switchId -> - def swProps = switchHelper.getCachedSwProps(switchId) - def switchIdInSrcOrDst = (switchId in [switchPair.src.dpId, switchPair.dst.dpId]) - def defaultAmountOfFlowRules = 2 // ingress + egress - def amountOfServer42Rules = 0 - if(swProps.server42FlowRtt && switchIdInSrcOrDst) { - amountOfServer42Rules +=1 - switchId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules - switchId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules - } - - def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 
1 : 0) + .collect { switchFactory.get(it) } + def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.rulesManager.getRules()] + } - [switchId, (rulesCount)] + def amountOfRulesMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.collectFlowRelatedRulesAmount(flow)] } - involvedSwitches.each { switchId -> - switchHelper.deleteSwitchRules(switchId, DeleteRulesAction.IGNORE_DEFAULTS) + involvedSwitches.each { sw -> + sw.rulesManager.delete(DeleteRulesAction.IGNORE_DEFAULTS) Wrappers.wait(RULES_DELETION_TIME) { - assert northbound.validateSwitchRules(switchId).missingRules.size() == amountOfRulesMap[switchId] + assert sw.rulesManager.validateRules().missingRules.size() == amountOfRulesMap[sw.switchId] } } when: "Synchronize rules on switches" - def synchronizedRulesMap = involvedSwitches.collectEntries { switchId -> - [switchId, northbound.synchronizeSwitchRules(switchId)] + def synchronizedRulesMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.rulesManager.synchronizeRules()] } then: "The corresponding rules are installed on switches" - involvedSwitches.each { switchId -> - assert synchronizedRulesMap[switchId].installedRules.size() == amountOfRulesMap[switchId] + involvedSwitches.each { sw -> + assert synchronizedRulesMap[sw.switchId].installedRules.size() == amountOfRulesMap[sw.switchId] Wrappers.wait(RULES_INSTALLATION_TIME) { - def actualRules = switchRulesFactory.get(switchId).getRules() - assertThat(actualRules).containsExactlyInAnyOrder(*defaultPlusFlowRulesMap[switchId]) + def actualRules = sw.rulesManager.getRules() + assertThat(actualRules).containsExactlyInAnyOrder(*defaultPlusFlowRulesMap[sw.switchId]) } } and: "No missing rules were found after rules validation" - involvedSwitches.each { switchId -> - verifyAll(northbound.validateSwitchRules(switchId)) { - properRules.findAll { !new Cookie(it).serviceFlag }.size() == amountOfRulesMap[switchId] + involvedSwitches.each { sw -> + verifyAll(sw.rulesManager.validateRules()) { + properRules.findAll { !new Cookie(it).serviceFlag }.size() == amountOfRulesMap[sw.switchId] missingRules.empty excessRules.empty } @@ -458,39 +443,34 @@ class FlowRulesSpec extends HealthCheckSpecification { .create() def flowPathInfo = flow.retrieveAllEntityPaths() + def involvedSwitches = flowPathInfo.getInvolvedSwitches().collect { switchFactory.get(it) } - HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() - .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> - flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + HashMap> flowInvolvedSwitchesWithRulesBefore = involvedSwitches + .collectEntries{ [(it.switchId): it.rulesManager.getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRulesBefore) - def mainFlowPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) - def protectedFlowPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) - List commonNodeIds = mainFlowPath*.switchId.intersect(protectedFlowPath*.switchId) - List uniqueNodes = (protectedFlowPath.findAll { !commonNodeIds.contains(it.switchId) } + mainFlowPath.findAll { - !commonNodeIds.contains(it.switchId) - })*.switchId.unique() - def rulesOnSwitchesBefore = (commonNodeIds + uniqueNodes).collectEntries { - [it, switchRulesFactory.get(it).getRules().sort { it.cookie }] - } + def mainPathSwIds = flowPathInfo.getPathNodes(Direction.FORWARD, false).switchId + def protectedPathSwId = 
flowPathInfo.getPathNodes(Direction.FORWARD, true).switchId + List commonNodeIds = mainPathSwIds.intersect(protectedPathSwId) + List commonMainAndProtectedPathSws = involvedSwitches.findAll { it.switchId in commonNodeIds } + List uniqueMainAndProtectedPathSws = involvedSwitches.findAll { !(it.switchId in commonNodeIds) } and: "Delete flow rules(for main and protected paths) on involved switches for creating missing rules" - commonNodeIds.each { switchHelper.deleteSwitchRules(it, DeleteRulesAction.IGNORE_DEFAULTS) } - uniqueNodes.each { switchHelper.deleteSwitchRules(it, DeleteRulesAction.IGNORE_DEFAULTS) } - commonNodeIds.each { switchId -> - assert northbound.validateSwitchRules(switchId).missingRules.size() > 0 - } - uniqueNodes.each { assert northbound.validateSwitchRules(it).missingRules.size() == 2 } + commonMainAndProtectedPathSws.each { it.rulesManager.delete(DeleteRulesAction.IGNORE_DEFAULTS) } + uniqueMainAndProtectedPathSws.each { it.rulesManager.delete(DeleteRulesAction.IGNORE_DEFAULTS) } + commonMainAndProtectedPathSws.each { assert it.rulesManager.validateRules().missingRules.size() > 0 } + uniqueMainAndProtectedPathSws.each { assert it.rulesManager.validateRules().missingRules.size() == 2 } when: "Synchronize rules on switches" - commonNodeIds.each { - def response = northbound.synchronizeSwitchRules(it) + commonMainAndProtectedPathSws.each { + def response = it.rulesManager.synchronizeRules() assert response.missingRules.size() > 0 assert response.installedRules.sort() == response.missingRules.sort() assert response.properRules.findAll { !new Cookie(it).serviceFlag }.empty assert response.excessRules.empty } - uniqueNodes.each { - def response = northbound.synchronizeSwitchRules(it) + uniqueMainAndProtectedPathSws.each { + def response = it.rulesManager.synchronizeRules() assert response.missingRules.size() == 2 assert response.installedRules.sort() == response.missingRules.sort() assert response.properRules.findAll { !new Cookie(it).serviceFlag }.empty, it @@ -498,15 +478,15 @@ class FlowRulesSpec extends HealthCheckSpecification { } then: "No missing rules were found after rules synchronization" - commonNodeIds.each { switchId -> - verifyAll(northbound.validateSwitchRules(switchId)) { - properRules.sort() == rulesOnSwitchesBefore[switchId]*.cookie + commonMainAndProtectedPathSws.each { sw -> + verifyAll(sw.rulesManager.validateRules()) { + properRules.sort() == flowInvolvedSwitchesWithRulesBefore[sw.switchId]*.cookie.sort() missingRules.empty excessRules.empty } } - uniqueNodes.each { - verifyAll(northbound.validateSwitchRules(it)) { + uniqueMainAndProtectedPathSws.each { + verifyAll(it.rulesManager.validateRules()) { properRules.findAll { !new Cookie(it).serviceFlag }.size() == 2 missingRules.empty excessRules.empty @@ -514,9 +494,9 @@ class FlowRulesSpec extends HealthCheckSpecification { } and: "Synced rules are exactly the same as before delete (ignoring irrelevant fields)" - rulesOnSwitchesBefore.each { - def actualRules = switchRulesFactory.get(it.key).getRules() - assertThat(actualRules).containsExactlyInAnyOrder(*it.value) + involvedSwitches.each { sw -> + def actualRules = sw.rulesManager.getRules() + assertThat(actualRules).containsExactlyInAnyOrder(*flowInvolvedSwitchesWithRulesBefore[sw.switchId]) } } @@ -529,11 +509,11 @@ class FlowRulesSpec extends HealthCheckSpecification { and: "Create a flow going through these switches" def flow = flowFactory.getRandom(swPair) def flowInfo = flow.retrieveDetailsFromDB() - def flowRulesSrcSw = getFlowRules(swPair.src) - 
def flowRulesDstSw = getFlowRules(swPair.dst) - def sharedRuleSrcSw = flowRulesSrcSw.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && + def srcSw = switchFactory.get(swPair.src) + def dstSw = switchFactory.get(swPair.dst) + def sharedRuleSrcSw = getFlowRules(srcSw).find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && it.match.inPort.toInteger() == flow.source.portNumber }.cookie - def sharedRuleDstSw = flowRulesDstSw.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && + def sharedRuleDstSw = getFlowRules(dstSw).find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && it.match.inPort.toInteger() == flow.destination.portNumber }.cookie def ingressSrcSw = flowInfo.forwardPath.cookie.value @@ -554,8 +534,8 @@ class FlowRulesSpec extends HealthCheckSpecification { } then: "Traffic counters in shared/ingress/egress rule on source and destination switches represent packets movement" - def rulesAfterPassingTrafficSrcSw = getFlowRules(swPair.src) - def rulesAfterPassingTrafficDstSw = getFlowRules(swPair.dst) + def rulesAfterPassingTrafficSrcSw = getFlowRules(srcSw) + def rulesAfterPassingTrafficDstSw = getFlowRules(dstSw) //srcSw with(rulesAfterPassingTrafficSrcSw.find { it.cookie == sharedRuleSrcSw}) { !it.flags @@ -603,8 +583,8 @@ class FlowRulesSpec extends HealthCheckSpecification { assert flow.retrieveFlowStatus().status == FlowState.UP assert flow.retrieveAllEntityPaths() != actualFlowPath flowInfoAfterReroute = flow.retrieveDetailsFromDB() - rulesAfterRerouteSrcSw = getFlowRules(swPair.src) - rulesAfterRerouteDstSw = getFlowRules(swPair.dst) + rulesAfterRerouteSrcSw = getFlowRules(srcSw) + rulesAfterRerouteDstSw = getFlowRules(dstSw) //system doesn't reinstall shared rule assert rulesAfterRerouteSrcSw.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && it.match.inPort.toInteger() == flow.source.portNumber }.cookie == sharedRuleSrcSw @@ -668,83 +648,62 @@ class FlowRulesSpec extends HealthCheckSpecification { and: "Delete flow rules so that they become 'missing'" def flowInfoFromDb = flow.retrieveDetailsFromDB() def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + .collect { switchFactory.get(it) } + def transitSwitchIds = involvedSwitches[1..-2] - def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { switchId -> - [switchId, switchRulesFactory.get(switchId).getRules()] - } - - def rulesCountMap = involvedSwitches.collectEntries { switchId -> - def swProps = switchHelper.getCachedSwProps(switchId) - def switchIdInSrcOrDst = (switchId in [switchPair.src.dpId, switchPair.dst.dpId]) - def defaultAmountOfFlowRules = 2 // ingress + egress - def amountOfServer42Rules = 0 - if(swProps.server42FlowRtt && switchIdInSrcOrDst) { - amountOfServer42Rules +=1 - switchId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules - switchId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules - } + def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.rulesManager.getRules()] + } - def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 
1 : 0) - [switchId, rulesCount] + def rulesCountMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.collectFlowRelatedRulesAmount(flow)] } - involvedSwitches.each { switchId -> - switchHelper.deleteSwitchRules(switchId, DeleteRulesAction.IGNORE_DEFAULTS) + involvedSwitches.each { sw -> + sw.rulesManager.delete(DeleteRulesAction.IGNORE_DEFAULTS) Wrappers.wait(RULES_DELETION_TIME) { - assert northbound.validateSwitchRules(switchId).missingRules.size() == rulesCountMap[switchId] + assert sw.rulesManager.validateRules().missingRules.size() == rulesCountMap[sw.switchId] } } when: "Synchronize rules on switches" - def synchronizedRulesMap = involvedSwitches.collectEntries { switchId -> - [switchId, northbound.synchronizeSwitchRules(switchId)] + def synchronizedRulesMap = involvedSwitches.collectEntries { sw -> + [sw.switchId, sw.rulesManager.synchronizeRules()] } then: "The corresponding rules are installed on switches" - involvedSwitches.each { switchId -> - assert synchronizedRulesMap[switchId].installedRules.size() == rulesCountMap[switchId] + involvedSwitches.each { sw -> + assert synchronizedRulesMap[sw.switchId].installedRules.size() == rulesCountMap[sw.switchId] Wrappers.wait(RULES_INSTALLATION_TIME) { - def actualRules = switchRulesFactory.get(switchId).getRules() - assertThat(actualRules).containsExactlyInAnyOrder(*defaultPlusFlowRulesMap[switchId]) + def actualRules = sw.rulesManager.getRules() + assertThat(actualRules).containsExactlyInAnyOrder(*defaultPlusFlowRulesMap[sw.switchId]) } } and: "Rules are synced correctly" // ingressRule should contain "pushVxlan" // egressRule should contain "tunnel-id" - with(switchRulesFactory.get(switchPair.src.dpId).getRules()) { rules -> - assert rules.find { - it.cookie == flowInfoFromDb.forwardPath.cookie.value - }.instructions.applyActions.pushVxlan - assert rules.find { - it.cookie == flowInfoFromDb.reversePath.cookie.value - }.match.tunnelId - } - - with(switchRulesFactory.get(switchPair.dst.dpId).getRules()) { rules -> - assert rules.find { - it.cookie == flowInfoFromDb.forwardPath.cookie.value - }.match.tunnelId - assert rules.find { - it.cookie == flowInfoFromDb.reversePath.cookie.value - }.instructions.applyActions.pushVxlan - } - - transitSwitchIds.each { swId -> - with(switchRulesFactory.get(swId).getRules()) { rules -> - assert rules.find { - it.cookie == flowInfoFromDb.forwardPath.cookie.value - }.match.tunnelId - assert rules.find { - it.cookie == flowInfoFromDb.reversePath.cookie.value - }.match.tunnelId + with(involvedSwitches.find{ it.switchId == switchPair.src.dpId }.rulesManager.getRules()) { rules -> + assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan + assert rules.find { it.cookie == flowInfoFromDb.reversePath.cookie.value }.match.tunnelId + } + + with(involvedSwitches.find{ it.switchId == switchPair.dst.dpId }.rulesManager.getRules()) { rules -> + assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId + assert rules.find { it.cookie == flowInfoFromDb.reversePath.cookie.value }.instructions.applyActions.pushVxlan + } + + transitSwitchIds.each { sw -> + with(sw.rulesManager.getRules()) { rules -> + assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId + assert rules.find { it.cookie == flowInfoFromDb.reversePath.cookie.value }.match.tunnelId } } and: "No missing rules were found after rules validation" - involvedSwitches.each { switchId -> - 
verifyAll(northbound.validateSwitchRules(switchId)) { - properRules.findAll { !new Cookie(it).serviceFlag }.size() == rulesCountMap[switchId] + involvedSwitches.each { sw -> + verifyAll(sw.rulesManager.validateRules()) { + properRules.findAll { !new Cookie(it).serviceFlag }.size() == rulesCountMap[sw.switchId] missingRules.empty excessRules.empty } @@ -764,8 +723,8 @@ class FlowRulesSpec extends HealthCheckSpecification { return rules } - List getFlowRules(Switch sw) { - def defaultCookies = sw.defaultCookies - switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in defaultCookies) }.sort() + List getFlowRules(SwitchExtended sw) { + def defaultCookies = sw.collectDefaultCookies() + sw.rulesManager.getRules().findAll { !(it.cookie in defaultCookies) }.sort() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy index cd0b36257f0..123755378fd 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy @@ -3,7 +3,7 @@ package org.openkilda.functionaltests.spec.switches import static groovyx.gpars.GParsPool.withPool import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.helpers.SwitchHelper.randomVlan +import static org.openkilda.functionaltests.helpers.model.SwitchExtended.randomVlan import static org.openkilda.model.MeterId.LACP_REPLY_METER_ID import static org.openkilda.model.cookie.Cookie.DROP_SLOW_PROTOCOLS_LOOP_COOKIE import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID @@ -17,13 +17,13 @@ import org.openkilda.functionaltests.error.LagNotUpdatedExpectedError import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchExtended import org.openkilda.grpc.speaker.model.LogicalPortDto import org.openkilda.messaging.model.grpc.LogicalPortType import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.model.cookie.PortColourCookie import org.openkilda.northbound.dto.v2.switches.LagPortRequest -import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.grpc.GrpcService import org.openkilda.testing.service.traffexam.TraffExamService @@ -57,15 +57,16 @@ class LagPortSpec extends HealthCheckSpecification { def "Able to CRUD LAG port with lacp_reply=#lacpReply on #sw.hwSwString"() { given: "A switch" - def portsArrayCreate = topology.getAllowedPortsForSwitch(sw)[-2, -1] as Set - def portsArrayUpdate = topology.getAllowedPortsForSwitch(sw)[1, -1] as Set + def switchToInteract = switchFactory.get(sw) + def portsArrayCreate = switchToInteract.collectAllowedPorts()[-2, -1] as Set + def portsArrayUpdate = switchToInteract.collectAllowedPorts()[1, -1] as Set assert portsArrayCreate.sort() != portsArrayUpdate.sort() when: "Create a LAG" - def createResponse = switchHelper.createLagLogicalPort(sw.dpId, portsArrayCreate, lacpReply) + def createResponse = 
switchToInteract.lagManager.createLogicalPort(portsArrayCreate, lacpReply) then: "Response reports successful creation of the LAG port" - with(createResponse) { + verifyAll(createResponse) { logicalPortNumber > 0 portNumbers.sort() == portsArrayCreate.sort() it.lacpReply == lacpReply @@ -73,16 +74,16 @@ class LagPortSpec extends HealthCheckSpecification { def lagPort = createResponse.logicalPortNumber and: "LAG port is really created" - def getResponse = northboundV2.getLagLogicalPort(sw.dpId) + def getResponse = switchToInteract.lagManager.retrieveLogicalPorts() getResponse.size() == 1 - with(getResponse[0]) { + verifyAll(getResponse[0]) { logicalPortNumber == lagPort portNumbers.sort() == portsArrayCreate.sort() } and: "LAG port is really created on the switch(check GRPC)" - def swAddress = northbound.getSwitch(sw.dpId).address - with(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { + def swAddress = switchToInteract.retrieveDetails().address + verifyAll(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { logicalPortNumber == lagPort name == "novi_lport" + lagPort.toString() portNumbers.sort() == portsArrayCreate.sort() @@ -90,27 +91,27 @@ class LagPortSpec extends HealthCheckSpecification { } and: "Switch is valid" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !switchToInteract.validateAndCollectFoundDiscrepancies().isPresent() when: "Update the LAG port" def payloadUpdate = new LagPortRequest(portNumbers: portsArrayUpdate) - def updateResponse = northboundV2.updateLagLogicalPort(sw.dpId, lagPort, payloadUpdate) + def updateResponse = switchToInteract.lagManager.updateLogicalPort(lagPort, payloadUpdate) then: "Response reports successful updation of the LAG port" - with(updateResponse) { + verifyAll(updateResponse) { logicalPortNumber == lagPort portNumbers.sort() == portsArrayUpdate.sort() } and: "LAG port is really updated" - with(northboundV2.getLagLogicalPort(sw.dpId)) { + verifyAll(switchToInteract.lagManager.retrieveLogicalPorts()) { it.size() == 1 it[0].logicalPortNumber == lagPort it[0].portNumbers.sort() == portsArrayUpdate.sort() } and: "LAG port is really updated on the switch(check GRPC)" - with(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { + verifyAll(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { logicalPortNumber == lagPort name == "novi_lport" + lagPort.toString() portNumbers.sort() == portsArrayUpdate.sort() @@ -118,20 +119,19 @@ class LagPortSpec extends HealthCheckSpecification { } and: "Switch is valid" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !switchToInteract.validateAndCollectFoundDiscrepancies().isPresent() when: "Delete the LAG port" - def deleteResponse = northboundV2.deleteLagLogicalPort(sw.dpId, lagPort) + def deleteResponse = switchToInteract.lagManager.deleteLogicalPort(lagPort) then: "Response reports successful deletion of the LAG port" - with(deleteResponse) { + verifyAll(deleteResponse) { logicalPortNumber == lagPort portNumbers.sort() == portsArrayUpdate.sort() } and: "LAG port is really deleted from db" - northboundV2.getLagLogicalPort(sw.dpId).empty - def lagPortIsDeleted = true + switchToInteract.lagManager.retrieveLogicalPorts().empty and: "LAG port is really deleted from switch" !grpc.getSwitchLogicalPorts(swAddress).find { it.logicalPortNumber == lagPort } @@ -146,9 +146,10 @@ class LagPortSpec extends HealthCheckSpecification { def "Able to create a flow on a LAG port"() { given: "A switchPair with a LAG port on the src switch" def switchPair = 
switchPairs.all().withTraffgensOnBothEnds().random() - def traffgenSrcSwPort = switchPair.src.traffGens.switchPort[0] - def portsArray = (topology.getAllowedPortsForSwitch(switchPair.src)[-2, -1] << traffgenSrcSwPort).unique() - def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, portsArray as Set).logicalPortNumber + def srcSwitch = switchFactory.get(switchPair.src) + def traffgenSrcSwPort = srcSwitch.traffGens.switchPort.first() + def portsArray = (srcSwitch.collectAllowedPorts()[-2, -1] + [traffgenSrcSwPort]) as Set + def lagPort = srcSwitch.lagManager.createLogicalPort(portsArray).logicalPortNumber when: "Create a flow" def flow = flowFactory.getBuilder(switchPair) @@ -177,9 +178,10 @@ class LagPortSpec extends HealthCheckSpecification { and: "A flow on the LAG port" def swPair = switchPairs.singleSwitch() .withAtLeastNTraffgensOnSource(2).random() - def traffgenSrcSwPort = swPair.src.traffGens[0].switchPort - def traffgenDstSwPort = swPair.src.traffGens[1].switchPort - def lagPort = switchHelper.createLagLogicalPort(swPair.src.dpId, [traffgenSrcSwPort] as Set).logicalPortNumber + def swToInteract = switchFactory.get(swPair.src) + Integer traffgenSrcSwPort = swToInteract.traffGens.switchPort.first() + Integer traffgenDstSwPort = swToInteract.traffGens.switchPort.last() + def lagPort = swToInteract.lagManager.createLogicalPort([traffgenSrcSwPort] as Set).logicalPortNumber when: "Create a flow" def flow = flowFactory.getBuilder(swPair) @@ -206,34 +208,36 @@ class LagPortSpec extends HealthCheckSpecification { @Tags(SWITCH_RECOVER_ON_FAIL) def "LAG port is not deleted after switch reconnecting"() { given: "A switch with a LAG port" - def sw = topology.getActiveSwitches().first() - def portsArray = topology.getAllowedPortsForSwitch(sw)[-2, -1] - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def portsArray = swToInteract.collectAllowedPorts()[-2, -1] + def lagPort = swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Disconnect the switch" + def blockData = swToInteract.knockoutSwitch(RW) + and: "Connect the switch back" - def blockData = switchHelper.knockoutSwitch(sw, RW) - switchHelper.reviveSwitch(sw, blockData, true) + swToInteract.reviveSwitch(blockData, true) then: "The LAG port is still exist" - with(northboundV2.getLagLogicalPort(sw.dpId)[0]) { + with(swToInteract.lagManager.retrieveLogicalPorts()[0]) { logicalPortNumber == lagPort portNumbers.sort() == portsArray.sort() } and: "Switch is valid" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !swToInteract.validateAndCollectFoundDiscrepancies().isPresent() } def "Unable to delete a LAG port in case flow on it"() { given: "A flow on a LAG port" def switchPair = switchPairs.all().random() - def portsArray = topology.getAllowedPortsForSwitch(switchPair.src)[-2, -1] - def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, portsArray as Set).logicalPortNumber + def srcSw = switchFactory.get(switchPair.src) + def portsArray = srcSw.collectAllowedPorts()[-2, -1] + def lagPort = srcSw.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber def flow = flowFactory.getBuilder(switchPair).withSourcePort(lagPort).build().create() when: "When delete LAG port" - northboundV2.deleteLagLogicalPort(switchPair.src.dpId, lagPort) + srcSw.lagManager.deleteLogicalPort(lagPort) then: "Human readable error is returned" def exc 
= thrown(HttpClientErrorException) @@ -243,11 +247,11 @@ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) def "Unable to create LAG on a port with flow on it"() { given: "Active switch with flow on it" - def sw = topology.activeSwitches.first() - def flow = flowFactory.getRandom(sw, sw) + def swToInteract = switchFactory.get(topology.activeSwitches.first()) + def flow = flowFactory.getSingleSwRandom(swToInteract) when: "Create a LAG port with flow's port" - switchHelper.createLagLogicalPort(sw.dpId, [flow.source.portNumber] as Set) + swToInteract.lagManager.createLogicalPort([flow.source.portNumber] as Set) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -257,13 +261,13 @@ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) def "Unable to create a flow on port which is inside LAG group"() { given: "An active switch with LAG port on it" - def sw = topology.activeSwitches.first() - def portsArray = topology.getAllowedPortsForSwitch(sw)[-2, -1] + def swToInteract = switchFactory.get(topology.activeSwitches.first()) + def portsArray = swToInteract.collectAllowedPorts()[-2, -1] def flowSourcePort = portsArray[0] - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + def lagPort = swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Create flow on ports which are in inside LAG group" - flowFactory.getBuilder(sw, sw) + flowFactory.getBuilder(swToInteract, swToInteract) .withSourcePort(flowSourcePort) .withDestinationPort(portsArray[1]) .build().sendCreateRequest() @@ -271,7 +275,7 @@ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) new FlowNotCreatedExpectedError(~/Port $flowSourcePort \ -on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) +on switch $swToInteract.switchId is used as part of LAG port $lagPort/).matches(exc) } @@ -279,11 +283,12 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) given: "A flow with mirrorPoint" def swP = switchPairs.all().neighbouring().random() def flow = flowFactory.getRandom(swP, false) - def mirrorPort = topology.getAllowedPortsForSwitch(swP.src).last() + def swToInteract = switchFactory.get(swP.src) + def mirrorPort = swToInteract.collectAllowedPorts().last() def mirrorEndpoint = flow.createMirrorPoint(swP.src.dpId, mirrorPort, randomVlan()) when: "Create a LAG port with port which is used as mirrorPort" - switchHelper.createLagLogicalPort(swP.src.dpId, [mirrorPort] as Set) + swToInteract.lagManager.createLogicalPort([mirrorPort] as Set) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -294,9 +299,9 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) def "Unable to create a LAG port in case port is #data.description"() { when: "Create a LAG port on a occupied port" - def sw = topology.getActiveServer42Switches().first() - def occupiedPort = data.portNumber(sw) - switchHelper.createLagLogicalPort(sw.dpId, [occupiedPort] as Set) + def swToInteract = switchFactory.get(topology.getActiveServer42Switches().first()) + def occupiedPort = data.portNumber(swToInteract) + swToInteract.lagManager.createLogicalPort([occupiedPort] as Set) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -306,12 +311,12 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) data << 
[ [ description: "occupied by server42", - portNumber : { Switch s -> s.prop.server42Port }, + portNumber : { SwitchExtended sw -> sw.sw.prop.server42Port }, errorDescription: ~/Physical port number \d+ on switch .*? is server42 port./ ], [ description: "occupied by isl", - portNumber : { Switch s -> getTopology().getBusyPortsForSwitch(s)[0] }, + portNumber : { SwitchExtended sw -> sw.retrieveIslPorts()[0] }, errorDescription: ~/Physical port number \d+ intersects with existing ISLs/ ], [ @@ -321,7 +326,7 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) ], [ description: "not exist", - portNumber : { Switch s -> s.maxPort + 1 }, + portNumber : { SwitchExtended sw -> sw.sw.maxPort + 1 }, errorDescription: ~/Invalid portno value./ ] ] @@ -329,25 +334,24 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) def "Unable to create two LAG ports with the same physical port inside at the same time"() { given: "A switch with a LAG port" - def sw = topology.getActiveSwitches().first() - def availablePorts = topology.getAllowedPortsForSwitch(sw) + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def availablePorts = swToInteract.collectAllowedPorts() def portsArray = availablePorts[-2, -1] def conflictPortsArray = availablePorts[-3, -1] - def payload = new LagPortRequest(portNumbers: portsArray) - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Try to create the same LAG port with the same physical ports inside" - northboundV2.createLagLogicalPort(sw.dpId, new LagPortRequest(portNumbers: conflictPortsArray)) + swToInteract.lagManager.createLogicalPort(conflictPortsArray as Set) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - new LagNotCreatedExpectedError(~/Physical ports \[${portsArray[-1]}]\ on switch $sw.dpId already \ + new LagNotCreatedExpectedError(~/Physical ports \[${portsArray[-1]}]\ on switch $swToInteract.switchId already \ occupied by other LAG group\(s\)./).matches(exc) } def "Unable to proceed incorrect delete LAG port request (#data.description)"() { when: "Send invalid delete LAG port request" - getNorthboundV2().deleteLagLogicalPort(data.swIdForRequest(), data.logicalPortNumber) + northboundV2.deleteLagLogicalPort(data.swIdForRequest(), data.logicalPortNumber) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -372,73 +376,73 @@ occupied by other LAG group\(s\)./).matches(exc) def "System is able to detect and sync missed LAG port"() { given: "A switch with a LAG port" - def sw = topology.getActiveSwitches().first() - def portsArray = topology.getAllowedPortsForSwitch(sw)[-2,-1] - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def portsArray = swToInteract.collectAllowedPorts()[-2,-1] + def lagPort = swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Delete LAG port via grpc" - grpc.deleteSwitchLogicalPort(northbound.getSwitch(sw.dpId).address, lagPort) + grpc.deleteSwitchLogicalPort(swToInteract.retrieveDetails().address, lagPort) then: "System detects that LAG port is missed" - def lagPortMissingInfo = switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get().logicalPorts.missing + def lagPortMissingInfo = 
swToInteract.validate().logicalPorts.missing lagPortMissingInfo.size() == 1 - with (lagPortMissingInfo[0]) { + verifyAll(lagPortMissingInfo[0]) { type == LogicalPortType.LAG.toString() logicalPortNumber == lagPort physicalPorts.sort() == portsArray.sort() } when: "Synchronize the switch" - switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId) + swToInteract.synchronizeAndCollectFixedDiscrepancies() then: "LAG port is reinstalled" - !switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).isPresent() + !swToInteract.validateAndCollectFoundDiscrepancies().isPresent() } def "System is able to detect misconfigured LAG port"() { //system can't re-install misconfigured LAG port given: "A switch with a LAG port" - def sw = topology.getActiveSwitches().first() - def portsArray = topology.getAllowedPortsForSwitch(sw)[-3,-1] - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def portsArray = swToInteract.collectAllowedPorts()[-3,-1] + def lagPort = swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Modify LAG port via grpc(delete, create with incorrect ports)" - def swAddress = northbound.getSwitch(sw.dpId).address + def swAddress = swToInteract.retrieveDetails().address grpc.deleteSwitchLogicalPort(swAddress, lagPort) def request = new LogicalPortDto(LogicalPortType.LAG, [portsArray[0]], lagPort) grpc.createLogicalPort(swAddress, request) then: "System detects misconfigured LAG port" - !switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).get().logicalPorts.misconfigured.empty + !swToInteract.validate().logicalPorts.misconfigured.empty } def "Able to create/update LAG port with duplicated port numbers on the #sw.hwSwString switch"() { given: "Switch and two ports" - def sw = getTopology().getActiveSwitches().get(0) - def testPorts = topology.getAllowedPortsForSwitch(sw).take(2) + def swToInteract = switchFactory.get(getTopology().getActiveSwitches().first()) + def testPorts = swToInteract.collectAllowedPorts().take(2) assert testPorts.size > 1 when: "Create LAG port with duplicated port numbers" - def switchPortToCreate = testPorts.get(0) - def swAddress = northbound.getSwitch(sw.dpId).address + def switchPortToCreate = testPorts.first() + def swAddress = swToInteract.retrieveDetails().address def portListToCreate = [switchPortToCreate, switchPortToCreate] - def lagPortCreateResponse = switchHelper.createLagLogicalPort(sw.dpId, portListToCreate as Set) + def lagPortCreateResponse = swToInteract.lagManager.createLogicalPort(portListToCreate as Set) then: "Response shows that LAG port created successfully" - with(lagPortCreateResponse) { + verifyAll(lagPortCreateResponse) { logicalPortNumber > 0 portNumbers == [switchPortToCreate] } def lagPort = lagPortCreateResponse.logicalPortNumber and: "Request on user side shows that LAG port created" - with(northboundV2.getLagLogicalPort(sw.dpId)[0]) { + verifyAll(swToInteract.lagManager.retrieveLogicalPorts()[0]) { logicalPortNumber == lagPort portNumbers == [switchPortToCreate] } and: "Created port exists in a list of all LAG ports from switch side (GRPC)" - with(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { + verifyAll(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { logicalPortNumber == lagPort name == "novi_lport" + lagPort.toString() portNumbers == [switchPortToCreate] @@ -449,22 +453,22 @@ occupied by other LAG group\(s\)./).matches(exc) def switchPortToUpdate = 
testPorts.get(1) def portListToUpdate = [switchPortToUpdate, switchPortToUpdate] def updatePayload = new LagPortRequest(portNumbers: portListToUpdate) - def lagPortUpdateResponse = northboundV2.updateLagLogicalPort(sw.dpId, lagPort, updatePayload) + def lagPortUpdateResponse = swToInteract.lagManager.updateLogicalPort(lagPort, updatePayload) then: "Response shows that LAG port updated successfully" - with(lagPortUpdateResponse) { + verifyAll(lagPortUpdateResponse) { logicalPortNumber == lagPort portNumbers == [switchPortToUpdate] } and: "Check on user side that LAG port updated successfully" - with(northboundV2.getLagLogicalPort(sw.dpId)[0]) { + verifyAll(swToInteract.lagManager.retrieveLogicalPorts()[0]) { logicalPortNumber == lagPort portNumbers == [switchPortToUpdate] } and: "Check that LAG port updated successfully on switch side (via GRPC)" - with(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { + verifyAll(grpc.getSwitchLogicalPortConfig(swAddress, lagPort)) { logicalPortNumber == lagPort name == "novi_lport" + lagPort.toString() portNumbers == [switchPortToUpdate] @@ -474,15 +478,14 @@ occupied by other LAG group\(s\)./).matches(exc) def "Able to create and delete single LAG port with lacp_reply=#data.portLacpReply"() { given: "A switch" - def sw = topology.getActiveSwitches().first() - def portsArrayCreate = topology.getAllowedPortsForSwitch(sw)[-2, -1] as Set + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def portsArrayCreate = swToInteract.collectAllowedPorts()[-2, -1] as Set when: "Create a LAG port" - def createResponse = switchHelper.createLagLogicalPort( - sw.dpId, portsArrayCreate, data.portLacpReply) + def createResponse = swToInteract.lagManager.createLogicalPort(portsArrayCreate, data.portLacpReply) then: "Response reports successful creation of the LAG port" - with(createResponse) { + verifyAll(createResponse) { logicalPortNumber > 0 portNumbers.sort() == portsArrayCreate.sort() lacpReply == data.portLacpReply @@ -491,20 +494,20 @@ occupied by other LAG group\(s\)./).matches(exc) and: "Correct rules and meters are on the switch" assertSwitchHasCorrectLacpRulesAndMeters( - sw, data.mustContainCookies(portNumber), data.mustNotContainCookies(portNumber), data.mustContainLacpMeter) + swToInteract, data.mustContainCookies(portNumber), data.mustNotContainCookies(portNumber), data.mustContainLacpMeter) when: "Delete the LAG port" - def deleteResponse = northboundV2.deleteLagLogicalPort(sw.dpId, portNumber) + def deleteResponse = swToInteract.lagManager.deleteLogicalPort(portNumber) then: "Response reports successful delete of the LAG port" - with(deleteResponse) { + verifyAll(deleteResponse) { logicalPortNumber == portNumber portNumbers.sort() == portsArrayCreate.sort() lacpReply == data.portLacpReply } and: "No LACP rules and meters on the switch" - assertSwitchHasCorrectLacpRulesAndMeters(sw, [], [LACP_COOKIE, getLagCookie(portNumber)], false) + assertSwitchHasCorrectLacpRulesAndMeters(swToInteract, [], [LACP_COOKIE, getLagCookie(portNumber)], false) where: data << [ @@ -525,18 +528,17 @@ occupied by other LAG group\(s\)./).matches(exc) def "Able to create and delete LAG port with #data.description"() { given: "A switch with LAG port" - def sw = topology.getActiveSwitches().first() - def physicalPortsOfLag1 = topology.getAllowedPortsForSwitch(sw)[-2, -1] as Set - def physicalPortsOfLag2 = topology.getAllowedPortsForSwitch(sw)[-4, -3] as Set - def portNumber1 = switchHelper.createLagLogicalPort( - sw.dpId, physicalPortsOfLag1 as Set, 
data.existingPortLacpReply).logicalPortNumber + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def physicalPortsOfLag1 = swToInteract.collectAllowedPorts()[-2, -1] as Set + def physicalPortsOfLag2 = swToInteract.collectAllowedPorts()[-4, -3] as Set + def portNumber1 = swToInteract.lagManager + .createLogicalPort(physicalPortsOfLag1, data.existingPortLacpReply).logicalPortNumber when: "Create a LAG port" - def createResponse = northboundV2.createLagLogicalPort( - sw.dpId, new LagPortRequest(physicalPortsOfLag2, data.newPortLacpReply)) + def createResponse = swToInteract.lagManager.createLogicalPort(physicalPortsOfLag2, data.newPortLacpReply) then: "Response reports successful creation of the LAG port" - with(createResponse) { + verifyAll(createResponse) { logicalPortNumber > 0 portNumbers.sort() == physicalPortsOfLag2.sort() lacpReply == data.newPortLacpReply @@ -545,14 +547,14 @@ occupied by other LAG group\(s\)./).matches(exc) and: "Correct rules and meters are on the switch" assertSwitchHasCorrectLacpRulesAndMeters( - sw, data.mustContainCookies(portNumber1, portNumber2), + swToInteract, data.mustContainCookies(portNumber1, portNumber2), data.mustNotContainCookies(portNumber1, portNumber2), data.mustContainLacpMeter) when: "Delete created LAG port" - def deleteResponse = northboundV2.deleteLagLogicalPort(sw.dpId, portNumber2) + def deleteResponse = swToInteract.lagManager.deleteLogicalPort(portNumber2) then: "Response reports successful delete of the LAG port" - with(deleteResponse) { + verifyAll(deleteResponse) { logicalPortNumber == portNumber2 portNumbers.sort() == physicalPortsOfLag2.sort() lacpReply == data.newPortLacpReply @@ -560,10 +562,10 @@ occupied by other LAG group\(s\)./).matches(exc) and: "No LACP rules and meters of second LAG port on the switch" if (data.existingPortLacpReply) { // Switch must contain LACP rules and meter for first LAG port - assertSwitchHasCorrectLacpRulesAndMeters(sw, + assertSwitchHasCorrectLacpRulesAndMeters(swToInteract, [LACP_COOKIE, getLagCookie(portNumber1)], [getLagCookie(portNumber2)], true) } else { // Switch must not contain any LACP rules and meter - assertSwitchHasCorrectLacpRulesAndMeters(sw, + assertSwitchHasCorrectLacpRulesAndMeters(swToInteract, [], [LACP_COOKIE, getLagCookie(portNumber1), getLagCookie(portNumber2), ], false) } @@ -608,14 +610,13 @@ occupied by other LAG group\(s\)./).matches(exc) def "Able to update #data.description for single LAG port"() { given: "A switch" - def sw = topology.getActiveSwitches().first() - def physicalPortsOfCreatedLag = topology.getAllowedPortsForSwitch(sw)[-2, -1] as Set - def physicalPortsOfUpdatedLag = topology.getAllowedPortsForSwitch(sw)[-3, -2] as Set + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def physicalPortsOfCreatedLag = swToInteract.collectAllowedPorts()[-2, -1] as Set + def physicalPortsOfUpdatedLag = swToInteract.collectAllowedPorts()[-3, -2] as Set and: "A LAG port" - def createResponse = switchHelper.createLagLogicalPort( - sw.dpId, physicalPortsOfCreatedLag, data.oldlacpReply) - with(createResponse) { + def createResponse = swToInteract.lagManager.createLogicalPort(physicalPortsOfCreatedLag, data.oldlacpReply) + verifyAll(createResponse) { assert logicalPortNumber > 0 assert portNumbers.sort() == physicalPortsOfCreatedLag.sort() } @@ -623,11 +624,11 @@ occupied by other LAG group\(s\)./).matches(exc) when: "Update the LAG port" def updatedPhysicalPorts = data.updatePorts ? 
physicalPortsOfUpdatedLag : physicalPortsOfCreatedLag - def updateResponse = northboundV2.updateLagLogicalPort( - sw.dpId, portNumber, new LagPortRequest(updatedPhysicalPorts, data.newlacpReply)) + def updateResponse = swToInteract.lagManager + .updateLogicalPort(portNumber, new LagPortRequest(updatedPhysicalPorts, data.newlacpReply)) then: "Response reports successful update of the LAG port" - with(updateResponse) { + verifyAll(updateResponse) { logicalPortNumber == portNumber portNumbers.sort() == updatedPhysicalPorts.sort() lacpReply == data.newlacpReply @@ -635,7 +636,7 @@ occupied by other LAG group\(s\)./).matches(exc) and: "Correct rules and meters are on the switch" assertSwitchHasCorrectLacpRulesAndMeters( - sw, data.mustContainCookies(portNumber), data.mustNotContainCookies(portNumber), data.mustContainLacpMeter) + swToInteract, data.mustContainCookies(portNumber), data.mustNotContainCookies(portNumber), data.mustContainLacpMeter) where: data << [ @@ -698,19 +699,19 @@ occupied by other LAG group\(s\)./).matches(exc) def "Able to update #data.description near to existing LAG port with lacp_reply=#data.existingPortLacpReply"() { given: "A switch" - def sw = topology.getActiveSwitches().first() - def physicalPortsOfLag1 = topology.getAllowedPortsForSwitch(sw)[-2, -1] as Set - def physicalPortsOfCreatedLag2 = topology.getAllowedPortsForSwitch(sw)[-4, -3] as Set - def physicalPortsOfUpdatedLag2 = topology.getAllowedPortsForSwitch(sw)[-5, -4] as Set + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def physicalPortsOfLag1 = swToInteract.collectAllowedPorts()[-2, -1] as Set + def physicalPortsOfCreatedLag2 = swToInteract.collectAllowedPorts()[-4, -3] as Set + def physicalPortsOfUpdatedLag2 = swToInteract.collectAllowedPorts()[-5, -4] as Set and: "LAG port 1" - def portNumber1 = switchHelper.createLagLogicalPort( - sw.dpId, physicalPortsOfLag1, data.existingPortLacpReply).logicalPortNumber + def portNumber1 = swToInteract.lagManager + .createLogicalPort(physicalPortsOfLag1, data.existingPortLacpReply).logicalPortNumber and: "LAG port 2" - def createResponse = northboundV2.createLagLogicalPort( - sw.dpId, new LagPortRequest(physicalPortsOfCreatedLag2, data.oldlacpReply)) - with(createResponse) { + def createResponse = swToInteract.lagManager + .createLogicalPort(physicalPortsOfCreatedLag2, data.oldlacpReply) + verifyAll(createResponse) { assert logicalPortNumber > 0 assert portNumbers.sort() == physicalPortsOfCreatedLag2.sort() assert lacpReply == data.oldlacpReply @@ -719,11 +720,11 @@ occupied by other LAG group\(s\)./).matches(exc) when: "Update the LAG port" def updatedPhysicalPorts = data.updatePorts ? 
physicalPortsOfUpdatedLag2 : physicalPortsOfCreatedLag2 - def updateResponse = northboundV2.updateLagLogicalPort( - sw.dpId, portNumber2, new LagPortRequest(updatedPhysicalPorts, data.newlacpReply)) + def updateResponse = swToInteract.lagManager + .updateLogicalPort(portNumber2, new LagPortRequest(updatedPhysicalPorts, data.newlacpReply)) then: "Response reports successful update of the LAG port" - with(updateResponse) { + verifyAll(updateResponse) { logicalPortNumber == portNumber2 portNumbers.sort() == updatedPhysicalPorts.sort() lacpReply == data.newlacpReply @@ -731,7 +732,7 @@ occupied by other LAG group\(s\)./).matches(exc) and: "Correct rules and meters are on the switch" assertSwitchHasCorrectLacpRulesAndMeters( - sw, data.mustContainCookies(portNumber1, portNumber2), + swToInteract, data.mustContainCookies(portNumber1, portNumber2), data.mustNotContainCookies(portNumber1, portNumber2), data.mustContainLacpMeter) where: @@ -819,47 +820,30 @@ occupied by other LAG group\(s\)./).matches(exc) ] } - private void assertSwitchHasCorrectLacpRulesAndMeters( - Switch sw, mustContainCookies, mustNotContainsCookies, mustContainLacpMeter) { - // validate switch - !switchHelper.validateAndCollectFoundDiscrepancies(sw.dpId).isPresent() - - // check cookies - def hexCookies = northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.collect { Cookie.toString(it) } - assert hexCookies.containsAll(mustContainCookies) - assert hexCookies.intersect(mustNotContainsCookies).isEmpty() - - // check meters - def meters = northbound.getAllMeters(sw.dpId).meterEntries*.meterId - if (mustContainLacpMeter) { - assert LACP_REPLY_METER_ID.value in meters - } else { - assert LACP_REPLY_METER_ID.value !in meters - } - } - def "Unable decrease bandwidth on LAG port lower than connected flows bandwidth sum"() { given: "Flows on a LAG port with switch ports" def switchPair = switchPairs.all().random() - def testPorts = topology.getAllowedPortsForSwitch(switchPair.src).takeRight(2).sort() + def swToInteract = switchFactory.get(switchPair.src) + def testPorts = swToInteract.collectAllowedPorts().takeRight(2).sort() assert testPorts.size > 1 - def maximumBandwidth = testPorts.sum { northbound.getPort(switchPair.src.dpId, it).currentSpeed } - def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, testPorts as Set).logicalPortNumber - def flow = flowFactory.getBuilder(switchPair) + def maximumBandwidth = testPorts.sum { swToInteract.retrievePort(it).retrieveDetails().currentSpeed } + def lagPort = swToInteract.lagManager.createLogicalPort(testPorts as Set).logicalPortNumber + + flowFactory.getBuilder(switchPair) .withSourcePort(lagPort) .withBandwidth(maximumBandwidth as Long) .build().create() when: "Decrease LAG port bandwidth by deleting one port to make it lower than connected flows bandwidth sum" def updatePayload = new LagPortRequest(portNumbers: [testPorts.get(0)]) - northboundV2.updateLagLogicalPort(switchPair.src.dpId, lagPort, updatePayload) + swToInteract.lagManager.updateLogicalPort(lagPort, updatePayload) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) new LagNotUpdatedExpectedError( - switchPair.getSrc().getDpId(), lagPort, ~/Not enough bandwidth for LAG port $lagPort./).matches(exc) + swToInteract.switchId, lagPort, ~/Not enough bandwidth for LAG port $lagPort./).matches(exc) then: "No bandwidth changed for LAG port and all connected ports are in place" - with(northboundV2.getLagLogicalPort(switchPair.src.dpId)[0]) { + 
verifyAll(swToInteract.lagManager.retrieveLogicalPorts()[0]) { logicalPortNumber == lagPort portNumbers == testPorts } @@ -867,17 +851,16 @@ occupied by other LAG group\(s\)./).matches(exc) def "Able to delete LAG port if it is already removed from switch"() { given: "A switch with a LAG port" - def sw = topology.getActiveSwitches().first() - def portsArray = topology.getAllowedPortsForSwitch(sw)[-2,-1] - def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber + def swToInteract = switchFactory.get(topology.getActiveSwitches().first()) + def portsArray = swToInteract.collectAllowedPorts()[-2,-1] + def lagPort = swToInteract.lagManager.createLogicalPort(portsArray as Set).logicalPortNumber when: "Delete LAG port via grpc" - grpc.deleteSwitchLogicalPort(northbound.getSwitch(sw.dpId).address, lagPort) + grpc.deleteSwitchLogicalPort(swToInteract.retrieveDetails().address, lagPort) then: "Able to delete LAG port from switch with no exception" - def deleteResponse = northboundV2.deleteLagLogicalPort(sw.dpId, lagPort) - - with(deleteResponse) { + def deleteResponse = swToInteract.lagManager.deleteLogicalPort(lagPort) + verifyAll(deleteResponse) { logicalPortNumber == lagPort portNumbers.sort() == portsArray.sort() } @@ -886,4 +869,25 @@ occupied by other LAG group\(s\)./).matches(exc) def getLagCookie(portNumber) { new PortColourCookie(CookieType.LACP_REPLY_INPUT, portNumber).toString() } + + private void assertSwitchHasCorrectLacpRulesAndMeters(SwitchExtended sw, + mustContainCookies, + mustNotContainsCookies, + mustContainLacpMeter) { + // validate switch + assert !sw.validateAndCollectFoundDiscrepancies().isPresent() + + // check cookies + def hexCookies = sw.rulesManager.getRules().cookie.collect { Cookie.toString(it) } + assert hexCookies.containsAll(mustContainCookies) + assert hexCookies.intersect(mustNotContainsCookies).isEmpty() + + // check meters + def meters = sw.metersManager.getMeters().meterId + if (mustContainLacpMeter) { + assert LACP_REPLY_METER_ID.value in meters + } else { + assert LACP_REPLY_METER_ID.value !in meters + } + } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy index a3b655cee85..c501cbb10ad 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy @@ -27,9 +27,8 @@ import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.FlowRuleEntity -import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.helpers.model.SwitchExtended import org.openkilda.messaging.info.meter.MeterEntry -import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType @@ -61,10 +60,6 @@ class MetersSpec extends HealthCheckSpecification { @Shared FlowFactory flowFactory - @Autowired - @Shared - SwitchRulesFactory switchRulesFactory - @Value('${burst.coefficient}') double burstCoefficient @@ -78,26 +73,26 @@ class MetersSpec extends 
HealthCheckSpecification { assumeTrue(switches as boolean, "Unable to find required switches in topology") setup: "Select a #switchType switch and retrieve default meters" - def sw = switches.first() - def defaultMeters = northbound.getAllMeters(sw.dpId) + def swToInteract = switchFactory.get(switches.first()) + def defaultMeters = swToInteract.metersManager.getMeters() when: "A flow is created and its meter is deleted" - def flow = flowFactory.getRandom(sw, sw) - def meterToDelete = northbound.getAllMeters(sw.dpId).meterEntries.find { - !defaultMeters.meterEntries*.meterId.contains(it.meterId) + def flow = flowFactory.getSingleSwRandom(swToInteract) + def meterToDelete = swToInteract.metersManager.getMeters().find { + !defaultMeters.meterId.contains(it.meterId) }.meterId - def deleteResult = northbound.deleteMeter(sw.dpId, meterToDelete) + def deleteResult = swToInteract.metersManager.delete(meterToDelete) then: "Delete operation should be successful" deleteResult.deleted - !northbound.getAllMeters(sw.dpId).meterEntries.find { it.meterId == meterToDelete } + !swToInteract.metersManager.getMeters().find { it.meterId == meterToDelete } when: "Delete the flow" flow.delete() then: "No excessive meters are installed on the switch" Wrappers.wait(WAIT_OFFSET) { - assert defaultMeters.meterEntries.sort() == northbound.getAllMeters(sw.dpId).meterEntries.sort() + assert defaultMeters.sort() == swToInteract.metersManager.getMeters().sort() } where: @@ -140,16 +135,17 @@ class MetersSpec extends HealthCheckSpecification { @Tags([HARDWARE, SMOKE_SWITCHES]) def "Default meters should express bandwidth in kbps re-calculated from pktps on Centec #sw.hwSwString"() { expect: "Only the default meters should be present on the switch" - def meters = northbound.getAllMeters(sw.dpId) - assert meters.meterEntries.size() == 2 - assert meters.meterEntries.each { + def swToInteract = switchFactory.get(sw) + def meters = swToInteract.metersManager.getMeters() + assert meters.size() == 2 + assert meters.each { assert it.rate == Math.max((long) (DISCO_PKT_RATE * DISCO_PKT_SIZE * 8 / 1024L), MIN_RATE_KBPS) } //unable to use #getExpectedBurst. 
For Centec switches there's a special burst due to KBPS
-        assert meters.meterEntries.every { it.burstSize == (long) ((DISCO_PKT_BURST * DISCO_PKT_SIZE * 8) / 1024) }
-        assert meters.meterEntries.every(defaultMeters)
-        assert meters.meterEntries.every { ["KBPS", "BURST", "STATS"].containsAll(it.flags) }
-        assert meters.meterEntries.every { it.flags.size() == 3 }
+        assert meters.every { it.burstSize == (long) ((DISCO_PKT_BURST * DISCO_PKT_SIZE * 8) / 1024) }
+        assert meters.every(defaultMeters)
+        assert meters.every { ["KBPS", "BURST", "STATS"].containsAll(it.flags) }
+        assert meters.every { it.flags.size() == 3 }
         where:
         sw << (getCentecSwitches().unique { it.description }
@@ -161,11 +157,12 @@ class MetersSpec extends HealthCheckSpecification {
         //TODO: Research how to calculate burstSize on OpenVSwitch in this case
         // now burstSize is equal to 4096, rate == 200
         expect: "Only the default meters should be present on the switch"
-        def meters = northbound.getAllMeters(sw.dpId)
-        meters.meterEntries*.meterId.sort() == sw.defaultMeters.sort()
-        meters.meterEntries.each { assert it.burstSize == switchHelper.getExpectedBurst(sw.dpId, it.rate) }
-        meters.meterEntries.each { assert ["PKTPS", "BURST", "STATS"].containsAll(it.flags) }
-        meters.meterEntries.each { assert it.flags.size() == 3 }
+        def swToInteract = switchFactory.get(sw)
+        def meters = swToInteract.metersManager.getMeters()
+        meters.meterId.sort() == swToInteract.collectDefaultMeters().sort()
+        meters.each { assert it.burstSize == swToInteract.retrieveExpectedBurst(it.rate) }
+        meters.each { assert ["PKTPS", "BURST", "STATS"].containsAll(it.flags) }
+        meters.each { assert it.flags.size() == 3 }
         where:
         sw << (getNoviflowSwitches().unique { it.nbFormat().hardware + it.nbFormat().software }
@@ -175,8 +172,9 @@ class MetersSpec extends HealthCheckSpecification {
     @Tags([HARDWARE, SMOKE_SWITCHES])
     def "Default meters should express bandwidth in kbps on Noviflow Wb5164 #sw.hwSwString"() {
         expect: "Only the default meters should be present on the switch"
-        def meters = northbound.getAllMeters(sw.dpId)
-        meters.meterEntries*.meterId.sort() == sw.defaultMeters.sort()
+        def swToInteract = switchFactory.get(sw)
+        def meters = swToInteract.metersManager.getMeters()
+        meters.meterId.sort() == swToInteract.collectDefaultMeters().sort()
         /* burstSize doesn't depend on rate on WB switches, it should be calculated by formula
         burstSize * packet_size * 8 / 1024, where burstSize - 4096,
         packet_size: lldp - 300, arp - 100, unicast/multicast - 250 */
@@ -187,7 +185,7 @@ class MetersSpec extends HealthCheckSpecification {
                          createMeterIdForDefaultRule(LLDP_POST_INGRESS_VXLAN_COOKIE).getValue(),
                          createMeterIdForDefaultRule(LLDP_POST_INGRESS_ONE_SWITCH_COOKIE).getValue()] //16, 17, 18
-        meters.meterEntries.each { meter ->
+        meters.each { meter ->
             if (meter.meterId in arpMeters) {
                 verifyBurstSizeOnWb5164(meter.burstSize,
                         Math.max((long) (DISCO_PKT_BURST * 100 * 8 / 1024L), MIN_RATE_KBPS))
@@ -199,8 +197,8 @@ class MetersSpec extends HealthCheckSpecification {
                         Math.max((long) (DISCO_PKT_BURST * 250 * 8 / 1024L), MIN_RATE_KBPS))
             }
         }
-        meters.meterEntries.each { assert ["KBPS", "BURST", "STATS"].containsAll(it.flags) }
-        meters.meterEntries.each { assert it.flags.size() == 3 }
+        meters.each { assert ["KBPS", "BURST", "STATS"].containsAll(it.flags) }
+        meters.each { assert it.flags.size() == 3 }
         where:
         sw << (getNoviflowWb5164().unique { it.description } ?:
@@ -214,20 +212,20 @@ on a #switchType switch"() {
         assumeTrue(switches as boolean, "Unable to find required switches in topology")
topology") given: "A #switchType switch with OpenFlow 1.3 support" - def sw = switches.first() + def swToInteract = switchFactory.get(switches.first()) when: "Get default meters from the switch" - def defaultMeters = northbound.getAllMeters(sw.dpId) + def defaultMeters = swToInteract.metersManager.getMeters() assert defaultMeters and: "Create a single-switch flow" - def flow = flowFactory.getBuilder(sw, sw) + def flow = flowFactory.getSingleSwBuilder(swToInteract) .withIgnoreBandwidth(ignoreBandwidth).build() .create() then: "New meters should appear after flow setup" - def newMeters = northbound.getAllMeters(sw.dpId) - def newMeterEntries = newMeters.meterEntries.findAll { !defaultMeters.meterEntries.contains(it) } + def newMeters = swToInteract.metersManager.getMeters() + def newMeterEntries = newMeters.findAll { !defaultMeters.contains(it) } newMeterEntries.size() == 2 and: "All new meters should have KBPS, BURST and STATS flags installed" @@ -237,7 +235,7 @@ on a #switchType switch"() { newMeterEntries*.rate.each { verifyRateSizeOnWb5164(it, flow.maximumBandwidth) } and: "Switch validation shows no discrepancies in meters" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !swToInteract.synchronizeAndCollectFixedDiscrepancies().isPresent() and: "Flow validation shows no discrepancies in meters" flow.validateAndCollectDiscrepancies().isEmpty() @@ -247,9 +245,9 @@ on a #switchType switch"() { then: "New meters should disappear from the switch" Wrappers.wait(WAIT_OFFSET) { - def newestMeters = northbound.getAllMeters(sw.dpId) - newestMeters.meterEntries.containsAll(defaultMeters.meterEntries) - newestMeters.meterEntries.size() == defaultMeters.meterEntries.size() + def newestMeters = swToInteract.metersManager.getMeters() + newestMeters.containsAll(defaultMeters) + newestMeters.size() == defaultMeters.size() } where: @@ -270,21 +268,21 @@ on a #switchType switch"() { assumeTrue(switches as boolean, "Unable to find required switches in topology") given: "A #switchType switch with OpenFlow 1.3 support" - def sw = switches.first() + def swToInteract = switchFactory.get(switches.first()) when: "Get default meters from the switch" - def defaultMeters = northbound.getAllMeters(sw.dpId) + def defaultMeters = swToInteract.metersManager.getMeters() assert defaultMeters and: "Create a single-switch flow with maximum_bandwidth=0" - flowFactory.getBuilder(sw, sw) + flowFactory.getSingleSwBuilder(swToInteract) .withBandwidth(0) .withIgnoreBandwidth(true).build() .create() then: "Ony default meters should be present on the switch and new meters should not appear after flow setup" - def newMeters = northbound.getAllMeters(sw.dpId) - def newMeterEntries = newMeters.meterEntries.findAll { !defaultMeters.meterEntries.contains(it) } + def newMeters = swToInteract.metersManager.getMeters() + def newMeterEntries = newMeters.findAll { !defaultMeters.contains(it) } newMeterEntries.empty where: @@ -306,14 +304,16 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def flow = flowFactory.getRandom(switchPair) then: "The source and destination switches have only one meter in the flow's ingress rule" - def srcSwFlowMeters = northbound.getAllMeters(flow.source.switchId).meterEntries.findAll(flowMeters) - def dstSwFlowMeters = northbound.getAllMeters(flow.destination.switchId).meterEntries.findAll(flowMeters) + def srcSwToInteract = switchFactory.get(switchPair.src) + def dstSwToInteract = switchFactory.get(switchPair.dst) + def srcSwFlowMeters = 
srcSwToInteract.metersManager.getMeters().findAll(flowMeters) + def dstSwFlowMeters = dstSwToInteract.metersManager.getMeters().findAll(flowMeters) srcSwFlowMeters.size() == 1 dstSwFlowMeters.size() == 1 - def srcSwitchRules = switchRulesFactory.get(flow.source.switchId).getRules().findAll { !Cookie.isDefaultRule(it.cookie) } - def dstSwitchRules = switchRulesFactory.get(flow.destination.switchId).getRules().findAll { !Cookie.isDefaultRule(it.cookie) } + def srcSwitchRules = srcSwToInteract.rulesManager.getRules().findAll { !Cookie.isDefaultRule(it.cookie) } + def dstSwitchRules = dstSwToInteract.rulesManager.getRules().findAll { !Cookie.isDefaultRule(it.cookie) } def srcSwIngressFlowRules = srcSwitchRules.findAll { it.match.inPort == flow.source.portNumber.toString() } assert srcSwIngressFlowRules.size() == 2 //shared + simple ingress @@ -351,8 +351,10 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List flowInvolvedSwitches[1..-2].findAll { it.ofVersion != "OF_12" }.each { sw -> - assert northbound.getAllMeters(sw.dpId).meterEntries.findAll(flowMeters).empty - def flowRules = switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in sw.defaultCookies) } + def swToInteract = switchFactory.get(sw) + assert swToInteract.metersManager.getMeters().findAll(flowMeters).empty + def defaultCookies = swToInteract.collectDefaultCookies() + def flowRules = swToInteract.rulesManager.getRules().findAll { !(it.cookie in defaultCookies) } flowRules.each { assert !it.instructions.goToMeter } } @@ -372,9 +374,9 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def switches = data.switches assumeTrue(switches as boolean, "Unable to find required switches in topology") - def sw = switches.first() - def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = flowFactory.getBuilder(sw, sw) + def swToInteract = switchFactory.get(switches.first()) + def defaultMeters = swToInteract.metersManager.getMeters() + def flow = flowFactory.getSingleSwBuilder(swToInteract) .withBandwidth(100).build() .create() @@ -382,19 +384,17 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { flow.update(flow.tap { it.maximumBandwidth = flowRate as Long }) then: "New meters should be installed on the switch" - def newMeters = northbound.getAllMeters(sw.dpId).meterEntries.findAll { - !defaultMeters.meterEntries.contains(it) - } + def newMeters = swToInteract.metersManager.getMeters().findAll { !defaultMeters.contains(it) } assert newMeters.size() == 2 and: "New meters rate should be equal to flow bandwidth" newMeters*.rate.each { assert it == flowRate } and: "New meters burst size matches the expected value for given switch model" - newMeters*.burstSize.each { assert it == switchHelper.getExpectedBurst(sw.dpId, flowRate) } + newMeters*.burstSize.each { assert it == swToInteract.retrieveExpectedBurst(flowRate)} and: "Switch validation shows no discrepancies in meters" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !swToInteract.synchronizeAndCollectFixedDiscrepancies().isPresent() and: "Flow validation shows no discrepancies in meters" flow.validateAndCollectDiscrepancies().isEmpty() @@ -418,21 +418,21 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def switches = getCentecSwitches() assumeTrue(switches as boolean, "Unable to find required switches in topology") - def sw = switches.first() - def expectedBurstSize = switchHelper.getExpectedBurst(sw.dpId, 
flowRate) - def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = flowFactory.getBuilder(sw, sw) + def swToInteract = switchFactory.get(switches.first()) + def expectedBurstSize = swToInteract.retrieveExpectedBurst(flowRate) + def defaultMeters = swToInteract.metersManager.getMeters() + def flow = flowFactory.getSingleSwBuilder(swToInteract) .withBandwidth(100).build() .create() when: "Update flow bandwidth to #flowRate kbps" - flow.update(flow.tap{ it.maximumBandwidth = flowRate}) + flow.update(flow.tap{ it.maximumBandwidth = flowRate }) then: "Meters with updated rate should be installed on the switch" def newMeters = null Wrappers.wait(Constants.RULES_DELETION_TIME + Constants.RULES_INSTALLATION_TIME) { - newMeters = northbound.getAllMeters(sw.dpId).meterEntries.findAll { - !defaultMeters.meterEntries.contains(it) + newMeters = swToInteract.metersManager.getMeters().findAll { + !defaultMeters.contains(it) } assert newMeters.size() == 2 assert newMeters*.rate.every { it == flowRate } @@ -442,7 +442,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { newMeters*.burstSize.every { it == expectedBurstSize } and: "Switch validation shows no discrepancies in meters" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !swToInteract.synchronizeAndCollectFixedDiscrepancies().isPresent() and: "Flow validation shows no discrepancies in meters" flow.validateAndCollectDiscrepancies().isEmpty() @@ -465,9 +465,9 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def switches = getNoviflowWb5164() assumeTrue(switches as boolean, "Unable to find required switches in topology") - def sw = switches.first() - def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = flowFactory.getBuilder(sw, sw) + def swToInteract = switchFactory.get(switches.first()) + def defaultMeters = swToInteract.metersManager.getMeters() + def flow = flowFactory.getSingleSwBuilder(swToInteract) .withBandwidth(100).build() .create() @@ -475,8 +475,8 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { flow.update(flow.tap { it.maximumBandwidth = flowRate }) then: "New meters should be installed on the switch" - def newMeters = northbound.getAllMeters(sw.dpId).meterEntries.findAll { - !defaultMeters.meterEntries.contains(it) + def newMeters = swToInteract.metersManager.getMeters().findAll { + !defaultMeters.contains(it) } assert newMeters.size() == 2 @@ -488,12 +488,12 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { and: "New meters burst size matches the expected value for given switch model" newMeters.each { meter -> Long actualBurstSize = meter.burstSize - Long expectedBurstSize = switchHelper.getExpectedBurst(sw.dpId, flowRate) + Long expectedBurstSize = swToInteract.retrieveExpectedBurst(flowRate) verifyBurstSizeOnWb5164(expectedBurstSize, actualBurstSize) } and: "Switch validation shows no discrepancies in meters" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() + !swToInteract.synchronizeAndCollectFixedDiscrepancies().isPresent() and: "Flow validation shows no discrepancies in meters" flow.validateAndCollectDiscrepancies().isEmpty() @@ -520,11 +520,13 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { flow.updateFlowBandwidthInDB(newBandwidth) //at this point existing meters do not correspond with the flow //now save some original data for further comparison before resetting meters - Map> originalRules = [src.dpId, dst.dpId].collectEntries { - [(it): 
switchRulesFactory.get(it).getRules()] + def srcToInteract = switchFactory.get(src) + def dstToInteract = switchFactory.get(dst) + Map> originalRules = [srcToInteract, dstToInteract].collectEntries { + [(it.switchId): it.rulesManager.getRules()] } - Map> originalMeters = [src.dpId, dst.dpId].collectEntries { - [(it): northbound.getAllMeters(it).meterEntries] + Map> originalMeters = [srcToInteract, dstToInteract].collectEntries { + [(it.switchId): it.metersManager.getMeters()] } when: "Ask system to reset meters for the flow" @@ -534,14 +536,15 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { [response.srcMeter, response.dstMeter].each { switchMeterEntries -> def originalFlowMeters = originalMeters[switchMeterEntries.switchId].findAll(flowMeters) switchMeterEntries.meterEntries.each { meterEntry -> - if (northbound.getSwitch(switchMeterEntries.switchId).hardware =~ "WB5164") { + def sw = srcToInteract.switchId == switchMeterEntries.switchId ? srcToInteract : dstToInteract + if (sw.isWb5164()) { verifyRateSizeOnWb5164(newBandwidth, meterEntry.rate) - Long expectedBurstSize = switchHelper.getExpectedBurst(switchMeterEntries.switchId, newBandwidth) + Long expectedBurstSize = sw.retrieveExpectedBurst(newBandwidth) Long actualBurstSize = meterEntry.burstSize verifyBurstSizeOnWb5164(expectedBurstSize, actualBurstSize) } else { assert meterEntry.rate == newBandwidth - assert meterEntry.burstSize == switchHelper.getExpectedBurst(switchMeterEntries.switchId, newBandwidth) + assert meterEntry.burstSize == sw.retrieveExpectedBurst(newBandwidth) } } assert switchMeterEntries.meterEntries*.meterId.sort() == originalFlowMeters*.meterId.sort() @@ -556,15 +559,15 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { // expect dstFlowMeters, sameBeanAs(response.dstMeter.meterEntries).ignoring("timestamp") and: "Default meters are unchanged" - [src.dpId, dst.dpId].each { SwitchId swId -> - def actualDefaultMeters = northbound.getAllMeters(swId).meterEntries.findAll(defaultMeters) - assertThat(actualDefaultMeters).containsExactlyInAnyOrder(*originalMeters[swId].findAll(defaultMeters)) + [srcToInteract, dstToInteract].each { SwitchExtended sw -> + def actualDefaultMeters = sw.metersManager.getMeters().findAll(defaultMeters) + assertThat(actualDefaultMeters).containsExactlyInAnyOrder(*originalMeters[sw.switchId].findAll(defaultMeters)) } and: "Switch rules are unchanged" - [src.dpId, dst.dpId].each { SwitchId swId -> - def actualRules = switchRulesFactory.get(swId).getRules() - assertThat(actualRules).containsExactlyInAnyOrder(*originalRules[swId]) + [srcToInteract, dstToInteract].each { SwitchExtended sw -> + def actualRules = sw.rulesManager.getRules() + assertThat(actualRules).containsExactlyInAnyOrder(*originalRules[sw.switchId]) } where: @@ -633,7 +636,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { topology.getActiveSwitches().findAll { it.virtual } } - List filterRules(List rules, inPort, inVlan, outPort) { + List filterRules(List rules, inPort, inVlan, outPort) { if (inPort) { rules = rules.findAll { it.match.inPort == inPort.toString() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy index e0ccb6eebe8..d33f8c8cddb 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy +++ 
b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy @@ -5,7 +5,6 @@ import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW @@ -16,7 +15,6 @@ import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.FlowActionType -import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.command.switches.InstallRulesAction @@ -33,9 +31,7 @@ class SwitchesSpec extends HealthCheckSpecification { @Shared SwitchNotFoundExpectedError switchNotFoundExpectedError = new SwitchNotFoundExpectedError( "Switch $NON_EXISTENT_SWITCH_ID not found", ~/Switch $NON_EXISTENT_SWITCH_ID not found/) - @Autowired - @Shared - CleanupManager cleanupManager + @Autowired @Shared FlowFactory flowFactory @@ -48,11 +44,11 @@ class SwitchesSpec extends HealthCheckSpecification { @Tags([SMOKE, SMOKE_SWITCHES]) def "System is able to return a certain switch info by its id"() { when: "Request info about certain switch from Northbound" - def sw = topology.activeSwitches[0] - def response = northbound.getSwitch(sw.dpId) + def sw = switchFactory.get(topology.activeSwitches[0]) + def response = sw.retrieveDetails() then: "Switch information is returned" - response.switchId == sw.dpId + response.switchId == sw.switchId !response.hostname.empty !response.address.empty !response.description.empty @@ -76,7 +72,8 @@ class SwitchesSpec extends HealthCheckSpecification { @Tags(ISL_RECOVER_ON_FAIL) def "Systems allows to get a flow that goes through a switch"() { given: "Two active not neighboring switches with two diverse paths at least" - def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() + def switchPair = switchPairs.all().nonNeighbouring() + .withAtLeastNNonOverlappingPaths(2).random() and: "A protected flow" def protectedFlow = flowFactory.getBuilder(switchPair) @@ -84,7 +81,8 @@ class SwitchesSpec extends HealthCheckSpecification { .build().create() and: "A single switch flow" - def allowedPorts = topology.getAllowedPortsForSwitch(switchPair.src).findAll { + def srcSwitch = switchFactory.get(switchPair.src) + def allowedPorts = srcSwitch.collectAllowedPorts().findAll { it != protectedFlow.source.portNumber } def r = new Random() @@ -100,40 +98,40 @@ class SwitchesSpec extends HealthCheckSpecification { def involvedSwitchIds = flowPathInfo.getInvolvedSwitches() then: "The created flows are in the response list from the src switch" - def switchFlowsResponseSrcSwitch = northbound.getSwitchFlows(switchPair.src.dpId) + def switchFlowsResponseSrcSwitch = srcSwitch.retrieveFlows() switchFlowsResponseSrcSwitch*.id.sort() == [protectedFlow.flowId, 
singleFlow.flowId].sort() and: "Only the protectedFlow is in the response list from the involved switch(except the src switch)" involvedSwitchIds.findAll { it != switchPair.src.dpId }.each { switchId -> - def getSwitchFlowsResponse = northbound.getSwitchFlows(switchId) + def getSwitchFlowsResponse = switchFactory.get(switchId).retrieveFlows() assert getSwitchFlowsResponse.size() == 1 assert getSwitchFlowsResponse[0].id == protectedFlow.flowId } when: "Get all flows going through the src switch based on the port of the main path" - def getSwitchFlowsResponse1 = northbound.getSwitchFlows(switchPair.src.dpId, mainPath[0].portNo) + def getSwitchFlowsResponse1 = srcSwitch.retrieveFlows(mainPath[0].portNo) then: "Only the protected flow is in the response list" getSwitchFlowsResponse1.size() == 1 getSwitchFlowsResponse1[0].id == protectedFlow.flowId when: "Get all flows going through the src switch based on the port of the protected path" - def getSwitchFlowsResponse2 = northbound.getSwitchFlows(switchPair.src.dpId, protectedPath[0].portNo) + def getSwitchFlowsResponse2 = srcSwitch.retrieveFlows(protectedPath[0].portNo) then: "Only the protected flow is in the response list" getSwitchFlowsResponse2.size() == 1 getSwitchFlowsResponse2[0].id == protectedFlow.flowId when: "Get all flows going through the src switch based on the dstPort of the single switch flow" - def getSwitchFlowsResponse3 = northbound.getSwitchFlows(switchPair.src.dpId, singleFlow.destination.portNumber) + def getSwitchFlowsResponse3 = srcSwitch.retrieveFlows(singleFlow.destination.portNumber) then: "Only the single switch flow is in the response list" getSwitchFlowsResponse3.size() == 1 getSwitchFlowsResponse3[0].id == singleFlow.flowId when: "Get all flows going through the dst switch based on the dstPort of the protected flow" - def getSwitchFlowsResponse4 = northbound.getSwitchFlows(switchPair.dst.dpId, - protectedFlow.destination.portNumber) + def dstSwitch = switchFactory.get(switchPair.dst) + def getSwitchFlowsResponse4 = dstSwitch.retrieveFlows(protectedFlow.destination.portNumber) then: "Only the protected flow is in the response list" getSwitchFlowsResponse4.size() == 1 @@ -146,15 +144,14 @@ class SwitchesSpec extends HealthCheckSpecification { .build().create() and: "Get all flows going through the src switch" - def getSwitchFlowsResponse5 = northbound.getSwitchFlows(switchPair.src.dpId) + def getSwitchFlowsResponse5 = srcSwitch.retrieveFlows() then: "The created flows are in the response list" getSwitchFlowsResponse5.size() == 3 getSwitchFlowsResponse5*.id.sort() == [protectedFlow.flowId, singleFlow.flowId, defaultFlow.flowId].sort() when: "Bring down all ports on src switch to make flow DOWN" - def switchIsls = topology.getRelatedIsls(switchPair.src) - islHelper.breakIsls(switchIsls) + islHelper.breakIsls(srcSwitch.isls) and: "Get all flows going through the src switch" Wrappers.wait(WAIT_OFFSET * 2) { @@ -165,7 +162,7 @@ class SwitchesSpec extends HealthCheckSpecification { assert defaultFlow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE_FAILED).last() .payload.find { it.action == FlowActionType.REROUTE_FAILED.payloadLastAction } } - def getSwitchFlowsResponse6 = northbound.getSwitchFlows(switchPair.src.dpId) + def getSwitchFlowsResponse6 = srcSwitch.retrieveFlows() then: "The created flows are in the response list" getSwitchFlowsResponse6*.id.sort() == [protectedFlow.flowId, singleFlow.flowId, defaultFlow.flowId].sort() @@ -191,11 +188,11 @@ class SwitchesSpec extends HealthCheckSpecification { def 
singleFlow = flowFactory.getRandom(switchPair.src, switchPair.src) when: "Deactivate the src switch" - def switchToDisconnect = topology.switches.find { it.dpId == switchPair.src.dpId } - switchHelper.knockoutSwitch(switchToDisconnect, RW) + def switchToDisconnect = switchFactory.get(switchPair.src) + switchToDisconnect.knockoutSwitch(RW) and: "Get all flows going through the deactivated src switch" - def switchFlowsResponseSrcSwitch = northbound.getSwitchFlows(switchPair.src.dpId) + def switchFlowsResponseSrcSwitch = switchToDisconnect.retrieveFlows() then: "The created flows are in the response list from the deactivated src switch" switchFlowsResponseSrcSwitch*.id.sort() == [simpleFlow.flowId, singleFlow.flowId].sort() @@ -232,7 +229,7 @@ class SwitchesSpec extends HealthCheckSpecification { @Tags(LOW_PRIORITY) def "System returns human readable error when deleting switch rules on non-existing switch"() { when: "Delete switch rules on non-existing switch" - switchHelper.deleteSwitchRules(NON_EXISTENT_SWITCH_ID, DeleteRulesAction.DROP_ALL_ADD_DEFAULTS) + northbound.deleteSwitchRules(NON_EXISTENT_SWITCH_ID, DeleteRulesAction.DROP_ALL_ADD_DEFAULTS) then: "Not Found error is returned" def e = thrown(HttpClientErrorException) @@ -301,25 +298,17 @@ class SwitchesSpec extends HealthCheckSpecification { @Tags(LOW_PRIORITY) def "Able to partially update switch a 'location.#data.field' field"() { given: "A switch" - def sw = topology.activeSwitches.first() - def initConf = northbound.getSwitch(sw.dpId) + def sw = switchFactory.get(topology.activeSwitches.first()) when: "Request a switch partial update for a #data.field field" SwitchPatchDto updateRequest = [location: [(data.field): data.newValue]] as SwitchPatchDto - cleanupManager.addAction(RESTORE_SWITCH_PROPERTIES, {northboundV2.partialSwitchUpdate(sw.dpId, [location: [ - latitude: initConf.location.latitude ?: 0, - longitude: initConf.location.longitude ?: 0, - city: initConf.location.city ?: "", - country: initConf.location.country ?: "", - street: initConf.location.street ?: "" - ]] as SwitchPatchDto)}) - def response = northboundV2.partialSwitchUpdate(sw.dpId, updateRequest) + def response = sw.partialUpdate(updateRequest) then: "Update response reflects the changes" response.location."$data.field" == data.newValue and: "Changes actually took place" - northbound.getSwitch(sw.dpId).location."$data.field" == data.newValue + sw.retrieveDetails().location."$data.field" == data.newValue where: data << [ @@ -348,19 +337,16 @@ class SwitchesSpec extends HealthCheckSpecification { def "Able to partially update switch a 'pop' field"() { given: "A switch" - def sw = topology.activeSwitches.first() - def initConf = northbound.getSwitch(sw.dpId) + def sw = switchFactory.get(topology.activeSwitches.first()) when: "Request a switch partial update for a 'pop' field" def newPopValue = "test_POP" - cleanupManager.addAction(RESTORE_SWITCH_PROPERTIES, - {northboundV2.partialSwitchUpdate(sw.dpId, new SwitchPatchDto().tap { it.pop = initConf.pop ?: "" })}) - def response = northboundV2.partialSwitchUpdate(sw.dpId, new SwitchPatchDto().tap { it.pop = newPopValue }) + def response = sw.partialUpdate(new SwitchPatchDto().tap { it.pop = newPopValue }) then: "Update response reflects the changes" response.pop == newPopValue and: "Changes actually took place" - northbound.getSwitch(sw.dpId).pop == newPopValue + sw.retrieveDetails().pop == newPopValue } }
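
For illustration only (not part of the patch): a minimal sketch of how a spec could exercise the new switch interaction approach introduced above. It relies only on helper calls visible in this diff (switchFactory.get(...), SwitchExtended#collectAllowedPorts, the lagManager/rulesManager/metersManager accessors and SwitchExtended#validateAndCollectFoundDiscrepancies); the spec class name and the switch selection are hypothetical.

// Hypothetical example spec, not included in this patch
class SwitchExtendedUsageExampleSpec extends HealthCheckSpecification {

    def "LAG port lifecycle is driven through SwitchExtended helpers"() {
        given: "An active switch wrapped into SwitchExtended"
        def swToInteract = switchFactory.get(topology.getActiveSwitches().first())

        when: "A LAG port is created from the two last allowed ports"
        def ports = swToInteract.collectAllowedPorts()[-2, -1] as Set
        def lagPort = swToInteract.lagManager.createLogicalPort(ports).logicalPortNumber

        then: "Switch validation reports no discrepancies"
        !swToInteract.validateAndCollectFoundDiscrepancies().isPresent()

        and: "Rules and meters are reachable through the same wrapper"
        !swToInteract.rulesManager.getRules().empty
        !swToInteract.metersManager.getMeters().empty

        cleanup: "Remove the LAG port"
        lagPort && swToInteract.lagManager.deleteLogicalPort(lagPort)
    }
}

Compared to the previous style, the switch id, the northbound/grpc clients and the cleanup wiring stay inside SwitchExtended, so a spec deals with a single object per switch instead of passing sw.dpId into switchHelper, northbound and northboundV2 separately.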