diff --git a/dpd-api/src/lib.rs b/dpd-api/src/lib.rs index 37df934..dbc89ae 100644 --- a/dpd-api/src/lib.rs +++ b/dpd-api/src/lib.rs @@ -4,6 +4,8 @@ // // Copyright 2025 Oxide Computer Company +//! DPD endpoint definitions. + use std::{ collections::{BTreeMap, HashMap, HashSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr}, @@ -1410,23 +1412,29 @@ pub trait DpdApi { async fn multicast_group_create_external( rqctx: RequestContext, group: TypedBody, - ) -> Result, HttpError>; + ) -> Result< + HttpResponseCreated, + HttpError, + >; /** - * Create an internal multicast group configuration. + * Create an underlay (internal) multicast group configuration. * - * Internal groups are used for admin-scoped IPv6 multicast traffic that + * Underlay groups are used for admin-scoped IPv6 multicast traffic that * requires replication infrastructure. These groups support both external * and underlay members with full replication capabilities. */ #[endpoint { method = POST, - path = "/multicast/groups", + path = "/multicast/underlay-groups", }] - async fn multicast_group_create( + async fn multicast_group_create_underlay( rqctx: RequestContext, - group: TypedBody, - ) -> Result, HttpError>; + group: TypedBody, + ) -> Result< + HttpResponseCreated, + HttpError, + >; /** * Delete a multicast group configuration by IP address. @@ -1464,20 +1472,37 @@ pub trait DpdApi { ) -> Result, HttpError>; /** - * Update an internal multicast group configuration for a given group IP address. + * Get an underlay (internal) multicast group configuration by admin-scoped + * IPv6 address. * - * Internal groups are used for admin-scoped IPv6 multicast traffic that + * Underlay groups handle admin-scoped IPv6 multicast traffic with + * replication infrastructure for external and underlay members. + */ + #[endpoint { + method = GET, + path = "/multicast/underlay-groups/{group_ip}", + }] + async fn multicast_group_get_underlay( + rqctx: RequestContext, + path: Path, + ) -> Result, HttpError>; + + /** + * Update an underlay (internal) multicast group configuration for a given + * group IP address. + * + * Underlay groups are used for admin-scoped IPv6 multicast traffic that * requires replication infrastructure with external and underlay members. */ #[endpoint { method = PUT, - path = "/multicast/groups/{group_ip}", + path = "/multicast/underlay-groups/{group_ip}", }] - async fn multicast_group_update( + async fn multicast_group_update_underlay( rqctx: RequestContext, - path: Path, - group: TypedBody, - ) -> Result, HttpError>; + path: Path, + group: TypedBody, + ) -> Result, HttpError>; /** * Update an external-only multicast group configuration for a given group IP address. @@ -1493,7 +1518,10 @@ pub trait DpdApi { rqctx: RequestContext, path: Path, group: TypedBody, - ) -> Result, HttpError>; + ) -> Result< + HttpResponseCreated, + HttpError, + >; /** * List all multicast groups. @@ -2033,6 +2061,13 @@ pub struct MulticastGroupIpParam { pub group_ip: IpAddr, } +/// Used to identify an underlay (internal) multicast group by admin-scoped IPv6 +/// address. +#[derive(Deserialize, Serialize, JsonSchema)] +pub struct MulticastUnderlayGroupIpParam { + pub group_ip: mcast::AdminScopedIpv6, +} + /// Used to identify a multicast group by ID. /// /// If not provided, it will return all multicast groups. 
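The endpoint split above makes multicast setup a two-step sequence: an admin-scoped IPv6 underlay group is created first via POST /multicast/underlay-groups, and an external group then forwards into it through InternalForwarding::nat_target via POST /multicast/external-groups. Below is a minimal client-side sketch of that sequence under stated assumptions: the generated dpd_client::Client type name and the caller-supplied member and NAT target are assumptions, and the helper name, addresses, tag, and VLAN are placeholders (field and method names follow the types exercised in the tests that follow).

use std::net::IpAddr;

use dpd_client::{types, Client};

/// Sketch: create an underlay (admin-scoped IPv6) group, then an external
/// IPv4 group whose NAT target forwards into it. `member` and `nat_target`
/// are assumed to be built by the caller (e.g. from a port/link lookup and a
/// MAC/VNI choice, as in the tests below).
async fn setup_bifurcated_group(
    client: &Client,
    member: types::MulticastGroupMember,
    nat_target: types::NatTarget,
) {
    // 1. Underlay group, keyed by an admin-scoped IPv6 address
    //    (POST /multicast/underlay-groups).
    let underlay = types::MulticastGroupCreateUnderlayEntry {
        group_ip: "ff04::100".parse().unwrap(), // AdminScopedIpv6
        tag: Some("example".to_string()),
        members: vec![member],
    };
    let underlay_resp = client
        .multicast_group_create_underlay(&underlay)
        .await
        .expect("underlay group creation failed")
        .into_inner();
    // Both replication IDs are allocated for the underlay group.
    println!(
        "external_group_id={} underlay_group_id={}",
        underlay_resp.external_group_id, underlay_resp.underlay_group_id
    );

    // 2. External group whose internal forwarding targets the underlay group
    //    (POST /multicast/external-groups).
    let external = types::MulticastGroupCreateExternalEntry {
        group_ip: IpAddr::V4("224.1.2.3".parse().unwrap()),
        tag: Some("example".to_string()),
        internal_forwarding: types::InternalForwarding {
            nat_target: Some(nat_target),
        },
        external_forwarding: types::ExternalForwarding { vlan_id: Some(10) },
        sources: None,
    };
    let _external_resp = client
        .multicast_group_create_external(&external)
        .await
        .expect("external group creation failed")
        .into_inner();
}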
diff --git a/dpd-client/tests/integration_tests/mcast.rs b/dpd-client/tests/integration_tests/mcast.rs index fde1412..45965a9 100644 --- a/dpd-client/tests/integration_tests/mcast.rs +++ b/dpd-client/tests/integration_tests/mcast.rs @@ -30,47 +30,14 @@ const GIMLET_MAC: &str = "11:22:33:44:55:66"; const GIMLET_IP: Ipv6Addr = Ipv6Addr::new(0xfd00, 0x1122, 0x7788, 0x0101, 0, 0, 0, 4); -// Bifurcated Multicast Design: -// -// The multicast implementation uses a bifurcated design that separates external -// (customer) and (internal) underlay traffic: -// -// 1. External-only groups (IPv4 and non-admin-scoped IPv6): -// - Created from API control plane IPs for customer traffic -// - Handle customer traffic to/from outside the rack -// - Use the external multicast API (/multicast/external-groups) -// - Must have NAT targets pointing to internal groups for proper forwarding -// -// 2. Internal groups (admin-scoped IPv6 multicast): -// - Admin-scoped = admin-local, site-local, or organization-local scope (RFC 7346, RFC 4291) -// - Geneve encapsulated multicast traffic (NAT targets of external-only groups) -// - Use the internal multicast API (/multicast/groups) -// - Can replicate to: -// a) External group members (customer traffic) -// b) Underlay-only members (infrastructure traffic) -// c) Both external and underlay members (bifurcated replication) -// - Don't require NAT targets (they serve as targets for external-only groups) -// -// This design ensures proper traffic separation and enables flexible multicast forwarding -// policies between external networks and internal rack infrastructure. - -fn derive_ipv6_mcast_mac(ipv6_addr: &Ipv6Addr) -> MacAddr { - // Get the octets of the IPv6 address - let ip_octets = ipv6_addr.octets(); - - // Create the MAC address - // First 2 bytes: 0x33, 0x33 (fixed prefix for IPv6 multicast) - // Last 4 bytes: Take the last 4 bytes of the IPv6 address - let mac_bytes = [ - 0x33, // First byte: 33 - 0x33, // Second byte: 33 - ip_octets[12], // Third byte: 13th octet of IPv6 address - ip_octets[13], // Fourth byte: 14th octet of IPv6 address - ip_octets[14], // Fifth byte: 15th octet of IPv6 address - ip_octets[15], // Sixth byte: 16th octet of IPv6 address - ]; +trait ToIpAddr { + fn to_ip_addr(&self) -> IpAddr; +} - MacAddr::from(mac_bytes) +impl ToIpAddr for types::AdminScopedIpv6 { + fn to_ip_addr(&self) -> IpAddr { + IpAddr::V6(self.0) + } } async fn check_counter_incremented( @@ -104,30 +71,13 @@ async fn check_counter_incremented( )) } -fn create_nat_target_ipv4() -> types::NatTarget { - types::NatTarget { - internal_ip: MULTICAST_NAT_IP.into(), - inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x00, 0x00, 0x01).into(), - vni: 100.into(), - } -} - -fn create_nat_target_ipv6() -> types::NatTarget { - types::NatTarget { - internal_ip: MULTICAST_NAT_IP.into(), - inner_mac: MacAddr::new(0x33, 0x33, 0x00, 0x00, 0x00, 0x01).into(), - vni: 100.into(), - } -} - -/// Create a multicast group for testing. 
async fn create_test_multicast_group( switch: &Switch, group_ip: IpAddr, tag: Option<&str>, ports: &[(PhysPort, types::Direction)], - vlan_id: Option, - create_nat: bool, + internal_forwarding: types::InternalForwarding, + external_forwarding: types::ExternalForwarding, sources: Option>, ) -> types::MulticastGroupResponse { let members = ports @@ -142,83 +92,189 @@ async fn create_test_multicast_group( }) .collect(); - let nat_target = if create_nat { - if group_ip.is_ipv4() { - Some(create_nat_target_ipv4()) - } else { - Some(create_nat_target_ipv6()) - } - } else { - None - }; - match group_ip { IpAddr::V4(_) => { // IPv4 groups are always external and require NAT targets - let nat_target = - nat_target.expect("IPv4 external groups require NAT targets"); let external_entry = types::MulticastGroupCreateExternalEntry { group_ip, tag: tag.map(String::from), - nat_target, - vlan_id, + internal_forwarding, + external_forwarding, sources, }; - switch + + let resp = switch .client .multicast_group_create_external(&external_entry) .await .expect("Failed to create external multicast group") - .into_inner() + .into_inner(); + + types::MulticastGroupResponse::External { + external_group_id: resp.external_group_id, + group_ip: resp.group_ip, + tag: resp.tag, + internal_forwarding: resp.internal_forwarding, + external_forwarding: resp.external_forwarding, + sources: resp.sources, + } } IpAddr::V6(ipv6) => { if oxnet::Ipv6Net::new_unchecked(ipv6, 128) .is_admin_scoped_multicast() { // Admin-scoped IPv6 groups are internal - let internal_entry = types::MulticastGroupCreateEntry { - group_ip: match group_ip { - IpAddr::V6(ipv6) => ipv6, - _ => panic!("Expected IPv6 address"), - }, + let admin_scoped_ip = types::AdminScopedIpv6(ipv6); + let internal_entry = types::MulticastGroupCreateUnderlayEntry { + group_ip: admin_scoped_ip, tag: tag.map(String::from), - sources, members, }; - switch + + let resp = switch .client - .multicast_group_create(&internal_entry) + .multicast_group_create_underlay(&internal_entry) .await .expect("Failed to create internal multicast group") - .into_inner() + .into_inner(); + + types::MulticastGroupResponse::Underlay { + external_group_id: resp.external_group_id, + group_ip: resp.group_ip, + members: resp.members, + tag: resp.tag, + underlay_group_id: resp.underlay_group_id, + } } else { // Non-admin-scoped IPv6 groups are external-only and require NAT targets - let nat_target = nat_target - .expect("IPv6 external groups require NAT targets"); let external_entry = types::MulticastGroupCreateExternalEntry { group_ip, tag: tag.map(String::from), - nat_target, - vlan_id, + internal_forwarding, + external_forwarding, sources, }; - switch + + let resp = switch .client .multicast_group_create_external(&external_entry) .await .expect("Failed to create external multicast group") - .into_inner() + .into_inner(); + + types::MulticastGroupResponse::External { + external_group_id: resp.external_group_id, + group_ip: resp.group_ip, + tag: resp.tag, + internal_forwarding: resp.internal_forwarding, + external_forwarding: resp.external_forwarding, + sources: resp.sources, + } } } } } -/// Clean up a test group. -async fn cleanup_test_group(switch: &Switch, group_ip: IpAddr) { - let _ = switch.client.multicast_group_delete(&group_ip).await; +/// Clean up a test group, failing if it cannot be deleted properly. 
+async fn cleanup_test_group(switch: &Switch, group_ip: IpAddr) -> TestResult { + switch + .client + .multicast_group_delete(&group_ip) + .await + .map_err(|e| { + anyhow!("Failed to delete test group {}: {:?}", group_ip, e) + }) + .map(|_| ()) +} + +fn get_group_ip(response: &types::MulticastGroupResponse) -> IpAddr { + match response { + types::MulticastGroupResponse::Underlay { group_ip, .. } => { + group_ip.to_ip_addr() + } + types::MulticastGroupResponse::External { group_ip, .. } => *group_ip, + } +} + +fn get_external_group_id(response: &types::MulticastGroupResponse) -> u16 { + match response { + types::MulticastGroupResponse::Underlay { + external_group_id, .. + } => *external_group_id, + types::MulticastGroupResponse::External { + external_group_id, .. + } => *external_group_id, + } +} + +fn get_underlay_group_id( + response: &types::MulticastGroupResponse, +) -> Option { + match response { + types::MulticastGroupResponse::Underlay { + underlay_group_id, .. + } => Some(*underlay_group_id), + types::MulticastGroupResponse::External { .. } => None, + } +} + +fn get_members( + response: &types::MulticastGroupResponse, +) -> Option<&Vec> { + match response { + types::MulticastGroupResponse::Underlay { members, .. } => { + Some(members) + } + types::MulticastGroupResponse::External { .. } => None, + } +} + +fn get_sources( + response: &types::MulticastGroupResponse, +) -> Option> { + match response { + types::MulticastGroupResponse::Underlay { .. } => None, + types::MulticastGroupResponse::External { sources, .. } => { + sources.clone() + } + } +} + +fn get_nat_target( + response: &types::MulticastGroupResponse, +) -> Option<&types::NatTarget> { + match response { + types::MulticastGroupResponse::Underlay { .. } => None, + types::MulticastGroupResponse::External { + internal_forwarding, + .. + } => internal_forwarding.nat_target.as_ref(), + } +} + +fn get_tag(response: &types::MulticastGroupResponse) -> &Option { + match response { + types::MulticastGroupResponse::Underlay { tag, .. } => tag, + types::MulticastGroupResponse::External { tag, .. } => tag, + } +} + +fn create_nat_target_ipv4() -> types::NatTarget { + types::NatTarget { + internal_ip: MULTICAST_NAT_IP, + inner_mac: MacAddr::new(0x11, 0x22, 0x33, 0x44, 0x55, 0x66).into(), + vni: 100.into(), + } +} + +fn create_nat_target_ipv6() -> types::NatTarget { + types::NatTarget { + internal_ip: MULTICAST_NAT_IP, + inner_mac: MacAddr::new(0x11, 0x22, 0x33, 0x44, 0x55, 0x66).into(), + vni: 101.into(), + } } -/// Create an IPv4 multicast packet for testing. fn create_ipv4_multicast_packet( multicast_ip_addr: IpAddr, src_mac: MacAddr, @@ -256,7 +312,6 @@ fn create_ipv4_multicast_packet( common::gen_udp_packet(src_endpoint, dst_endpoint) } -/// Create an IPv6 multicast packet for testing. 
fn create_ipv6_multicast_packet( multicast_ip_addr: IpAddr, src_mac: MacAddr, @@ -401,7 +456,7 @@ async fn test_nonexisting_group() { #[tokio::test] #[ignore] -async fn test_group_creation_with_validation() { +async fn test_group_creation_with_validation() -> TestResult { let switch = &*get_switch().await; // Test the bifurcated multicast design: @@ -416,21 +471,29 @@ async fn test_group_creation_with_validation() { internal_multicast_ip, Some("valid_internal_group"), &[(egress1, types::Direction::Underlay)], - None, - false, + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - assert!(internal_group.underlay_group_id.is_some()); + assert_ne!( + get_external_group_id(&internal_group), + get_underlay_group_id(&internal_group).unwrap(), + "Group IDs should be different" + ); // Test creating a group with invalid parameters (e.g., invalid VLAN ID) // IPv4 groups are always external let external_invalid = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4(MULTICAST_TEST_IPV4), tag: Some("test_invalid".to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(4096), // Invalid: VLAN ID must be 1-4095 + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { + vlan_id: Some(4096), // Invalid: VLAN ID must be 1-4095 + }, sources: None, }; @@ -456,8 +519,10 @@ async fn test_group_creation_with_validation() { let external_valid = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4(MULTICAST_TEST_IPV4_SSM), tag: Some("test_valid".to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: Some(vec![types::IpSrc::Exact( "192.168.1.1".parse::().unwrap(), )]), @@ -470,95 +535,39 @@ async fn test_group_creation_with_validation() { .expect("Should successfully create valid group") .into_inner(); + assert_eq!( + get_external_group_id(&internal_group), created.external_group_id, + "External group should reference the same external group ID as the internal group" + ); + assert_eq!(created.group_ip, MULTICAST_TEST_IPV4_SSM); - assert!(created.external_group_id.is_none()); - assert!(created.underlay_group_id.is_none()); assert_eq!(created.tag, Some("test_valid".to_string())); - assert_eq!(created.int_fwding.nat_target, Some(nat_target.clone())); - assert_eq!(created.ext_fwding.vlan_id, Some(10)); + assert_eq!( + created.internal_forwarding.nat_target, + Some(nat_target.clone()) + ); + assert_eq!(created.external_forwarding.vlan_id, Some(10)); assert_eq!( created.sources, Some(vec![types::IpSrc::Exact( "192.168.1.1".parse::().unwrap(), )]) ); - assert_eq!(created.members.len(), 0); // External groups don't have members - switch - .client - .multicast_group_delete(&created.group_ip) - .await - .expect("Failed to delete test group"); + cleanup_test_group(switch, created.group_ip).await } #[tokio::test] #[ignore] -async fn test_internal_ipv6_validation() { +async fn test_internal_ipv6_validation() -> TestResult { let switch = &*get_switch().await; - let (port_id, link_id) = switch.link_id(PhysPort(26)).unwrap(); - - // IPv4-mapped IPv6 addresses should be rejected as invalid multicast - let ipv4_mapped_internal = types::MulticastGroupCreateEntry { - group_ip: "::ffff:224.1.1.1".parse().unwrap(), // IPv4-mapped IPv6 - tag: 
Some("test_ipv4_mapped_internal".to_string()), - sources: None, - members: vec![types::MulticastGroupMember { - port_id: port_id.clone(), - link_id, - direction: types::Direction::External, - }], - }; - - let ipv4_mapped_res = switch - .client - .multicast_group_create(&ipv4_mapped_internal) - .await; - - assert!( - ipv4_mapped_res.is_err(), - "Should reject IPv4-mapped IPv6 addresses" - ); - let ipv4_mapped_error_msg = format!("{:?}", ipv4_mapped_res.unwrap_err()); - assert!( - ipv4_mapped_error_msg.contains("is not a multicast address"), - "Error message should indicate invalid multicast address: {}", - ipv4_mapped_error_msg - ); - - // Non-admin-scoped IPv6 groups should be rejected from internal API - let non_admin_ipv6 = types::MulticastGroupCreateEntry { - group_ip: "ff0e::1".parse().unwrap(), // Global scope, not admin-scoped - tag: Some("test_non_admin".to_string()), - sources: None, - members: vec![types::MulticastGroupMember { - port_id: port_id.clone(), - link_id, - direction: types::Direction::External, - }], - }; - - let non_admin_res = - switch.client.multicast_group_create(&non_admin_ipv6).await; - - assert!( - non_admin_res.is_err(), - "Should reject non-admin-scoped IPv6 groups from internal API" - ); - let non_admin_error_msg = format!("{:?}", non_admin_res.unwrap_err()); - assert!( - non_admin_error_msg.contains( - "Non-admin-scoped IPv6 multicast groups must use the external API" - ), - "Error message should direct to external API: {}", - non_admin_error_msg - ); + let (port_id, link_id) = switch.link_id(PhysPort(15)).unwrap(); - // Admin-scoped IPv6 groups work correctly (no VLAN IDs supported) - let internal_group = types::MulticastGroupCreateEntry { - group_ip: "ff04::2".parse().unwrap(), // Admin-scoped IPv6 + // Admin-scoped IPv6 groups work correctly + let internal_group = types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff04::2".parse().unwrap(), tag: Some("test_admin_scoped".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: port_id.clone(), link_id, @@ -568,18 +577,19 @@ async fn test_internal_ipv6_validation() { let created = switch .client - .multicast_group_create(&internal_group) + .multicast_group_create_underlay(&internal_group) .await .expect("Should create internal IPv6 group") .into_inner(); - assert_eq!(created.ext_fwding.vlan_id, None); - assert!(created.underlay_group_id.is_some()); + assert_ne!( + created.external_group_id, created.underlay_group_id, + "Group IDs should be different" + ); // Test update works correctly - let update_entry = types::MulticastGroupUpdateEntry { + let update_entry = types::MulticastGroupUpdateUnderlayEntry { tag: Some("updated_tag".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id, link_id, @@ -589,29 +599,27 @@ async fn test_internal_ipv6_validation() { let updated = switch .client - .multicast_group_update(&created.group_ip, &update_entry) + .multicast_group_update_underlay(&created.group_ip, &update_entry) .await .expect("Should update internal IPv6 group") .into_inner(); assert_eq!(updated.tag, Some("updated_tag".to_string())); - assert_eq!(updated.ext_fwding.vlan_id, None); - cleanup_test_group(switch, created.group_ip).await; + cleanup_test_group(switch, created.group_ip.to_ip_addr()).await } #[tokio::test] #[ignore] -async fn test_vlan_propagation_to_internal() { +async fn test_vlan_propagation_to_internal() -> TestResult { let switch = &*get_switch().await; let (port_id, link_id) = switch.link_id(PhysPort(30)).unwrap(); // Create internal 
IPv6 group first - let internal_group_entry = types::MulticastGroupCreateEntry { - group_ip: "ff04::200".parse().unwrap(), // Admin-scoped IPv6 + let internal_group_entry = types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff04::200".parse().unwrap(), tag: Some("test_vlan_propagation".to_string()), - sources: None, members: vec![ types::MulticastGroupMember { port_id: port_id.clone(), @@ -628,14 +636,11 @@ async fn test_vlan_propagation_to_internal() { let created_admin = switch .client - .multicast_group_create(&internal_group_entry) + .multicast_group_create_underlay(&internal_group_entry) .await .expect("Should create admin-scoped group") .into_inner(); - assert!(created_admin.external_group_id.is_some()); - assert_eq!(created_admin.ext_fwding.vlan_id, None); // No VLAN initially - // Create external group that references the admin-scoped group let nat_target = types::NatTarget { internal_ip: "ff04::200".parse().unwrap(), // References admin-scoped group @@ -646,8 +651,10 @@ async fn test_vlan_propagation_to_internal() { let external_group = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4("224.1.2.3".parse().unwrap()), tag: Some("test_external_with_vlan".to_string()), - nat_target, - vlan_id: Some(42), // This VLAN should be used by admin-scoped group + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(42) }, // This VLAN should be used by admin-scoped group sources: None, }; @@ -658,9 +665,14 @@ async fn test_vlan_propagation_to_internal() { .expect("Should create external group with NAT target") .into_inner(); - assert_eq!(created_external.ext_fwding.vlan_id, Some(42)); + assert_eq!(created_external.external_forwarding.vlan_id, Some(42)); assert_eq!( - created_external.int_fwding.nat_target.unwrap().internal_ip, + created_external + .internal_forwarding + .nat_target + .as_ref() + .unwrap() + .internal_ip, "ff04::200".parse::().unwrap() ); @@ -682,8 +694,10 @@ async fn test_vlan_propagation_to_internal() { "Admin-scoped group bitmap should have VLAN 42 from external group" ); - cleanup_test_group(switch, created_admin.group_ip).await; - cleanup_test_group(switch, created_external.group_ip).await; + cleanup_test_group(switch, created_admin.group_ip.to_ip_addr()) + .await + .unwrap(); + cleanup_test_group(switch, created_external.group_ip).await } #[tokio::test] @@ -693,19 +707,17 @@ async fn test_group_api_lifecycle() { let egress1 = PhysPort(28); let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); - let underlay_group = create_test_multicast_group( + create_test_multicast_group( switch, internal_multicast_ip, Some("valid_underlay_group"), &[(egress1, types::Direction::Underlay)], - None, - false, + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - assert!(underlay_group.underlay_group_id.is_some()); - // Create IPv4 external group with NAT target referencing the underlay group let group_ip = IpAddr::V4(MULTICAST_TEST_IPV4); let vlan_id = 10; @@ -713,8 +725,12 @@ async fn test_group_api_lifecycle() { let external_create = types::MulticastGroupCreateExternalEntry { group_ip, tag: Some("test_lifecycle".to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(vlan_id), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { + vlan_id: Some(vlan_id), + }, sources: None, }; @@ -728,14 +744,14 @@ async fn 
test_group_api_lifecycle() { let external_group_id = created.external_group_id; assert_eq!(created.group_ip, MULTICAST_TEST_IPV4); - assert!(created.external_group_id.is_none()); - assert!(created.underlay_group_id.is_none()); assert_eq!(created.tag, Some("test_lifecycle".to_string())); - assert_eq!(created.int_fwding.nat_target, Some(nat_target.clone())); - assert_eq!(created.ext_fwding.vlan_id, Some(vlan_id)); - assert_eq!(created.members.len(), 0); // External groups don't have members + assert_eq!( + created.internal_forwarding.nat_target, + Some(nat_target.clone()) + ); + assert_eq!(created.external_forwarding.vlan_id, Some(vlan_id)); - // 3. Get all groups and verify our group is included + // Get all groups and verify our group is included let groups = switch .client .multicast_groups_list_stream(None) @@ -745,10 +761,10 @@ async fn test_group_api_lifecycle() { let found_in_list = groups .iter() - .any(|g| g.external_group_id == external_group_id); + .any(|g| get_external_group_id(g) == external_group_id); assert!(found_in_list, "Created group should be in the list"); - // 4. Get groups by tag + // Get groups by tag let tagged_groups = switch .client .multicast_groups_list_by_tag_stream("test_lifecycle", None) @@ -762,10 +778,10 @@ async fn test_group_api_lifecycle() { ); let found_by_tag = tagged_groups .iter() - .any(|g| g.external_group_id == external_group_id); + .any(|g| get_external_group_id(g) == external_group_id); assert!(found_by_tag, "Created group should be found by tag"); - // 5. Get the specific group + // Get the specific group let group = switch .client .multicast_groups_list_stream(None) @@ -773,8 +789,8 @@ async fn test_group_api_lifecycle() { .await .expect("Should be able to get group by ID"); - assert_eq!(group[0].external_group_id, external_group_id); - assert_eq!(group[0].tag, Some("test_lifecycle".to_string())); + assert_eq!(get_external_group_id(&group[0]), external_group_id); + assert_eq!(get_tag(&group[0]), &Some("test_lifecycle".to_string())); // Also test getting by IP address let group_by_ip = switch @@ -783,22 +799,26 @@ async fn test_group_api_lifecycle() { .await .expect("Should be able to get group by IP"); - assert_eq!(group_by_ip.external_group_id, external_group_id); + assert_eq!( + get_external_group_id(&group_by_ip.into_inner()), + external_group_id + ); - // 6. 
Update the group + // Update the group let updated_nat_target = types::NatTarget { internal_ip: MULTICAST_NAT_IP.into(), - inner_mac: MacAddr::new(0xe0, 0xd5, 0x5e, 0x00, 0x11, 0x22).into(), + inner_mac: MacAddr::from(MULTICAST_NAT_IP.derive_multicast_mac()) + .into(), vni: 200.into(), }; let external_update = types::MulticastGroupUpdateExternalEntry { tag: Some("updated_lifecycle".to_string()), - nat_target: updated_nat_target.clone(), - vlan_id: Some(20), - sources: Some(vec![types::IpSrc::Exact( - "192.168.1.5".parse::().unwrap(), - )]), + internal_forwarding: types::InternalForwarding { + nat_target: Some(updated_nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(20) }, + sources: None, }; let updated = switch @@ -809,26 +829,22 @@ async fn test_group_api_lifecycle() { .into_inner(); assert_eq!(updated.external_group_id, external_group_id); - assert!(updated.underlay_group_id.is_none()); assert_eq!(updated.tag, Some("updated_lifecycle".to_string())); - assert_eq!(updated.int_fwding.nat_target, Some(updated_nat_target)); - assert_eq!(updated.ext_fwding.vlan_id, Some(20)); assert_eq!( - updated.sources, - Some(vec![types::IpSrc::Exact( - "192.168.1.5".parse::().unwrap(), - )]) + updated.internal_forwarding.nat_target, + Some(updated_nat_target) ); - assert_eq!(updated.members.len(), 0); // External groups don't have members + assert_eq!(updated.external_forwarding.vlan_id, Some(20)); + assert_eq!(updated.sources, None); - // 7. Delete the group + // Delete the group switch .client .multicast_group_delete(&group_ip) .await .expect("Should be able to delete group"); - // 8. Verify group was deleted + // Verify group was deleted let result = switch .client .multicast_group_get(&group_ip) @@ -846,7 +862,7 @@ async fn test_group_api_lifecycle() { _ => panic!("Expected ErrorResponse when getting a deleted group"), } - // 9. 
Verify group no longer appears in the list + // Verify group no longer appears in the list let groups_after_delete = switch .client .multicast_groups_list_stream(None) @@ -855,8 +871,9 @@ async fn test_group_api_lifecycle() { .expect("Should be able to list groups"); // Check if the specific deleted group is still in the list - let deleted_group_still_in_list = - groups_after_delete.iter().any(|g| g.group_ip == group_ip); + let deleted_group_still_in_list = groups_after_delete + .iter() + .any(|g| get_group_ip(g) == group_ip); assert!( !deleted_group_still_in_list, "Deleted group should not be in the list" @@ -878,8 +895,8 @@ async fn test_multicast_tagged_groups_management() { internal_multicast_ip, Some(&format!("{}_internal", tag)), &[(PhysPort(11), types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -891,8 +908,10 @@ async fn test_multicast_tagged_groups_management() { let external_group1 = types::MulticastGroupCreateExternalEntry { group_ip, tag: Some(tag.to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -907,8 +926,10 @@ async fn test_multicast_tagged_groups_management() { let external_group2 = types::MulticastGroupCreateExternalEntry { group_ip: "224.0.1.2".parse().unwrap(), // Different IP tag: Some(tag.to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -923,8 +944,10 @@ async fn test_multicast_tagged_groups_management() { let external_group3 = types::MulticastGroupCreateExternalEntry { group_ip: "224.0.1.3".parse().unwrap(), // Different IP tag: Some("different_tag".to_string()), - nat_target: nat_target.clone(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -946,7 +969,7 @@ async fn test_multicast_tagged_groups_management() { assert_eq!(tagged_groups.len(), 2, "Should find 2 groups with the tag"); let group_ips: HashSet<_> = - tagged_groups.iter().map(|g| g.group_ip).collect(); + tagged_groups.iter().map(|g| get_group_ip(g)).collect(); assert!(group_ips.contains(&created1.group_ip)); assert!(group_ips.contains(&created2.group_ip)); assert!(!group_ips.contains(&created3.group_ip)); @@ -967,23 +990,10 @@ async fn test_multicast_tagged_groups_management() { .expect("Should list remaining groups"); let remaining_ips: HashSet<_> = - remaining_groups.iter().map(|g| g.group_ip).collect(); + remaining_groups.iter().map(|g| get_group_ip(g)).collect(); assert!(!remaining_ips.contains(&created1.group_ip)); assert!(!remaining_ips.contains(&created2.group_ip)); assert!(remaining_ips.contains(&created3.group_ip)); - - // Clean up the remaining group and underlay group - switch - .client - .multicast_group_delete(&created3.group_ip) - .await - .expect("Should delete the remaining group"); - - switch - .client - .multicast_group_delete(&internal_multicast_ip) - .await - .expect("Should delete the remaining group"); } #[tokio::test] @@ -998,8 +1008,8 @@ async fn 
test_multicast_untagged_groups() { internal_multicast_ip, None, // No tag for NAT target &[(PhysPort(26), types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -1011,8 +1021,10 @@ async fn test_multicast_untagged_groups() { let external_untagged = types::MulticastGroupCreateExternalEntry { group_ip, tag: None, // No tag - nat_target: create_nat_target_ipv4(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -1028,8 +1040,10 @@ async fn test_multicast_untagged_groups() { let tagged_group = types::MulticastGroupCreateExternalEntry { group_ip: "224.0.2.2".parse().unwrap(), // Different IP tag: Some("some_tag".to_string()), - nat_target: create_nat_target_ipv4(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -1056,32 +1070,23 @@ async fn test_multicast_untagged_groups() { .expect("Should list remaining groups"); let remaining_ips: HashSet<_> = - remaining_groups.iter().map(|g| g.group_ip).collect(); + remaining_groups.iter().map(get_group_ip).collect(); assert!(!remaining_ips.contains(&created_untagged.group_ip)); assert!(remaining_ips.contains(&created_tagged.group_ip)); - - // Clean up the remaining tagged group - // (NAT target group was already deleted by multicast_reset_untagged since it had no tag) - switch - .client - .multicast_group_delete(&created_tagged.group_ip) - .await - .expect("Should delete remaining tagged group"); } #[tokio::test] #[ignore] -async fn test_api_internal_ipv6_bifurcated_replication() { +async fn test_api_internal_ipv6_bifurcated_replication() -> TestResult { let switch = &*get_switch().await; let (port_id1, link_id1) = switch.link_id(PhysPort(11)).unwrap(); let (port_id2, link_id2) = switch.link_id(PhysPort(12)).unwrap(); // Create admin-scoped IPv6 group with both external and underlay members - let admin_scoped_group = types::MulticastGroupCreateEntry { - group_ip: "ff04::100".parse().unwrap(), // Admin-scoped IPv6 + let admin_scoped_group = types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff04::100".parse().unwrap(), tag: Some("test_bifurcated".to_string()), - sources: None, members: vec![ types::MulticastGroupMember { port_id: port_id1.clone(), @@ -1098,29 +1103,22 @@ async fn test_api_internal_ipv6_bifurcated_replication() { let created = switch .client - .multicast_group_create(&admin_scoped_group) + .multicast_group_create_underlay(&admin_scoped_group) .await .expect("Should create bifurcated admin-scoped group") .into_inner(); - // Verify both group IDs are populated - assert!( - created.external_group_id.is_some(), - "Should have external group ID" - ); - assert!( - created.underlay_group_id.is_some(), - "Should have underlay group ID" - ); assert_ne!( created.external_group_id, created.underlay_group_id, - "Group IDs should be different" + "Internal group should have different external and underlay group IDs" + ); + assert!( + created.external_group_id > 0, + "External group ID should be allocated and non-zero" ); - - // Verify group has external_group_id (replication is handled internally) assert!( - created.external_group_id.is_some(), - "Bifurcated group 
should have external_group_id" + created.underlay_group_id > 0, + "Underlay group ID should be allocated and non-zero" ); // Verify members are preserved @@ -1139,21 +1137,20 @@ async fn test_api_internal_ipv6_bifurcated_replication() { assert_eq!(external_members.len(), 1); assert_eq!(underlay_members.len(), 1); - cleanup_test_group(switch, created.group_ip).await; + cleanup_test_group(switch, created.group_ip.to_ip_addr()).await } #[tokio::test] #[ignore] -async fn test_api_internal_ipv6_underlay_only() { +async fn test_api_internal_ipv6_underlay_only() -> TestResult { let switch = &*get_switch().await; let (port_id, link_id) = switch.link_id(PhysPort(11)).unwrap(); // Create admin-scoped IPv6 group with only underlay members - let underlay_only_group = types::MulticastGroupCreateEntry { - group_ip: "ff05::200".parse().unwrap(), // Site-local admin-scoped + let underlay_only_group = types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff05::200".parse().unwrap(), tag: Some("test_underlay_only".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: port_id.clone(), link_id, @@ -1163,86 +1160,54 @@ async fn test_api_internal_ipv6_underlay_only() { let created = switch .client - .multicast_group_create(&underlay_only_group) + .multicast_group_create_underlay(&underlay_only_group) .await .expect("Should create underlay-only admin-scoped group") .into_inner(); - // Should have underlay group ID but no external group ID - assert!( - created.underlay_group_id.is_some(), - "Should have underlay group ID" - ); - assert!( - created.external_group_id.is_none(), - "Should NOT have external group ID" - ); - - // Verify group has underlay_group_id (replication is handled internally) - assert!( - created.underlay_group_id.is_some(), - "Underlay-only group should have underlay_group_id" - ); - // Verify only underlay members assert_eq!(created.members.len(), 1); assert_eq!(created.members[0].direction, types::Direction::Underlay); - cleanup_test_group(switch, created.group_ip).await; + cleanup_test_group(switch, created.group_ip.to_ip_addr()).await } #[tokio::test] #[ignore] -async fn test_api_internal_ipv6_external_only() { +async fn test_api_internal_ipv6_external_only() -> TestResult { let switch = &*get_switch().await; let (port_id, link_id) = switch.link_id(PhysPort(11)).unwrap(); // Create admin-scoped IPv6 group with only external members - let external_only_group = types::MulticastGroupCreateEntry { - group_ip: "ff08::300".parse().unwrap(), // Org-local admin-scoped - tag: Some("test_external_only".to_string()), - sources: None, - members: vec![types::MulticastGroupMember { - port_id: port_id.clone(), - link_id, - direction: types::Direction::External, - }], - }; + let external_members_only_group = + types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff08::300".parse().unwrap(), + tag: Some("test_external_members_only".to_string()), + members: vec![types::MulticastGroupMember { + port_id: port_id.clone(), + link_id, + direction: types::Direction::External, + }], + }; let created = switch .client - .multicast_group_create(&external_only_group) + .multicast_group_create_underlay(&external_members_only_group) .await - .expect("Should create external-only admin-scoped group") + .expect("Should create external members-only admin-scoped group") .into_inner(); - // Should have external group ID but no underlay group ID - assert!( - created.external_group_id.is_some(), - "Should have external group ID" - ); - assert!( - created.underlay_group_id.is_none(), - "Should 
NOT have underlay group ID" - ); - - // Verify group has external_group_id (replication is handled internally) - assert!( - created.external_group_id.is_some(), - "External-only group should have external_group_id" - ); - // Verify only external members assert_eq!(created.members.len(), 1); assert_eq!(created.members[0].direction, types::Direction::External); - cleanup_test_group(switch, created.group_ip).await; + cleanup_test_group(switch, created.group_ip.to_ip_addr()).await } #[tokio::test] #[ignore] -async fn test_api_invalid_combinations() { +async fn test_api_invalid_combinations() -> TestResult { let switch = &*get_switch().await; // First create the internal admin-scoped group that will be the NAT target @@ -1252,8 +1217,8 @@ async fn test_api_invalid_combinations() { internal_multicast_ip, Some("nat_target_for_invalid_combos"), &[(PhysPort(26), types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -1262,8 +1227,10 @@ async fn test_api_invalid_combinations() { let ipv4_with_underlay = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4("224.1.0.200".parse().unwrap()), // Avoid 224.0.0.0/24 reserved range tag: Some("test_invalid_ipv4".to_string()), - nat_target: create_nat_target_ipv4(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -1275,15 +1242,14 @@ async fn test_api_invalid_combinations() { .expect("IPv4 external group should be created") .into_inner(); - // But it should not have underlay group ID or replication info - assert!(created_ipv4.underlay_group_id.is_none()); - // Non-admin-scoped IPv6 should use external API let non_admin_ipv6 = types::MulticastGroupCreateExternalEntry { group_ip: "ff0e::400".parse().unwrap(), // Global scope, not admin-scoped tag: Some("test_non_admin_ipv6".to_string()), - nat_target: create_nat_target_ipv6(), - vlan_id: Some(20), + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv6()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(20) }, sources: None, }; @@ -1294,16 +1260,17 @@ async fn test_api_invalid_combinations() { .expect("Non-admin-scoped IPv6 should use external API") .into_inner(); - // Should not have underlay group ID or replication info - assert!(created_non_admin.underlay_group_id.is_none()); - // Admin-scoped IPv6 with underlay members should fail via external API let admin_scoped_external_entry = types::MulticastGroupCreateExternalEntry { group_ip: "ff04::500".parse().unwrap(), // Admin-scoped tag: Some("test_admin_external".to_string()), - nat_target: create_nat_target_ipv6(), - vlan_id: Some(30), + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv6()), + }, + external_forwarding: types::ExternalForwarding { + vlan_id: Some(30), + }, sources: None, }; @@ -1325,9 +1292,13 @@ async fn test_api_invalid_combinations() { ), } - cleanup_test_group(switch, created_ipv4.group_ip).await; - cleanup_test_group(switch, created_non_admin.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; + cleanup_test_group(switch, created_ipv4.group_ip) + .await + .unwrap(); + cleanup_test_group(switch, created_non_admin.group_ip) + .await + .unwrap(); + cleanup_test_group(switch, 
internal_multicast_ip).await } #[tokio::test] @@ -1342,14 +1313,14 @@ async fn test_ipv4_multicast_invalid_destination_mac() -> TestResult { // Create admin-scoped IPv6 multicast group for underlay replication // This group handles replication within the rack infrastructure let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); - let vlan = Some(10); + create_test_multicast_group( switch, internal_multicast_ip, Some("test_invalid_mac_underlay"), &[(egress1, types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -1363,8 +1334,10 @@ async fn test_ipv4_multicast_invalid_destination_mac() -> TestResult { multicast_ip, Some("test_invalid_mac"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped underlay group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -1418,7 +1391,7 @@ async fn test_ipv4_multicast_invalid_destination_mac() -> TestResult { .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1442,10 +1415,10 @@ async fn test_ipv4_multicast_invalid_destination_mac() -> TestResult { .unwrap(); // Cleanup: Remove both external IPv4 group and underlay IPv6 group - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -1459,15 +1432,14 @@ async fn test_ipv6_multicast_invalid_destination_mac() -> TestResult { // Create admin-scoped IPv6 multicast group let multicast_ip = IpAddr::V6("ff04::300".parse().unwrap()); // Admin-scoped - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_ipv6_invalid_mac"), &[(egress1, types::Direction::External)], - vlan, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -1514,7 +1486,7 @@ async fn test_ipv6_multicast_invalid_destination_mac() -> TestResult { .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1537,9 +1509,7 @@ async fn test_ipv6_multicast_invalid_destination_mac() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)).await } #[tokio::test] @@ -1558,23 +1528,24 @@ async fn test_multicast_ttl_zero() -> TestResult { internal_multicast_ip, Some("nat_target_for_ttl"), &[(egress1, types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; // Create IPv4 multicast group with two egress ports let multicast_ip = IpAddr::V4(MULTICAST_TEST_IPV4); - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_ttl_drop"), &[], // External groups have no members - vlan, - true, // IPv4 groups need NAT 
targets + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -1607,7 +1578,7 @@ async fn test_multicast_ttl_zero() -> TestResult { let ctr_baseline = switch.get_counter("ipv4_ttl_invalid", None).await.unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1619,10 +1590,10 @@ async fn test_multicast_ttl_zero() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -1641,23 +1612,24 @@ async fn test_multicast_ttl_one() -> TestResult { internal_multicast_ip, Some("nat_target_for_ttl_one"), &[(egress1, types::Direction::Underlay)], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; // Create IPv4 multicast group with two egress ports let multicast_ip = IpAddr::V4(MULTICAST_TEST_IPV4); - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_ttl_one_drop"), &[], // External groups have no members - vlan, - true, // IPv4 groups need NAT targets + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -1690,7 +1662,7 @@ async fn test_multicast_ttl_one() -> TestResult { let ctr_baseline = switch.get_counter("ipv4_ttl_invalid", None).await.unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1702,10 +1674,10 @@ async fn test_multicast_ttl_one() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -1719,7 +1691,7 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { let egress2 = PhysPort(17); let egress3 = PhysPort(19); - // Create admin-scoped IPv6 multicast group for underlay replication + // Create admin-scoped IPv6 multicast group for underlay replication // This handles the actual packet replication within the rack infrastructure // after NAT ingress processing let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); @@ -1732,8 +1704,8 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { internal_multicast_ip, Some("test_replication_internal"), &underlay_members, - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -1752,8 +1724,10 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { multicast_ip, Some("test_ipv4_replication"), &external_members, - vlan, - true, // Create NAT target that points to the admin-scoped underlay group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan }, None, ) 
.await; @@ -1792,14 +1766,14 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); let to_recv2 = prepare_expected_pkt( switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress3), ); @@ -1826,7 +1800,7 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1838,9 +1812,7 @@ async fn test_ipv4_multicast_basic_replication_nat_ingress() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)).await } #[tokio::test] @@ -1866,8 +1838,8 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members( internal_multicast_ip, Some("test_geneve_mcast_tag_underlay"), &replication_members, - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -1881,8 +1853,10 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members( multicast_ip, Some("test_geneve_mcast_tag_0"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan }, None, ) .await; @@ -1928,7 +1902,8 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members( ) .unwrap(), Endpoint::parse( - &derive_ipv6_mcast_mac(&nat_target.internal_ip).to_string(), + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), &nat_target.internal_ip.to_string(), geneve::GENEVE_UDP_PORT, ) @@ -1964,7 +1939,7 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members( .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -1976,10 +1951,10 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members( .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await } #[tokio::test] @@ -2003,23 +1978,24 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members( (egress3, types::Direction::Underlay), (egress4, types::Direction::Underlay), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; // Create IPv4 external multicast group with NAT target (no members) let multicast_ip = IpAddr::V4(MULTICAST_TEST_IPV4); - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_geneve_mcast_tag_1"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + 
nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -2052,7 +2028,8 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members( ) .unwrap(); let geneve_dst = Endpoint::parse( - &derive_ipv6_mcast_mac(&nat_target.internal_ip).to_string(), + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), &nat_target.internal_ip.to_string(), geneve::GENEVE_UDP_PORT, ) @@ -2101,7 +2078,7 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members( .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2113,10 +2090,10 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members( .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await } #[tokio::test] @@ -2145,8 +2122,8 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe (egress3, types::Direction::Underlay), (egress4, types::Direction::Underlay), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -2160,8 +2137,10 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe multicast_ip, Some("test_geneve_mcast_tag_1"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan }, None, ) .await; @@ -2199,7 +2178,8 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe ) .unwrap(), Endpoint::parse( - &derive_ipv6_mcast_mac(&nat_target.internal_ip).to_string(), + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), &nat_target.internal_ip.to_string(), geneve::GENEVE_UDP_PORT, ) @@ -2256,7 +2236,7 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2268,10 +2248,10 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, MULTICAST_NAT_IP.into()).await } #[tokio::test] @@ -2289,8 +2269,8 @@ async fn test_ipv4_multicast_drops_ingress_is_egress_port() -> TestResult { internal_multicast_ip, Some("test_drops_underlay"), &[(ingress, types::Direction::Underlay)], - None, - false, // No NAT target for admin-scoped group + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // No NAT target for admin-scoped group None, ) .await; @@ -2303,8 +2283,10 @@ async fn test_ipv4_multicast_drops_ingress_is_egress_port() -> TestResult { multicast_ip, Some("test_replication"), &[], // External 
groups have no members - None, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; @@ -2336,7 +2318,7 @@ async fn test_ipv4_multicast_drops_ingress_is_egress_port() -> TestResult { .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2348,10 +2330,10 @@ async fn test_ipv4_multicast_drops_ingress_is_egress_port() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -2374,23 +2356,24 @@ async fn test_ipv6_multicast_hop_limit_zero() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; // Create external IPv6 group with NAT target (no members) let multicast_ip = IpAddr::V6(MULTICAST_TEST_IPV6); - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_ipv6_hop_limit_zero"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -2419,9 +2402,11 @@ async fn test_ipv6_multicast_hop_limit_zero() -> TestResult { let ctr_baseline = switch.get_counter("ipv6_ttl_invalid", None).await.unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); check_counter_incremented( switch, @@ -2433,7 +2418,7 @@ async fn test_ipv6_multicast_hop_limit_zero() -> TestResult { .await .unwrap(); - result + Ok(()) } #[tokio::test] @@ -2456,23 +2441,24 @@ async fn test_ipv6_multicast_hop_limit_one() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; // Create external IPv6 group with NAT target (no members) let multicast_ip = IpAddr::V6(MULTICAST_TEST_IPV6); - let vlan = Some(10); let created_group = create_test_multicast_group( switch, multicast_ip, Some("test_ipv6_hop_limit_one"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -2505,7 +2491,7 @@ async fn test_ipv6_multicast_hop_limit_one() -> TestResult { let ctr_baseline = switch.get_counter("ipv6_ttl_invalid", None).await.unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + 
switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2517,9 +2503,7 @@ async fn test_ipv6_multicast_hop_limit_one() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)).await } #[tokio::test] @@ -2534,13 +2518,14 @@ async fn test_ipv6_multicast_basic_replication_nat_ingress() -> TestResult { // Create admin-scoped IPv6 group for underlay replication first let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); let underlay_members = [(egress1, types::Direction::Underlay)]; + create_test_multicast_group( switch, internal_multicast_ip, Some("test_replication_internal"), &underlay_members, - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -2554,8 +2539,10 @@ async fn test_ipv6_multicast_basic_replication_nat_ingress() -> TestResult { multicast_ip, Some("test_ipv6_replication"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan }, None, ) .await; @@ -2584,7 +2571,7 @@ async fn test_ipv6_multicast_basic_replication_nat_ingress() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -2605,7 +2592,7 @@ async fn test_ipv6_multicast_basic_replication_nat_ingress() -> TestResult { .await .unwrap(); - let result = switch.packet_test(vec![test_pkt], expected_pkts); + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2617,9 +2604,7 @@ async fn test_ipv6_multicast_basic_replication_nat_ingress() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)).await } #[tokio::test] @@ -2644,8 +2629,8 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -2661,8 +2646,10 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { multicast_ip, Some("test_source_filtering"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, Some(vec![allowed_src]), ) .await; @@ -2691,7 +2678,7 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { switch, &allowed_pkt, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -2699,7 +2686,7 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { switch, &allowed_pkt, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -2731,7 +2718,7 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { .await .unwrap(); - let 
result = switch.packet_test(test_pkts, expected_pkts); + switch.packet_test(test_pkts, expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2743,10 +2730,10 @@ async fn test_ipv4_multicast_source_filtering_exact_match() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -2771,8 +2758,8 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -2792,8 +2779,10 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { multicast_ip, Some("test_source_filtering"), &[], // External groups have no members - vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, Some(vec![allowed_src]), ) .await; @@ -2830,7 +2819,7 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { switch, &allowed_pkt1, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -2838,7 +2827,7 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { switch, &allowed_pkt2, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -2846,7 +2835,7 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { switch, &allowed_pkt1, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -2854,7 +2843,7 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { switch, &allowed_pkt2, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -2898,7 +2887,7 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { .await .unwrap(); - let result = switch.packet_test(test_pkts, expected_pkts); + switch.packet_test(test_pkts, expected_pkts).unwrap(); check_counter_incremented( switch, @@ -2910,10 +2899,10 @@ async fn test_ipv4_multicast_source_filtering_prefix_match() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -2938,8 +2927,8 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -2961,8 +2950,10 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { multicast_ip, Some("test_ipv6_source_filtering"), &[], // External groups have no members - 
vlan, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan }, Some(sources), ) .await; @@ -3000,7 +2991,7 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { switch, &allowed_pkt1, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -3008,7 +2999,7 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { switch, &allowed_pkt2, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -3016,7 +3007,7 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { switch, &allowed_pkt1, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -3024,7 +3015,7 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { switch, &allowed_pkt2, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -3070,7 +3061,7 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { .await .unwrap(); - let result = switch.packet_test(test_pkts, expected_pkts); + switch.packet_test(test_pkts, expected_pkts).unwrap(); check_counter_incremented( switch, @@ -3082,10 +3073,10 @@ async fn test_ipv6_multicast_multiple_source_filtering() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -3109,8 +3100,8 @@ async fn test_multicast_dynamic_membership() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -3124,8 +3115,10 @@ async fn test_multicast_dynamic_membership() -> TestResult { multicast_ip, Some("test_dynamic_membership"), &[], // External groups have no members - vlan, - true, // Create NAT target pointing to underlay group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -3144,7 +3137,7 @@ async fn test_multicast_dynamic_membership() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -3152,7 +3145,7 @@ async fn test_multicast_dynamic_membership() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -3179,27 +3172,27 @@ async fn test_multicast_dynamic_membership() -> TestResult { // but we can update their NAT target, tag, vlan, and sources let external_update_entry = types::MulticastGroupUpdateExternalEntry { tag: None, - nat_target: create_nat_target_ipv4(), // Keep the same NAT target - vlan_id: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, // Keep the same NAT target + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, // Test with VLAN like reference test sources: None, }; - let 
updated = switch + switch .client .multicast_group_update_external( - &created_group.group_ip, + &get_group_ip(&created_group), &external_update_entry, ) .await .expect("Should be able to update group"); - assert_eq!(updated.members.len(), 0); // External groups don't have members - // Update the admin-scoped group membership to demonstrate dynamic membership let (port_id2, link_id2) = switch.link_id(egress2).unwrap(); let (port_id3, link_id3) = switch.link_id(egress3).unwrap(); - let internal_update_entry = types::MulticastGroupUpdateEntry { + let internal_update_entry = types::MulticastGroupUpdateUnderlayEntry { tag: None, members: vec![ types::MulticastGroupMember { @@ -3213,12 +3206,19 @@ async fn test_multicast_dynamic_membership() -> TestResult { direction: types::Direction::External, }, ], - sources: None, + }; + + let ipv6 = match internal_multicast_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), }; switch .client - .multicast_group_update(&internal_multicast_ip, &internal_update_entry) + .multicast_group_update_underlay( + &types::AdminScopedIpv6(ipv6), + &internal_update_entry, + ) .await .expect("Should be able to update admin-scoped group membership"); @@ -3227,14 +3227,14 @@ async fn test_multicast_dynamic_membership() -> TestResult { switch, &to_send, None, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); let to_recv2_new = prepare_expected_pkt( switch, &to_send, None, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress3), ); @@ -3254,12 +3254,14 @@ async fn test_multicast_dynamic_membership() -> TestResult { }, ]; - let result2 = switch.packet_test(vec![test_pkt_new], expected_pkts_new); - - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; + switch + .packet_test(vec![test_pkt_new], expected_pkts_new) + .unwrap(); - result2 + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -3286,8 +3288,8 @@ async fn test_multicast_multiple_groups() -> TestResult { (egress3, types::Direction::External), (egress4, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -3301,8 +3303,10 @@ async fn test_multicast_multiple_groups() -> TestResult { multicast_ip1, Some("test_multi_group_1"), &[], // External groups have no members - vlan1, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -3316,8 +3320,10 @@ async fn test_multicast_multiple_groups() -> TestResult { multicast_ip2, Some("test_multi_group_2"), &[], // External groups have no members - vlan2, - true, // Create NAT target that points to the admin-scoped group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(20) }, None, ) .await; @@ -3344,7 +3350,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send1, vlan1, - created_group1.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group1), Some(egress1), ); @@ -3352,7 +3358,7 @@ async fn test_multicast_multiple_groups() -> 
TestResult { switch, &to_send1, vlan1, - created_group1.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group1), Some(egress2), ); @@ -3360,7 +3366,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send2, vlan2, - created_group2.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group2), Some(egress3), ); @@ -3368,7 +3374,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send2, vlan2, - created_group2.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group2), Some(egress4), ); @@ -3377,7 +3383,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send1, vlan1, - created_group1.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group1), Some(egress3), ); @@ -3385,7 +3391,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send1, vlan1, - created_group1.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group1), Some(egress4), ); @@ -3393,7 +3399,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send2, vlan2, - created_group2.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group2), Some(egress1), ); @@ -3401,7 +3407,7 @@ async fn test_multicast_multiple_groups() -> TestResult { switch, &to_send2, vlan2, - created_group2.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group2), Some(egress2), ); @@ -3453,13 +3459,15 @@ async fn test_multicast_multiple_groups() -> TestResult { }, ]; - let result = switch.packet_test(test_pkts, expected_pkts); + switch.packet_test(test_pkts, expected_pkts).unwrap(); - cleanup_test_group(switch, created_group1.group_ip).await; - cleanup_test_group(switch, created_group2.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group1)) + .await + .unwrap(); + cleanup_test_group(switch, get_group_ip(&created_group2)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -3483,8 +3491,8 @@ async fn test_multicast_reset_all_tables() -> TestResult { (egress1, types::Direction::External), (egress2, types::Direction::External), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -3498,31 +3506,37 @@ async fn test_multicast_reset_all_tables() -> TestResult { multicast_ip1, Some("test_reset_all_1"), &[], // External groups have no members - vlan1, - true, // Create NAT target + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan1 }, None, ) .await; // IPv6 external group (non-admin-scoped must use external API) let multicast_ip2 = IpAddr::V6(MULTICAST_TEST_IPV6); + let vlan2 = Some(10); let created_group2 = create_test_multicast_group( switch, multicast_ip2, Some("test_reset_all_2"), - &[], // External groups have no members - Some(20), // Add VLAN for this external group - true, // Create NAT target - None, // No sources for this group + &[], // External groups have no members + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv6()), + }, + types::ExternalForwarding { vlan_id: vlan2 }, + None, // No sources for this group ) .await; // 2b. 
Admin-scoped IPv6 group to test internal API with custom replication parameters - let group_entry2b = types::MulticastGroupCreateEntry { - group_ip: Ipv6Addr::new(0xff04, 0, 0, 0, 0, 0, 0, 2), + let ipv6 = Ipv6Addr::new(0xff04, 0, 0, 0, 0, 0, 0, 2); + + let group_entry2b = types::MulticastGroupCreateUnderlayEntry { + group_ip: types::AdminScopedIpv6(ipv6), tag: Some("test_reset_all_2b".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: switch.link_id(egress1).unwrap().0, link_id: switch.link_id(egress1).unwrap().1, @@ -3532,7 +3546,7 @@ async fn test_multicast_reset_all_tables() -> TestResult { let created_group2b = switch .client - .multicast_group_create(&group_entry2b) + .multicast_group_create_underlay(&group_entry2b) .await .expect("Failed to create admin-scoped IPv6 multicast group") .into_inner(); @@ -3552,8 +3566,10 @@ async fn test_multicast_reset_all_tables() -> TestResult { multicast_ip3, Some("test_reset_all_3"), &[], // External groups have no members - vlan3, - true, // Create NAT target + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: vlan3 }, sources.clone(), ) .await; @@ -3569,8 +3585,10 @@ async fn test_multicast_reset_all_tables() -> TestResult { multicast_ip4, Some("test_reset_all_4"), &[], // External groups have no members - vlan4, - true, // IPv6 SSM external groups need NAT targets + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv6()), + }, + types::ExternalForwarding { vlan_id: vlan4 }, ipv6_sources.clone(), ) .await; @@ -3759,11 +3777,11 @@ async fn test_multicast_reset_all_tables() -> TestResult { // Try to get each group specifically for group_ip in [ - created_group1.group_ip, - created_group2.group_ip, - created_group2b.group_ip, - created_group3.group_ip, - created_group4.group_ip, + get_group_ip(&created_group1), + get_group_ip(&created_group2), + created_group2b.group_ip.to_ip_addr(), + get_group_ip(&created_group3), + get_group_ip(&created_group4), internal_multicast_ip, ] { let result = switch.client.multicast_group_get(&group_ip).await; @@ -3786,16 +3804,15 @@ async fn test_multicast_vlan_translation_not_possible() -> TestResult { let ingress = PhysPort(10); // Create admin-scoped IPv6 underlay group that will handle actual replication - // Must have at least one member to satisfy validation requirements let egress1 = PhysPort(15); let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); create_test_multicast_group( switch, internal_multicast_ip, Some("test_vlan_underlay"), - &[(egress1, types::Direction::External)], // Need at least one member for admin-scoped groups - None, - false, // Admin-scoped groups don't need NAT targets + &[(egress1, types::Direction::External)], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -3809,8 +3826,12 @@ async fn test_multicast_vlan_translation_not_possible() -> TestResult { multicast_ip, Some("test_vlan_behavior"), &[], // External groups have no members - output_vlan, - true, // Create NAT target + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, // Create NAT target + types::ExternalForwarding { + vlan_id: output_vlan, + }, None, ) .await; @@ -3843,12 +3864,12 @@ async fn test_multicast_vlan_translation_not_possible() -> TestResult { // is not possible for multicast packets let expected_pkts = vec![]; - let result = switch.packet_test(vec![test_pkt], 
expected_pkts); - - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; + switch.packet_test(vec![test_pkt], expected_pkts).unwrap(); - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -3873,8 +3894,8 @@ async fn test_multicast_multiple_packets() -> TestResult { (egress2, types::Direction::Underlay), (egress3, types::Direction::Underlay), ], - None, - false, // Admin-scoped groups don't need NAT targets + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // Admin-scoped groups don't need NAT targets None, ) .await; @@ -3888,8 +3909,10 @@ async fn test_multicast_multiple_packets() -> TestResult { multicast_ip, Some("test_performance"), &[], // External groups have no members - vlan, - true, // Create NAT target pointing to underlay group + types::InternalForwarding { + nat_target: Some(create_nat_target_ipv4()), + }, + types::ExternalForwarding { vlan_id: Some(10) }, None, ) .await; @@ -3919,7 +3942,7 @@ async fn test_multicast_multiple_packets() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress1), ); @@ -3927,7 +3950,7 @@ async fn test_multicast_multiple_packets() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress2), ); @@ -3935,7 +3958,7 @@ async fn test_multicast_multiple_packets() -> TestResult { switch, &to_send, vlan, - created_group.int_fwding.nat_target.as_ref(), + get_nat_target(&created_group), Some(egress3), ); @@ -3965,7 +3988,7 @@ async fn test_multicast_multiple_packets() -> TestResult { .await .unwrap(); - let result = switch.packet_test(test_pkts, expected_pkts); + switch.packet_test(test_pkts, expected_pkts).unwrap(); check_counter_incremented( switch, @@ -3977,10 +4000,10 @@ async fn test_multicast_multiple_packets() -> TestResult { .await .unwrap(); - cleanup_test_group(switch, created_group.group_ip).await; - cleanup_test_group(switch, internal_multicast_ip).await; - - result + cleanup_test_group(switch, get_group_ip(&created_group)) + .await + .unwrap(); + cleanup_test_group(switch, internal_multicast_ip).await } #[tokio::test] @@ -4076,7 +4099,7 @@ async fn test_multicast_no_group_configured() -> TestResult { #[tokio::test] #[ignore] -async fn test_external_group_nat_target_validation() { +async fn test_external_group_nat_target_validation() -> TestResult { let switch = &*get_switch().await; let (port_id, link_id) = switch.link_id(PhysPort(11)).unwrap(); @@ -4091,8 +4114,10 @@ async fn test_external_group_nat_target_validation() { let group_with_invalid_nat = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4("224.1.0.101".parse().unwrap()), tag: Some("test_invalid_nat".to_string()), - nat_target: nonexistent_nat_target.clone(), - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nonexistent_nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -4110,10 +4135,9 @@ async fn test_external_group_nat_target_validation() { } // Create admin-scoped IPv6 group first, then external group with valid NAT target - let admin_scoped_group = types::MulticastGroupCreateEntry { - group_ip: "ff04::1".parse().unwrap(), // Admin-scoped IPv6 + let admin_scoped_group = 
types::MulticastGroupCreateUnderlayEntry { + group_ip: "ff04::1".parse().unwrap(), tag: Some("test_admin_scoped".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: port_id.clone(), link_id, @@ -4123,12 +4147,24 @@ async fn test_external_group_nat_target_validation() { let created_admin = switch .client - .multicast_group_create(&admin_scoped_group) + .multicast_group_create_underlay(&admin_scoped_group) .await .expect("Should create admin-scoped group") .into_inner(); - assert!(created_admin.underlay_group_id.is_some()); + // Test with NAT: internal group created first with no NAT members gets IDs allocated + assert_ne!( + created_admin.external_group_id, created_admin.underlay_group_id, + "Internal group should have different external and underlay group IDs" + ); + assert!( + created_admin.external_group_id > 0, + "Internal group should have allocated external group ID" + ); + assert!( + created_admin.underlay_group_id > 0, + "Internal group should have allocated underlay group ID" + ); // Test 3: Now create external group with valid NAT target let valid_nat_target = types::NatTarget { @@ -4140,8 +4176,10 @@ async fn test_external_group_nat_target_validation() { let group_with_valid_nat = types::MulticastGroupCreateExternalEntry { group_ip: IpAddr::V4("224.1.0.102".parse().unwrap()), tag: Some("test_valid_nat".to_string()), - nat_target: valid_nat_target, - vlan_id: Some(10), + internal_forwarding: types::InternalForwarding { + nat_target: Some(valid_nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, sources: None, }; @@ -4152,24 +4190,28 @@ async fn test_external_group_nat_target_validation() { .expect("Should create external group with valid NAT target") .into_inner(); - // External groups created via external API don't have external_group_id unless - // there are external members in the referenced admin-scoped group - assert!( - created_external.external_group_id.is_none(), - "External API groups shouldn't have external_group_id without external members" - ); - assert!( - created_external.underlay_group_id.is_none(), - "External group should not have underlay_group_id" + // External group should share the same external ID as the internal group it references + assert_eq!( + created_external.external_group_id, created_admin.external_group_id, + "External group should reuse the internal group's external_group_id" ); + + // Verify NAT target configuration assert_eq!( - created_external.members.len(), - 0, - "External group should have no members" + created_external + .internal_forwarding + .nat_target + .as_ref() + .unwrap() + .internal_ip, + valid_nat_target.internal_ip, + "External group's NAT target should point to the correct internal IP" ); - cleanup_test_group(switch, created_admin.group_ip).await; - cleanup_test_group(switch, created_external.group_ip).await; + cleanup_test_group(switch, created_admin.group_ip.to_ip_addr()) + .await + .unwrap(); + cleanup_test_group(switch, created_external.group_ip).await } #[tokio::test] @@ -4181,10 +4223,9 @@ async fn test_ipv6_multicast_scope_validation() { // Test all IPv6 multicast scope types for proper API routing // Admin-local scope (ff04::/16) - should work with internal API - let admin_local_group = types::MulticastGroupCreateEntry { + let admin_local_group = types::MulticastGroupCreateUnderlayEntry { group_ip: "ff04::100".parse().unwrap(), tag: Some("test_admin_local".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: 
egress_port.clone(), link_id: egress_link, @@ -4194,7 +4235,7 @@ async fn test_ipv6_multicast_scope_validation() { let admin_local_result = switch .client - .multicast_group_create(&admin_local_group) + .multicast_group_create_underlay(&admin_local_group) .await; assert!( admin_local_result.is_ok(), @@ -4202,10 +4243,9 @@ async fn test_ipv6_multicast_scope_validation() { ); // Site-local scope (ff05::/16) - should work with internal API - let site_local_group = types::MulticastGroupCreateEntry { + let site_local_group = types::MulticastGroupCreateUnderlayEntry { group_ip: "ff05::200".parse().unwrap(), tag: Some("test_site_local".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: egress_port.clone(), link_id: egress_link, @@ -4215,7 +4255,7 @@ async fn test_ipv6_multicast_scope_validation() { let site_local_result = switch .client - .multicast_group_create(&site_local_group) + .multicast_group_create_underlay(&site_local_group) .await; assert!( site_local_result.is_ok(), @@ -4223,10 +4263,9 @@ async fn test_ipv6_multicast_scope_validation() { ); // Organization-local scope (ff08::/16) - should work with internal API - let org_local_group = types::MulticastGroupCreateEntry { + let org_local_group = types::MulticastGroupCreateUnderlayEntry { group_ip: "ff08::300".parse().unwrap(), tag: Some("test_org_local".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: egress_port.clone(), link_id: egress_link, @@ -4234,18 +4273,19 @@ async fn test_ipv6_multicast_scope_validation() { }], }; - let org_local_result = - switch.client.multicast_group_create(&org_local_group).await; + let org_local_result = switch + .client + .multicast_group_create_underlay(&org_local_group) + .await; assert!( org_local_result.is_ok(), "Organization-local scope (ff08::/16) should work with internal API" ); - // Global scope (ff0e::/16) - should be rejected by internal API - let global_scope_group = types::MulticastGroupCreateEntry { + // Global scope (ff0e::/16) - should be rejected by server-side validation + let global_scope_group = types::MulticastGroupCreateUnderlayEntry { group_ip: "ff0e::400".parse().unwrap(), tag: Some("test_global".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: egress_port.clone(), link_id: egress_link, @@ -4255,26 +4295,18 @@ async fn test_ipv6_multicast_scope_validation() { let global_scope_result = switch .client - .multicast_group_create(&global_scope_group) + .multicast_group_create_underlay(&global_scope_group) .await; assert!( global_scope_result.is_err(), - "Global scope (ff0e::/16) should be rejected by internal API" - ); - let error_msg = format!("{:?}", global_scope_result.unwrap_err()); - assert!( - error_msg.contains( - "Non-admin-scoped IPv6 multicast groups must use the external API" - ), - "Error should indicate external API required for global scope" + "Global scope (ff0e::/16) should be rejected by server-side validation" ); // Test the reverse: admin-scoped should be rejected by external API // First create an admin-scoped group to reference - let admin_target_group = types::MulticastGroupCreateEntry { + let admin_target_group = types::MulticastGroupCreateUnderlayEntry { group_ip: "ff04::1000".parse().unwrap(), tag: Some("test_target".to_string()), - sources: None, members: vec![types::MulticastGroupMember { port_id: egress_port.clone(), link_id: egress_link, @@ -4284,19 +4316,22 @@ async fn test_ipv6_multicast_scope_validation() { let target_result = switch .client - 
.multicast_group_create(&admin_target_group) + .multicast_group_create_underlay(&admin_target_group) .await .expect("Should create target group"); let admin_scoped_external = types::MulticastGroupCreateExternalEntry { group_ip: "ff04::500".parse().unwrap(), tag: Some("test_admin_external".to_string()), - nat_target: types::NatTarget { - internal_ip: "ff04::1000".parse().unwrap(), - inner_mac: MacAddr::new(0x02, 0x00, 0x00, 0x00, 0x00, 0x01).into(), - vni: 100.into(), + internal_forwarding: types::InternalForwarding { + nat_target: Some(types::NatTarget { + internal_ip: "ff04::1000".parse().unwrap(), + inner_mac: MacAddr::new(0x02, 0x00, 0x00, 0x00, 0x00, 0x01) + .into(), + vni: 100.into(), + }), }, - vlan_id: Some(42), + external_forwarding: types::ExternalForwarding { vlan_id: Some(42) }, sources: None, }; @@ -4323,29 +4358,29 @@ async fn test_ipv6_multicast_scope_validation() { switch .client - .multicast_group_delete(&admin_local_group.group_ip) + .multicast_group_delete(&admin_local_group.group_ip.to_ip_addr()) .await .ok(); switch .client - .multicast_group_delete(&site_local_group.group_ip) + .multicast_group_delete(&site_local_group.group_ip.to_ip_addr()) .await .ok(); switch .client - .multicast_group_delete(&org_local_group.group_ip) + .multicast_group_delete(&org_local_group.group_ip.to_ip_addr()) .await .ok(); switch .client - .multicast_group_delete(&target_group.group_ip) + .multicast_group_delete(&target_group.group_ip.to_ip_addr()) .await .ok(); } #[tokio::test] #[ignore] -async fn test_multicast_group_id_recycling() { +async fn test_multicast_group_id_recycling() -> TestResult { let switch = &*get_switch().await; // Use admin-scoped IPv6 addresses that get group IDs assigned @@ -4359,30 +4394,28 @@ async fn test_multicast_group_id_recycling() { group1_ip, Some("test_recycling_1"), &[(PhysPort(11), types::Direction::External)], - None, - false, + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - let group1_external_id = group1.external_group_id; - assert!(group1_external_id.is_some()); - // Create second group and capture its group IDs let group2 = create_test_multicast_group( switch, group2_ip, Some("test_recycling_2"), &[(PhysPort(12), types::Direction::External)], - None, - false, + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - let group2_external_id = group2.external_group_id; - assert!(group2_external_id.is_some()); - assert_ne!(group1_external_id, group2_external_id); + assert_ne!( + get_external_group_id(&group1), + get_external_group_id(&group2) + ); // Delete the first group switch @@ -4399,7 +4432,9 @@ async fn test_multicast_group_id_recycling() { .await .expect("Should be able to list groups"); assert!( - !groups_after_delete1.iter().any(|g| g.group_ip == group1_ip), + !groups_after_delete1 + .iter() + .any(|g| get_group_ip(g) == group1_ip), "Group1 should be deleted" ); @@ -4409,19 +4444,17 @@ async fn test_multicast_group_id_recycling() { group3_ip, Some("test_recycling_3"), &[(PhysPort(13), types::Direction::External)], - None, - false, + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - let group3_external_id = group3.external_group_id; - assert!(group3_external_id.is_some()); - // Verify that ID recycling is working - group3 should get an ID that was // previously used assert_ne!( - group2_external_id, group3_external_id, + get_external_group_id(&group2), + 
get_external_group_id(&group3), "Third group should get a different ID than the active second group" ); @@ -4440,7 +4473,9 @@ async fn test_multicast_group_id_recycling() { .await .expect("Should be able to list groups"); assert!( - !groups_after_delete2.iter().any(|g| g.group_ip == group2_ip), + !groups_after_delete2 + .iter() + .any(|g| get_group_ip(g) == group2_ip), "Group2 should be deleted" ); @@ -4450,23 +4485,1672 @@ async fn test_multicast_group_id_recycling() { group4_ip, Some("test_recycling_4"), &[(PhysPort(14), types::Direction::External)], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, - false, + ) + .await; + + // Group4 should reuse Group2's underlay ID (LIFO: underlay ID was allocated last, returned first) + assert_eq!( + get_underlay_group_id(&group2), + Some(get_external_group_id(&group4)), + "Fourth group should reuse Group2's underlay ID due to LIFO recycling" + ); + + cleanup_test_group(switch, group3_ip).await.unwrap(); + cleanup_test_group(switch, group4_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_empty_then_add_members_ipv6() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 100)); + let external_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 100)); + + // Create internal admin-scoped group (empty, no members) + create_test_multicast_group( + &switch, + internal_group_ip, + Some("empty_internal_ipv6_group"), + &[], // No members (Omicron setup) + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, None, ) .await; - let group4_external_id = group4.external_group_id; - assert!(group4_external_id.is_some()); + // Create external group that references the internal group (empty, no members) + let ipv6 = match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }; + + let nat_target = types::NatTarget { + internal_ip: ipv6, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x00, 0x00, 0x01).into(), + vni: 100.into(), + }; + + let external_group = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: Some("empty_external_ipv6_group".to_string()), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, // Test with VLAN like reference test + sources: None, + }; + + switch + .client + .multicast_group_create_external(&external_group) + .await + .expect("Should create empty external IPv6 group"); + + // Verify both groups have no members initially + let groups = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list groups"); + + let internal_group = groups + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the created internal group"); + + let external_group = groups + .iter() + .find(|g| get_group_ip(g) == external_group_ip) + .expect("Should find the created external group"); + + assert!( + get_members(internal_group) + .map(|m| m.is_empty()) + .unwrap_or(true), + "Empty internal group should have no members initially" + ); + assert!( + get_members(external_group) + .map(|m| m.is_empty()) + .unwrap_or(true), + "Empty external group should have no members initially" + ); + + // Test: Send Geneve packet targeting internal group when empty - should have no replication + let ingress_port = 
PhysPort(10); + let src_ip = "2001:db8::1"; + let src_port = 3333; + let dst_port = 4444; + + // Create the original IPv6 packet payload + let src_mac = switch.get_port_mac(ingress_port).unwrap(); + let og_pkt = create_ipv6_multicast_packet( + external_group_ip, + src_mac, + src_ip, + src_port, + dst_port, + ); + + // Create Geneve packet targeting the internal group + let eth_hdr_len = 14; + let payload = og_pkt.deparse().unwrap()[eth_hdr_len..].to_vec(); + let geneve_pkt = common::gen_geneve_packet_with_mcast_tag( + Endpoint::parse( + GIMLET_MAC, + &GIMLET_IP.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + Endpoint::parse( + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), + &nat_target.internal_ip.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + eth::ETHER_IPV6, + nat_target.vni.clone().into(), + 2, // mcast_tag = 2 (allows both external and underlay replication) + &payload, + ); + + let send = TestPacket { + packet: Arc::new(geneve_pkt.clone()), + port: ingress_port, + }; + + // Verify no packets are replicated when group is empty + switch.packet_test(vec![send], Vec::new())?; + + // Verify bitmap table is initially empty for both group IDs + let bitmap_table_initial = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table initially"); + // Should have no bitmap entries when groups are empty + assert!( + bitmap_table_initial.entries.is_empty(), + "Bitmap table should be empty when groups have no members" + ); + + // Test: Add members to the internal group using update (mix of External and Underlay) + let egress1 = PhysPort(15); + let egress2 = PhysPort(17); + let egress3 = PhysPort(18); + let (port_id1, link_id1) = switch.link_id(egress1).unwrap(); + let (port_id2, link_id2) = switch.link_id(egress2).unwrap(); + let (port_id3, link_id3) = switch.link_id(egress3).unwrap(); + + let external_member1 = types::MulticastGroupMember { + port_id: port_id1, + link_id: link_id1, + direction: types::Direction::External, + }; + + let external_member2 = types::MulticastGroupMember { + port_id: port_id2, + link_id: link_id2, + direction: types::Direction::External, + }; + + let underlay_member = types::MulticastGroupMember { + port_id: port_id3, + link_id: link_id3, + direction: types::Direction::Underlay, + }; + + // Update the internal group to add members (2 external, 1 underlay) + // Meaning: two decap/port-bitmap members. 
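+ // Only the two External members are written into the egress decap port bitmap (tbl_decap_ports); + // the Underlay member keeps its Geneve encapsulation, so the bitmap ends up as a single entry covering those two ports.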
+ let update_entry = types::MulticastGroupUpdateUnderlayEntry { + tag: Some("empty_internal_ipv6_group".to_string()), + members: vec![external_member1, external_member2, underlay_member], + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + switch + .client + .multicast_group_update_underlay(&ipv6_update, &update_entry) + .await + .expect("Should update internal group with members"); + + // Verify members were added + let updated_groups = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list updated groups"); + + let updated_internal = updated_groups + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the updated internal group"); - // Group4 should reuse group2's recently freed ID due to stack-like - // allocation assert_eq!( - group2_external_id, group4_external_id, - "Fourth group should reuse second group's recycled ID" + get_members(updated_internal).map(|m| m.len()).unwrap_or(0), + 3, + "Internal group should now have 3 members (2 external, 1 underlay)" ); - // Cleanup - clean up remaining active groups - cleanup_test_group(switch, group3_ip).await; - cleanup_test_group(switch, group4_ip).await; + // Test: Send Geneve packet again targeting internal group - + // This should now replicate to all 3 members (2 external + 1 underlay) + let og_pkt2 = create_ipv6_multicast_packet( + external_group_ip, + src_mac, + "2001:db8::2", // Different source IP to differentiate packets + 3334, + 4445, + ); + + // Test packet for use in final assertion + let to_send_final = og_pkt2.clone(); + + // Create Geneve packet for the underlay to underlay replication, no decap + let payload2 = og_pkt2.deparse().unwrap()[eth_hdr_len..].to_vec(); + let to_send_again = common::gen_geneve_packet_with_mcast_tag( + Endpoint::parse( + GIMLET_MAC, + &GIMLET_IP.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + Endpoint::parse( + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), + &nat_target.internal_ip.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + eth::ETHER_IPV6, + nat_target.vni.clone().into(), + 2, // mcast_tag = 2 (allows both external and underlay replication) + &payload2, + ); + + // Create expected packets for all three egress ports + let expected1 = prepare_expected_pkt( + switch, + &og_pkt2, + Some(10), // VLAN should come from external group + None, // No NAT target for external members + Some(egress1), + ); + + let expected2 = prepare_expected_pkt( + switch, + &og_pkt2, + Some(10), // VLAN should come from external group + None, // No NAT target for external members (like reference test) + Some(egress2), + ); + + // Underlay member gets the Geneve packet unchanged + let expected3 = prepare_expected_pkt( + switch, + &to_send_again, + None, // No VLAN + None, // No NAT target for underlay member + Some(egress3), + ); + + let send_again = TestPacket { + packet: Arc::new(to_send_again), + port: ingress_port, + }; + + let expected_pkts = vec![ + TestPacket { + packet: Arc::new(expected1), + port: egress1, + }, + TestPacket { + packet: Arc::new(expected2), + port: egress2, + }, + TestPacket { + packet: Arc::new(expected3), + port: egress3, + }, + ]; + + // Verify packets are now replicated to all 3 members (2 external + 1 underlay) + switch.packet_test(vec![send_again], expected_pkts)?; + + // Verify bitmap table now has entry for external group ID only (1 entry with 
bitmap of 2 ports) + let bitmap_table_with_members = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table with members"); + assert_eq!( + bitmap_table_with_members.entries.len(), + 1, + "Bitmap table should have entry for external group ID when group has members" + ); + + // Test: Update internal group back to empty (remove all members) + let empty_update_entry = types::MulticastGroupUpdateUnderlayEntry { + tag: None, + members: vec![], // Remove all members + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + switch + .client + .multicast_group_update_underlay(&ipv6_update, &empty_update_entry) + .await + .expect("Should update internal group back to empty"); + + // Verify the group is now empty + let groups_after_empty = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list groups after emptying"); + + let empty_internal = groups_after_empty + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the internal group"); + + assert_eq!( + get_members(empty_internal).map(|m| m.len()).unwrap_or(0), + 0, + "Internal group should be empty again" + ); + + // Verify bitmap table is empty again after removing all members + let bitmap_table_final = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table after emptying"); + // Should have no bitmap entries when group is empty again + assert!( + bitmap_table_final.entries.is_empty(), + "Bitmap table should be empty again when group has no members" + ); + + let send_final = TestPacket { + packet: Arc::new(to_send_final), + port: ingress_port, + }; + + // Should only see packet on ingress, no replication to egress ports + let expected_final = vec![]; + + switch.packet_test(vec![send_final], expected_final)?; + + cleanup_test_group(&switch, external_group_ip) + .await + .unwrap(); + cleanup_test_group(&switch, internal_group_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_empty_then_add_members_ipv4() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 101)); + let external_group_ip = IpAddr::V4(Ipv4Addr::new(224, 1, 2, 100)); + + // Create internal admin-scoped group (empty, no members) + create_test_multicast_group( + &switch, + internal_group_ip, + Some("empty_internal_ipv4_nat_target"), + &[], // No members + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Create external group that references the internal group (empty, no members) + let nat_target = types::NatTarget { + internal_ip: match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x02, 0x64).into(), + vni: 100.into(), + }; + + let external_group = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: Some("empty_external_ipv4_group".to_string()), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, // Test with VLAN like reference test + sources: None, + }; + + switch + .client + .multicast_group_create_external(&external_group) + .await + 
.expect("Should create empty external IPv4 group"); + + // Verify both groups have no members initially + let groups = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list groups"); + + let internal_group = groups + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the created internal group"); + + let external_group = groups + .iter() + .find(|g| get_group_ip(g) == external_group_ip) + .expect("Should find the created external group"); + + assert!( + get_members(internal_group) + .map(|m| m.is_empty()) + .unwrap_or(true), + "Empty internal group should have no members initially" + ); + assert!( + get_members(external_group) + .map(|m| m.is_empty()) + .unwrap_or(true), + "Empty external group should have no members initially" + ); + + // Test: Send Geneve packet targeting internal group when empty - should have no replication + let ingress_port = PhysPort(10); + let src_ip = "192.168.1.10"; + let src_port = 3333; + let dst_port = 4444; + + // Create the original IPv4 packet payload + let src_mac = switch.get_port_mac(ingress_port).unwrap(); + let og_pkt = create_ipv4_multicast_packet( + external_group_ip, + src_mac, + src_ip, + src_port, + dst_port, + ); + + // Create Geneve packet targeting the internal group (like test_encapped_multicast_geneve_mcast_tag_to_underlay_members) + let eth_hdr_len = 14; + let payload = og_pkt.deparse().unwrap()[eth_hdr_len..].to_vec(); + let geneve_pkt = common::gen_geneve_packet_with_mcast_tag( + Endpoint::parse( + GIMLET_MAC, + &GIMLET_IP.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + Endpoint::parse( + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), + &nat_target.internal_ip.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + eth::ETHER_IPV4, + nat_target.vni.clone().into(), + 2, // mcast_tag = 2 (allows both external and underlay replication) + &payload, + ); + + let send = TestPacket { + packet: Arc::new(geneve_pkt.clone()), + port: ingress_port, + }; + + // Verify no packets are replicated when group is empty + switch.packet_test(vec![send], Vec::new())?; + + // Verify bitmap table is initially empty for both group IDs + let bitmap_table_initial = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table initially"); + // Should have no bitmap entries when groups are empty + assert!( + bitmap_table_initial.entries.is_empty(), + "Bitmap table should be empty when groups have no members" + ); + + // Test: Add members to the internal group using update (mix of External and Underlay) + let egress1 = PhysPort(15); + let egress2 = PhysPort(17); + let egress3 = PhysPort(18); + let (port_id1, link_id1) = switch.link_id(egress1).unwrap(); + let (port_id2, link_id2) = switch.link_id(egress2).unwrap(); + let (port_id3, link_id3) = switch.link_id(egress3).unwrap(); + + let external_member1 = types::MulticastGroupMember { + port_id: port_id1, + link_id: link_id1, + direction: types::Direction::External, + }; + + let external_member2 = types::MulticastGroupMember { + port_id: port_id2, + link_id: link_id2, + direction: types::Direction::External, + }; + + let underlay_member = types::MulticastGroupMember { + port_id: port_id3, + link_id: link_id3, + direction: types::Direction::Underlay, + }; + + // Update the internal group to add members (2 external, 1 underlay) + let update_entry = types::MulticastGroupUpdateUnderlayEntry { + tag: 
Some("empty_internal_ipv4_nat_target".to_string()), + members: vec![external_member1, external_member2, underlay_member], + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + switch + .client + .multicast_group_update_underlay(&ipv6_update, &update_entry) + .await + .expect("Should update internal group with members"); + + // Verify members were added to the internal group + let updated_groups = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list updated groups"); + + let updated_internal = updated_groups + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the updated internal group"); + + assert_eq!( + get_members(updated_internal).map(|m| m.len()).unwrap_or(0), + 3, + "Internal group should now have 3 members (2 external, 1 underlay)" + ); + + // Test: Send Geneve packet again targeting internal group + // This should now replicate to all 3 members + let og_pkt2 = create_ipv4_multicast_packet( + external_group_ip, + src_mac, + "10.1.1.2", + 1235, + 5679, + ); + + // Test packet for use in final assertion + let to_send_final = og_pkt2.clone(); + + // Create Geneve packet for the second test + let payload2 = og_pkt2.deparse().unwrap()[eth_hdr_len..].to_vec(); + let test_packet2 = common::gen_geneve_packet_with_mcast_tag( + Endpoint::parse( + GIMLET_MAC, + &GIMLET_IP.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + Endpoint::parse( + &MacAddr::from(nat_target.internal_ip.derive_multicast_mac()) + .to_string(), + &nat_target.internal_ip.to_string(), + geneve::GENEVE_UDP_PORT, + ) + .unwrap(), + eth::ETHER_IPV4, + nat_target.vni.clone().into(), + 2, // mcast_tag = 2 (allows both external and underlay replication) + &payload2, + ); + + // Create expected packets for all three egress ports + let expected1 = prepare_expected_pkt( + switch, + &og_pkt2, + Some(10), // VLAN should come from external group + None, // No NAT target for external members (like reference test) + Some(egress1), + ); + + let expected2 = prepare_expected_pkt( + switch, + &og_pkt2, + Some(10), // VLAN should come from external group + None, // No NAT target for external members (like reference test) + Some(egress2), + ); + + // Underlay member gets the Geneve packet unchanged (like test_encapped_multicast_geneve_mcast_tag_to_underlay_members) + let expected3 = prepare_expected_pkt( + switch, + &test_packet2, + None, // No VLAN + None, // No NAT target for underlay member + Some(egress3), + ); + + let send_again = TestPacket { + packet: Arc::new(test_packet2), + port: ingress_port, + }; + + let expected_pkts = vec![ + TestPacket { + packet: Arc::new(expected1), + port: egress1, + }, + TestPacket { + packet: Arc::new(expected2), + port: egress2, + }, + TestPacket { + packet: Arc::new(expected3), + port: egress3, + }, + ]; + + // Verify packets are now replicated to all 3 members via NAT target + switch.packet_test(vec![send_again], expected_pkts)?; + + // Verify bitmap table now has entry for external group ID only (underlay doesn't need decap bitmap) + let bitmap_table_with_members = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table with members"); + // Should have bitmap entry for external group ID only (underlay doesn't need decap bitmap) + assert_eq!( + bitmap_table_with_members.entries.len(), + 1, + "Bitmap table should have entry for 
external group ID when group has members" + ); + + // Test: Update internal group back to empty (remove all members) + let empty_update_entry = types::MulticastGroupUpdateUnderlayEntry { + tag: None, + members: vec![], // Remove all members + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + switch + .client + .multicast_group_update_underlay(&ipv6_update, &empty_update_entry) + .await + .expect("Should update internal group back to empty"); + + // Verify the group is now empty + let groups_after_empty = switch + .client + .multicast_groups_list_stream(None) + .try_collect::>() + .await + .expect("Should be able to list groups after emptying"); + + let empty_internal = groups_after_empty + .iter() + .find(|g| get_group_ip(g) == internal_group_ip) + .expect("Should find the internal group"); + + assert_eq!( + get_members(empty_internal).map(|m| m.len()).unwrap_or(0), + 0, + "Internal group should be empty again" + ); + + // Verify bitmap table is empty again after removing all members + let bitmap_table_final = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table after emptying"); + // Should have no bitmap entries when group is empty again + assert!( + bitmap_table_final.entries.is_empty(), + "Bitmap table should be empty again when group has no members" + ); + + // Test: Send packet again - should now only reach ingress (no replication) + let send_final = TestPacket { + packet: Arc::new(to_send_final), + port: ingress_port, + }; + + // Should only see packet on ingress, no replication to egress ports + let expected_final = vec![]; + + switch.packet_test(vec![send_final], expected_final)?; + + cleanup_test_group(&switch, external_group_ip) + .await + .unwrap(); + cleanup_test_group(&switch, internal_group_ip).await +} + +// ============================================================================= +// ROLLBACK TESTS +// ============================================================================= + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_external_group_creation_failure() -> TestResult +{ + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 102)); + let external_group_ip = IpAddr::V4(Ipv4Addr::new(224, 1, 2, 102)); + + // Create internal group with members first + create_test_multicast_group( + &switch, + internal_group_ip, + Some("rollback_test_internal"), + &[ + (PhysPort(15), types::Direction::External), + (PhysPort(17), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Get initial state - should have internal group but no external group + let initial_groups = switch + .client + .multicast_groups_list(None, None) + .await + .expect("Should be able to list initial groups"); + let initial_internal_count = initial_groups.items.len(); + + // Get initial table states + let initial_route_table = switch + .client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table"); + let initial_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table"); + let initial_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able 
to dump bitmap table"); + + // Attempt to create external group that will cause failure during validation + // Use a non-existent internal group IP to trigger "NAT target must be a tracked multicast group" error + let invalid_internal_ip = match internal_group_ip { + IpAddr::V6(ipv6) => { + // Create a different admin-scoped IP that doesn't exist + let mut bytes = ipv6.octets(); + bytes[15] = bytes[15].wrapping_add(1); // Change last byte + Ipv6Addr::from(bytes) + } + _ => panic!("Expected IPv6 address"), + }; + let nat_target = types::NatTarget { + internal_ip: invalid_internal_ip, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x02, 0x66).into(), + vni: 200.into(), + }; + + let external_entry = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, + sources: None, + }; + + // This should fail and trigger rollback + let result = switch + .client + .multicast_group_create_external(&external_entry) + .await; + + // Verify the creation failed + assert!( + result.is_err(), + "External group creation should have failed" + ); + + // Verify rollback worked - check that no state was left behind + + // Group count should be unchanged + let post_failure_groups = switch + .client + .multicast_groups_list(None, None) + .await + .expect("Should be able to list groups after rollback"); + assert_eq!( + post_failure_groups.items.len(), + initial_internal_count, + "Group count should be unchanged after rollback" + ); + + // No external group should exist + let external_groups: Vec<_> = post_failure_groups + .items + .iter() + .filter(|g| get_group_ip(g) == external_group_ip) + .collect(); + + assert!( + external_groups.is_empty(), + "No external group should exist after rollback" + ); + + // Table states should be unchanged + let post_route_table = switch + .client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table"); + + assert_eq!( + post_route_table.entries.len(), + initial_route_table.entries.len(), + "Route table should be unchanged after rollback" + ); + + let post_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table"); + + assert_eq!( + post_nat_table.entries.len(), + initial_nat_table.entries.len(), + "NAT table should be unchanged after rollback" + ); + + let post_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table"); + + assert_eq!( + post_bitmap_table.entries.len(), + initial_bitmap_table.entries.len(), + "Bitmap table should be unchanged after rollback" + ); + + cleanup_test_group(&switch, internal_group_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_member_update_failure() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 103)); + + // Create internal group with initial members + create_test_multicast_group( + &switch, + internal_group_ip, + Some("rollback_member_test"), + &[ + (PhysPort(15), types::Direction::External), + (PhysPort(17), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Get initial state + let initial_group = switch + 
.client + .multicast_group_get(&internal_group_ip) + .await + .expect("Should be able to get initial group state"); + let initial_member_count = get_members(&initial_group.into_inner()) + .map(|m| m.len()) + .unwrap_or(0); + + // Try to add a member that should cause ASIC operations to fail + // Use a valid port but with an invalid link ID that should cause issues + let (valid_port_id, _) = switch.link_id(PhysPort(15)).unwrap(); // Use valid port 15 + let invalid_members = vec![types::MulticastGroupMember { + port_id: valid_port_id, + link_id: types::LinkId(255), // Use max u8 value which should cause ASIC failure + direction: types::Direction::External, + }]; + + let update_request = types::MulticastGroupUpdateUnderlayEntry { + members: invalid_members, + tag: None, + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + // This should fail and trigger rollback + let result = switch + .client + .multicast_group_update_underlay(&ipv6_update, &update_request) + .await; + + // Verify the update failed + assert!(result.is_err(), "Member update should have failed"); + + // Verify rollback worked - group should be unchanged + let post_failure_group = switch + .client + .multicast_group_get(&internal_group_ip) + .await + .expect("Should be able to get group state after rollback") + .into_inner(); + + assert_eq!( + get_members(&post_failure_group) + .map(|m| m.len()) + .unwrap_or(0), + initial_member_count, + "Member count should be unchanged after rollback" + ); + + cleanup_test_group(&switch, internal_group_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_nat_transition_failure() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 104)); + let external_group_ip = IpAddr::V4(Ipv4Addr::new(224, 1, 2, 104)); + + // Create internal group + create_test_multicast_group( + &switch, + internal_group_ip, + Some("nat_rollback_test"), + &[(PhysPort(15), types::Direction::External)], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Create external group with NAT + let nat_target = types::NatTarget { + internal_ip: match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x02, 0x68).into(), + vni: 104.into(), + }; + + let external_entry = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target.clone()), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, + sources: None, + }; + + switch + .client + .multicast_group_create_external(&external_entry) + .await + .expect("Should create external group for NAT rollback test"); + + // Get initial external group state + let initial_external_group = switch + .client + .multicast_group_get(&external_group_ip) + .await + .expect("Should be able to get initial external group state"); + + // Get initial NAT table state + let initial_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table"); + + // Attempt to update NAT target to invalid configuration that should fail + let invalid_nat_target = types::NatTarget { + internal_ip: match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + 
_ => panic!("Expected IPv6 address"), + }, + // Invalid MAC (not multicast) + inner_mac: MacAddr::new(0x02, 0x00, 0x00, 0x00, 0x00, 0x00).into(), + vni: 300.into(), + }; + + let invalid_update = types::MulticastGroupUpdateExternalEntry { + tag: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(invalid_nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, + sources: None, + }; + + // This should fail and trigger NAT rollback + let result = switch + .client + .multicast_group_update_external(&external_group_ip, &invalid_update) + .await; + + // Verify the update failed + assert!(result.is_err(), "NAT update should have failed"); + + // Verify rollback worked - external group should be unchanged + let post_failure_external_group = switch + .client + .multicast_group_get(&external_group_ip) + .await + .expect("Should be able to get external group state after rollback"); + + // NAT target should be unchanged + let initial_group_inner = initial_external_group.into_inner(); + let post_failure_group_inner = post_failure_external_group.into_inner(); + + let initial_nat = get_nat_target(&initial_group_inner); + let current_nat = get_nat_target(&post_failure_group_inner); + + assert!( + initial_nat.is_some(), + "Initial group should have NAT target" + ); + assert!( + current_nat.is_some(), + "Current group should have NAT target" + ); + + if let (Some(original), Some(current)) = (initial_nat, current_nat) { + assert_eq!( + current.vni, original.vni, + "VNI should be unchanged after rollback" + ); + assert_eq!( + current.inner_mac, original.inner_mac, + "MAC should be unchanged after rollback" + ); + } + + // Verify NAT table state is unchanged + let post_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table"); + + assert_eq!( + post_nat_table.entries.len(), + initial_nat_table.entries.len(), + "NAT table should be unchanged after rollback" + ); + + cleanup_test_group(&switch, external_group_ip) + .await + .unwrap(); + cleanup_test_group(&switch, internal_group_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_vlan_propagation_consistency() { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 105)); + let external_group_ip = IpAddr::V4(Ipv4Addr::new(224, 1, 2, 105)); + + // Create internal group with members (so bitmap entry get created) + create_test_multicast_group( + &switch, + internal_group_ip, + Some("vlan_propagation_test"), + &[ + (PhysPort(15), types::Direction::External), + (PhysPort(17), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Get initial bitmap table state + let _initial_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table"); + + // First, delete the internal group to break the NAT target reference + cleanup_test_group(&switch, internal_group_ip) + .await + .expect("Should cleanup internal group"); + + let nat_target = types::NatTarget { + internal_ip: match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x02, 0x69).into(), + vni: 105.into(), + }; + + // Get initial table states before attempting creation + let initial_route_table = switch + 
.client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table"); + let initial_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table"); + + // Attempt to create external group that references the deleted internal group + let external_entry = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(999) }, + sources: None, + }; + + // This should fail because the NAT target (internal group) no longer exists + let result = switch + .client + .multicast_group_create_external(&external_entry) + .await; + + assert!( + result.is_err(), + "External group creation should fail when NAT target doesn't exist" + ); + + // Verify rollback worked - tables should remain unchanged + let post_route_table = switch + .client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table after rollback"); + let post_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table after rollback"); + + assert_eq!( + initial_route_table.entries.len(), + post_route_table.entries.len(), + "Route table should be unchanged after rollback" + ); + assert_eq!( + initial_bitmap_table.entries.len(), + post_bitmap_table.entries.len(), + "Bitmap table should be unchanged after rollback" + ); + + // No external group should exist since creation failed + let groups = switch + .client + .multicast_groups_list(None, None) + .await + .expect("Should be able to list groups after rollback"); + + let external_groups: Vec<_> = groups + .items + .iter() + .filter(|g| get_group_ip(g) == external_group_ip) + .collect(); + + assert!( + external_groups.is_empty(), + "No external group should exist after failed creation" + ); +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_source_filter_update() -> TestResult { + let switch = &*get_switch().await; + + // First create the internal admin-scoped group that will be the NAT target + let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP); + let egress1 = PhysPort(28); + create_test_multicast_group( + switch, + internal_multicast_ip, + Some("rollback_internal"), + &[(egress1, types::Direction::External)], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, // No NAT needed for internal groups + None, + ) + .await; + + // Create IPv4 SSM group that supports source filters + let group_ip = IpAddr::V4(Ipv4Addr::new(232, 1, 1, 100)); // SSM range + let nat_target = types::NatTarget { + internal_ip: MULTICAST_NAT_IP.into(), + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x01, 0x64).into(), + vni: 100.into(), + }; + + // Create initial SSM group with source filters + let initial_sources = vec![ + types::IpSrc::Exact("10.1.1.1".parse().unwrap()), + types::IpSrc::Exact("10.1.1.2".parse().unwrap()), + ]; + + let external_group = types::MulticastGroupCreateExternalEntry { + group_ip, + tag: Some("source_filter_rollback_test".to_string()), + internal_forwarding: types::InternalForwarding { + nat_target: Some(nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, + sources: Some(initial_sources.clone()), + }; + + switch + .client + 
.multicast_group_create_external(&external_group) + .await + .expect("Should create SSM group with initial source filters"); + + // Get initial source filter table state + let initial_src_table = switch + .client + .table_dump("pipe.Ingress.mcast_ingress.mcast_source_filter_ipv4") + .await + .expect("Should be able to dump source filter table"); + + // Try to update with invalid sources that should cause validation failure and rollback + let invalid_sources = vec![ + types::IpSrc::Exact("10.2.2.1".parse().unwrap()), // Valid source + types::IpSrc::Exact("224.1.1.1".parse().unwrap()), // Invalid: multicast IP as source - should cause rollback + ]; + + let failing_update_entry = types::MulticastGroupUpdateExternalEntry { + sources: Some(invalid_sources), + internal_forwarding: external_group.internal_forwarding.clone(), + external_forwarding: types::ExternalForwarding { vlan_id: Some(10) }, + tag: None, + }; + + // This update should fail due to invalid multicast source IP + let result = switch + .client + .multicast_group_update_external(&group_ip, &failing_update_entry) + .await; + + // Verify the update failed + assert!( + result.is_err(), + "Update should have failed with invalid multicast source IP" + ); + + // Verify rollback worked - original sources should still be in place + let post_rollback_group = switch + .client + .multicast_group_get(&group_ip) + .await + .expect("Should be able to get group after rollback"); + + let rollback_sources = get_sources(&post_rollback_group.into_inner()) + .as_ref() + .map_or(0, |s| s.len()); + + assert_eq!( + rollback_sources, + initial_sources.len(), + "Source count should be unchanged after rollback" + ); + + // Verify source filter table is back to initial state (rollback worked) + let post_rollback_src_table = switch + .client + .table_dump("pipe.Ingress.mcast_ingress.mcast_source_filter_ipv4") + .await + .expect("Should be able to dump source filter table after rollback"); + + assert_eq!( + post_rollback_src_table.entries.len(), + initial_src_table.entries.len(), + "Source filter table should be unchanged after rollback" + ); + + // Clean up internal group + cleanup_test_group(&switch, internal_multicast_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_partial_member_addition() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 106)); + + // Create internal group with initial members + create_test_multicast_group( + &switch, + internal_group_ip, + Some("partial_add_rollback_test"), + &[ + (PhysPort(15), types::Direction::External), + (PhysPort(16), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + let initial_group = switch + .client + .multicast_group_get(&internal_group_ip) + .await + .expect("Should be able to get initial group state"); + let initial_member_count = get_members(&initial_group.into_inner()) + .map(|m| m.len()) + .unwrap_or(0); + + // Create a mix of valid and invalid members to trigger partial addition failure + let (valid_port_1, valid_link_1) = switch.link_id(PhysPort(17)).unwrap(); + let (valid_port_2, valid_link_2) = switch.link_id(PhysPort(18)).unwrap(); + let (valid_port_3, _) = switch.link_id(PhysPort(19)).unwrap(); + + let mixed_members = vec![ + // Valid member - should be added successfully + types::MulticastGroupMember { + port_id: valid_port_1, + link_id: valid_link_1, + direction: 
types::Direction::External, + }, + // Valid member - should be added successfully + types::MulticastGroupMember { + port_id: valid_port_2, + link_id: valid_link_2, + direction: types::Direction::Underlay, + }, + // Invalid member - should cause failure after partial success + types::MulticastGroupMember { + port_id: valid_port_3, + link_id: types::LinkId(250), // Invalid link ID + direction: types::Direction::External, + }, + ]; + + let update_request = types::MulticastGroupUpdateUnderlayEntry { + members: mixed_members, + tag: None, + }; + + let ipv6_update = types::AdminScopedIpv6(match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }); + + // This should fail after partially adding some members, triggering incremental rollback + let result = switch + .client + .multicast_group_update_underlay(&ipv6_update, &update_request) + .await; + + // Verify the update failed + assert!( + result.is_err(), + "Partial member addition should have failed with invalid link ID" + ); + + // Verify rollback worked - should be back to original state + let post_failure_group = switch + .client + .multicast_group_get(&internal_group_ip) + .await + .expect("Should be able to get group state after rollback") + .into_inner(); + + assert_eq!( + get_members(&post_failure_group) + .map(|m| m.len()) + .unwrap_or(0), + initial_member_count, + "Member count should be unchanged after partial addition rollback" + ); + + cleanup_test_group(&switch, internal_group_ip).await +} + +#[tokio::test] +#[ignore] +async fn test_multicast_rollback_table_operation_failure() { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 107)); + let external_group_ip = IpAddr::V4(Ipv4Addr::new(224, 1, 2, 107)); + + // Create internal group first + create_test_multicast_group( + &switch, + internal_group_ip, + Some("table_rollback_test"), + &[ + (PhysPort(15), types::Direction::External), + (PhysPort(17), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + // Delete the internal group to break the NAT target reference + cleanup_test_group(&switch, internal_group_ip) + .await + .expect("Should cleanup internal group"); + + // Get table states after internal group deletion but before external group attempt + let initial_route_table = switch + .client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table"); + let initial_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table"); + let initial_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table"); + + // Attempt to create external group that references the non-existent internal group + let broken_nat_target = types::NatTarget { + internal_ip: match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }, + inner_mac: MacAddr::new(0x01, 0x00, 0x5e, 0x01, 0x02, 0x6b).into(), + vni: 107.into(), + }; + + let external_entry = types::MulticastGroupCreateExternalEntry { + group_ip: external_group_ip, + tag: None, + internal_forwarding: types::InternalForwarding { + nat_target: Some(broken_nat_target), + }, + external_forwarding: types::ExternalForwarding { vlan_id: Some(200) }, + sources: None, + }; + + // This should 
fail because the NAT target (internal group) doesn't exist + let result = switch + .client + .multicast_group_create_external(&external_entry) + .await; + + // Verify the creation failed + assert!( + result.is_err(), + "External group creation should fail when NAT target doesn't exist" + ); + + // Verify table rollback worked - all tables should be unchanged + let post_route_table = switch + .client + .table_dump("pipe.Ingress.l3_router.MulticastRouter4.tbl") + .await + .expect("Should be able to dump route table after rollback"); + + let post_nat_table = switch + .client + .table_dump("pipe.Ingress.nat_ingress.ingress_ipv4_mcast") + .await + .expect("Should be able to dump NAT table after rollback"); + + let post_bitmap_table = switch + .client + .table_dump("pipe.Egress.mcast_egress.tbl_decap_ports") + .await + .expect("Should be able to dump bitmap table after rollback"); + + assert_eq!( + post_route_table.entries.len(), + initial_route_table.entries.len(), + "Route table should be unchanged after table operation rollback" + ); + + assert_eq!( + post_nat_table.entries.len(), + initial_nat_table.entries.len(), + "NAT table should be unchanged after table operation rollback" + ); + + assert_eq!( + post_bitmap_table.entries.len(), + initial_bitmap_table.entries.len(), + "Bitmap table should be unchanged after table operation rollback" + ); + + // Verify no external group was created + let groups = switch + .client + .multicast_groups_list(None, None) + .await + .expect("Should be able to list groups after rollback"); + + let external_groups: Vec<_> = groups + .items + .iter() + .filter(|g| get_group_ip(g) == external_group_ip) + .collect(); + + assert!( + external_groups.is_empty(), + "No external group should exist after table operation rollback" + ); +} + +#[tokio::test] +#[ignore] +#[allow(dead_code)] +async fn test_multicast_group_get_underlay() -> TestResult { + let switch = &*get_switch().await; + + let internal_group_ip = + IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 200)); + + // Create an internal/underlay group + let _created_group = create_test_multicast_group( + &switch, + internal_group_ip, + Some("underlay_get_test"), + &[ + (PhysPort(10), types::Direction::External), + (PhysPort(12), types::Direction::Underlay), + ], + types::InternalForwarding { nat_target: None }, + types::ExternalForwarding { vlan_id: None }, + None, + ) + .await; + + let retrieved_underlay = switch + .client + .multicast_group_get_underlay(&types::AdminScopedIpv6( + match internal_group_ip { + IpAddr::V6(ipv6) => ipv6, + _ => panic!("Expected IPv6 address"), + }, + )) + .await + .expect( + "Should be able to get underlay group via admin-scoped endpoint", + ) + .into_inner(); + + // Verify the response matches what we created + assert_eq!(retrieved_underlay.group_ip.to_ip_addr(), internal_group_ip); + assert_eq!( + retrieved_underlay.tag, + Some("underlay_get_test".to_string()) + ); + assert_eq!(retrieved_underlay.members.len(), 2); + + // Compare with generic GET endpoint result + let retrieved_generic = switch + .client + .multicast_group_get(&internal_group_ip) + .await + .expect("Should be able to get group via generic endpoint") + .into_inner(); + + // Verify both endpoints return consistent data for underlay groups + match retrieved_generic { + types::MulticastGroupResponse::Underlay { + group_ip, + tag, + members, + external_group_id, + underlay_group_id, + } => { + assert_eq!(group_ip, retrieved_underlay.group_ip); + assert_eq!(tag, retrieved_underlay.tag); + assert_eq!(members, 
retrieved_underlay.members); + assert_eq!(external_group_id, retrieved_underlay.external_group_id); + assert_eq!(underlay_group_id, retrieved_underlay.underlay_group_id); + } + _ => { + panic!( + "Admin-scoped IPv6 group should return underlay response only" + ); + } + } + cleanup_test_group(&switch, internal_group_ip).await } diff --git a/dpd-client/tests/integration_tests/table_tests.rs b/dpd-client/tests/integration_tests/table_tests.rs index 70e62dd..aee6ba4 100644 --- a/dpd-client/tests/integration_tests/table_tests.rs +++ b/dpd-client/tests/integration_tests/table_tests.rs @@ -459,13 +459,13 @@ async fn test_routev6_full() -> TestResult { struct MulticastReplicationTableTest {} -impl TableTest +impl TableTest for MulticastReplicationTableTest { async fn insert_entry( switch: &Switch, idx: usize, - ) -> OpResult { + ) -> OpResult { let (port_id1, link_id1) = switch.link_id(PhysPort(11)).unwrap(); let (port_id2, link_id2) = switch.link_id(PhysPort(12)).unwrap(); @@ -473,10 +473,9 @@ impl TableTest let group_ip = gen_ipv6_multicast_addr(idx); // Admin-scoped IPv6 groups are internal with replication info and members - let internal_entry = types::MulticastGroupCreateEntry { - group_ip, + let internal_entry = types::MulticastGroupCreateUnderlayEntry { + group_ip: types::AdminScopedIpv6(group_ip), tag: Some(MCAST_TAG.to_string()), - sources: None, members: vec![ types::MulticastGroupMember { port_id: port_id1.clone(), @@ -490,7 +489,10 @@ impl TableTest }, ], }; - switch.client.multicast_group_create(&internal_entry).await + switch + .client + .multicast_group_create_underlay(&internal_entry) + .await } async fn delete_entry(switch: &Switch, idx: usize) -> OpResult<()> { @@ -499,7 +501,7 @@ impl TableTest } async fn count_entries(switch: &Switch) -> usize { - // Count all groups with our test tag + // Count only underlay groups with our test tag (since this tests replication table capacity) switch .client .multicast_groups_list_by_tag_stream(MCAST_TAG, None) @@ -515,7 +517,7 @@ impl TableTest async fn test_multicast_replication_table_full() -> TestResult { test_table_capacity::< MulticastReplicationTableTest, - types::MulticastGroupResponse, + types::MulticastGroupUnderlayResponse, (), >(MULTICAST_TABLE_SIZE) .await diff --git a/dpd-types/src/mcast.rs b/dpd-types/src/mcast.rs index 585e587..a4e5935 100644 --- a/dpd-types/src/mcast.rs +++ b/dpd-types/src/mcast.rs @@ -4,13 +4,15 @@ // // Copyright 2025 Oxide Computer Company +//! Public types for multicast group management. + use std::{ fmt, net::{IpAddr, Ipv6Addr}, }; use common::{nat::NatTarget, ports::PortId}; -use oxnet::Ipv4Net; +use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -19,15 +21,60 @@ use crate::link::LinkId; /// Type alias for multicast group IDs. pub type MulticastGroupId = u16; -/// A multicast group configuration for POST requests for external (to the rack) -/// groups. -#[derive(Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupCreateExternalEntry { - pub group_ip: IpAddr, - pub tag: Option, - pub nat_target: NatTarget, - pub vlan_id: Option, - pub sources: Option>, +/// A validated admin-scoped IPv6 multicast address. +/// +/// Admin-scoped addresses are ff04::/16, ff05::/16, or ff08::/16. +/// These are used for internal/underlay multicast groups. 
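+///
+/// A minimal, illustrative sketch of constructing one (the addresses are
+/// arbitrary examples; the scope check itself is delegated to `oxnet`):
+///
+/// ```ignore
+/// use std::net::Ipv6Addr;
+///
+/// // ff05::/16 (site-local) is admin-scoped, so this succeeds.
+/// let ok = AdminScopedIpv6::new(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 0x65));
+/// assert!(ok.is_ok());
+///
+/// // ff0e::/16 (global scope) is not admin-scoped and is rejected with
+/// // `Error::InvalidIp`.
+/// let bad = AdminScopedIpv6::new(Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 1));
+/// assert!(bad.is_err());
+/// ```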
+#[derive(
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Hash,
+    Deserialize,
+    Serialize,
+    JsonSchema,
+)]
+#[serde(try_from = "Ipv6Addr", into = "Ipv6Addr")]
+pub struct AdminScopedIpv6(Ipv6Addr);
+
+impl AdminScopedIpv6 {
+    /// Create a new AdminScopedIpv6 if the address is admin-scoped.
+    pub fn new(addr: Ipv6Addr) -> Result<Self, Error> {
+        if !Ipv6Net::new_unchecked(addr, 128).is_admin_scoped_multicast() {
+            return Err(Error::InvalidIp(addr));
+        }
+        Ok(Self(addr))
+    }
+}
+
+impl TryFrom<Ipv6Addr> for AdminScopedIpv6 {
+    type Error = Error;
+
+    fn try_from(addr: Ipv6Addr) -> Result<Self, Self::Error> {
+        Self::new(addr)
+    }
+}
+
+impl From<AdminScopedIpv6> for Ipv6Addr {
+    fn from(admin: AdminScopedIpv6) -> Self {
+        admin.0
+    }
+}
+
+impl From<AdminScopedIpv6> for IpAddr {
+    fn from(admin: AdminScopedIpv6) -> Self {
+        IpAddr::V6(admin.0)
+    }
+}
+
+impl fmt::Display for AdminScopedIpv6 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
 }
 
 /// Source filter match key for multicast traffic.
@@ -53,19 +100,28 @@ impl fmt::Display for IpSrc {
 /// A multicast group configuration for POST requests for internal (to the rack)
 /// groups.
 #[derive(Debug, Deserialize, Serialize, JsonSchema)]
-pub struct MulticastGroupCreateEntry {
-    pub group_ip: Ipv6Addr,
+pub struct MulticastGroupCreateUnderlayEntry {
+    pub group_ip: AdminScopedIpv6,
     pub tag: Option<String>,
-    pub sources: Option<Vec<IpSrc>>,
     pub members: Vec<MulticastGroupMember>,
 }
 
+/// A multicast group configuration for POST requests for external (to the rack)
+/// groups.
+#[derive(Debug, Deserialize, Serialize, JsonSchema)]
+pub struct MulticastGroupCreateExternalEntry {
+    pub group_ip: IpAddr,
+    pub tag: Option<String>,
+    pub internal_forwarding: InternalForwarding,
+    pub external_forwarding: ExternalForwarding,
+    pub sources: Option<Vec<IpSrc>>,
+}
+
 /// Represents a multicast replication entry for PUT requests for internal
 /// (to the rack) groups.
 #[derive(Debug, Deserialize, Serialize, JsonSchema)]
-pub struct MulticastGroupUpdateEntry {
+pub struct MulticastGroupUpdateUnderlayEntry {
     pub tag: Option<String>,
-    pub sources: Option<Vec<IpSrc>>,
     pub members: Vec<MulticastGroupMember>,
 }
@@ -74,22 +130,50 @@ pub struct MulticastGroupUpdateExternalEntry {
 #[derive(Debug, Deserialize, Serialize, JsonSchema)]
 pub struct MulticastGroupUpdateExternalEntry {
     pub tag: Option<String>,
-    pub nat_target: NatTarget,
-    pub vlan_id: Option<u16>,
+    pub internal_forwarding: InternalForwarding,
+    pub external_forwarding: ExternalForwarding,
     pub sources: Option<Vec<IpSrc>>,
 }
 
-/// Response structure for multicast group operations.
+/// Response structure for underlay/internal multicast group operations.
+/// These groups handle admin-scoped IPv6 multicast with full replication.
+#[derive(Debug, Deserialize, Serialize, JsonSchema)]
+pub struct MulticastGroupUnderlayResponse {
+    pub group_ip: AdminScopedIpv6,
+    pub external_group_id: MulticastGroupId,
+    pub underlay_group_id: MulticastGroupId,
+    pub tag: Option<String>,
+    pub members: Vec<MulticastGroupMember>,
+}
+
+/// Response structure for external multicast group operations.
+/// These groups handle IPv4 and non-admin IPv6 multicast via NAT targets.
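+///
+/// Note that the `external_group_id` reported here mirrors the underlay group
+/// referenced via `internal_forwarding.nat_target`, since external groups do
+/// not allocate ASIC groups of their own.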
#[derive(Debug, Deserialize, Serialize, JsonSchema)] -pub struct MulticastGroupResponse { +pub struct MulticastGroupExternalResponse { pub group_ip: IpAddr, - pub external_group_id: Option, - pub underlay_group_id: Option, + pub external_group_id: MulticastGroupId, pub tag: Option, - pub int_fwding: InternalForwarding, - pub ext_fwding: ExternalForwarding, + pub internal_forwarding: InternalForwarding, + pub external_forwarding: ExternalForwarding, pub sources: Option>, - pub members: Vec, +} + +/// Unified response type for operations that return mixed group types. +#[derive(Debug, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum MulticastGroupResponse { + Underlay(MulticastGroupUnderlayResponse), + External(MulticastGroupExternalResponse), +} + +impl MulticastGroupResponse { + /// Get the multicast group IP address. + pub fn ip(&self) -> IpAddr { + match self { + Self::Underlay(resp) => resp.group_ip.into(), + Self::External(resp) => resp.group_ip, + } + } } /// Represents the NAT target for multicast traffic for internal/underlay @@ -126,3 +210,11 @@ pub enum Direction { Underlay, External, } + +#[derive(Clone, Debug, thiserror::Error)] +pub enum Error { + #[error( + "Address {0} is not admin-scoped (must be ff04::/16, ff05::/16, or ff08::/16)" + )] + InvalidIp(Ipv6Addr), +} diff --git a/dpd/src/api_server.rs b/dpd/src/api_server.rs index 18e6910..7440357 100644 --- a/dpd/src/api_server.rs +++ b/dpd/src/api_server.rs @@ -17,11 +17,13 @@ use dpd_types::fault::Fault; use dpd_types::link::LinkFsmCounters; use dpd_types::link::LinkId; use dpd_types::link::LinkUpCounter; -use dpd_types::mcast::MulticastGroupCreateEntry; use dpd_types::mcast::MulticastGroupCreateExternalEntry; +use dpd_types::mcast::MulticastGroupCreateUnderlayEntry; +use dpd_types::mcast::MulticastGroupExternalResponse; use dpd_types::mcast::MulticastGroupResponse; -use dpd_types::mcast::MulticastGroupUpdateEntry; +use dpd_types::mcast::MulticastGroupUnderlayResponse; use dpd_types::mcast::MulticastGroupUpdateExternalEntry; +use dpd_types::mcast::MulticastGroupUpdateUnderlayEntry; use dpd_types::oxstats::OximeterMetadata; use dpd_types::port_map::BackplaneLink; use dpd_types::route::Ipv4Route; @@ -1828,7 +1830,8 @@ impl DpdApi for DpdApiImpl { async fn multicast_group_create_external( rqctx: RequestContext>, group: TypedBody, - ) -> Result, HttpError> { + ) -> Result, HttpError> + { let switch: &Switch = rqctx.context(); let entry = group.into_inner(); @@ -1837,10 +1840,11 @@ impl DpdApi for DpdApiImpl { .map_err(HttpError::from) } - async fn multicast_group_create( + async fn multicast_group_create_underlay( rqctx: RequestContext>, - group: TypedBody, - ) -> Result, HttpError> { + group: TypedBody, + ) -> Result, HttpError> + { let switch: &Switch = rqctx.context(); let entry = group.into_inner(); @@ -1884,26 +1888,27 @@ impl DpdApi for DpdApiImpl { .map_err(HttpError::from) } - async fn multicast_group_update( + async fn multicast_group_update_underlay( rqctx: RequestContext>, - path: Path, - group: TypedBody, - ) -> Result, HttpError> { + path: Path, + group: TypedBody, + ) -> Result, HttpError> { let switch: &Switch = rqctx.context(); - let ip = path.into_inner().group_ip; + let admin_scoped = path.into_inner().group_ip; - let ipv6 = match ip { - IpAddr::V6(ipv6) => ipv6, - IpAddr::V4(_) => { - return Err(HttpError::for_bad_request( - None, - "Internal multicast groups must use IPv6 addresses" - .to_string(), - )); - } - }; + mcast::modify_group_internal(switch, 
admin_scoped, group.into_inner()) + .map(HttpResponseOk) + .map_err(HttpError::from) + } - mcast::modify_group_internal(switch, ipv6, group.into_inner()) + async fn multicast_group_get_underlay( + rqctx: RequestContext>, + path: Path, + ) -> Result, HttpError> { + let switch: &Switch = rqctx.context(); + let admin_scoped = path.into_inner().group_ip; + + mcast::get_group_internal(switch, admin_scoped) .map(HttpResponseOk) .map_err(HttpError::from) } @@ -1912,7 +1917,8 @@ impl DpdApi for DpdApiImpl { rqctx: RequestContext>, path: Path, group: TypedBody, - ) -> Result, HttpError> { + ) -> Result, HttpError> + { let switch: &Switch = rqctx.context(); let entry = group.into_inner(); let ip = path.into_inner().group_ip; @@ -1931,9 +1937,6 @@ impl DpdApi for DpdApiImpl { { let switch: &Switch = rqctx.context(); - // If a group ID is provided, get the group by ID - - // If no group ID is provided, paginate through the groups let pag_params = query_params.into_inner(); let Ok(limit) = usize::try_from(rqctx.page_limit(&pag_params)?.get()) else { @@ -1955,7 +1958,7 @@ impl DpdApi for DpdApiImpl { entries, &EmptyScanParams {}, |e: &MulticastGroupResponse, _| MulticastGroupIpParam { - group_ip: e.group_ip, + group_ip: e.ip(), }, )?)) } @@ -1991,7 +1994,7 @@ impl DpdApi for DpdApiImpl { entries, &EmptyScanParams {}, |e: &MulticastGroupResponse, _| MulticastGroupIpParam { - group_ip: e.group_ip, + group_ip: e.ip(), }, )?)) } diff --git a/dpd/src/mcast/mod.rs b/dpd/src/mcast/mod.rs index a2ffa00..a26a5aa 100644 --- a/dpd/src/mcast/mod.rs +++ b/dpd/src/mcast/mod.rs @@ -8,6 +8,49 @@ //! //! This is the entrypoint for managing multicast groups, including creating, //! modifying, and deleting groups. +//! +//! ## Overview +//! +//! There are two types of multicast groups: +//! - **External (Overlay) groups**: Entry points for overlay traffic, +//! have NAT targets, VLAN IDs, and no direct members. External groups +//! reference internal groups via NAT targets to perform the +//! actual packet replication and forwarding. +//! +//! - **Internal (Underlay) groups**: Handle actual packet replication to +//! members, containing ALL members (either direction - overlay or +//! underlay). +//! +//! ### Member Directions +//! +//! Internal groups contain members with two traffic directions: +//! - **External direction**: Members in overlay networks (customer networks), +//! which receive decapsulated packets with (possible) VLAN tags on multicast +//! egress. +//! +//! - **Underlay direction**: Members in underlay networks +//! (rack infrastructure, instances), which receive encapsulated Geneve +//! packets on multicast egress. +//! +//! ### Bifurcated Multicast Design: +//! +//! The multicast implementation uses a bifurcated design that separates +//! external (customer) and (internal) underlay traffic: +//! +//! 1. External-only groups (IPv4 and non-admin-scoped IPv6): +//! - Created from API control plane IPs for customer traffic +//! - Handle customer traffic to/from outside the rack +//! - Use the external multicast API (/multicast/external-groups) +//! - Must have NAT targets pointing to internal groups for proper forwarding +//! +//! 2. Internal groups (admin-scoped IPv6 multicast): +//! - Admin-scoped = admin-local, site-local, or organization-local scope (RFC 7346, RFC 4291) +//! - Geneve encapsulated multicast traffic (NAT targets of external-only groups) +//! - Use the internal multicast API (/multicast/underlay-groups) +//! - Can replicate to: +//! a) External group members (customer traffic) +//! 
b) Underlay-only members (infrastructure traffic) +//! c) Both external and underlay members (bifurcated replication) use std::{ collections::{BTreeMap, HashSet}, @@ -16,29 +59,34 @@ use std::{ sync::{Arc, Mutex, Weak}, }; -use crate::{ - table, - types::{DpdError, DpdResult}, - Switch, -}; use aal::{AsicError, AsicOps}; use common::{nat::NatTarget, ports::PortId}; use dpd_types::{ link::LinkId, mcast::{ - Direction, ExternalForwarding, InternalForwarding, IpSrc, - MulticastGroupCreateEntry, MulticastGroupCreateExternalEntry, + AdminScopedIpv6, Direction, ExternalForwarding, InternalForwarding, + IpSrc, MulticastGroupCreateExternalEntry, + MulticastGroupCreateUnderlayEntry, MulticastGroupExternalResponse, MulticastGroupId, MulticastGroupMember, MulticastGroupResponse, - MulticastGroupUpdateEntry, MulticastGroupUpdateExternalEntry, + MulticastGroupUnderlayResponse, MulticastGroupUpdateExternalEntry, + MulticastGroupUpdateUnderlayEntry, }, }; -use oxnet::{Ipv4Net, Ipv6Net}; +use oxnet::Ipv4Net; +use slog::{debug, error, warn}; -use slog::{debug, error}; +use crate::{ + table, + types::{DpdError, DpdResult}, + Switch, +}; +mod rollback; mod validate; + +use rollback::{GroupCreateRollbackContext, GroupUpdateRollbackContext}; use validate::{ - is_ssm, validate_multicast_address, validate_nat_target, + validate_multicast_address, validate_nat_target, validate_not_admin_scoped_ipv6, }; @@ -47,7 +95,7 @@ struct ScopedIdInner(MulticastGroupId, Weak>>); impl Drop for ScopedIdInner { /// Only return to free pool if not taken and if the free pool still - /// exists + /// exists. fn drop(&mut self) { if self.0 != 0 { if let Some(free_ids) = self.1.upgrade() { @@ -92,8 +140,8 @@ struct MulticastReplicationInfo { /// replication information, forwarding settings, and associated members. 
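+///
+/// For orientation, the bifurcated design described in the module docs above
+/// typically yields two of these groups: an underlay group created first, then
+/// an external group whose NAT target references it. A rough sketch of the
+/// corresponding request payloads (addresses, tag, VLAN, and the `nat_target`
+/// value are made-up placeholders for illustration):
+///
+/// ```ignore
+/// // 1. Underlay group: admin-scoped IPv6, owns the actual members.
+/// let underlay = MulticastGroupCreateUnderlayEntry {
+///     group_ip: AdminScopedIpv6::new("ff04::100".parse().unwrap()).unwrap(),
+///     tag: Some("example".to_string()),
+///     members: vec![/* MulticastGroupMember { port_id, link_id, direction } */],
+/// };
+///
+/// // 2. External group: customer-facing IP with no members of its own; its
+/// //    NAT target's internal_ip is the underlay group's address above.
+/// let external = MulticastGroupCreateExternalEntry {
+///     group_ip: "224.1.2.3".parse().unwrap(),
+///     tag: Some("example".to_string()),
+///     internal_forwarding: InternalForwarding { nat_target: Some(nat_target) },
+///     external_forwarding: ExternalForwarding { vlan_id: Some(10) },
+///     sources: None,
+/// };
+/// ```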
#[derive(Clone, Debug)] pub(crate) struct MulticastGroup { - external_group_id: Option, - underlay_group_id: Option, + external_scoped_group: ScopedGroupId, + underlay_scoped_group: ScopedGroupId, pub(crate) tag: Option, pub(crate) int_fwding: InternalForwarding, pub(crate) ext_fwding: ExternalForwarding, @@ -103,28 +151,65 @@ pub(crate) struct MulticastGroup { } impl MulticastGroup { - fn external_group_id(&self) -> Option { - self.external_group_id.as_ref().map(ScopedGroupId::id) + fn external_group_id(&self) -> MulticastGroupId { + self.external_scoped_group.id() } - fn underlay_group_id(&self) -> Option { - self.underlay_group_id.as_ref().map(ScopedGroupId::id) + fn underlay_group_id(&self) -> MulticastGroupId { + self.underlay_scoped_group.id() } - fn to_response(&self, group_ip: IpAddr) -> MulticastGroupResponse { - MulticastGroupResponse { + fn external_scoped_group_id(&self) -> &ScopedGroupId { + &self.external_scoped_group + } + + fn underlay_scoped_group_id(&self) -> &ScopedGroupId { + &self.underlay_scoped_group + } + + fn to_external_response( + &self, + group_ip: IpAddr, + ) -> MulticastGroupExternalResponse { + MulticastGroupExternalResponse { group_ip, external_group_id: self.external_group_id(), - underlay_group_id: self.underlay_group_id(), tag: self.tag.clone(), - int_fwding: InternalForwarding { - nat_target: self.int_fwding.nat_target, - }, - ext_fwding: ExternalForwarding { - vlan_id: self.ext_fwding.vlan_id, - }, + internal_forwarding: self.int_fwding.clone(), + external_forwarding: self.ext_fwding.clone(), sources: self.sources.clone(), - members: self.members.to_vec(), + } + } + + fn to_underlay_response( + &self, + group_ip: AdminScopedIpv6, + ) -> MulticastGroupUnderlayResponse { + MulticastGroupUnderlayResponse { + group_ip, + external_group_id: self.external_group_id(), + underlay_group_id: self.underlay_group_id(), + tag: self.tag.clone(), + members: self.members.clone(), + } + } + + fn to_response(&self, group_ip: IpAddr) -> MulticastGroupResponse { + match group_ip { + IpAddr::V4(_) => MulticastGroupResponse::External( + self.to_external_response(group_ip), + ), + IpAddr::V6(ipv6) => { + // Try to create AdminScopedIpv6 - if successful, it's an underlay group + match AdminScopedIpv6::new(ipv6) { + Ok(admin_scoped) => MulticastGroupResponse::Underlay( + self.to_underlay_response(admin_scoped), + ), + Err(_) => MulticastGroupResponse::External( + self.to_external_response(group_ip), + ), + } + } } } } @@ -137,9 +222,9 @@ pub struct MulticastGroupData { /// Stack of available group IDs for O(1) allocation. /// Pre-populated with all IDs from GENERATOR_START to u16::MAX-1. free_group_ids: Arc>>, - /// Mapping from admin-scoped group IP to external groups that use it as NAT - /// target (admin_scoped_ip -> set of external_group_ips) - nat_target_refs: BTreeMap>, + /// 1:1 mapping from admin-scoped group IP to external group that uses it as NAT + /// target (admin_scoped_ip -> external_group_ip) + nat_target_refs: BTreeMap, } impl MulticastGroupData { @@ -180,46 +265,31 @@ impl MulticastGroupData { Ok(ScopedIdInner(id, Arc::downgrade(&self.free_group_ids)).into()) } - /// Add a NAT target reference from external group to admin-scoped group. - fn add_nat_target_ref( + /// Add 1:1 forwarding reference from admin-scoped IP to external group's IP. 
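+    ///
+    /// Because `nat_target_refs` is a plain `BTreeMap`, inserting a reference
+    /// for an admin-scoped IP that already has one replaces the previous
+    /// external group IP rather than accumulating a set of them.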
+ fn add_forwarding_refs( &mut self, external_group_ip: IpAddr, - admin_scoped_ip: IpAddr, + admin_scoped_ip: AdminScopedIpv6, ) { self.nat_target_refs - .entry(admin_scoped_ip) - .or_default() - .insert(external_group_ip); + .insert(admin_scoped_ip, external_group_ip); } - /// Remove a NAT target reference. - fn remove_nat_target_ref( - &mut self, - external_group_ip: IpAddr, - admin_scoped_ip: IpAddr, - ) { - if let Some(refs) = self.nat_target_refs.get_mut(&admin_scoped_ip) { - refs.remove(&external_group_ip); - if refs.is_empty() { - self.nat_target_refs.remove(&admin_scoped_ip); - } - } + /// Remove 1:1 forwarding reference. + fn rm_forwarding_refs(&mut self, admin_scoped_ip: AdminScopedIpv6) { + self.nat_target_refs.remove(&admin_scoped_ip); } - /// Get VLAN ID for an internal group from its referencing external groups. - fn get_vlan_for_internal_addr(&self, internal_ip: IpAddr) -> Option { - // Find the first external group that references this internal group - // and return its VLAN ID - if let Some(external_refs) = self.nat_target_refs.get(&internal_ip) { - for external_ip in external_refs { - if let Some(external_group) = self.groups.get(external_ip) { - if let Some(vlan_id) = external_group.ext_fwding.vlan_id { - return Some(vlan_id); - } - } - } - } - None + /// Get the VLAN ID for an internal multicast group by looking up + /// the referencing external group (1:1 mapping). + fn get_vlan_for_internal_addr( + &self, + internal_ip: AdminScopedIpv6, + ) -> Option { + self.nat_target_refs + .get(&internal_ip) + .and_then(|external_ip| self.groups.get(external_ip)) + .and_then(|group| group.ext_fwding.vlan_id) } } @@ -237,155 +307,84 @@ impl Default for MulticastGroupData { pub(crate) fn add_group_external( s: &Switch, group_info: MulticastGroupCreateExternalEntry, -) -> DpdResult { +) -> DpdResult { let group_ip = group_info.group_ip; - // Acquire the lock to the multicast data structure at thestart to ensure + // Acquire the lock to the multicast data structure at the start to ensure // deterministic operation order let mut mcast = s.mcast.lock().unwrap(); + let nat_target = + group_info.internal_forwarding.nat_target.ok_or_else(|| { + DpdError::Invalid( + "external groups must have NAT target".to_string(), + ) + })?; + validate_external_group_creation(&mcast, group_ip, &group_info)?; - validate_nat_target(group_info.nat_target)?; + validate_nat_target(nat_target)?; + + // Validate that NAT target points to an existing group and get its IDs + let internal_group_ip = nat_target.internal_ip.into(); + let internal_group = + mcast.groups.get(&internal_group_ip).ok_or_else(|| { + DpdError::Invalid(format!( + "multicast group for IP address {group_ip} must have a NAT target \ + that is also a tracked multicast group", + )) + })?; - // Validate that NAT target points to an existing group - if !mcast - .groups - .contains_key(&group_info.nat_target.internal_ip.into()) - { - return Err(DpdError::Invalid(format!( - "multicast group for IP address {} must have a NAT target that is also a tracked multicast group", - group_ip - ))); - } + // Set IDs to match the internal group from the NAT target + let scoped_external_id = internal_group.external_scoped_group.clone(); + let scoped_underlay_id = internal_group.underlay_scoped_group.clone(); - let res = configure_external_tables(s, &group_info); + // Create rollback context once for reuse throughout this function + let rollback_ctx = GroupCreateRollbackContext::new_external( + s, + group_ip, + scoped_external_id.id(), + scoped_underlay_id.id(), 
+ nat_target, + group_info.sources.as_deref(), + ); - if let Err(e) = res { - // Use unified rollback with optional NAT for external groups - rollback_on_group_create( - s, - group_ip, - (None, None), // External groups don't create ASIC groups - &[], // No members added externally - &MulticastReplicationInfo::default(), // Dummy replication info - Some(group_info.nat_target), // External groups have NAT targets - group_info.sources.as_deref(), - ) - .ok(); // Ignore rollback errors, log the original error - return Err(e); - } + // Configure external tables and handle VLAN propagation + configure_external_tables(s, &group_info) + .and_then(|_| { + // Perform VLAN propagation if needed + if let Some(vlan_id) = group_info.external_forwarding.vlan_id { + perform_vlan_propagation( + s, + &mcast, + group_ip, + vlan_id, + nat_target.internal_ip.into(), + ) + } else { + Ok(()) + } + }) + .map_err(|e| rollback_ctx.rollback_and_return_error(e))?; + + // Validate the admin-scoped IP early to avoid partial state + let admin_scoped_ip = AdminScopedIpv6::new(nat_target.internal_ip)?; let group = MulticastGroup { - external_group_id: None, - underlay_group_id: None, + external_scoped_group: scoped_external_id, + underlay_scoped_group: scoped_underlay_id, tag: group_info.tag, - int_fwding: InternalForwarding { - nat_target: Some(group_info.nat_target), - }, - ext_fwding: ExternalForwarding { - vlan_id: group_info.vlan_id, - }, - sources: group_info.sources, + int_fwding: group_info.internal_forwarding.clone(), + ext_fwding: group_info.external_forwarding.clone(), + sources: group_info.sources.clone(), replication_info: None, - members: Vec::new(), // External groups have no members + // External groups are entry points only - actual members reside in referenced internal groups + members: Vec::new(), }; mcast.groups.insert(group_ip, group.clone()); + mcast.add_forwarding_refs(group_ip, admin_scoped_ip); - // Track NAT target reference for VLAN propagation - mcast - .add_nat_target_ref(group_ip, group_info.nat_target.internal_ip.into()); - - // Extract data needed for VLAN propagation to internal groups - let vlan_propagation_data = group_info.vlan_id.map(|vlan_id| { - let internal_ip = group_info.nat_target.internal_ip.into(); - debug!( - s.log, - "External group {} with VLAN {} references internal group {}, propagating VLAN to existing internal group", - group_ip, - vlan_id, - internal_ip - ); - - let internal_group = mcast - .groups - .get(&internal_ip) - .ok_or_else(|| { - DpdError::Invalid(format!( - "Internal group {} not found", - internal_ip - )) - }) - .expect("Internal group must exist (validated above)"); - - ( - internal_ip, - vlan_id, - internal_group.external_group_id.clone(), - internal_group.underlay_group_id.clone(), - internal_group.members.clone(), - ) - }); - - // Update internal group's tables with the VLAN if necessary - if let Some(( - internal_ip, - vlan_id, - external_group_id, - underlay_group_id, - members, - )) = vlan_propagation_data - { - // Update external group bitmap if it exists - if let Some(external_id) = external_group_id { - let mut port_bitmap = table::mcast::mcast_egress::PortBitmap::new(); - for member in &members { - if member.direction == Direction::External { - port_bitmap.add_port(member.port_id.as_u8()); - } - } - if let Err(e) = table::mcast::mcast_egress::update_bitmap_entry( - s, - external_id.id(), - &port_bitmap, - Some(vlan_id), - ) { - error!( - s.log, - "Failed to update external bitmap for VLAN {} on internal group {}: {:?}", - vlan_id, - internal_ip, 
- e - ); - } - } - - // Update underlay group bitmap if it exists - if let Some(underlay_id) = underlay_group_id { - let mut port_bitmap = table::mcast::mcast_egress::PortBitmap::new(); - for member in &members { - if member.direction == Direction::Underlay { - port_bitmap.add_port(member.port_id.as_u8()); - } - } - if let Err(e) = table::mcast::mcast_egress::update_bitmap_entry( - s, - underlay_id.id(), - &port_bitmap, - Some(vlan_id), - ) { - error!( - s.log, - "Failed to update underlay bitmap for VLAN {} on internal group {}: {:?}", - vlan_id, - internal_ip, - e - ); - } - } - } - - Ok(group.to_response(group_ip)) + Ok(group.to_external_response(group_ip)) } /// Add an internal multicast group to the switch, which creates the group on @@ -395,58 +394,80 @@ pub(crate) fn add_group_external( /// If anything fails, the group is cleaned up and an error is returned. pub(crate) fn add_group_internal( s: &Switch, - group_info: MulticastGroupCreateEntry, -) -> DpdResult { - add_group_internal_only(s, group_info) -} - -fn add_group_internal_only( - s: &Switch, - group_info: MulticastGroupCreateEntry, -) -> DpdResult { + group_info: MulticastGroupCreateUnderlayEntry, +) -> DpdResult { let group_ip = group_info.group_ip; // Acquire the lock to the multicast data structure at the start to ensure // deterministic operation order let mut mcast = s.mcast.lock().unwrap(); - validate_internal_group_creation(&mcast, group_ip, &group_info)?; + validate_internal_group_creation(&mcast, group_ip)?; let (scoped_external_id, scoped_underlay_id) = - create_multicast_group_ids(s, &mut mcast, group_ip, &group_info)?; + allocate_multicast_group_ids(s, &mut mcast, group_ip.into())?; + + // Create rollback context for cleanup if operations fail + let rollback_ctx = GroupCreateRollbackContext::new_internal( + s, + group_ip.into(), + scoped_external_id.id(), + scoped_underlay_id.id(), + ); // Get VLAN ID from referencing external groups - let vlan_id = mcast.get_vlan_for_internal_addr(group_ip.into()); - let external_group_id = scoped_external_id.as_ref().map(ScopedGroupId::id); - let underlay_group_id = scoped_underlay_id.as_ref().map(ScopedGroupId::id); + let vlan_id = mcast.get_vlan_for_internal_addr(group_ip); + let external_group_id = scoped_external_id.id(); + let underlay_group_id = scoped_underlay_id.id(); let mut added_members = Vec::new(); - let replication_info = - configure_replication(external_group_id, underlay_group_id); - add_ports_to_groups( - s, - group_ip.into(), - &group_info.members, - external_group_id, - underlay_group_id, - &replication_info, - &mut added_members, - )?; - - configure_internal_tables( - s, - group_ip.into(), - external_group_id, - underlay_group_id, - Some(&replication_info), - &group_info, - &added_members, - vlan_id, - )?; + // Only configure replication if there are members + let replication_info = if !group_info.members.is_empty() { + let replication_info = configure_replication(external_group_id); + + add_ports_to_groups( + s, + group_ip.into(), + &group_info.members, + external_group_id, + underlay_group_id, + &replication_info, + &mut added_members, + ) + .map_err(|e| rollback_ctx.rollback_and_return_error(e))?; + + configure_internal_tables( + s, + group_ip.into(), + external_group_id, + underlay_group_id, + Some(&replication_info), + &added_members, + vlan_id, + ) + .map_err(|e| rollback_ctx.rollback_and_return_error(e))?; + + Some(replication_info) + } else { + // No members - configure minimal tables for empty group + configure_internal_tables( + s, + 
group_ip.into(), + external_group_id, + underlay_group_id, + None, + &added_members, + vlan_id, + ) + .map_err(|e| rollback_ctx.rollback_and_return_error(e))?; + + None + }; + // Generic internal datastructure (vs API interface) let group = MulticastGroup { - external_group_id: scoped_external_id, - underlay_group_id: scoped_underlay_id, + external_scoped_group: scoped_external_id, + underlay_scoped_group: scoped_underlay_id, tag: group_info.tag, int_fwding: InternalForwarding { nat_target: None, // Internal groups don't have NAT targets @@ -454,14 +475,14 @@ fn add_group_internal_only( ext_fwding: ExternalForwarding { vlan_id: None, // Internal groups don't have VLANs }, - sources: group_info.sources, - replication_info: Some(replication_info), + sources: None, // Internal groups don't have sources + replication_info, members: group_info.members, }; mcast.groups.insert(group_ip.into(), group.clone()); - Ok(group.to_response(group_ip.into())) + Ok(group.to_underlay_response(group_ip)) } /// Delete a multicast group from the switch, including all associated tables @@ -471,8 +492,7 @@ pub(crate) fn del_group(s: &Switch, group_ip: IpAddr) -> DpdResult<()> { let group = mcast.groups.remove(&group_ip).ok_or_else(|| { DpdError::Missing(format!( - "Multicast group for IP {} not found", - group_ip + "Multicast group for IP {group_ip} not found", )) })?; @@ -481,23 +501,45 @@ pub(crate) fn del_group(s: &Switch, group_ip: IpAddr) -> DpdResult<()> { .nat_target .map(|nat| nat.internal_ip.into()); - debug!(s.log, "deleting multicast group for IP {}", group_ip); + debug!(s.log, "deleting multicast group for IP {group_ip}"); + delete_group_tables(s, group_ip, &group)?; - delete_multicast_groups( - s, - group_ip, - group.external_group_id.clone(), - group.underlay_group_id.clone(), - )?; + // Only delete ASIC groups for internal groups (groups without NAT targets). + // External groups share ASIC resources with their referenced internal groups. + if group.int_fwding.nat_target.is_none() { + delete_multicast_groups( + s, + group_ip, + group.external_scoped_group_id().clone(), + group.underlay_scoped_group_id().clone(), + )?; + } - if let Some(internal_ip) = nat_target_to_remove { - mcast.remove_nat_target_ref(group_ip, internal_ip); + if let Some(IpAddr::V6(ipv6)) = nat_target_to_remove { + mcast.rm_forwarding_refs(AdminScopedIpv6::new(ipv6)?); } Ok(()) } +/// Get an internal multicast group configuration by admin-scoped IPv6 address. +pub(crate) fn get_group_internal( + s: &Switch, + admin_scoped: AdminScopedIpv6, +) -> DpdResult { + let mcast = s.mcast.lock().unwrap(); + let group_ip = IpAddr::V6(admin_scoped.into()); + + let group = mcast.groups.get(&group_ip).ok_or_else(|| { + DpdError::Missing(format!( + "internal multicast group for IP {group_ip} not found", + )) + })?; + + Ok(group.to_underlay_response(admin_scoped)) +} + /// Get a multicast group configuration. pub(crate) fn get_group( s: &Switch, @@ -505,16 +547,11 @@ pub(crate) fn get_group( ) -> DpdResult { let mcast = s.mcast.lock().unwrap(); - let group = mcast - .groups - .get(&group_ip) - .ok_or_else(|| { - DpdError::Missing(format!( - "multicast group for IP {} not found", - group_ip - )) - })? 
- .clone(); + let group = mcast.groups.get(&group_ip).ok_or_else(|| { + DpdError::Missing(format!( + "multicast group for IP {group_ip} not found", + )) + })?; Ok(group.to_response(group_ip)) } @@ -523,198 +560,262 @@ pub(crate) fn modify_group_external( s: &Switch, group_ip: IpAddr, new_group_info: MulticastGroupUpdateExternalEntry, -) -> DpdResult { +) -> DpdResult { let mut mcast = s.mcast.lock().unwrap(); if !mcast.groups.contains_key(&group_ip) { return Err(DpdError::Missing(format!( - "Multicast group for IP {} not found", - group_ip + "Multicast group for IP {group_ip} not found", ))); } + let nat_target = + new_group_info + .internal_forwarding + .nat_target + .ok_or_else(|| { + DpdError::Invalid( + "external groups must have NAT target".to_string(), + ) + })?; + + validate_multicast_address(group_ip, new_group_info.sources.as_deref())?; + validate_nat_target(nat_target)?; + let group_entry = mcast.groups.remove(&group_ip).unwrap(); let old_nat_target = group_entry.int_fwding.nat_target; - let table_result = - update_external_tables(s, group_ip, &group_entry, &new_group_info); - - match table_result { - Ok(_) => { - let mut updated_group = group_entry; // Take ownership - - // Update NAT target references if NAT target changed - if let Some(old_nat) = old_nat_target { - if old_nat.internal_ip != new_group_info.nat_target.internal_ip - { - mcast.remove_nat_target_ref( - group_ip, - old_nat.internal_ip.into(), - ); - mcast.add_nat_target_ref( - group_ip, - new_group_info.nat_target.internal_ip.into(), - ); - } - } + // Create rollback context for external group update + let group_entry_for_rollback = group_entry.clone(); + let rollback_ctx = + GroupUpdateRollbackContext::new(s, group_ip, &group_entry_for_rollback); - // Update the external group fields - updated_group.tag = new_group_info.tag.or(updated_group.tag); - updated_group.int_fwding.nat_target = - Some(new_group_info.nat_target); - updated_group.ext_fwding.vlan_id = - new_group_info.vlan_id.or(updated_group.ext_fwding.vlan_id); - updated_group.sources = - new_group_info.sources.or(updated_group.sources); - - let response = updated_group.to_response(group_ip); - mcast.groups.insert(group_ip, updated_group); - Ok(response) - } - Err(e) => { - mcast.groups.insert(group_ip, group_entry); + // Try to update external tables first + if let Err(e) = + update_external_tables(s, group_ip, &group_entry, &new_group_info) + { + // Restore original group and return error + mcast.groups.insert(group_ip, group_entry); + return Err(rollback_ctx + .rollback_external(e, new_group_info.sources.as_deref())); + } - // Use unified rollback for external modify failures - rollback_on_group_update( - s, + let mut updated_group = group_entry.clone(); + + // Update NAT target references if NAT target changed + if let Some(old_nat) = old_nat_target { + let old_internal_ip = old_nat.internal_ip; + let new_internal_ip = nat_target.internal_ip; + + if old_internal_ip != new_internal_ip { + mcast.rm_forwarding_refs(AdminScopedIpv6::new(old_internal_ip)?); + mcast.add_forwarding_refs( group_ip, - &[], // External groups don't have member changes - &[], // External groups don't have member changes - mcast.groups.get_mut(&group_ip).unwrap(), - new_group_info.sources.as_deref(), // New sources that might need rollback - ) - .ok(); // Ignore rollback errors, return original error + AdminScopedIpv6::new(new_internal_ip)?, + ); + } + } + + // Update the external group fields + updated_group.tag = new_group_info.tag.or(updated_group.tag); + 
updated_group.int_fwding.nat_target = Some(nat_target); + + let old_vlan_id = updated_group.ext_fwding.vlan_id; + updated_group.ext_fwding.vlan_id = new_group_info + .external_forwarding + .vlan_id + .or(updated_group.ext_fwding.vlan_id); + updated_group.sources = new_group_info.sources.or(updated_group.sources); + + // Update bitmap tables with new VLAN if VLAN changed + // Also, handles possible membership skew between update internal + external calls. + if old_vlan_id != updated_group.ext_fwding.vlan_id { + let internal_ip = nat_target.internal_ip.into(); + + let bitmap_result = match mcast.groups.get(&internal_ip) { + Some(internal_group) + if internal_group.replication_info.is_some() => + { + // Only update bitmap if internal group has replication + let port_bitmap = create_port_bitmap( + &internal_group.members, + Direction::External, + ); + + // During external group update, bitmap entry exists - update it + table::mcast::mcast_egress::update_bitmap_entry( + s, + internal_group.external_group_id(), + &port_bitmap, + updated_group.ext_fwding.vlan_id, + ) + } + Some(_) => Ok(()), // Internal group exists but has no replication + None => { + Err(DpdError::Invalid(format!( + "internal group not found when updating bitmap: internal_ip={internal_ip}, external_group={group_ip}", + ))) + } + }; + + if let Err(e) = bitmap_result { + // Rollback the external table changes and return the error + mcast.groups.insert(group_ip, group_entry); - Err(e) + error!( + s.log, + "failed to update bitmap table for external group {group_ip}: {e:?}" + ); + return Err(rollback_ctx.rollback_and_restore(e)); } } -} -pub(crate) fn modify_group_internal( - s: &Switch, - group_ip: Ipv6Addr, - new_group_info: MulticastGroupUpdateEntry, -) -> DpdResult { - modify_group_internal_only(s, group_ip, new_group_info) + let response = updated_group.to_external_response(group_ip); + mcast.groups.insert(group_ip, updated_group); + Ok(response) } -/// Modify an internal multicast group configuration. 
-fn modify_group_internal_only( +pub(crate) fn modify_group_internal( s: &Switch, - group_ip: Ipv6Addr, - new_group_info: MulticastGroupUpdateEntry, -) -> DpdResult { + group_ip: AdminScopedIpv6, + new_group_info: MulticastGroupUpdateUnderlayEntry, +) -> DpdResult { let mut mcast = s.mcast.lock().unwrap(); if !mcast.groups.contains_key(&group_ip.into()) { return Err(DpdError::Missing(format!( - "Multicast group for IP {} not found", - group_ip + "Multicast group for IP {group_ip} not found", ))); } let mut group_entry = mcast.groups.remove(&group_ip.into()).unwrap(); - // Validate sources - let (sources, sources_diff) = if let Some(new_srcs) = - new_group_info.sources.clone() - { - if is_ssm(group_ip.into()) && new_srcs.is_empty() { - mcast.groups.insert(group_ip.into(), group_entry.clone()); // Restore on error - return Err(DpdError::Invalid(format!( - "IP {} is a Source-Specific Multicast address and requires at least one source to be defined", - group_ip - ))); + // Create rollback context for internal group update + let group_entry_for_rollback = group_entry.clone(); + let rollback_ctx = GroupUpdateRollbackContext::new( + s, + group_ip.into(), + &group_entry_for_rollback, + ); + + // Internal groups don't update sources - always `None` + debug_assert!( + group_entry.sources.is_none(), + "Internal groups should not have sources" + ); + let sources = None; + + // Configure replication based on member count transitions + let replication_info = match ( + new_group_info.members.is_empty(), + group_entry.replication_info.is_some(), + ) { + (true, true) => { + // Transition from members to empty - cleanup tables + cleanup_empty_group_replication(s, group_ip.into(), &group_entry) + .map_err(|e| rollback_ctx.rollback_and_restore(e))?; + // Immediately clear replication_info to maintain consistency + group_entry.replication_info = None; + None + } + (false, false) => { + // Transition from empty to members - configure replication + Some(configure_replication(group_entry.external_group_id())) + } + (false, true) => { + // Already has members and replication - keep existing + group_entry.replication_info.clone() + } + (true, false) => { + // Already empty and no replication - keep none + None } - (Some(new_srcs), true) - } else { - (group_entry.sources.clone(), false) }; - let replication_info = group_entry.replication_info.clone(); + // Early return for no-replication case - just update metadata + if replication_info.is_none() { + group_entry.tag = new_group_info.tag.or(group_entry.tag.clone()); + group_entry.sources = sources; + group_entry.members = new_group_info.members; - // Pre-allocate group IDs if needed (avoids nested locking later) - let new_members_set = new_group_info - .members - .iter() - .cloned() - .collect::>(); + let response = group_entry.to_underlay_response(group_ip); + mcast.groups.insert(group_ip.into(), group_entry); + return Ok(response); + } - let external_scoped_id = if group_entry.external_group_id.is_none() - && new_members_set - .iter() - .any(|m| m.direction == Direction::External) - { - Some(mcast.generate_group_id()?) 
- } else { - None - }; + // Continue with replication processing + let repl_info = replication_info.as_ref().unwrap(); + let (added_members, removed_members) = process_membership_changes( + s, + group_ip.into(), + &new_group_info.members, + &mut group_entry, + repl_info, + ) + .inspect_err(|_e| { + // Restore group to mcast data structure + mcast.groups.insert(group_ip.into(), group_entry.clone()); + }) + .map_err(|e| rollback_ctx.rollback_internal(e, &[], &[]))?; - let underlay_scoped_id = if group_entry.underlay_group_id.is_none() - && new_members_set - .iter() - .any(|m| m.direction == Direction::Underlay) - { - Some(mcast.generate_group_id()?) - } else { - None - }; + // Perform table updates + update_group_tables( + s, + group_ip.into(), + &group_entry, + repl_info, + &sources, + &group_entry.sources, + ) + .map_err(|e| { + // Restore group to mcast data structure + mcast.groups.insert(group_ip.into(), group_entry.clone()); + rollback_ctx.rollback_internal(e, &added_members, &removed_members) + })?; - let (added_members, removed_members) = - if let Some(ref repl_info) = replication_info { - process_membership_changes( - s, - group_ip.into(), - &new_group_info.members, - &mut group_entry, - repl_info, - external_scoped_id, - underlay_scoped_id, - )? - } else { - (Vec::new(), Vec::new()) + let filter_by_direction = + |members: &[MulticastGroupMember], direction: Direction| { + members + .iter() + .filter(|m| m.direction == direction) + .cloned() + .collect::>() }; - // Perform table updates - let table_update_result = if let Some(ref repl_info) = replication_info { - update_group_tables( - s, - group_ip.into(), - &group_entry, - repl_info, - &sources, - &group_entry.sources, - ) - } else { - Ok(()) - }; + // Update bitmap tables if overlay members changed + let old_external_members = + filter_by_direction(&group_entry.members, Direction::External); + let new_external_members = + filter_by_direction(&new_group_info.members, Direction::External); - match table_update_result { - Ok(_) => { - group_entry.tag = new_group_info.tag.or(group_entry.tag.clone()); - group_entry.sources = sources; - group_entry.replication_info = replication_info; - group_entry.members = new_group_info.members; + if old_external_members != new_external_members { + // VLAN mapping maintained via add_forwarding_refs/rm_forwarding_refs + let external_group_vlan_id = mcast.get_vlan_for_internal_addr(group_ip); - let response = group_entry.to_response(group_ip.into()); - mcast.groups.insert(group_ip.into(), group_entry); - Ok(response) - } - Err(e) => { + update_internal_group_bitmap_tables( + s, + group_entry.external_group_id(), + &new_group_info.members, + &group_entry.members, + external_group_vlan_id, + ) + .map_err(|e| { + // Restore group to mcast data structure mcast.groups.insert(group_ip.into(), group_entry.clone()); + rollback_ctx.rollback_and_restore(e) + })?; + } - rollback_on_group_update( - s, - group_ip.into(), - &added_members, - &removed_members, - mcast.groups.get_mut(&group_ip.into()).unwrap(), - sources_diff.then_some(sources.as_ref().unwrap()), - )?; + // Update group metadata and return success + group_entry.tag = new_group_info.tag.or(group_entry.tag.clone()); + group_entry.sources = sources; + group_entry.replication_info = replication_info; + group_entry.members = new_group_info.members; - Err(e) - } - } + let response = group_entry.to_underlay_response(group_ip); + mcast.groups.insert(group_ip.into(), group_entry.clone()); + + Ok(response) } /// List all multicast groups over a range. 
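The rewritten `modify_group_internal` keys replication setup and teardown off the transition between empty and non-empty member sets, rather than recomputing hardware state on every call. Below is a minimal sketch of that four-way transition in isolation; `ReplicationInfo`, `next_replication`, and the two closure parameters are hypothetical stand-ins for the real `configure_replication` and `cleanup_empty_group_replication` paths, not part of the patch.

#[derive(Clone, Debug)]
struct ReplicationInfo {
    rid: u16,
}

fn next_replication<C, D>(
    new_members_empty: bool,
    current: Option<ReplicationInfo>,
    configure: C,
    cleanup: D,
) -> Result<Option<ReplicationInfo>, String>
where
    C: FnOnce() -> ReplicationInfo,
    D: FnOnce() -> Result<(), String>,
{
    match (new_members_empty, current.is_some()) {
        // Members removed entirely: tear down replication state first.
        (true, true) => {
            cleanup()?;
            Ok(None)
        }
        // First members added: build replication state.
        (false, false) => Ok(Some(configure())),
        // Still has members: keep the existing state.
        (false, true) => Ok(current),
        // Was empty and stays empty: nothing to change.
        (true, false) => Ok(None),
    }
}

fn main() {
    // Going from one member to none triggers cleanup and drops the state.
    let result = next_replication(
        true,
        Some(ReplicationInfo { rid: 7 }),
        || ReplicationInfo { rid: 7 },
        || Ok(()),
    );
    assert_eq!(result.map(|r| r.is_none()), Ok(true));
}

Funneling the transition through a single match keeps the metadata-only path (no replication) cheap and makes the cleanup branch the only place that touches hardware tables, mirroring the early return in the patched function.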
@@ -734,15 +835,13 @@ pub(crate) fn get_range( mcast .groups .range((lower_bound, Bound::Unbounded)) - .filter_map(|(ip, group)| { - if let Some(tag_filter) = tag { - if group.tag.as_deref() != Some(tag_filter) { - return None; - } - } - - Some(group.to_response(*ip)) + .filter(|&(_ip, group)| { + // Filter by tag if specified + tag.is_none_or(|tag_filter| { + group.tag.as_deref() == Some(tag_filter) + }) }) + .map(|(ip, group)| group.to_response(*ip)) .take(limit) .collect() } @@ -755,11 +854,7 @@ pub(crate) fn reset_tag(s: &Switch, tag: &str) -> DpdResult<()> { .groups .iter() .filter_map(|(ip, group)| { - if group.tag.as_deref() == Some(tag) { - Some(*ip) - } else { - None - } + (group.tag.as_deref() == Some(tag)).then_some(*ip) }) .collect::>() }; @@ -768,8 +863,9 @@ pub(crate) fn reset_tag(s: &Switch, tag: &str) -> DpdResult<()> { if let Err(e) = del_group(s, group_ip) { error!( s.log, - "failed to delete multicast group for IP {}: {:?}", group_ip, e + "failed to delete multicast group for IP {group_ip}: {e:?}" ); + return Err(e); } } @@ -799,8 +895,9 @@ pub(crate) fn reset_untagged(s: &Switch) -> DpdResult<()> { if let Err(e) = del_group(s, group_ip) { error!( s.log, - "failed to delete multicast group for IP {}: {:?}", group_ip, e + "failed to delete multicast group for IP {group_ip}: {e:?}" ); + return Err(e); } } @@ -817,10 +914,9 @@ pub(crate) fn reset(s: &Switch) -> DpdResult<()> { if let Err(e) = s.asic_hdl.mc_group_destroy(group_id) { error!( s.log, - "failed to delete multicast group with ID {}: {:?}", - group_id, - e + "failed to delete multicast group with ID {group_id}: {e:?}" ); + return Err(e.into()); } } @@ -841,17 +937,61 @@ pub(crate) fn reset(s: &Switch) -> DpdResult<()> { Ok(()) } +/// Performs VLAN propagation for external groups. +fn perform_vlan_propagation( + s: &Switch, + mcast: &MulticastGroupData, + group_ip: IpAddr, + vlan_id: u16, + internal_ip: IpAddr, +) -> DpdResult<()> { + debug!( + s.log, + "external group with VLAN references internal group, propagating VLAN"; + "external_group" => %group_ip, + "vlan" => vlan_id, + "internal_group" => %internal_ip, + ); + + let internal_group = mcast.groups.get(&internal_ip).ok_or_else(|| { + DpdError::McastGroupFailure(format!( + "internal group not found during VLAN propagation: \ + internal_group={internal_ip}, external_group={group_ip}" + )) + })?; + + let (external_group_id, members) = ( + internal_group.external_scoped_group_id().clone(), + internal_group.members.clone(), + ); + + // Update bitmap entry with VLAN if internal group has members + if !members.is_empty() { + let port_bitmap = create_port_bitmap(&members, Direction::External); + table::mcast::mcast_egress::update_bitmap_entry( + s, + external_group_id.id(), + &port_bitmap, + Some(vlan_id), + ).map_err(|e| { + DpdError::McastGroupFailure(format!( + "failed to update external bitmap: vlan={vlan_id}, internal_group={internal_ip}, error={e:?}", + )) + })?; + } + Ok(()) +} + +/// Remove source filters for a multicast group. 
fn remove_source_filters( s: &Switch, group_ip: IpAddr, sources: Option<&[IpSrc]>, ) -> DpdResult<()> { match group_ip { - IpAddr::V4(ipv4) => remove_ipv4_source_filters(s, ipv4, sources)?, - IpAddr::V6(ipv6) => remove_ipv6_source_filters(s, ipv6, sources)?, + IpAddr::V4(ipv4) => remove_ipv4_source_filters(s, ipv4, sources), + IpAddr::V6(ipv6) => remove_ipv6_source_filters(s, ipv6, sources), } - - Ok(()) } fn remove_ipv4_source_filters( @@ -898,19 +1038,18 @@ fn remove_ipv6_source_filters( Ok(()) } +/// Add source filters for a multicast group. fn add_source_filters( s: &Switch, group_ip: IpAddr, sources: Option<&[IpSrc]>, ) -> DpdResult<()> { - if let Some(srcs) = sources { - match group_ip { - IpAddr::V4(ipv4) => add_ipv4_source_filters(s, srcs, ipv4)?, - IpAddr::V6(ipv6) => add_ipv6_source_filters(s, srcs, ipv6)?, - } - } + let Some(srcs) = sources else { return Ok(()) }; - Ok(()) + match group_ip { + IpAddr::V4(ipv4) => add_ipv4_source_filters(s, srcs, ipv4), + IpAddr::V6(ipv6) => add_ipv6_source_filters(s, srcs, ipv6), + } } fn add_ipv4_source_filters( @@ -955,19 +1094,9 @@ fn add_ipv6_source_filters( fn validate_internal_group_creation( mcast: &MulticastGroupData, - group_ip: Ipv6Addr, - group_info: &MulticastGroupCreateEntry, + group_ip: AdminScopedIpv6, ) -> DpdResult<()> { validate_group_exists(mcast, group_ip.into())?; - validate_multicast_address(group_ip.into(), group_info.sources.as_deref())?; - - if !Ipv6Net::new_unchecked(group_ip, 128).is_admin_scoped_multicast() { - return Err(DpdError::Invalid(format!( - "Non-admin-scoped IPv6 multicast groups must use the external API (/multicast/groups/external). Address {} is not admin-scoped (ff04::/16, ff05::/16, ff08::/16)", - group_ip - ))); - } - Ok(()) } @@ -987,102 +1116,72 @@ fn validate_group_exists( group_ip: IpAddr, ) -> DpdResult<()> { if mcast.groups.contains_key(&group_ip) { - return Err(DpdError::Invalid(format!( - "multicast group for IP {} already exists", - group_ip + return Err(DpdError::Exists(format!( + "multicast group for IP {group_ip} already exists", ))); } Ok(()) } +/// Configures external tables for an external multicast group. 
fn configure_external_tables( s: &Switch, group_info: &MulticastGroupCreateExternalEntry, ) -> DpdResult<()> { let group_ip = group_info.group_ip; - let nat_target = group_info.nat_target; + let nat_target = + group_info.internal_forwarding.nat_target.ok_or_else(|| { + DpdError::Invalid( + "external groups must have NAT target".to_string(), + ) + })?; // Add source filter entries if needed - let mut res = if let Some(srcs) = &group_info.sources { - match group_ip { - IpAddr::V4(ipv4) => add_ipv4_source_filters(s, srcs, ipv4), - IpAddr::V6(ipv6) => add_ipv6_source_filters(s, srcs, ipv6), - } - } else { - Ok(()) - }; + add_source_filters(s, group_ip, group_info.sources.as_deref())?; // Add NAT entry - if res.is_ok() { - res = match group_ip { - IpAddr::V4(ipv4) => { - table::mcast::mcast_nat::add_ipv4_entry(s, ipv4, nat_target) - } - IpAddr::V6(ipv6) => { - table::mcast::mcast_nat::add_ipv6_entry(s, ipv6, nat_target) - } - }; + match group_ip { + IpAddr::V4(ipv4) => { + table::mcast::mcast_nat::add_ipv4_entry(s, ipv4, nat_target)?; + } + IpAddr::V6(ipv6) => { + table::mcast::mcast_nat::add_ipv6_entry(s, ipv6, nat_target)?; + } } // Add routing entry - if res.is_ok() { - res = match group_ip { - IpAddr::V4(ipv4) => table::mcast::mcast_route::add_ipv4_entry( - s, - ipv4, - group_info.vlan_id, - ), - IpAddr::V6(ipv6) => table::mcast::mcast_route::add_ipv6_entry( - s, - ipv6, - group_info.vlan_id, - ), - }; + match group_ip { + IpAddr::V4(ipv4) => table::mcast::mcast_route::add_ipv4_entry( + s, + ipv4, + group_info.external_forwarding.vlan_id, + ), + IpAddr::V6(ipv6) => table::mcast::mcast_route::add_ipv6_entry( + s, + ipv6, + group_info.external_forwarding.vlan_id, + ), } - - res } -fn create_multicast_group_ids( +/// Creates multicast group IDs for external and underlay groups. +/// +/// Groups can be created without members initially, and members are added later +/// when instances are added. 
+fn allocate_multicast_group_ids( s: &Switch, mcast: &mut MulticastGroupData, - group_ip: Ipv6Addr, - group_info: &MulticastGroupCreateEntry, -) -> DpdResult<(Option, Option)> { - let has_external_member = group_info - .members - .iter() - .any(|m| m.direction == Direction::External); - let has_underlay_member = group_info - .members - .iter() - .any(|m| m.direction == Direction::Underlay); - - if !has_external_member && !has_underlay_member { - return Err(DpdError::Invalid(format!( - "multicast group for admin-scoped IP {} must have at least one external/underlay member", - group_ip - ))); - } - - debug!(s.log, "creating multicast group IDs for IP {}", group_ip); + group_ip: IpAddr, +) -> DpdResult<(ScopedGroupId, ScopedGroupId)> { + debug!(s.log, "creating multicast group IDs for IP {group_ip}"); - // Pre-allocate group IDs to avoid nested locking - let external_group_id = has_external_member - .then(|| mcast.generate_group_id()) - .transpose()?; - let underlay_group_id = has_underlay_member - .then(|| mcast.generate_group_id()) - .transpose()?; + // Always allocate both group IDs to avoid allocation delays during member addition + let external_group_id = mcast.generate_group_id()?; + let underlay_group_id = mcast.generate_group_id()?; // Create ASIC groups without holding the lock - if let Some(scoped_id) = &external_group_id { - create_asic_group(s, scoped_id.id(), group_ip.into())?; - } - - if let Some(scoped_id) = &underlay_group_id { - create_asic_group(s, scoped_id.id(), group_ip.into())?; - } + create_asic_group(s, external_group_id.id(), group_ip)?; + create_asic_group(s, underlay_group_id.id(), group_ip)?; Ok((external_group_id, underlay_group_id)) } @@ -1090,27 +1189,29 @@ fn create_multicast_group_ids( fn delete_multicast_groups( s: &Switch, group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, + external_group_id: ScopedGroupId, + underlay_group_id: ScopedGroupId, ) -> DpdResult<()> { - if let Some(external_scoped) = external_group_id.as_ref() { - let external_id = external_scoped.id(); - s.asic_hdl.mc_group_destroy(external_id).map_err(|e| { - DpdError::McastGroupFailure(format!( - "failed to delete external multicast group for IP {} with ID {}: {:?}", - group_ip, external_id, e - )) - })?; + let external_id = external_group_id.id(); + if let Err(e) = s.asic_hdl.mc_group_destroy(external_id) { + warn!( + s.log, + "failed to delete external multicast group"; + "IP" => %group_ip, + "ID" => external_id, + "error" => ?e, + ); } - if let Some(underlay_scoped) = underlay_group_id.as_ref() { - let underlay_id = underlay_scoped.id(); - s.asic_hdl.mc_group_destroy(underlay_id).map_err(|e| { - DpdError::McastGroupFailure(format!( - "failed to delete underlay multicast group for IP {} with ID {}: {:?}", - group_ip, underlay_id, e - )) - })?; + let underlay_id = underlay_group_id.id(); + if let Err(e) = s.asic_hdl.mc_group_destroy(underlay_id) { + warn!( + s.log, + "failed to delete underlay multicast group"; + "IP" => %group_ip, + "ID" => underlay_id, + "error" => ?e, + ); } Ok(()) @@ -1125,8 +1226,8 @@ fn create_asic_group( .mc_group_create(group_id) .map_err(|e: AsicError| { DpdError::McastGroupFailure(format!( - "failed to create multicast group for IP {} with ID {}: {:?}", - group_ip, group_id, e + "failed to create multicast group for IP {group_ip} with ID \ + {group_id}: {e:?}" )) }) } @@ -1135,8 +1236,8 @@ fn add_ports_to_groups( s: &Switch, group_ip: IpAddr, members: &[MulticastGroupMember], - external_group_id: Option, - underlay_group_id: Option, + 
external_group_id: MulticastGroupId, + underlay_group_id: MulticastGroupId, replication_info: &MulticastReplicationInfo, added_members: &mut Vec<(PortId, LinkId, Direction)>, ) -> DpdResult<()> { @@ -1146,24 +1247,7 @@ fn add_ports_to_groups( Direction::Underlay => underlay_group_id, }; - let Some(group_id) = group_id else { - continue; - }; - - let asic_id = s - .port_link_to_asic_id(member.port_id, member.link_id) - .inspect_err(|_e| { - rollback_on_group_create( - s, - group_ip, - (external_group_id, underlay_group_id), - added_members, - replication_info, - None, - None, - ) - .ok(); - })?; + let asic_id = s.port_link_to_asic_id(member.port_id, member.link_id)?; s.asic_hdl .mc_port_add( @@ -1173,20 +1257,9 @@ fn add_ports_to_groups( replication_info.level1_excl_id, ) .map_err(|e| { - rollback_on_group_create( - s, - group_ip, - (external_group_id, underlay_group_id), - added_members, - replication_info, - None, - None, - ) - .ok(); - DpdError::McastGroupFailure(format!( - "failed to add port {} to group for IP {}: {:?}", - member.port_id, group_ip, e + "failed to add port {port_id} to group for IP {group_ip}: {e:?}", + port_id = member.port_id )) })?; @@ -1202,8 +1275,6 @@ fn process_membership_changes( new_members: &[MulticastGroupMember], group_entry: &mut MulticastGroup, replication_info: &MulticastReplicationInfo, - external_scoped_id: Option, - underlay_scoped_id: Option, ) -> DpdResult<(Vec, Vec)> { // First validate that IPv4 doesn't have underlay members if group_ip.is_ipv4() @@ -1212,8 +1283,7 @@ fn process_membership_changes( .any(|m| m.direction == Direction::Underlay) { return Err(DpdError::Invalid(format!( - "multicast group for IPv4 {} cannot have underlay members", - group_ip + "multicast group for IPv4 {group_ip} cannot have underlay members" ))); } @@ -1224,43 +1294,26 @@ fn process_membership_changes( let mut added_members = Vec::new(); let mut removed_members = Vec::new(); - // Step 1: Ensure required groups exist (this can fail cleanly) - ensure_external_group_exists( - s, - group_ip, - &new_members_set, - group_entry, - external_scoped_id, - )?; - - if group_ip.is_ipv6() { - ensure_underlay_group_exists( - s, - group_ip, - &new_members_set, - group_entry, - underlay_scoped_id, - )?; - } - - // Step 2: Remove members from ASIC (only after group creation succeeds) + // Remove members from ASIC for member in prev_members.difference(&new_members_set) { let group_id = match member.direction { Direction::External => group_entry.external_group_id(), Direction::Underlay => group_entry.underlay_group_id(), }; - let Some(group_id) = group_id else { - continue; - }; - let asic_id = s.port_link_to_asic_id(member.port_id, member.link_id)?; - s.asic_hdl.mc_port_remove(group_id, asic_id)?; + + s.asic_hdl.mc_port_remove(group_id, asic_id).map_err(|e| { + DpdError::McastGroupFailure(format!( + "failed to remove port {port_id} from group for IP {group_ip}: {e:?}", + port_id = member.port_id + )) + })?; removed_members.push(member.clone()); } - // Step 3: Add new members to ASIC + // Add new members to ASIC for member in new_members_set.difference(&prev_members) { if group_ip.is_ipv4() && member.direction == Direction::Underlay { continue; @@ -1271,79 +1324,35 @@ fn process_membership_changes( Direction::Underlay => group_entry.underlay_group_id(), }; - let Some(group_id) = group_id else { - continue; - }; - let asic_id = s.port_link_to_asic_id(member.port_id, member.link_id)?; - s.asic_hdl.mc_port_add( - group_id, - asic_id, - replication_info.rid, - replication_info.level1_excl_id, 
- )?; - added_members.push(member.clone()); - } - - Ok((added_members, removed_members)) -} - -fn ensure_external_group_exists( - s: &Switch, - group_ip: IpAddr, - members: &HashSet, - group_entry: &mut MulticastGroup, - pre_allocated_id: Option, -) -> DpdResult<()> { - if group_entry.external_group_id.is_none() - && members.iter().any(|m| m.direction == Direction::External) - { - let scoped_group_id = pre_allocated_id.ok_or_else(|| { - DpdError::Other( - "external group ID should have been pre-allocated".to_string(), - ) - })?; - - create_asic_group(s, scoped_group_id.id(), group_ip)?; - group_entry.external_group_id = Some(scoped_group_id); - } - - Ok(()) -} -fn ensure_underlay_group_exists( - s: &Switch, - group_ip: IpAddr, - members: &HashSet, - group_entry: &mut MulticastGroup, - pre_allocated_id: Option, -) -> DpdResult<()> { - if group_entry.underlay_group_id.is_none() - && members.iter().any(|m| m.direction == Direction::Underlay) - { - let scoped_group_id = pre_allocated_id.ok_or_else(|| { - DpdError::Other( - "underlay group ID should have been pre-allocated".to_string(), + s.asic_hdl + .mc_port_add( + group_id, + asic_id, + replication_info.rid, + replication_info.level1_excl_id, ) - })?; + .map_err(|e| { + DpdError::McastGroupFailure(format!( + "failed to add port {port_id} to group for IP {group_ip}: {e:?}", + port_id = member.port_id + )) + })?; - create_asic_group(s, scoped_group_id.id(), group_ip)?; - group_entry.underlay_group_id = Some(scoped_group_id); + added_members.push(member.clone()); } - Ok(()) + Ok((added_members, removed_members)) } +/// Default level exclusion IDs to 0 for internal groups +/// since they can only be configured internally without API calls. fn configure_replication( - external_group_id: Option, - underlay_group_id: Option, + external_group_id: MulticastGroupId, ) -> MulticastReplicationInfo { - let rid = external_group_id.or(underlay_group_id).unwrap(); - - // We default level exclusion IDs to 0 for internal groups - // since they can only be configured internally without API calls. 
MulticastReplicationInfo { - rid, + rid: external_group_id, level1_excl_id: 0, level2_excl_id: 0, } @@ -1353,23 +1362,20 @@ fn configure_replication( fn configure_internal_tables( s: &Switch, group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, + external_group_id: MulticastGroupId, + underlay_group_id: MulticastGroupId, replication_info: Option<&MulticastReplicationInfo>, - group_info: &MulticastGroupCreateEntry, added_members: &[(PortId, LinkId, Direction)], vlan_id: Option, // VLAN ID from referencing external group ) -> DpdResult<()> { - let res = match (group_ip, replication_info) { + match (group_ip, replication_info) { // Note: There are no internal IPv4 groups, only external IPv4 groups - (IpAddr::V4(_), _) => { - return Err(DpdError::Invalid( - "IPv4 groups cannot be created as internal groups".to_string(), - )); - } + (IpAddr::V4(_), _) => Err(DpdError::Invalid( + "IPv4 groups cannot be created as internal groups".to_string(), + )), (IpAddr::V6(ipv6), Some(replication_info)) => { - let mut res = table::mcast::mcast_replication::add_ipv6_entry( + table::mcast::mcast_replication::add_ipv6_entry( s, ipv6, underlay_group_id, @@ -1377,69 +1383,34 @@ fn configure_internal_tables( replication_info.rid, replication_info.level1_excl_id, replication_info.level2_excl_id, - ); - - if res.is_ok() { - if let Some(srcs) = &group_info.sources { - res = add_ipv6_source_filters(s, srcs, ipv6); - } - } - - if res.is_ok() { - res = table::mcast::mcast_route::add_ipv6_entry( + ) + .and_then(|_| { + table::mcast::mcast_route::add_ipv6_entry( s, ipv6, vlan_id, // VLAN from referencing external group - ); - } - - if res.is_ok() - && external_group_id.is_some() - && underlay_group_id.is_some() - { - let mut port_bitmap = - table::mcast::mcast_egress::PortBitmap::new(); - for (port_id, _link_id, direction) in added_members { - if *direction == Direction::External { - let port_number = port_id.as_u8(); - port_bitmap.add_port(port_number); - } - } - - res = table::mcast::mcast_egress::add_bitmap_entry( + ) + }) + .and_then(|_| { + // Add bitmap entry for overlay members only + // (decapsulation decision only needed for overlay traffic) + let external_port_bitmap = + create_external_port_bitmap(added_members); + table::mcast::mcast_egress::add_bitmap_entry( s, - external_group_id.unwrap(), - &port_bitmap, + external_group_id, + &external_port_bitmap, vlan_id, // VLAN from referencing external group - ); - } - - res - } - - (IpAddr::V6(_), None) => { - return Err(DpdError::Invalid( - "Internal, admin-scoped IPv6 groups must have replication info" - .to_string(), - )); + ) + }) } - }; - if let Err(e) = res { - if let Some(replication_info) = replication_info { - rollback_on_group_create( - s, - group_ip, - (external_group_id, underlay_group_id), - added_members, - replication_info, - None, // Internal groups don't have NAT targets - group_info.sources.as_deref(), - )?; + (IpAddr::V6(ipv6), None) => { + // For empty groups, just add basic route entry - no replication needed yet + table::mcast::mcast_route::add_ipv6_entry( + s, ipv6, vlan_id, // VLAN from referencing external group + ) } - return Err(e); } - - Ok(()) } fn update_group_tables( @@ -1465,6 +1436,22 @@ fn update_group_tables( replication_info, )?; } + } else { + // First time setting up replication for this group - use add instead of update + match group_ip { + IpAddr::V4(_) => {} // IPv4 groups don't have replication entries + IpAddr::V6(ipv6) => { + table::mcast::mcast_replication::add_ipv6_entry( + s, + ipv6, + 
group_entry.underlay_group_id(), + group_entry.external_group_id(), + replication_info.rid, + replication_info.level1_excl_id, + replication_info.level2_excl_id, + )?; + } + } } if new_sources != old_sources { @@ -1488,27 +1475,45 @@ fn update_external_tables( } // Update NAT target - external groups always have NAT targets - if Some(new_group_info.nat_target) != group_entry.int_fwding.nat_target { + if Some( + new_group_info + .internal_forwarding + .nat_target + .ok_or_else(|| { + DpdError::Invalid( + "external groups must have NAT target".to_string(), + ) + })?, + ) != group_entry.int_fwding.nat_target + { update_nat_tables( s, group_ip, - Some(new_group_info.nat_target), + Some(new_group_info.internal_forwarding.nat_target.ok_or_else( + || { + DpdError::Invalid( + "external groups must have NAT target".to_string(), + ) + }, + )?), group_entry.int_fwding.nat_target, )?; } // Update VLAN if it changed - if new_group_info.vlan_id != group_entry.ext_fwding.vlan_id { + if new_group_info.external_forwarding.vlan_id + != group_entry.ext_fwding.vlan_id + { match group_ip { IpAddr::V4(ipv4) => table::mcast::mcast_route::update_ipv4_entry( s, ipv4, - new_group_info.vlan_id, + new_group_info.external_forwarding.vlan_id, ), IpAddr::V6(ipv6) => table::mcast::mcast_route::update_ipv6_entry( s, ipv6, - new_group_info.vlan_id, + new_group_info.external_forwarding.vlan_id, ), }?; } @@ -1516,38 +1521,105 @@ fn update_external_tables( Ok(()) } -fn delete_group_tables( +/// Delete bitmap entries for a group with replication checks. +fn delete_group_bitmap_entries( s: &Switch, - group_ip: IpAddr, group: &MulticastGroup, ) -> DpdResult<()> { - match group_ip { - IpAddr::V4(ipv4) => { - remove_ipv4_source_filters(s, ipv4, group.sources.as_deref())?; - - if group.int_fwding.nat_target.is_some() { - table::mcast::mcast_nat::del_ipv4_entry(s, ipv4)?; - } - - table::mcast::mcast_route::del_ipv4_entry(s, ipv4)?; - } - IpAddr::V6(ipv6) => { - if group.external_group_id().is_some() - && group.underlay_group_id().is_some() - { - table::mcast::mcast_egress::del_bitmap_entry( - s, - group.external_group_id().unwrap(), - )?; - } + // Only delete bitmap entries if the group had replication info + // (which indicates bitmap entries were created) + if group.replication_info.is_none() { + return Ok(()); // No bitmap entries were ever created + } + // Delete external bitmap entry only (underlay doesn't use decap bitmap) + table::mcast::mcast_egress::del_bitmap_entry(s, group.external_group_id()) +} - table::mcast::mcast_replication::del_ipv6_entry(s, ipv6)?; +/// Cleanup replication tables when transitioning group to empty membership. +/// +/// Handles the complete cleanup process including bitmap and +/// replication entries. +fn cleanup_empty_group_replication( + s: &Switch, + group_ip: IpAddr, + group_entry: &MulticastGroup, +) -> DpdResult<()> { + debug!(s.log, "cleaning up replication for empty group {group_ip}"); - remove_ipv6_source_filters(s, ipv6, group.sources.as_deref())?; + // Only proceed if group actually has replication info to clean up + if group_entry.replication_info.is_none() { + return Ok(()); + } - if group.int_fwding.nat_target.is_some() { - table::mcast::mcast_nat::del_ipv6_entry(s, ipv6)?; - } + // Attempt cleanup operations in sequence + delete_group_bitmap_entries(s, group_entry) + .and_then(|_| delete_replication_entries(s, group_ip, group_entry)) +} + +/// Create a port bitmap for external (overlay) members only. 
+fn create_external_port_bitmap( + added_members: &[(PortId, LinkId, Direction)], +) -> table::mcast::mcast_egress::PortBitmap { + let external_members: Vec = added_members + .iter() + .filter(|(_, _, direction)| *direction == Direction::External) + .map(|(port_id, link_id, direction)| MulticastGroupMember { + port_id: *port_id, + link_id: *link_id, + direction: *direction, + }) + .collect(); + create_port_bitmap(&external_members, Direction::External) +} + +/// Delete replication table entries. +fn delete_replication_entries( + s: &Switch, + group_ip: IpAddr, + group: &MulticastGroup, +) -> DpdResult<()> { + // Only delete replication entries if the group had replication info + // (which indicates replication entries were created) + if group.replication_info.is_none() { + return Ok(()); // No replication entries were ever created + } + + // Delete replication entry (only IPv6 has replication table) + match group_ip { + IpAddr::V4(_) => Ok(()), // IPv4 doesn't use replication table + IpAddr::V6(ipv6) => { + table::mcast::mcast_replication::del_ipv6_entry(s, ipv6) + } + } +} + +fn delete_group_tables( + s: &Switch, + group_ip: IpAddr, + group: &MulticastGroup, +) -> DpdResult<()> { + match group_ip { + IpAddr::V4(ipv4) => { + remove_ipv4_source_filters(s, ipv4, group.sources.as_deref())?; + + if group.int_fwding.nat_target.is_some() { + table::mcast::mcast_nat::del_ipv4_entry(s, ipv4)?; + } + + delete_group_bitmap_entries(s, group)?; + + table::mcast::mcast_route::del_ipv4_entry(s, ipv4)?; + } + IpAddr::V6(ipv6) => { + delete_replication_entries(s, group_ip, group)?; + + remove_ipv6_source_filters(s, ipv6, group.sources.as_deref())?; + + if group.int_fwding.nat_target.is_some() { + table::mcast::mcast_nat::del_ipv6_entry(s, ipv6)?; + } + + delete_group_bitmap_entries(s, group)?; table::mcast::mcast_route::del_ipv6_entry(s, ipv6)?; } @@ -1559,8 +1631,8 @@ fn delete_group_tables( fn update_replication_tables( s: &Switch, group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, + external_group_id: MulticastGroupId, + underlay_group_id: MulticastGroupId, replication_info: &MulticastReplicationInfo, ) -> DpdResult<()> { match group_ip { @@ -1584,555 +1656,122 @@ fn update_nat_tables( old_nat_target: Option, ) -> DpdResult<()> { match (group_ip, new_nat_target, old_nat_target) { - (IpAddr::V4(ipv4), Some(nat), _) => { + (IpAddr::V4(ipv4), Some(nat), Some(_)) => { + // NAT to NAT - update existing entry table::mcast::mcast_nat::update_ipv4_entry(s, ipv4, nat) } - (IpAddr::V6(ipv6), Some(nat), _) => { + (IpAddr::V6(ipv6), Some(nat), Some(_)) => { + // NAT to NAT - update existing entry table::mcast::mcast_nat::update_ipv6_entry(s, ipv6, nat) } + (IpAddr::V4(ipv4), Some(nat), None) => { + // No NAT to NAT - add new entry + table::mcast::mcast_nat::add_ipv4_entry(s, ipv4, nat) + } + (IpAddr::V6(ipv6), Some(nat), None) => { + // No NAT to NAT - add new entry + table::mcast::mcast_nat::add_ipv6_entry(s, ipv6, nat) + } (IpAddr::V4(ipv4), None, Some(_)) => { + // NAT to no NAT - delete entry table::mcast::mcast_nat::del_ipv4_entry(s, ipv4) } (IpAddr::V6(ipv6), None, Some(_)) => { + // NAT to no NAT - delete entry table::mcast::mcast_nat::del_ipv6_entry(s, ipv6) } - _ => Ok(()), - } -} - -fn update_fwding_tables( - s: &Switch, - group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, - members: &[MulticastGroupMember], - vlan_id: Option, -) -> DpdResult<()> { - match group_ip { - IpAddr::V4(ipv4) => { - table::mcast::mcast_route::update_ipv4_entry(s, ipv4, vlan_id) 
- } - IpAddr::V6(ipv6) => { - let mut res = - table::mcast::mcast_route::update_ipv6_entry(s, ipv6, vlan_id); - - if res.is_ok() - && external_group_id.is_some() - && underlay_group_id.is_some() - { - let mut port_bitmap = - table::mcast::mcast_egress::PortBitmap::new(); - - for member in members { - if member.direction == Direction::External { - port_bitmap.add_port(member.port_id.as_u8()); - } - } - - res = table::mcast::mcast_egress::update_bitmap_entry( - s, - external_group_id.unwrap(), - &port_bitmap, - vlan_id, - ); - } - - res - } - } -} - -/// Rollback function for a multicast group creation failure. -/// -/// Cleans up all resources created during a failed multicast group creation. -/// -/// This function is reused for both external and internal group failures. -fn rollback_on_group_create( - s: &Switch, - group_ip: IpAddr, - group_ids: (Option, Option), - added_members: &[(PortId, LinkId, Direction)], - replication_info: &MulticastReplicationInfo, - nat_target: Option, - sources: Option<&[IpSrc]>, -) -> DpdResult<()> { - debug!( - s.log, - "rolling back multicast group creation for IP {}", group_ip - ); - - let (external_group_id, underlay_group_id) = group_ids; - - let mut contains_errors = false; - - let added_members_converted: Vec = added_members - .iter() - .map(|(port_id, link_id, direction)| MulticastGroupMember { - port_id: *port_id, - link_id: *link_id, - direction: *direction, - }) - .collect(); - - if let Err(e) = rollback_ports( - s, - &added_members_converted, - &[], - replication_info, - external_group_id, - underlay_group_id, - ) { - error!(s.log, "error removing ports during rollback: {:?}", e); - contains_errors = true; - } - - if let Err(e) = rollback_remove_groups( - s, - group_ip, - external_group_id, - underlay_group_id, - ) { - error!(s.log, "error deleting groups during rollback: {:?}", e); - contains_errors = true; - } - - if let Err(e) = rollback_remove_tables( - s, - group_ip, - external_group_id, - underlay_group_id, - nat_target, - sources, - ) { - error!( - s.log, - "Error deleting table entries during rollback: {:?}", e - ); - contains_errors = true; - } - - if contains_errors { - error!(s.log, "rollback completed with errors for IP {}", group_ip); - } else { - debug!( - s.log, - "successfully rolled back multicast group creation for IP {}", - group_ip - ); + _ => Ok(()), // No change (None → None) } - - Ok(()) } -/// Rollback function for a multicast group modification if it fails on updates. -/// -/// Restores the group to its original state. -/// -/// This function is reused for both external and internal group modifications. -fn rollback_on_group_update( +/// Update internal group's bitmap entries when members change. 
+fn update_internal_group_bitmap_tables( s: &Switch, - group_ip: IpAddr, - added_ports: &[MulticastGroupMember], - removed_ports: &[MulticastGroupMember], - orig_group_info: &MulticastGroup, - new_sources: Option<&[IpSrc]>, + external_group_id: MulticastGroupId, + new_members: &[MulticastGroupMember], + old_members: &[MulticastGroupMember], + external_group_vlan: Option, ) -> DpdResult<()> { - debug!( - s.log, - "rolling back multicast group update for IP {}", group_ip - ); + let prev_had_members = !old_members.is_empty(); + let now_has_members = !new_members.is_empty(); - let mut contains_errors = false; + // Create bitmap for overlay members only (decapsulation decision applies only to overlay traffic) + let external_port_bitmap = + create_port_bitmap(new_members, Direction::External); - if let Some(replication_info) = &orig_group_info.replication_info { - if let Err(e) = rollback_ports( + if !prev_had_members && now_has_members { + // First time adding members - use add_bitmap_entry with external group's VLAN + table::mcast::mcast_egress::add_bitmap_entry( s, - added_ports, - removed_ports, - replication_info, - orig_group_info.external_group_id(), - orig_group_info.underlay_group_id(), - ) { - error!( - s.log, - "error handling ports during update rollback: {:?}", e - ); - contains_errors = true; - } - } - - if new_sources.is_some() { - if let Err(e) = rollback_source_filters( + external_group_id, + &external_port_bitmap, + external_group_vlan, // Use external group's VLAN for bitmap entries + )?; + } else if prev_had_members && now_has_members { + // Members changed but still have members - use external group's VLAN + table::mcast::mcast_egress::update_bitmap_entry( s, - group_ip, - new_sources, - orig_group_info.sources.as_deref(), - ) { - error!( - s.log, - "error restoring source filters during update rollback: {:?}", - e - ); - contains_errors = true; - } - } - - if let Err(e) = rollback_restore_tables(s, group_ip, orig_group_info) { - error!( - s.log, - "error restoring table entries during update rollback: {:?}", e - ); - contains_errors = true; - } - - if contains_errors { - error!( - s.log, - "update rollback completed with errors for IP {}", group_ip - ); - } else { - debug!( - s.log, - "successfully rolled back multicast group update for IP {}", - group_ip - ); - } - - Ok(()) -} - -fn rollback_ports( - s: &Switch, - added_ports: &[MulticastGroupMember], - removed_ports: &[MulticastGroupMember], - replication_info: &MulticastReplicationInfo, - external_group_id: Option, - underlay_group_id: Option, -) -> DpdResult<()> { - for member in added_ports { - let group_id = match member.direction { - Direction::External => external_group_id, - Direction::Underlay => underlay_group_id, - }; - - if group_id.is_none() { - continue; - } - - match s.port_link_to_asic_id(member.port_id, member.link_id) { - Ok(asic_id) => { - if let Err(e) = - s.asic_hdl.mc_port_remove(group_id.unwrap(), asic_id) - { - debug!( - s.log, - "failed to remove port during rollback: port={}, link={}, error={:?}", - member.port_id, member.link_id, e - ); - } - } - Err(e) => { - debug!( - s.log, - "failed to get ASIC ID for port during rollback: port={}, link={}, error={:?}", - member.port_id, member.link_id, e - ); - } - } - } - - for member in removed_ports { - let group_id = match member.direction { - Direction::External => external_group_id, - Direction::Underlay => underlay_group_id, - }; - - if group_id.is_none() { - continue; - } - - match s.port_link_to_asic_id(member.port_id, member.link_id) { - 
Ok(asic_id) => { - if let Err(e) = s.asic_hdl.mc_port_add( - group_id.unwrap(), - asic_id, - replication_info.rid, - replication_info.level1_excl_id, - ) { - debug!( - s.log, - "failed to restore port during rollback: port={}, link={}, error={:?}", - member.port_id, member.link_id, e - ); - } - } - Err(e) => { - debug!( - s.log, - "failed to get ASIC ID for port during rollback: port={}, link={}, error={:?}", - member.port_id, member.link_id, e - ); - } - } - } - - Ok(()) -} - -fn rollback_remove_groups( - s: &Switch, - group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, -) -> DpdResult<()> { - if let Some(external_id) = external_group_id { - if let Err(e) = s.asic_hdl.mc_group_destroy(external_id) { - debug!( - s.log, - "failed to remove external multicast group for IP {} with ID {} during rollback: {:?}", - group_ip, external_id, e - ); - } - } - - if let Some(underlay_id) = underlay_group_id { - if let Err(e) = s.asic_hdl.mc_group_destroy(underlay_id) { - debug!( - s.log, - "failed to remove underlay multicast group for IP {} with ID {} during rollback: {:?}", - group_ip, underlay_id, e - ); - } + external_group_id, + &external_port_bitmap, + external_group_vlan, // Use external group's VLAN for bitmap entries + )?; } Ok(()) } -fn rollback_remove_tables( +fn update_fwding_tables( s: &Switch, group_ip: IpAddr, - external_group_id: Option, - underlay_group_id: Option, - nat_target: Option, - sources: Option<&[IpSrc]>, + external_group_id: MulticastGroupId, + underlay_group_id: MulticastGroupId, + members: &[MulticastGroupMember], + vlan_id: Option, ) -> DpdResult<()> { match group_ip { IpAddr::V4(ipv4) => { - if let Some(srcs) = sources { - for src in srcs { - match src { - IpSrc::Exact(IpAddr::V4(src)) => { - if let Err(e) = - table::mcast::mcast_src_filter::del_ipv4_entry( - s, - Ipv4Net::new(*src, 32).unwrap(), - ipv4, - ) - { - debug!(s.log, "failed to remove IPv4 source filter during rollback: {:?}", e); - } - } - IpSrc::Subnet(subnet) => { - if let Err(e) = - table::mcast::mcast_src_filter::del_ipv4_entry( - s, *subnet, ipv4, - ) - { - debug!(s.log, "failed to remove IPv4 subnet filter during rollback: {:?}", e); - } - } - _ => {} - } - } - } - - if nat_target.is_some() { - if let Err(e) = table::mcast::mcast_nat::del_ipv4_entry(s, ipv4) - { - debug!( - s.log, - "failed to remove IPv4 NAT entry during rollback: {:?}", - e - ); - } - } - - if let Err(e) = table::mcast::mcast_route::del_ipv4_entry(s, ipv4) { - debug!( - s.log, - "failed to remove IPv4 route entry during rollback: {:?}", - e - ); - } + table::mcast::mcast_route::update_ipv4_entry(s, ipv4, vlan_id) } IpAddr::V6(ipv6) => { - if external_group_id.is_some() && underlay_group_id.is_some() { - if let Err(e) = table::mcast::mcast_egress::del_bitmap_entry( - s, - external_group_id.unwrap(), - ) { - debug!(s.log, "failed to remove external egress entry during rollback: {:?}", e); - } - } - - if let Err(e) = - table::mcast::mcast_replication::del_ipv6_entry(s, ipv6) - { - debug!(s.log, "failed to remove IPv6 replication entry during rollback: {:?}", e); - } - - if let Some(srcs) = sources { - for src in srcs { - if let IpSrc::Exact(IpAddr::V6(src)) = src { - if let Err(e) = - table::mcast::mcast_src_filter::del_ipv6_entry( - s, *src, ipv6, - ) - { - debug!(s.log, "failed to remove IPv6 source filter during rollback: {:?}", e); - } - } - } - } - - if nat_target.is_some() { - if let Err(e) = table::mcast::mcast_nat::del_ipv6_entry(s, ipv6) - { - debug!( - s.log, - "failed to remove IPv6 NAT entry during 
rollback: {:?}", - e - ); - } - } - - if let Err(e) = table::mcast::mcast_route::del_ipv6_entry(s, ipv6) { - debug!( - s.log, - "failed to remove IPv6 route entry during rollback: {:?}", - e - ); - } - } - } - - Ok(()) -} - -fn rollback_source_filters( - s: &Switch, - group_ip: IpAddr, - new_sources: Option<&[IpSrc]>, - orig_sources: Option<&[IpSrc]>, -) -> DpdResult<()> { - if let Err(e) = remove_source_filters(s, group_ip, new_sources) { - debug!( - s.log, - "failed to remove new source filters during rollback: {:?}", e - ); - } - - if let Err(e) = add_source_filters(s, group_ip, orig_sources) { - debug!( - s.log, - "failed to restore original source filters during rollback: {:?}", - e - ); - } - - Ok(()) -} - -fn rollback_restore_tables( - s: &Switch, - group_ip: IpAddr, - orig_group_info: &MulticastGroup, -) -> DpdResult<()> { - let external_group_id = orig_group_info.external_group_id(); - let underlay_group_id = orig_group_info.underlay_group_id(); - let replication_info = &orig_group_info.replication_info; - let vlan_id = orig_group_info.ext_fwding.vlan_id; - let nat_target = orig_group_info.int_fwding.nat_target; - let prev_members = orig_group_info.members.to_vec(); - - if let Some(replication_info) = replication_info { - if let Err(e) = update_replication_tables( - s, - group_ip, - external_group_id, - underlay_group_id, - replication_info, - ) { - debug!( - s.log, - "failed to restore replication settings during rollback: {:?}", - e - ); - } - } - - match group_ip { - IpAddr::V4(ipv4) => rollback_restore_nat_v4(s, ipv4, nat_target), - IpAddr::V6(ipv6) => rollback_restore_nat_v6(s, ipv6, nat_target), - } - - if let Err(e) = update_fwding_tables( - s, - group_ip, - external_group_id, - underlay_group_id, - &prev_members, - vlan_id, - ) { - debug!( - s.log, - "failed to restore VLAN settings during rollback: {:?}", e - ); - } - - Ok(()) -} - -fn rollback_restore_nat_v4( - s: &Switch, - ipv4: Ipv4Addr, - nat_target: Option, -) { - if let Some(nat) = nat_target { - if let Err(e) = table::mcast::mcast_nat::update_ipv4_entry(s, ipv4, nat) - { - debug!( - s.log, - "failed to restore IPv4 NAT settings during rollback: {:?}", e - ); + table::mcast::mcast_route::update_ipv6_entry(s, ipv6, vlan_id) + .and_then(|_| { + // Update external bitmap for external members + let external_port_bitmap = + create_port_bitmap(members, Direction::External); + table::mcast::mcast_egress::update_bitmap_entry( + s, + external_group_id, + &external_port_bitmap, + vlan_id, + ) + }) + .and_then(|_| { + // Update underlay bitmap for underlay members + let underlay_port_bitmap = + create_port_bitmap(members, Direction::Underlay); + table::mcast::mcast_egress::update_bitmap_entry( + s, + underlay_group_id, + &underlay_port_bitmap, + vlan_id, + ) + }) } - } else if let Err(e) = table::mcast::mcast_nat::del_ipv4_entry(s, ipv4) { - debug!( - s.log, - "failed to remove IPv4 NAT entry during rollback: {:?}", e - ); } } -fn rollback_restore_nat_v6( - s: &Switch, - ipv6: Ipv6Addr, - nat_target: Option, -) { - if let Some(nat) = nat_target { - if let Err(e) = table::mcast::mcast_nat::update_ipv6_entry(s, ipv6, nat) - { - debug!( - s.log, - "failed to restore IPv6 NAT settings during rollback: {:?}", e - ); +/// Create port bitmap from members filtered by direction. 
+fn create_port_bitmap( + members: &[MulticastGroupMember], + direction: Direction, +) -> table::mcast::mcast_egress::PortBitmap { + let mut port_bitmap = table::mcast::mcast_egress::PortBitmap::new(); + for member in members { + if member.direction == direction { + port_bitmap.add_port(member.port_id.as_u8()); } - } else if let Err(e) = table::mcast::mcast_nat::del_ipv6_entry(s, ipv6) { - debug!( - s.log, - "failed to remove IPv6 NAT entry during rollback: {:?}", e - ); } + port_bitmap } #[cfg(test)] @@ -2303,4 +1942,43 @@ mod tests { assert!(!pool.contains(&(MulticastGroupData::GENERATOR_START - 1))); assert!(!pool.contains(&MulticastGroupId::MAX)); } + + #[test] + fn test_paired_allocation_and_cleanup() { + let mut mcast_data = MulticastGroupData::new(); + + // Get initial pool size + let initial_pool_size = { + let pool = mcast_data.free_group_ids.lock().unwrap(); + pool.len() + }; + + // Allocate both group IDs as a pair (simulating our "always allocate both" architecture) + let external_id; + let underlay_id; + { + external_id = mcast_data.generate_group_id().unwrap(); + underlay_id = mcast_data.generate_group_id().unwrap(); + + // Verify both IDs are different + assert_ne!(external_id.id(), underlay_id.id()); + + // Pool should have 2 fewer IDs + let pool = mcast_data.free_group_ids.lock().unwrap(); + assert_eq!(pool.len(), initial_pool_size - 2); + } + + // Drop both IDs simultaneously (simulating MulticastGroup being dropped) + drop(external_id); + drop(underlay_id); + + // Both IDs should be returned to pool automatically + let final_pool_size = { + let pool = mcast_data.free_group_ids.lock().unwrap(); + pool.len() + }; + + // Pool should be back to original size + assert_eq!(final_pool_size, initial_pool_size); + } } diff --git a/dpd/src/mcast/rollback.rs b/dpd/src/mcast/rollback.rs new file mode 100644 index 0000000..a7de12c --- /dev/null +++ b/dpd/src/mcast/rollback.rs @@ -0,0 +1,796 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/ +// +// Copyright 2025 Oxide Computer Company + +//! Rollback contexts for multicast group operations. +//! +//! This module provides consistent rollback handling for multicast group +//! creation and update operations. It includes context helpers that capture +//! rollback parameters once and provide reusable error handling throughout +//! multi-step operations. + +use std::{fmt, net::IpAddr}; + +use aal::AsicOps; +use oxnet::Ipv4Net; +use slog::{debug, error}; + +use common::{nat::NatTarget, ports::PortId}; + +use super::{ + add_source_filters, remove_source_filters, update_fwding_tables, + update_replication_tables, Direction, IpSrc, LinkId, MulticastGroup, + MulticastGroupId, MulticastGroupMember, MulticastReplicationInfo, +}; +use crate::{table, types::DpdResult, Switch}; + +/// Trait providing shared rollback functionality for multicast group operations. +/// +/// This trait encapsulates common rollback operations that are needed by both +/// group creation and update contexts. +trait RollbackOps { + fn switch(&self) -> &Switch; + fn group_ip(&self) -> IpAddr; + fn external_group_id(&self) -> MulticastGroupId; + fn underlay_group_id(&self) -> MulticastGroupId; + + /// Rollback port changes (removing added ports, re-adding removed ports). + /// Each group type implements this differently - external groups are no-op, internal groups handle ports. 
+ fn rollback_ports( + &self, + added_ports: &[MulticastGroupMember], + removed_ports: &[MulticastGroupMember], + replication_info: &MulticastReplicationInfo, + ) -> DpdResult<()>; + + /// Rollback source filter changes. + fn rollback_source_filters( + &self, + new_sources: Option<&[IpSrc]>, + orig_sources: Option<&[IpSrc]>, + ) -> DpdResult<()> { + self.log_rollback_error( + "remove new source filters", + &format!("for group {}", self.group_ip()), + remove_source_filters(self.switch(), self.group_ip(), new_sources), + ); + self.log_rollback_error( + "restore original source filters", + &format!("for group {}", self.group_ip()), + add_source_filters(self.switch(), self.group_ip(), orig_sources), + ); + Ok(()) + } + + /// Rollback internal group changes (ports only, no sources). + fn rollback_internal_update( + &self, + added_ports: &[MulticastGroupMember], + removed_ports: &[MulticastGroupMember], + replication_info: &MulticastReplicationInfo, + ) -> DpdResult<()> { + self.collect_rollback_result("port changes", || { + self.rollback_ports(added_ports, removed_ports, replication_info) + }); + + Ok(()) + } + + /// Execute rollback operation and track errors for summary logging. + fn collect_rollback_result(&self, operation: &str, f: F) -> bool + where + F: FnOnce() -> DpdResult<()>, + { + match f() { + Ok(()) => false, + Err(e) => { + debug!( + self.switch().log, + "failed operation during rollback"; + "operation" => operation, + "error" => ?e, + ); + true + } + } + } + + /// Log rollback errors without propagating them. + fn log_rollback_error( + &self, + operation: &str, + context: &str, + result: Result, + ) { + if let Err(e) = result { + debug!( + self.switch().log, + "failed operation during rollback"; + "operation" => operation, + "context" => context, + "error" => ?e, + ); + } + } +} + +/// Rollback context for multicast group creation operations. 
+pub(crate) struct GroupCreateRollbackContext<'a> { + switch: &'a Switch, + group_ip: IpAddr, + external_id: MulticastGroupId, + underlay_id: MulticastGroupId, + nat_target: Option<NatTarget>, + sources: Option<&'a [IpSrc]>, +} + +impl RollbackOps for GroupCreateRollbackContext<'_> { + fn switch(&self) -> &Switch { + self.switch + } + + fn group_ip(&self) -> IpAddr { + self.group_ip + } + + fn external_group_id(&self) -> MulticastGroupId { + self.external_id + } + + fn underlay_group_id(&self) -> MulticastGroupId { + self.underlay_id + } + + fn rollback_ports( + &self, + added_ports: &[MulticastGroupMember], + _removed_ports: &[MulticastGroupMember], + _replication_info: &MulticastReplicationInfo, + ) -> DpdResult<()> { + if self.nat_target.is_some() { + Ok(()) + } else { + debug!( + self.switch.log, + "rolling back multicast group creation"; + "group" => %self.group_ip, + "ports" => added_ports.len(), + ); + + let mut first_error = None; + let mut failed_port_resolution = Vec::new(); + let mut failed_port_removal = Vec::new(); + + for member in added_ports { + let group_id = match member.direction { + Direction::External => self.external_id, + Direction::Underlay => self.underlay_id, + }; + + match self + .switch + .port_link_to_asic_id(member.port_id, member.link_id) + { + Ok(asic_id) => { + if let Err(e) = self + .switch + .asic_hdl + .mc_port_remove(group_id, asic_id) + { + error!( + self.switch.log, + "failed to remove port during rollback"; + "port" => %member.port_id, + "asic_id" => asic_id, + "group" => group_id, + "error" => ?e, + ); + + first_error.get_or_insert_with(|| e.into()); + + failed_port_removal + .push((member.port_id, member.link_id)); + } + } + Err(e) => { + error!( + self.switch.log, + "failed to resolve port link during rollback"; + "port" => %member.port_id, + "link" => %member.link_id, + "error" => ?e, + ); + + first_error.get_or_insert(e); + failed_port_resolution + .push((member.port_id, member.link_id)); + } + } + } + + // Log summary of any failures + if !failed_port_resolution.is_empty() { + error!( + self.switch.log, + "rollback failed to resolve port links"; + "count" => failed_port_resolution.len(), + "ports" => ?failed_port_resolution, + "warning" => "These ports may remain in inconsistent state", + ); + } + + if !failed_port_removal.is_empty() { + error!( + self.switch.log, + "rollback failed to remove ports from ASIC"; + "count" => failed_port_removal.len(), + "ports" => ?failed_port_removal, + "warning" => "These ports may remain in inconsistent state", + ); + } + + // Return the first error encountered, if any + match first_error { + Some(e) => Err(e), + None => Ok(()), + } + } + } +} + +impl<'a> GroupCreateRollbackContext<'a> { + /// Create rollback context for external group operations. + pub(crate) fn new_external( + switch: &'a Switch, + group_ip: IpAddr, + external_id: MulticastGroupId, + underlay_id: MulticastGroupId, + nat_target: NatTarget, + sources: Option<&'a [IpSrc]>, + ) -> Self { + Self { + switch, + group_ip, + external_id, + underlay_id, + nat_target: Some(nat_target), + sources, + } + } + + /// Create rollback context for internal group operations. + pub(crate) fn new_internal( + switch: &'a Switch, + group_ip: IpAddr, + external_id: MulticastGroupId, + underlay_id: MulticastGroupId, + ) -> Self { + Self { + switch, + group_ip, + external_id, + underlay_id, + nat_target: None, + sources: None, + } + } + + /// Perform rollback with member context and return error.
+ pub(crate) fn rollback_with_members_and_return_error<E>( + &self, + error: E, + added_members: &[(PortId, LinkId, Direction)], + replication_info: &MulticastReplicationInfo, + ) -> E { + if let Err(rollback_err) = + self.perform_group_create_rollback(added_members, replication_info) + { + error!( + self.switch.log, + "rollback failed for group creation"; + "group" => %self.group_ip, + "error" => ?rollback_err, + ); + } + error + } + + /// Perform rollback and return error. + pub(crate) fn rollback_and_return_error<E>(&self, error: E) -> E { + self.rollback_with_members_and_return_error( + error, + &[], // No members + &MulticastReplicationInfo::default(), // Dummy replication info + ) + } + + /// Perform group creation rollback. + fn perform_group_create_rollback( + &self, + added_members: &[(PortId, LinkId, Direction)], + replication_info: &MulticastReplicationInfo, + ) -> DpdResult<()> { + let added_members_converted: Vec<MulticastGroupMember> = added_members + .iter() + .map(|(port_id, link_id, direction)| MulticastGroupMember { + port_id: *port_id, + link_id: *link_id, + direction: *direction, + }) + .collect(); + + self.collect_rollback_result("port removal", || { + self.rollback_ports(&added_members_converted, &[], replication_info) + }); + self.collect_rollback_result("ASIC group deletion", || { + self.remove_groups() + }); + self.collect_rollback_result("table cleanup", || self.remove_tables()); + + Ok(()) + } + + /// Remove multicast groups from ASIC. + fn remove_groups(&self) -> DpdResult<()> { + // External groups don't destroy ASIC groups (they're shared with internal group) + if self.nat_target.is_none() { + self.log_rollback_error( + "remove external multicast group", + &format!( + "for IP {} with ID {}", + self.group_ip, self.external_id + ), + self.switch.asic_hdl.mc_group_destroy(self.external_id), + ); + self.log_rollback_error( + "remove underlay multicast group", + &format!( + "for IP {} with ID {}", + self.group_ip, self.underlay_id + ), + self.switch.asic_hdl.mc_group_destroy(self.underlay_id), + ); + } + Ok(()) + } + + /// Remove table entries.
+ fn remove_tables(&self) -> DpdResult<()> { + match self.group_ip { + IpAddr::V4(ipv4) => { + // IPv4 groups are always external-only and never create bitmap entries + // (only IPv6 internal groups with replication create bitmap entries) + + if let Some(srcs) = self.sources { + for src in srcs { + match src { + IpSrc::Exact(IpAddr::V4(src)) => { + self.log_rollback_error( + "delete IPv4 source filter entry", + &format!("for source {src} and group {ipv4}"), + table::mcast::mcast_src_filter::del_ipv4_entry( + self.switch, + Ipv4Net::new(*src, 32).unwrap(), + ipv4, + ), + ); + } + IpSrc::Subnet(subnet) => { + self.log_rollback_error( + "delete IPv4 source filter subnet entry", + &format!("for subnet {subnet} and group {ipv4}"), + table::mcast::mcast_src_filter::del_ipv4_entry( + self.switch, *subnet, ipv4, + ), + ); + } + _ => {} + } + } + } + if self.nat_target.is_some() { + self.log_rollback_error( + "delete IPv4 NAT entry", + &format!("for group {ipv4}"), + table::mcast::mcast_nat::del_ipv4_entry( + self.switch, + ipv4, + ), + ); + } + self.log_rollback_error( + "delete IPv4 route entry", + &format!("for group {ipv4}"), + table::mcast::mcast_route::del_ipv4_entry( + self.switch, + ipv4, + ), + ); + } + IpAddr::V6(ipv6) => { + // Clean up external bitmap entry only if both external and underlay groups exist + // (bitmap entries are only created for internal groups with both group types) + self.log_rollback_error( + "delete IPv6 egress bitmap entry", + &format!("for external group {}", self.external_id), + table::mcast::mcast_egress::del_bitmap_entry( + self.switch, + self.external_id, + ), + ); + + self.log_rollback_error( + "delete IPv6 replication entry", + &format!("for group {ipv6}"), + table::mcast::mcast_replication::del_ipv6_entry( + self.switch, + ipv6, + ), + ); + + if let Some(srcs) = self.sources { + for src in srcs { + if let IpSrc::Exact(IpAddr::V6(src)) = src { + self.log_rollback_error( + "delete IPv6 source filter entry", + &format!("for source {src} and group {ipv6}"), + table::mcast::mcast_src_filter::del_ipv6_entry( + self.switch, + *src, + ipv6, + ), + ); + } + } + } + if self.nat_target.is_some() { + self.log_rollback_error( + "delete IPv6 NAT entry", + &format!("for group {ipv6}"), + table::mcast::mcast_nat::del_ipv6_entry( + self.switch, + ipv6, + ), + ); + } + self.log_rollback_error( + "delete IPv6 route entry", + &format!("for group {ipv6}"), + table::mcast::mcast_route::del_ipv6_entry( + self.switch, + ipv6, + ), + ); + } + } + Ok(()) + } +} + +/// Rollback context for multicast group update operations. 
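+///
+/// # Example (illustrative sketch)
+///
+/// A minimal sketch, assuming a `switch`, a `group_ip`, and the pre-update
+/// `original_group` snapshot are in scope; the update step shown is a
+/// placeholder for any fallible table write performed during an update:
+///
+/// ```ignore
+/// let rollback = GroupUpdateRollbackContext::new(switch, group_ip, &original_group);
+/// // On failure, the original table state is restored and the error returned.
+/// fallible_update_step()
+///     .map_err(|e| rollback.rollback_and_restore(e))?;
+/// ```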
+pub(crate) struct GroupUpdateRollbackContext<'a> { + switch: &'a Switch, + group_ip: IpAddr, + original_group: &'a MulticastGroup, +} + +impl RollbackOps for GroupUpdateRollbackContext<'_> { + fn switch(&self) -> &Switch { + self.switch + } + + fn group_ip(&self) -> IpAddr { + self.group_ip + } + + fn external_group_id(&self) -> MulticastGroupId { + self.original_group.external_group_id() + } + + fn underlay_group_id(&self) -> MulticastGroupId { + self.original_group.underlay_group_id() + } + + fn rollback_ports( + &self, + added_ports: &[MulticastGroupMember], + removed_ports: &[MulticastGroupMember], + replication_info: &MulticastReplicationInfo, + ) -> DpdResult<()> { + // External groups don't need port rollback + if self.original_group.replication_info.is_none() { + return Ok(()); + } + + // Internal group - perform actual port rollback + debug!( + self.switch.log, + "rolling back multicast group update"; + "group" => ?self.group_ip, + "added_ports" => added_ports.len(), + "removed_ports" => removed_ports.len(), + ); + + let mut first_error = None; + let mut failed_port_resolution = Vec::new(); + let mut failed_port_removal = Vec::new(); + let mut failed_port_addition = Vec::new(); + + // Remove added ports + for member in added_ports { + let group_id = match member.direction { + Direction::External => self.external_group_id(), + Direction::Underlay => self.underlay_group_id(), + }; + + match self + .switch + .port_link_to_asic_id(member.port_id, member.link_id) + { + Ok(asic_id) => { + if let Err(e) = + self.switch.asic_hdl.mc_port_remove(group_id, asic_id) + { + error!( + self.switch.log, + "failed to remove port during rollback"; + "port" => %member.port_id, + "asic_id" => asic_id, + "group" => group_id, + "error" => ?e, + ); + + first_error.get_or_insert_with(|| e.into()); + failed_port_removal + .push((member.port_id, member.link_id)); + } + } + Err(e) => { + error!( + self.switch.log, + "failed to resolve port link during rollback"; + "port" => %member.port_id, + "link" => %member.link_id, + "error" => ?e, + ); + + first_error.get_or_insert(e); + failed_port_resolution + .push((member.port_id, member.link_id)); + } + } + } + + // Re-add removed ports + for member in removed_ports { + let group_id = match member.direction { + Direction::External => self.external_group_id(), + Direction::Underlay => self.underlay_group_id(), + }; + + match self + .switch + .port_link_to_asic_id(member.port_id, member.link_id) + { + Ok(asic_id) => { + if let Err(e) = self.switch.asic_hdl.mc_port_add( + group_id, + asic_id, + replication_info.rid, + replication_info.level1_excl_id, + ) { + error!( + self.switch.log, + "failed to add port during rollback"; + "port" => %member.port_id, + "asic_id" => asic_id, + "group" => group_id, + "error" => ?e, + ); + + first_error.get_or_insert_with(|| e.into()); + failed_port_addition + .push((member.port_id, member.link_id)); + } + } + Err(e) => { + error!( + self.switch.log, + "failed to resolve port link during rollback"; + "port" => %member.port_id, + "link" => %member.link_id, + "error" => ?e, + ); + + first_error.get_or_insert(e); + failed_port_resolution + .push((member.port_id, member.link_id)); + } + } + } + + // Log summary of any failures + if !failed_port_resolution.is_empty() { + error!( + self.switch.log, + "rollback failed to resolve port links"; + "count" => failed_port_resolution.len(), + "ports" => ?failed_port_resolution, + "warning" => "These ports may remain in inconsistent state", + ); + } + + if !failed_port_removal.is_empty() { + error!( 
+ self.switch.log, + "rollback failed to remove ports from ASIC"; + "count" => failed_port_removal.len(), + "ports" => ?failed_port_removal, + "warning" => "These ports may remain in inconsistent state", + ); + } + + if !failed_port_addition.is_empty() { + error!( + self.switch.log, + "rollback failed to re-add ports to ASIC"; + "count" => failed_port_addition.len(), + "ports" => ?failed_port_addition, + "warning" => "These ports may remain in inconsistent state", + ); + } + + // Return the first error encountered, if any + match first_error { + Some(e) => Err(e), + None => Ok(()), + } + } +} + +impl<'a> GroupUpdateRollbackContext<'a> { + /// Create rollback context for group update operations. + pub(crate) fn new( + switch: &'a Switch, + group_ip: IpAddr, + original_group: &'a MulticastGroup, + ) -> Self { + Self { + switch, + group_ip, + original_group, + } + } + + /// Restore tables and return error. + pub(crate) fn rollback_and_restore<E>(&self, error: E) -> E { + if let Err(rollback_err) = self.restore_tables() { + error!( + self.switch.log, + "failed to restore tables during rollback"; + "group" => %self.group_ip, + "error" => ?rollback_err, + ); + } + error + } + + /// Restore table entries to original state. + fn restore_tables(&self) -> DpdResult<()> { + let external_group_id = self.original_group.external_group_id(); + let underlay_group_id = self.original_group.underlay_group_id(); + let replication_info = &self.original_group.replication_info; + let vlan_id = self.original_group.ext_fwding.vlan_id; + let nat_target = self.original_group.int_fwding.nat_target; + let prev_members = self.original_group.members.to_vec(); + + if let Some(replication_info) = replication_info { + self.log_rollback_error( + "restore replication settings", + &format!("for group {}", self.group_ip), + update_replication_tables( + self.switch, + self.group_ip, + external_group_id, + underlay_group_id, + replication_info, + ), + ); + } + + // Restore NAT settings + match (self.group_ip, nat_target) { + (IpAddr::V4(ipv4), Some(nat)) => { + self.log_rollback_error( + "restore IPv4 NAT settings", + &format!("for group {}", self.group_ip), + table::mcast::mcast_nat::update_ipv4_entry( + self.switch, + ipv4, + nat, + ), + ); + } + (IpAddr::V6(ipv6), Some(nat)) => { + self.log_rollback_error( + "restore IPv6 NAT settings", + &format!("for group {}", self.group_ip), + table::mcast::mcast_nat::update_ipv6_entry( + self.switch, + ipv6, + nat, + ), + ); + } + (IpAddr::V4(ipv4), None) => { + self.log_rollback_error( + "remove IPv4 NAT settings", + &format!("for group {}", self.group_ip), + table::mcast::mcast_nat::del_ipv4_entry(self.switch, ipv4), + ); + } + (IpAddr::V6(ipv6), None) => { + self.log_rollback_error( + "remove IPv6 NAT settings", + &format!("for group {}", self.group_ip), + table::mcast::mcast_nat::del_ipv6_entry(self.switch, ipv6), + ); + } + } + + self.log_rollback_error( + "restore VLAN settings", + &format!("for group {}", self.group_ip), + update_fwding_tables( + self.switch, + self.group_ip, + external_group_id, + underlay_group_id, + &prev_members, + vlan_id, + ), + ); + Ok(()) + } + + /// Rollback external group updates. + pub(crate) fn rollback_external<E>( + &self, + error: E, + new_sources: Option<&[IpSrc]>, + ) -> E { + if new_sources.is_some() { + self.collect_rollback_result("source filter restoration", || { + self.rollback_source_filters(new_sources, None) + }); + } + error + } + + /// Rollback internal group updates.
+ pub(crate) fn rollback_internal<E>( + &self, + error: E, + added_ports: &[MulticastGroupMember], + removed_ports: &[MulticastGroupMember], + ) -> E { + // Get replication info from original group + if let Some(replication_info) = &self.original_group.replication_info { + if let Err(rollback_err) = self.rollback_internal_update( + added_ports, + removed_ports, + replication_info, + ) { + error!( + self.switch.log, + "rollback failed for internal group update"; + "group" => %self.group_ip, + "error" => ?rollback_err, + ); + } + } + error + } +} diff --git a/dpd/src/mcast/validate.rs b/dpd/src/mcast/validate.rs index e12f038..9f0fe54 100644 --- a/dpd/src/mcast/validate.rs +++ b/dpd/src/mcast/validate.rs @@ -12,6 +12,12 @@ use oxnet::{Ipv4Net, Ipv6Net}; use super::IpSrc; use crate::types::{DpdError, DpdResult}; +/// Check if an IP address is unicast (emulating the unstable std::net API). +/// For IP addresses, unicast means simply "not multicast". +const fn is_unicast(addr: IpAddr) -> bool { + !addr.is_multicast() +} + /// Validates if a multicast address is allowed for group creation. /// /// Returns a [`DpdResult`] indicating whether the address is valid or not. @@ -19,6 +25,10 @@ pub(crate) fn validate_multicast_address( addr: IpAddr, sources: Option<&[IpSrc]>, ) -> DpdResult<()> { + // First validate that source addresses are unicast + validate_source_addresses(sources)?; + + // Then validate the multicast address itself match addr { IpAddr::V4(ipv4) => validate_ipv4_multicast(ipv4, sources), IpAddr::V6(ipv6) => validate_ipv6_multicast(ipv6, sources), @@ -38,7 +48,8 @@ pub(crate) fn validate_nat_target(nat_target: NatTarget) -> DpdResult<()> { if !internal_nat_ip.is_admin_scoped_multicast() { return Err(DpdError::Invalid(format!( - "NAT target internal IP address {} is not a valid site/admin-local or org-scoped multicast address", + "NAT target internal IP address {} is not a valid \ + site/admin-local or org-scoped multicast address", nat_target.internal_ip ))); } @@ -70,8 +81,7 @@ fn validate_ipv4_multicast( // Verify this is actually a multicast address if !addr.is_multicast() { return Err(DpdError::Invalid(format!( - "{} is not a multicast address", - addr + "{addr} is not a multicast address", ))); } @@ -79,17 +89,16 @@ fn validate_ipv4_multicast( if is_ssm(addr.into()) { if sources.is_none() || sources.unwrap().is_empty() { return Err(DpdError::Invalid(format!( - "{} is a Source-Specific Multicast address and requires at least one source to be defined", - addr + "{addr} is a Source-Specific Multicast address and \ + requires at least one source to be defined", ))); } - // If we have sources defined for an SSM address, it's valid + // If sources are defined for an SSM address, it's valid return Ok(()); } else if sources.is_some() { // If this is not SSM but sources are defined, it's invalid return Err(DpdError::Invalid(format!( - "{} is not a Source-Specific Multicast address but sources were provided", - addr + "{addr} is not a Source-Specific Multicast address but sources were provided", ))); } @@ -107,8 +116,7 @@ fn validate_ipv4_multicast( for subnet in &reserved_subnets { if subnet.contains(addr) { return Err(DpdError::Invalid(format!( - "{} is in the reserved multicast subnet {}", - addr, subnet, + "{addr} is in the reserved multicast subnet {subnet}", ))); } } @@ -122,8 +130,7 @@ fn validate_ipv4_multicast( if specific_reserved.contains(&addr) { return Err(DpdError::Invalid(format!( - "{} is a specifically reserved multicast address", - addr + "{addr} is a specifically reserved 
multicast address", ))); } @@ -137,8 +144,7 @@ fn validate_ipv6_multicast( ) -> DpdResult<()> { if !addr.is_multicast() { return Err(DpdError::Invalid(format!( - "{} is not a multicast address", - addr + "{addr} is not a multicast address", ))); } @@ -146,17 +152,16 @@ fn validate_ipv6_multicast( if is_ssm(addr.into()) { if sources.is_none() || sources.unwrap().is_empty() { return Err(DpdError::Invalid(format!( - "{} is an IPv6 Source-Specific Multicast address (ff3x::/32) and requires at least one source to be defined", - addr + "{addr} is an IPv6 Source-Specific Multicast address (ff3x::/32) \ + and requires at least one source to be defined", ))); } - // If we have sources defined for an IPv6 SSM address, it's valid + // If sources are defined for an IPv6 SSM address, it's valid return Ok(()); } else if sources.is_some() { // If this is not SSM but sources are defined, it's invalid return Err(DpdError::Invalid(format!( - "{} is not a Source-Specific Multicast address but sources were provided", - addr + "{addr} is not a Source-Specific Multicast address but sources were provided", ))); } @@ -174,8 +179,7 @@ fn validate_ipv6_multicast( for subnet in &reserved_subnets { if subnet.contains(addr) { return Err(DpdError::Invalid(format!( - "{} is in the reserved multicast subnet {}", - addr, subnet + "{addr} is in the reserved multicast subnet {subnet}", ))); } } @@ -189,14 +193,92 @@ pub(crate) fn validate_not_admin_scoped_ipv6(addr: IpAddr) -> DpdResult<()> { if oxnet::Ipv6Net::new_unchecked(ipv6, 128).is_admin_scoped_multicast() { return Err(DpdError::Invalid(format!( - "{} is an admin-scoped multicast address and must be created via the internal multicast API", - addr + "{addr} is an admin-scoped multicast address and \ + must be created via the internal multicast API", ))); } } Ok(()) } +/// Validates that source IP addresses are unicast. +pub(crate) fn validate_source_addresses( + sources: Option<&[IpSrc]>, +) -> DpdResult<()> { + let sources = match sources { + Some(sources) => sources, + None => return Ok(()), + }; + + for source in sources { + match source { + IpSrc::Exact(ip) => validate_exact_source_address(*ip)?, + IpSrc::Subnet(subnet) => validate_ipv4_source_subnet(*subnet)?, + } + } + Ok(()) +} + +/// Validates a single exact source IP address. +fn validate_exact_source_address(ip: IpAddr) -> DpdResult<()> { + // First check if it's unicast (excludes multicast) + if !is_unicast(ip) { + return Err(DpdError::Invalid(format!( + "Source IP {ip} must be a unicast address (multicast addresses are not allowed)", + ))); + } + + // Check for other problematic address types + match ip { + IpAddr::V4(ipv4) => validate_ipv4_source_address(ipv4), + IpAddr::V6(ipv6) => validate_ipv6_source_address(ipv6), + } +} + +/// Validates IPv4 source addresses for problematic types. +fn validate_ipv4_source_address(ipv4: Ipv4Addr) -> DpdResult<()> { + if ipv4.is_loopback() || ipv4.is_broadcast() || ipv4.is_unspecified() { + return Err(DpdError::Invalid(format!( + "Source IP {ipv4} is not a valid source address \ + (loopback, broadcast, and unspecified addresses are not allowed)", + ))); + } + Ok(()) +} + +/// Validates IPv6 source addresses for problematic types. 
+fn validate_ipv6_source_address(ipv6: Ipv6Addr) -> DpdResult<()> { + if ipv6.is_loopback() || ipv6.is_unspecified() { + return Err(DpdError::Invalid(format!( + "Source IP {ipv6} is not a valid source address \ + (loopback and unspecified addresses are not allowed)", + ))); + } + Ok(()) +} + +/// Validates IPv4 source subnets for problematic address ranges. +fn validate_ipv4_source_subnet(subnet: Ipv4Net) -> DpdResult<()> { + let addr = subnet.addr(); + + // Reject subnets that contain multicast addresses + if addr.is_multicast() { + return Err(DpdError::Invalid(format!( + "Source subnet {subnet} contains multicast addresses and cannot be used as a source filter", + ))); + } + + // Reject subnets with loopback or broadcast addresses + if addr.is_loopback() || addr.is_broadcast() { + return Err(DpdError::Invalid(format!( + "Source subnet {subnet} contains invalid address types \ + (loopback/broadcast) for source filtering", + ))); + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; @@ -457,4 +539,116 @@ mod tests { assert!(validate_nat_target(mcast_nat_target).is_ok()); } + + #[test] + fn test_validate_source_addresses() { + // Valid unicast IPv4 sources + let valid_ipv4_sources = vec![ + IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))), + IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))), + ]; + assert!(validate_source_addresses(Some(&valid_ipv4_sources)).is_ok()); + + // Valid unicast IPv6 sources + let valid_ipv6_sources = vec![IpSrc::Exact(IpAddr::V6(Ipv6Addr::new( + 0x2001, 0xdb8, 0, 0, 0, 0, 0, 1, + )))]; + assert!(validate_source_addresses(Some(&valid_ipv6_sources)).is_ok()); + + // Valid subnet sources + let valid_subnet_sources = vec![ + IpSrc::Subnet(Ipv4Net::from_str("192.168.1.0/24").unwrap()), + IpSrc::Subnet(Ipv4Net::from_str("10.0.0.0/8").unwrap()), + ]; + assert!(validate_source_addresses(Some(&valid_subnet_sources)).is_ok()); + + // Invalid multicast IPv4 source + let invalid_mcast_ipv4 = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(224, 1, 1, 1)))]; + assert!(validate_source_addresses(Some(&invalid_mcast_ipv4)).is_err()); + + // Invalid multicast IPv6 source + let invalid_mcast_ipv6 = vec![IpSrc::Exact(IpAddr::V6(Ipv6Addr::new( + 0xff0e, 0, 0, 0, 0, 0, 0, 1, + )))]; + assert!(validate_source_addresses(Some(&invalid_mcast_ipv6)).is_err()); + + // Invalid broadcast IPv4 source + let invalid_broadcast = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(255, 255, 255, 255)))]; + assert!(validate_source_addresses(Some(&invalid_broadcast)).is_err()); + + // Invalid loopback IPv4 source + let invalid_loopback_ipv4 = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)))]; + assert!( + validate_source_addresses(Some(&invalid_loopback_ipv4)).is_err() + ); + + // Invalid loopback IPv6 source + let invalid_loopback_ipv6 = vec![IpSrc::Exact(IpAddr::V6( + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), + ))]; + assert!( + validate_source_addresses(Some(&invalid_loopback_ipv6)).is_err() + ); + + // Invalid multicast subnet + let invalid_mcast_subnet = + vec![IpSrc::Subnet(Ipv4Net::from_str("224.0.0.0/24").unwrap())]; + assert!(validate_source_addresses(Some(&invalid_mcast_subnet)).is_err()); + + // Invalid loopback subnet + let invalid_loopback_subnet = + vec![IpSrc::Subnet(Ipv4Net::from_str("127.0.0.0/8").unwrap())]; + assert!( + validate_source_addresses(Some(&invalid_loopback_subnet)).is_err() + ); + + // No sources should be valid + assert!(validate_source_addresses(None).is_ok()); + + // Empty sources should be valid + assert!(validate_source_addresses(Some(&[])).is_ok()); 
+ } + + #[test] + fn test_address_validation_with_source_validation() { + // Test that multicast address validation now includes source validation + + // Valid case: SSM address with valid unicast sources + let valid_sources = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))]; + assert!(validate_multicast_address( + IpAddr::V4(Ipv4Addr::new(232, 1, 2, 3)), + Some(&valid_sources) + ) + .is_ok()); + + // Invalid case: SSM address with multicast source (should fail source validation first) + let invalid_mcast_sources = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(224, 1, 1, 1)))]; + let result = validate_multicast_address( + IpAddr::V4(Ipv4Addr::new(232, 1, 2, 3)), + Some(&invalid_mcast_sources), + ); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("must be a unicast address")); + + // Invalid case: SSM address with loopback source + let invalid_loopback_sources = + vec![IpSrc::Exact(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)))]; + let result = validate_multicast_address( + IpAddr::V4(Ipv4Addr::new(232, 1, 2, 3)), + Some(&invalid_loopback_sources), + ); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("is not a valid source address")); + } } diff --git a/dpd/src/table/mcast/mcast_replication.rs b/dpd/src/table/mcast/mcast_replication.rs index 64dd79f..a1e60ef 100644 --- a/dpd/src/table/mcast/mcast_replication.rs +++ b/dpd/src/table/mcast/mcast_replication.rs @@ -43,12 +43,12 @@ enum Ipv6Action { /// - external_mcast_grp: for replication to external/customer ports (mcast_grp_a) /// - underlay_mcast_grp: for replication to underlay/infrastructure ports (mcast_grp_b) /// -/// Both groups are optional depending on the group's member configuration. +/// Both groups are always allocated. 
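+///
+/// Illustrative call shape only (all values below are placeholders for
+/// whatever the group-creation path supplies):
+///
+/// ```ignore
+/// add_ipv6_entry(s, dst_addr, underlay_id, external_id, rid, 0, 0)?;
+/// ```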
pub(crate) fn add_ipv6_entry( s: &Switch, dst_addr: Ipv6Addr, - underlay_mcast_grp: Option<MulticastGroupId>, - external_mcast_grp: Option<MulticastGroupId>, + underlay_mcast_grp: MulticastGroupId, + external_mcast_grp: MulticastGroupId, replication_id: u16, level1_excl_id: u16, level2_excl_id: u16, @@ -59,18 +59,11 @@ pub(crate) fn add_ipv6_entry( )); } - if underlay_mcast_grp.is_none() && external_mcast_grp.is_none() { - return Err(DpdError::McastGroupFailure( - "neither underlay nor external multicast group specified" - .to_string(), - )); - } - let match_key = Ipv6MatchKey::new(dst_addr); let action_data = Ipv6Action::ConfigureIpv6 { - mcast_grp_a: external_mcast_grp.unwrap_or(0), - mcast_grp_b: underlay_mcast_grp.unwrap_or(0), + mcast_grp_a: external_mcast_grp, + mcast_grp_b: underlay_mcast_grp, rid: replication_id, level1_excl_id, level2_excl_id, @@ -92,8 +85,8 @@ pub(crate) fn update_ipv6_entry( s: &Switch, dst_addr: Ipv6Addr, - underlay_mcast_grp: Option<MulticastGroupId>, - external_mcast_grp: Option<MulticastGroupId>, + underlay_mcast_grp: MulticastGroupId, + external_mcast_grp: MulticastGroupId, replication_id: u16, level1_excl_id: u16, level2_excl_id: u16, @@ -104,18 +97,11 @@ pub(crate) fn update_ipv6_entry( )); } - if underlay_mcast_grp.is_none() && external_mcast_grp.is_none() { - return Err(DpdError::McastGroupFailure( - "neither underlay nor external multicast group specified" - .to_string(), - )); - } - let match_key = Ipv6MatchKey::new(dst_addr); let action_data = Ipv6Action::ConfigureIpv6 { - mcast_grp_a: external_mcast_grp.unwrap_or(0), - mcast_grp_b: underlay_mcast_grp.unwrap_or(0), + mcast_grp_a: external_mcast_grp, + mcast_grp_b: underlay_mcast_grp, rid: replication_id, level1_excl_id, level2_excl_id, diff --git a/dpd/src/types.rs b/dpd/src/types.rs index c764fd4..36c44f3 100644 --- a/dpd/src/types.rs +++ b/dpd/src/types.rs @@ -299,3 +299,9 @@ impl convert::From for DpdError { DpdError::Invalid(err.to_string()) } } + +impl convert::From<dpd_types::mcast::Error> for DpdError { + fn from(err: dpd_types::mcast::Error) -> Self { + DpdError::Invalid(err.to_string()) + } +} diff --git a/openapi/dpd.json b/openapi/dpd.json index b742950..043692f 100644 --- a/openapi/dpd.json +++ b/openapi/dpd.json @@ -1105,7 +1105,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/MulticastGroupResponse" + "$ref": "#/components/schemas/MulticastGroupExternalResponse" } } } @@ -1151,7 +1151,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/MulticastGroupResponse" + "$ref": "#/components/schemas/MulticastGroupExternalResponse" } } } @@ -1213,39 +1213,6 @@ "required": [] } }, - "post": { - "summary": "Create an internal multicast group configuration.", - "description": "Internal groups are used for admin-scoped IPv6 multicast traffic that requires replication infrastructure. 
These groups support both external and underlay members with full replication capabilities.", - "operationId": "multicast_group_create", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MulticastGroupCreateEntry" - } - } - }, - "required": true - }, - "responses": { - "201": { - "description": "successful creation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MulticastGroupResponse" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, "delete": { "summary": "Reset all multicast group configurations.", "operationId": "multicast_reset", @@ -1296,50 +1263,6 @@ } } }, - "put": { - "summary": "Update an internal multicast group configuration for a given group IP address.", - "description": "Internal groups are used for admin-scoped IPv6 multicast traffic that requires replication infrastructure with external and underlay members.", - "operationId": "multicast_group_update", - "parameters": [ - { - "in": "path", - "name": "group_ip", - "required": true, - "schema": { - "type": "string", - "format": "ip" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MulticastGroupUpdateEntry" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/MulticastGroupResponse" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, "delete": { "summary": "Delete a multicast group configuration by IP address.", "operationId": "multicast_group_delete", @@ -1449,6 +1372,119 @@ } } }, + "/multicast/underlay-groups": { + "post": { + "summary": "Create an underlay (internal) multicast group configuration.", + "description": "Underlay groups are used for admin-scoped IPv6 multicast traffic that requires replication infrastructure. 
These groups support both external and underlay members with full replication capabilities.", + "operationId": "multicast_group_create_underlay", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupCreateUnderlayEntry" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupUnderlayResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/multicast/underlay-groups/{group_ip}": { + "get": { + "summary": "Get an underlay (internal) multicast group configuration by admin-scoped", + "description": "IPv6 address.\n\nUnderlay groups handle admin-scoped IPv6 multicast traffic with replication infrastructure for external and underlay members.", + "operationId": "multicast_group_get_underlay", + "parameters": [ + { + "in": "path", + "name": "group_ip", + "required": true, + "schema": { + "$ref": "#/components/schemas/AdminScopedIpv6" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupUnderlayResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "summary": "Update an underlay (internal) multicast group configuration for a given", + "description": "group IP address.\n\nUnderlay groups are used for admin-scoped IPv6 multicast traffic that requires replication infrastructure with external and underlay members.", + "operationId": "multicast_group_update_underlay", + "parameters": [ + { + "in": "path", + "name": "group_ip", + "required": true, + "schema": { + "$ref": "#/components/schemas/AdminScopedIpv6" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupUpdateUnderlayEntry" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MulticastGroupUnderlayResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/multicast/untagged": { "delete": { "summary": "Delete all multicast groups (and associated routes) without a tag.", @@ -5206,6 +5242,11 @@ }, "components": { "schemas": { + "AdminScopedIpv6": { + "description": "A validated admin-scoped IPv6 multicast address.\n\nAdmin-scoped addresses are ff04::/16, ff05::/16, or ff08::/16. 
These are used for internal/underlay multicast groups.", + "type": "string", + "format": "ipv6" + }, "AnLtStatus": { "description": "A collection of the data involved in the autonegiation/link-training process", "type": "object", @@ -7668,25 +7709,49 @@ } } }, - "MulticastGroupCreateEntry": { - "description": "A multicast group configuration for POST requests for internal (to the rack) groups.", + "MulticastGroupCreateExternalEntry": { + "description": "A multicast group configuration for POST requests for external (to the rack) groups.", "type": "object", "properties": { + "external_forwarding": { + "$ref": "#/components/schemas/ExternalForwarding" + }, "group_ip": { "type": "string", - "format": "ipv6" + "format": "ip" }, - "members": { + "internal_forwarding": { + "$ref": "#/components/schemas/InternalForwarding" + }, + "sources": { + "nullable": true, "type": "array", "items": { - "$ref": "#/components/schemas/MulticastGroupMember" + "$ref": "#/components/schemas/IpSrc" } }, - "sources": { + "tag": { "nullable": true, + "type": "string" + } + }, + "required": [ + "external_forwarding", + "group_ip", + "internal_forwarding" + ] + }, + "MulticastGroupCreateUnderlayEntry": { + "description": "A multicast group configuration for POST requests for internal (to the rack) groups.", + "type": "object", + "properties": { + "group_ip": { + "$ref": "#/components/schemas/AdminScopedIpv6" + }, + "members": { "type": "array", "items": { - "$ref": "#/components/schemas/IpSrc" + "$ref": "#/components/schemas/MulticastGroupMember" } }, "tag": { @@ -7699,16 +7764,24 @@ "members" ] }, - "MulticastGroupCreateExternalEntry": { - "description": "A multicast group configuration for POST requests for external (to the rack) groups.", + "MulticastGroupExternalResponse": { + "description": "Response structure for external multicast group operations. 
These groups handle IPv4 and non-admin IPv6 multicast via NAT targets.", "type": "object", "properties": { + "external_forwarding": { + "$ref": "#/components/schemas/ExternalForwarding" + }, + "external_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, "group_ip": { "type": "string", "format": "ip" }, - "nat_target": { - "$ref": "#/components/schemas/NatTarget" + "internal_forwarding": { + "$ref": "#/components/schemas/InternalForwarding" }, "sources": { "nullable": true, @@ -7720,17 +7793,13 @@ "tag": { "nullable": true, "type": "string" - }, - "vlan_id": { - "nullable": true, - "type": "integer", - "format": "uint16", - "minimum": 0 } }, "required": [ + "external_forwarding", + "external_group_id", "group_ip", - "nat_target" + "internal_forwarding" ] }, "MulticastGroupMember": { @@ -7754,54 +7823,95 @@ ] }, "MulticastGroupResponse": { - "description": "Response structure for multicast group operations.", - "type": "object", - "properties": { - "ext_fwding": { - "$ref": "#/components/schemas/ExternalForwarding" - }, - "external_group_id": { - "nullable": true, - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "group_ip": { - "type": "string", - "format": "ip" - }, - "int_fwding": { - "$ref": "#/components/schemas/InternalForwarding" - }, - "members": { - "type": "array", - "items": { - "$ref": "#/components/schemas/MulticastGroupMember" - } - }, - "sources": { - "nullable": true, - "type": "array", - "items": { - "$ref": "#/components/schemas/IpSrc" - } - }, - "tag": { - "nullable": true, - "type": "string" + "description": "Unified response type for operations that return mixed group types.", + "oneOf": [ + { + "description": "Response structure for underlay/internal multicast group operations. These groups handle admin-scoped IPv6 multicast with full replication.", + "type": "object", + "properties": { + "external_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "group_ip": { + "$ref": "#/components/schemas/AdminScopedIpv6" + }, + "kind": { + "type": "string", + "enum": [ + "underlay" + ] + }, + "members": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MulticastGroupMember" + } + }, + "tag": { + "nullable": true, + "type": "string" + }, + "underlay_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "external_group_id", + "group_ip", + "kind", + "members", + "underlay_group_id" + ] }, - "underlay_group_id": { - "nullable": true, - "type": "integer", - "format": "uint16", - "minimum": 0 + { + "description": "Response structure for external multicast group operations. 
These groups handle IPv4 and non-admin IPv6 multicast via NAT targets.", + "type": "object", + "properties": { + "external_forwarding": { + "$ref": "#/components/schemas/ExternalForwarding" + }, + "external_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "group_ip": { + "type": "string", + "format": "ip" + }, + "internal_forwarding": { + "$ref": "#/components/schemas/InternalForwarding" + }, + "kind": { + "type": "string", + "enum": [ + "external" + ] + }, + "sources": { + "nullable": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/IpSrc" + } + }, + "tag": { + "nullable": true, + "type": "string" + } + }, + "required": [ + "external_forwarding", + "external_group_id", + "group_ip", + "internal_forwarding", + "kind" + ] } - }, - "required": [ - "ext_fwding", - "group_ip", - "int_fwding", - "members" ] }, "MulticastGroupResponseResultsPage": { @@ -7825,38 +7935,50 @@ "items" ] }, - "MulticastGroupUpdateEntry": { - "description": "Represents a multicast replication entry for PUT requests for internal (to the rack) groups.", + "MulticastGroupUnderlayResponse": { + "description": "Response structure for underlay/internal multicast group operations. These groups handle admin-scoped IPv6 multicast with full replication.", "type": "object", "properties": { + "external_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "group_ip": { + "$ref": "#/components/schemas/AdminScopedIpv6" + }, "members": { "type": "array", "items": { "$ref": "#/components/schemas/MulticastGroupMember" } }, - "sources": { - "nullable": true, - "type": "array", - "items": { - "$ref": "#/components/schemas/IpSrc" - } - }, "tag": { "nullable": true, "type": "string" + }, + "underlay_group_id": { + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "members" + "external_group_id", + "group_ip", + "members", + "underlay_group_id" ] }, "MulticastGroupUpdateExternalEntry": { "description": "A multicast group update entry for PUT requests for external (to the rack) groups.", "type": "object", "properties": { - "nat_target": { - "$ref": "#/components/schemas/NatTarget" + "external_forwarding": { + "$ref": "#/components/schemas/ExternalForwarding" + }, + "internal_forwarding": { + "$ref": "#/components/schemas/InternalForwarding" }, "sources": { "nullable": true, @@ -7868,16 +7990,30 @@ "tag": { "nullable": true, "type": "string" + } + }, + "required": [ + "external_forwarding", + "internal_forwarding" + ] + }, + "MulticastGroupUpdateUnderlayEntry": { + "description": "Represents a multicast replication entry for PUT requests for internal (to the rack) groups.", + "type": "object", + "properties": { + "members": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MulticastGroupMember" + } }, - "vlan_id": { + "tag": { "nullable": true, - "type": "integer", - "format": "uint16", - "minimum": 0 + "type": "string" } }, "required": [ - "nat_target" + "members" ] }, "NatTarget": {