Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 7 additions & 1 deletion lisa/microsoft/testsuites/core/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -600,6 +600,10 @@ def after_case(self, log: Logger, **kwargs: Any) -> None:
Downgrading priority from 1 to 5. The file share relies on the
storage account key, which we cannot use currently.
Will change it back once file share works with MSI.

TODO: Test this case with storage account reuse changes in
AzureFileShare class (features.py). This test does NOT use
private endpoints (enable_private_endpoint=False by default).
""",
timeout=TIME_OUT,
requirement=simple_requirement(
Expand All @@ -619,7 +623,9 @@ def verify_cifs_basic(self, node: Node, environment: Environment) -> None:

try:
fs_url_dict = azure_file_share.create_file_share(
file_share_names=[fileshare_name], environment=environment
file_share_names=[fileshare_name],
environment=environment,
allow_shared_key_access=True,
)
test_folders_share_dict = {
test_folder: fs_url_dict[fileshare_name],
Expand Down
71 changes: 70 additions & 1 deletion lisa/sut_orchestrator/azure/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -1240,6 +1240,38 @@ def get_compute_client(
)


def _get_private_endpoint_ip(
network: Any,
resource_group_name: str,
private_endpoint: Any,
log: Logger,
) -> Optional[str]:
"""
Helper function to extract IP address from a private endpoint.
Tries multiple methods: custom_dns_configs, then network interface.
"""
# Method 1: Try custom_dns_configs
if (
private_endpoint.custom_dns_configs
and private_endpoint.custom_dns_configs[0].ip_addresses
):
return str(private_endpoint.custom_dns_configs[0].ip_addresses[0])

# Method 2: Get IP from network interface
if private_endpoint.network_interfaces:
try:
nic_id = private_endpoint.network_interfaces[0].id
# Extract NIC name from resource ID
nic_name = nic_id.split("/")[-1]
nic = network.network_interfaces.get(resource_group_name, nic_name)
if nic.ip_configurations and nic.ip_configurations[0].private_ip_address:
return str(nic.ip_configurations[0].private_ip_address)
except Exception as e:
log.debug(f"Failed to get IP from network interface: {e}")

return None


def create_update_private_endpoints(
platform: "AzurePlatform",
resource_group_name: str,
Expand All @@ -1253,6 +1285,24 @@ def create_update_private_endpoints(
private_endpoint_name = "pe_test"
status = "Approved"
description = "Auto-Approved"

# Check if private endpoint already exists
try:
existing_pe = network.private_endpoints.get(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
)
log.debug(f"found existing private endpoint: {private_endpoint_name}")
# Return IP from existing endpoint if available
ip_address = _get_private_endpoint_ip(
network, resource_group_name, existing_pe, log
)
if ip_address:
return ip_address
log.debug("Could not get IP from existing endpoint, will recreate...")
except Exception:
log.debug(f"private endpoint {private_endpoint_name} not found, creating...")

private_endpoint = network.private_endpoints.begin_create_or_update(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
Expand All @@ -1275,7 +1325,26 @@ def create_update_private_endpoints(
)
log.debug(f"create private endpoints: {private_endpoint_name}")
result = private_endpoint.result()
return result.custom_dns_configs[0].ip_addresses[0]

# Try to get IP from the creation result
ip_address = _get_private_endpoint_ip(network, resource_group_name, result, log)
if ip_address:
return ip_address

# DNS configs may not be immediately available, fetch endpoint again
log.debug("IP not in result, fetching endpoint again...")
result = network.private_endpoints.get(
resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
)

ip_address = _get_private_endpoint_ip(network, resource_group_name, result, log)
if ip_address:
return ip_address

raise LisaException(
f"Failed to get IP address from private endpoint {private_endpoint_name}"
)


def delete_private_endpoints(
Expand Down
21 changes: 19 additions & 2 deletions lisa/sut_orchestrator/azure/features.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,7 @@
get_node_context,
get_or_create_file_share,
get_primary_ip_addresses,
get_storage_client,
get_storage_credential,
get_virtual_networks,
get_vm,
Expand Down Expand Up @@ -3698,8 +3699,23 @@ def _initialize(self, *args: Any, **kwargs: Any) -> None:
self._initialize_fileshare_information()

def _initialize_fileshare_information(self) -> None:
random_str = generate_random_chars(string.ascii_lowercase + string.digits, 10)
self._storage_account_name = f"lisasc{random_str}"
platform: AzurePlatform = self._platform # type: ignore
storage_client = get_storage_client(
platform.credential, platform.subscription_id, platform.cloud
)
storage_accounts = storage_client.storage_accounts.list_by_resource_group(
self._resource_group_name
)
for account in storage_accounts:
if account.name.startswith("lisasc"):
self._storage_account_name = account.name
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if reuse the existing one, there will be some issues when storage account is deleted in delete_storage_account

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch, I will add flags and logic to delete_storage_account to ensure resources are selectively deleted only when they were created by LISA and keep_environment is set to "no".
Proposed input:
Deploy : False
Keep_environment: no
resource_group_name: Specified

Proposed Behavior:
If the storage account already exists, or it has file shares other than those created by this run: do not delete,
If file shares from parameters already exists: Do not delete
If Private endpoint already exists: Do not delete
Private DNS zone: No action

break
else:
random_str = generate_random_chars(
string.ascii_lowercase + string.digits, 10
)
self._storage_account_name = f"lisasc{random_str}"
Comment on lines +3711 to +3717
Copy link

Copilot AI Dec 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The storage account reuse logic conflicts with the cleanup logic in delete_azure_fileshare (line 3863-3870), which always deletes the storage account. If one test/node reuses a storage account that another test/node is still using or might need, the delete operation will cause failures.

This creates a conflict between the goal of reusing storage accounts across re-runs and the cleanup behavior. Consider:

  1. Only deleting file shares, not the storage account, when reuse is enabled
  2. Implementing reference counting or a cleanup flag to determine when it's safe to delete the storage account
  3. Adding configuration to control whether storage accounts should be preserved for reuse
Suggested change
self._storage_account_name = account.name
break
else:
random_str = generate_random_chars(
string.ascii_lowercase + string.digits, 10
)
self._storage_account_name = f"lisasc{random_str}"
self._storage_account_name = account.name
self._reuse_storage_account = True
break
else:
random_str = generate_random_chars(
string.ascii_lowercase + string.digits, 10
)
self._storage_account_name = f"lisasc{random_str}"
self._reuse_storage_account = False

Copilot uses AI. Check for mistakes.

self._fstab_info = (
f"nofail,vers={self.get_smb_version()},"
"credentials=/etc/smbcredentials/lisa.cred"
Expand Down Expand Up @@ -3743,6 +3759,7 @@ def create_file_share(
# No changes need to be done in code calling function
# For Quota, currently all volumes will share same quota number.
# Ensure that you use the highest value applicable for your shares
# Reuse existing file shares if they exist, otherwise create new ones
for share_name in file_share_names:
fs_url_dict[share_name] = get_or_create_file_share(
credential=platform.credential,
Expand Down
Loading