File add-unit-test-for-skip-false-values-from-preferred_i.patch of Package salt.5440
From a983f9342c6917eaa1aba63cd5ceebd9271f43d5 Mon Sep 17 00:00:00 2001
From: Bo Maryniuk <bo@suse.de>
Date: Thu, 20 Apr 2017 14:03:30 +0200
Subject: [PATCH] Add unit test for skip false values from preferred_ip

- Add fake preferred IP function for testing
- Add initial unit test for openstack cloud module
- Move out nested function to be unit-testable
- Lintfix
- Add unit test for nova connector
- Move out nested function for testing purposes
- Fix name error exception
- Skip test, if libcloud is not around
- Add unit test for node ip filtering
- Lintfix E0602
- Fix UT parameter changes
- Fix lint, typos and readability
- PEP8: fix unused variable
- Reformat idents, fix typos
- Describe debug information
---
 salt/cloud/clouds/dimensiondata.py            | 116 +++++-----
 salt/cloud/clouds/nova.py                     | 295 ++++++++++++--------------
 salt/cloud/clouds/openstack.py                | 229 ++++++++++----------
 tests/unit/cloud/clouds/__init__.py           |  17 ++
 tests/unit/cloud/clouds/dimensiondata_test.py |  28 ++-
 tests/unit/cloud/clouds/nova_test.py          |  43 ++++
 tests/unit/cloud/clouds/openstack_test.py     |  43 ++++
 7 files changed, 441 insertions(+), 330 deletions(-)
 create mode 100644 tests/unit/cloud/clouds/nova_test.py
 create mode 100644 tests/unit/cloud/clouds/openstack_test.py

diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py
index e4af241867..d8478436b8 100644
--- a/salt/cloud/clouds/dimensiondata.py
+++ b/salt/cloud/clouds/dimensiondata.py
@@ -131,6 +131,60 @@ def get_dependencies():
     )
 
 
+def _query_node_data(vm_, data):
+    running = False
+    try:
+        node = show_instance(vm_['name'], 'action')
+        running = (node['state'] == NodeState.RUNNING)
+        log.debug('Loaded node data for %s:\nname: %s\nstate: %s',
+                  vm_['name'], pprint.pformat(node['name']), node['state'])
+    except Exception as err:
+        log.error(
+            'Failed to get nodes list: %s', err,
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        # Trigger a failure in the wait for IP function
+        return running
+
+    if not running:
+        # Still not running, trigger another iteration
+        return
+
+    private = node['private_ips']
+    public = node['public_ips']
+
+    if private and not public:
+        log.warning('Private IPs returned, but not public. '
+                    'Checking for misidentified IPs.')
+        for private_ip in private:
+            private_ip = preferred_ip(vm_, [private_ip])
+            if private_ip is False:
+                continue
+            if salt.utils.cloud.is_public_ip(private_ip):
+                log.warning('%s is a public IP', private_ip)
+                data.public_ips.append(private_ip)
+            else:
+                log.warning('%s is a private IP', private_ip)
+                if private_ip not in data.private_ips:
+                    data.private_ips.append(private_ip)
+
+        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
+            return data
+
+    if private:
+        data.private_ips = private
+        if ssh_interface(vm_) == 'private_ips':
+            return data
+
+    if public:
+        data.public_ips = public
+        if ssh_interface(vm_) != 'private_ips':
+            return data
+
+    log.debug('Contents of the node data:')
+    log.debug(data)
+
+
 def create(vm_):
     '''
     Create a single VM from a data dict
@@ -197,69 +251,9 @@ def create(vm_):
         )
         return False
 
-    def __query_node_data(vm_, data):
-        running = False
-        try:
-            node = show_instance(vm_['name'], 'action')
-            running = (node['state'] == NodeState.RUNNING)
-            log.debug(
-                'Loaded node data for %s:\nname: %s\nstate: %s',
-                vm_['name'],
-                pprint.pformat(node['name']),
-                node['state']
-            )
-        except Exception as err:
-            log.error(
-                'Failed to get nodes list: %s', err,
-                # Show the traceback if the debug logging level is enabled
-                exc_info_on_loglevel=logging.DEBUG
-            )
-            # Trigger a failure in the wait for IP function
-            return False
-
-        if not running:
-            # Still not running, trigger another iteration
-            return
-
-        private = node['private_ips']
-        public = node['public_ips']
-
-        if private and not public:
-            log.warning(
-                'Private IPs returned, but not public... Checking for '
-                'misidentified IPs'
-            )
-            for private_ip in private:
-                private_ip = preferred_ip(vm_, [private_ip])
-                if private_ip is False:
-                    continue
-                if salt.utils.cloud.is_public_ip(private_ip):
-                    log.warning('%s is a public IP', private_ip)
-                    data.public_ips.append(private_ip)
-                else:
-                    log.warning('%s is a private IP', private_ip)
-                    if private_ip not in data.private_ips:
-                        data.private_ips.append(private_ip)
-
-            if ssh_interface(vm_) == 'private_ips' and data.private_ips:
-                return data
-
-        if private:
-            data.private_ips = private
-            if ssh_interface(vm_) == 'private_ips':
-                return data
-
-        if public:
-            data.public_ips = public
-            if ssh_interface(vm_) != 'private_ips':
-                return data
-
-        log.debug('DATA')
-        log.debug(data)
-
     try:
         data = salt.utils.cloud.wait_for_ip(
-            __query_node_data,
+            _query_node_data,
             update_args=(vm_, data),
             timeout=config.get_cloud_config_value(
                 'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
diff --git a/salt/cloud/clouds/nova.py b/salt/cloud/clouds/nova.py
index ed9251d4b1..d2cbf7387a 100644
--- a/salt/cloud/clouds/nova.py
+++ b/salt/cloud/clouds/nova.py
@@ -722,6 +722,145 @@ def request_instance(vm_=None, call=None):
     return data, vm_
 
 
+def _query_node_data(vm_, data, conn):
+    try:
+        node = show_instance(vm_['name'], 'action')
+        log.debug('Loaded node data for {0}:'
+                  '\n{1}'.format(vm_['name'], pprint.pformat(node)))
+    except Exception as err:
+        # Show the traceback if the debug logging level is enabled
+        log.error('Failed to get nodes list: {0}'.format(err),
+                  exc_info_on_loglevel=logging.DEBUG)
+        # Trigger a failure in the wait for IP function
+        return False
+
+    running = node['state'] == 'ACTIVE'
+    if not running:
+        # Still not running, trigger another iteration
+        return
+
+    if rackconnect(vm_) is True:
+        extra = node.get('extra', {})
+        rc_status = extra.get('metadata', {}).get('rackconnect_automation_status', '')
+        if rc_status != 'DEPLOYED':
+            log.debug('Waiting for Rackconnect automation to complete')
+            return
+
+    if managedcloud(vm_) is True:
+        extra = conn.server_show_libcloud(node['id']).extra
+        mc_status = extra.get('metadata', {}).get('rax_service_level_automation', '')
+
+        if mc_status != 'Complete':
+            log.debug('Waiting for managed cloud automation to complete')
+            return
+
+    access_ip = node.get('extra', {}).get('access_ip', '')
+
+    rcv3 = rackconnectv3(vm_) in node['addresses']
+    sshif = ssh_interface(vm_) in node['addresses']
+
+    if any((rcv3, sshif)):
+        networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
+        for network in node['addresses'].get(networkname, []):
+            if network['version'] is 4:
+                access_ip = network['addr']
+                break
+        vm_['cloudnetwork'] = True
+
+    # Conditions to pass this
+    #
+    # Rackconnect v2: vm_['rackconnect'] = True
+    #     If this is True, then the server will not be accessible from the ipv4 addres in public_ips.
+    #     That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
+    #     server. In this case we can use the private_ips for ssh_interface, or the access_ip.
+    #
+    # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
+    #     If this is the case, salt will need to use the cloud network to login to the server. There
+    #     is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud
+    #     also cannot use the private_ips, because that traffic is dropped at the hypervisor.
+    #
+    # CloudNetwork: vm['cloudnetwork'] = True
+    #     If this is True, then we should have an access_ip at this point set to the ip on the cloud
+    #     network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will
+    #     use the initial access_ip, and not overwrite anything.
+
+    if (any((cloudnetwork(vm_), rackconnect(vm_)))
+            and (ssh_interface(vm_) != 'private_ips' or rcv3)
+            and access_ip != ''):
+        data.public_ips = [access_ip]
+        return data
+
+    result = []
+
+    if ('private_ips' not in node
+            and 'public_ips' not in node
+            and 'floating_ips' not in node
+            and 'fixed_ips' not in node
+            and 'access_ip' in node.get('extra', {})):
+        result = [node['extra']['access_ip']]
+
+    private = node.get('private_ips', [])
+    public = node.get('public_ips', [])
+    fixed = node.get('fixed_ips', [])
+    floating = node.get('floating_ips', [])
+
+    if private and not public:
+        log.warning('Private IPs returned, but not public. '
+                    'Checking for misidentified IPs')
+        for private_ip in private:
+            private_ip = preferred_ip(vm_, [private_ip])
+            if private_ip is False:
+                continue
+            if salt.utils.cloud.is_public_ip(private_ip):
+                log.warning('{0} is a public IP'.format(private_ip))
+                data.public_ips.append(private_ip)
+                log.warning('Public IP address was not ready when we last checked. '
+                            'Appending public IP address now.')
+                public = data.public_ips
+            else:
+                log.warning('{0} is a private IP'.format(private_ip))
+                ignore_ip = ignore_cidr(vm_, private_ip)
+                if private_ip not in data.private_ips and not ignore_ip:
+                    result.append(private_ip)
+
+    # populate return data with private_ips
+    # when ssh_interface is set to private_ips and public_ips exist
+    if not result and ssh_interface(vm_) == 'private_ips':
+        for private_ip in private:
+            ignore_ip = ignore_cidr(vm_, private_ip)
+            if private_ip not in data.private_ips and not ignore_ip:
+                result.append(private_ip)
+
+    non_private_ips = []
+
+    if public:
+        data.public_ips = public
+        if ssh_interface(vm_) == 'public_ips':
+            non_private_ips.append(public)
+
+    if floating:
+        data.floating_ips = floating
+        if ssh_interface(vm_) == 'floating_ips':
+            non_private_ips.append(floating)
+
+    if fixed:
+        data.fixed_ips = fixed
+        if ssh_interface(vm_) == 'fixed_ips':
+            non_private_ips.append(fixed)
+
+    if non_private_ips:
+        log.debug('result = {0}'.format(non_private_ips))
+        data.private_ips = result
+        if ssh_interface(vm_) != 'private_ips':
+            return data
+
+    if result:
+        log.debug('result = {0}'.format(result))
+        data.private_ips = result
+        if ssh_interface(vm_) == 'private_ips':
+            return data
+
+
 def create(vm_):
     '''
     Create a single VM from a data dict
@@ -792,162 +931,10 @@ def create(vm_):
     # Pull the instance ID, valid for both spot and normal instances
     vm_['instance_id'] = data.id
 
-    def __query_node_data(vm_, data):
-        try:
-            node = show_instance(vm_['name'], 'action')
-            log.debug(
-                'Loaded node data for {0}:\n{1}'.format(
-                    vm_['name'],
-                    pprint.pformat(node)
-                )
-            )
-        except Exception as err:
-            log.error(
-                'Failed to get nodes list: {0}'.format(
-                    err
-                ),
-                # Show the traceback if the debug logging level is enabled
-                exc_info_on_loglevel=logging.DEBUG
-            )
-            # Trigger a failure in the wait for IP function
-            return False
-
-        running = node['state'] == 'ACTIVE'
-        if not running:
-            # Still not running, trigger another iteration
-            return
-
-        if rackconnect(vm_) is True:
-            extra = node.get('extra', {})
-            rc_status = extra.get('metadata', {}).get(
-                'rackconnect_automation_status', '')
-            if rc_status != 'DEPLOYED':
-                log.debug('Waiting for Rackconnect automation to complete')
-                return
-
-        if managedcloud(vm_) is True:
-            extra = conn.server_show_libcloud(
-                node['id']
-            ).extra
-            mc_status = extra.get('metadata', {}).get(
-                'rax_service_level_automation', '')
-
-            if mc_status != 'Complete':
-                log.debug('Waiting for managed cloud automation to complete')
-                return
-
-        access_ip = node.get('extra', {}).get('access_ip', '')
-
-        rcv3 = rackconnectv3(vm_) in node['addresses']
-        sshif = ssh_interface(vm_) in node['addresses']
-
-        if any((rcv3, sshif)):
-            networkname = rackconnectv3(vm_) if rcv3 else ssh_interface(vm_)
-            for network in node['addresses'].get(networkname, []):
-                if network['version'] is 4:
-                    access_ip = network['addr']
-                    break
-            vm_['cloudnetwork'] = True
-
-        # Conditions to pass this
-        #
-        # Rackconnect v2: vm_['rackconnect'] = True
-        #     If this is True, then the server will not be accessible from the ipv4 addres in public_ips.
-        #     That interface gets turned off, and an ipv4 from the dedicated firewall is routed to the
-        #     server. In this case we can use the private_ips for ssh_interface, or the access_ip.
-        #
-        # Rackconnect v3: vm['rackconnectv3'] = <cloudnetwork>
-        #     If this is the case, salt will need to use the cloud network to login to the server. There
-        #     is no ipv4 address automatically provisioned for these servers when they are booted. SaltCloud
-        #     also cannot use the private_ips, because that traffic is dropped at the hypervisor.
-        #
-        # CloudNetwork: vm['cloudnetwork'] = True
-        #     If this is True, then we should have an access_ip at this point set to the ip on the cloud
-        #     network. If that network does not exist in the 'addresses' dictionary, then SaltCloud will
-        #     use the initial access_ip, and not overwrite anything.
-
-        if any((cloudnetwork(vm_), rackconnect(vm_))) and (ssh_interface(vm_) != 'private_ips' or rcv3) and access_ip != '':
-            data.public_ips = [access_ip, ]
-            return data
-
-        result = []
-
-        if 'private_ips' not in node and 'public_ips' not in node and \
-           'floating_ips' not in node and 'fixed_ips' not in node and \
-           'access_ip' in node.get('extra', {}):
-            result = [node['extra']['access_ip']]
-
-        private = node.get('private_ips', [])
-        public = node.get('public_ips', [])
-        fixed = node.get('fixed_ips', [])
-        floating = node.get('floating_ips', [])
-
-        if private and not public:
-            log.warning(
-                'Private IPs returned, but not public... Checking for '
-                'misidentified IPs'
-            )
-            for private_ip in private:
-                private_ip = preferred_ip(vm_, [private_ip])
-                if private_ip is False:
-                    continue
-                if salt.utils.cloud.is_public_ip(private_ip):
-                    log.warning('{0} is a public IP'.format(private_ip))
-                    data.public_ips.append(private_ip)
-                    log.warning(
-                        (
-                            'Public IP address was not ready when we last'
-                            ' checked. Appending public IP address now.'
-                        )
-                    )
-                    public = data.public_ips
-                else:
-                    log.warning('{0} is a private IP'.format(private_ip))
-                    ignore_ip = ignore_cidr(vm_, private_ip)
-                    if private_ip not in data.private_ips and not ignore_ip:
-                        result.append(private_ip)
-
-        # populate return data with private_ips
-        # when ssh_interface is set to private_ips and public_ips exist
-        if not result and ssh_interface(vm_) == 'private_ips':
-            for private_ip in private:
-                ignore_ip = ignore_cidr(vm_, private_ip)
-                if private_ip not in data.private_ips and not ignore_ip:
-                    result.append(private_ip)
-
-        non_private_ips = []
-
-        if public:
-            data.public_ips = public
-            if ssh_interface(vm_) == 'public_ips':
-                non_private_ips.append(public)
-
-        if floating:
-            data.floating_ips = floating
-            if ssh_interface(vm_) == 'floating_ips':
-                non_private_ips.append(floating)
-
-        if fixed:
-            data.fixed_ips = fixed
-            if ssh_interface(vm_) == 'fixed_ips':
-                non_private_ips.append(fixed)
-
-        if non_private_ips:
-            log.debug('result = {0}'.format(non_private_ips))
-            data.private_ips = result
-            if ssh_interface(vm_) != 'private_ips':
-                return data
-
-        if result:
-            log.debug('result = {0}'.format(result))
-            data.private_ips = result
-            if ssh_interface(vm_) == 'private_ips':
-                return data
-
     try:
         data = salt.utils.cloud.wait_for_ip(
-            __query_node_data,
-            update_args=(vm_, data),
+            _query_node_data,
+            update_args=(vm_, data, conn),
             timeout=config.get_cloud_config_value(
                 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
             interval=config.get_cloud_config_value(
diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py
index cc936509c7..c8ad91ff23 100644
--- a/salt/cloud/clouds/openstack.py
+++ b/salt/cloud/clouds/openstack.py
@@ -585,6 +585,119 @@ def request_instance(vm_=None, call=None):
     return data, vm_
 
 
+def _query_node_data(vm_, data, floating, conn):
+    try:
+        node = show_instance(vm_['name'], 'action')
+        log.debug(
+            'Loaded node data for {0}:\n{1}'.format(
+                vm_['name'],
+                pprint.pformat(node)
+            )
+        )
+    except Exception as err:
+        log.error(
+            'Failed to get nodes list: {0}'.format(
+                err
+            ),
+            # Show the traceback if the debug logging level is enabled
+            exc_info_on_loglevel=logging.DEBUG
+        )
+        # Trigger a failure in the wait for IP function
+        return False
+
+    running = node['state'] == NodeState.RUNNING
+    if not running:
+        # Still not running, trigger another iteration
+        return
+
+    if rackconnect(vm_) is True:
+        check_libcloud_version((0, 14, 0), why='rackconnect: True')
+        extra = node.get('extra')
+        rc_status = extra.get('metadata', {}).get(
+            'rackconnect_automation_status', '')
+        access_ip = extra.get('access_ip', '')
+
+        if rc_status != 'DEPLOYED':
+            log.debug('Waiting for Rackconnect automation to complete')
+            return
+
+    if managedcloud(vm_) is True:
+        extra = node.get('extra')
+        mc_status = extra.get('metadata', {}).get(
+            'rax_service_level_automation', '')
+
+        if mc_status != 'Complete':
+            log.debug('Waiting for managed cloud automation to complete')
+            return
+
+    public = node['public_ips']
+    if floating:
+        try:
+            name = data.name
+            ip = floating[0].ip_address
+            conn.ex_attach_floating_ip_to_node(data, ip)
+            log.info(
+                'Attaching floating IP \'{0}\' to node \'{1}\''.format(
+                    ip, name
+                )
+            )
+            data.public_ips.append(ip)
+            public = data.public_ips
+        except Exception:
+            # Note(pabelanger): Because we loop, we only want to attach the
+            # floating IP address one. So, expect failures if the IP is
+            # already attached.
+            pass
+
+    result = []
+    private = node['private_ips']
+    if private and not public:
+        log.warning(
+            'Private IPs returned, but not public... Checking for '
+            'misidentified IPs'
+        )
+        for private_ip in private:
+            private_ip = preferred_ip(vm_, [private_ip])
+            if private_ip is False:
+                continue
+            if salt.utils.cloud.is_public_ip(private_ip):
+                log.warning('{0} is a public IP'.format(private_ip))
+                data.public_ips.append(private_ip)
+                log.warning(
+                    'Public IP address was not ready when we last checked.'
+                    ' Appending public IP address now.'
+                )
+                public = data.public_ips
+            else:
+                log.warning('{0} is a private IP'.format(private_ip))
+                ignore_ip = ignore_cidr(vm_, private_ip)
+                if private_ip not in data.private_ips and not ignore_ip:
+                    result.append(private_ip)
+
+    if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
+        data.public_ips = access_ip
+        return data
+
+    # populate return data with private_ips
+    # when ssh_interface is set to private_ips and public_ips exist
+    if not result and ssh_interface(vm_) == 'private_ips':
+        for private_ip in private:
+            ignore_ip = ignore_cidr(vm_, private_ip)
+            if private_ip not in data.private_ips and not ignore_ip:
+                result.append(private_ip)
+
+    if result:
+        log.debug('result = {0}'.format(result))
+        data.private_ips = result
+        if ssh_interface(vm_) == 'private_ips':
+            return data
+
+    if public:
+        data.public_ips = public
+        if ssh_interface(vm_) != 'private_ips':
+            return data
+
+
 def create(vm_):
     '''
     Create a single VM from a data dict
@@ -659,122 +772,10 @@ def create(vm_):
     # Pull the instance ID, valid for both spot and normal instances
     vm_['instance_id'] = data.id
 
-    def __query_node_data(vm_, data, floating):
-        try:
-            node = show_instance(vm_['name'], 'action')
-            log.debug(
-                'Loaded node data for {0}:\n{1}'.format(
-                    vm_['name'],
-                    pprint.pformat(node)
-                )
-            )
-        except Exception as err:
-            log.error(
-                'Failed to get nodes list: {0}'.format(
-                    err
-                ),
-                # Show the traceback if the debug logging level is enabled
-                exc_info_on_loglevel=logging.DEBUG
-            )
-            # Trigger a failure in the wait for IP function
-            return False
-
-        running = node['state'] == NodeState.RUNNING
-        if not running:
-            # Still not running, trigger another iteration
-            return
-
-        if rackconnect(vm_) is True:
-            check_libcloud_version((0, 14, 0), why='rackconnect: True')
-            extra = node.get('extra')
-            rc_status = extra.get('metadata', {}).get(
-                'rackconnect_automation_status', '')
-            access_ip = extra.get('access_ip', '')
-
-            if rc_status != 'DEPLOYED':
-                log.debug('Waiting for Rackconnect automation to complete')
-                return
-
-        if managedcloud(vm_) is True:
-            extra = node.get('extra')
-            mc_status = extra.get('metadata', {}).get(
-                'rax_service_level_automation', '')
-
-            if mc_status != 'Complete':
-                log.debug('Waiting for managed cloud automation to complete')
-                return
-
-        public = node['public_ips']
-        if floating:
-            try:
-                name = data.name
-                ip = floating[0].ip_address
-                conn.ex_attach_floating_ip_to_node(data, ip)
-                log.info(
-                    'Attaching floating IP \'{0}\' to node \'{1}\''.format(
-                        ip, name
-                    )
-                )
-                data.public_ips.append(ip)
-                public = data.public_ips
-            except Exception:
-                # Note(pabelanger): Because we loop, we only want to attach the
-                # floating IP address one. So, expect failures if the IP is
-                # already attached.
-                pass
-
-        result = []
-        private = node['private_ips']
-        if private and not public:
-            log.warning(
-                'Private IPs returned, but not public... Checking for '
-                'misidentified IPs'
-            )
-            for private_ip in private:
-                private_ip = preferred_ip(vm_, [private_ip])
-                if private_ip is False:
-                    continue
-                if salt.utils.cloud.is_public_ip(private_ip):
-                    log.warning('{0} is a public IP'.format(private_ip))
-                    data.public_ips.append(private_ip)
-                    log.warning(
-                        'Public IP address was not ready when we last checked.'
-                        ' Appending public IP address now.'
-                    )
-                    public = data.public_ips
-                else:
-                    log.warning('{0} is a private IP'.format(private_ip))
-                    ignore_ip = ignore_cidr(vm_, private_ip)
-                    if private_ip not in data.private_ips and not ignore_ip:
-                        result.append(private_ip)
-
-        if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
-            data.public_ips = access_ip
-            return data
-
-        # populate return data with private_ips
-        # when ssh_interface is set to private_ips and public_ips exist
-        if not result and ssh_interface(vm_) == 'private_ips':
-            for private_ip in private:
-                ignore_ip = ignore_cidr(vm_, private_ip)
-                if private_ip not in data.private_ips and not ignore_ip:
-                    result.append(private_ip)
-
-        if result:
-            log.debug('result = {0}'.format(result))
-            data.private_ips = result
-            if ssh_interface(vm_) == 'private_ips':
-                return data
-
-        if public:
-            data.public_ips = public
-            if ssh_interface(vm_) != 'private_ips':
-                return data
-
     try:
         data = salt.utils.cloud.wait_for_ip(
-            __query_node_data,
-            update_args=(vm_, data, vm_['floating']),
+            _query_node_data,
+            update_args=(vm_, data, vm_['floating'], conn),
             timeout=config.get_cloud_config_value(
                 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
             interval=config.get_cloud_config_value(
diff --git a/tests/unit/cloud/clouds/__init__.py b/tests/unit/cloud/clouds/__init__.py
index 40a96afc6f..15d1e2c5c6 100644
--- a/tests/unit/cloud/clouds/__init__.py
+++ b/tests/unit/cloud/clouds/__init__.py
@@ -1 +1,18 @@
 # -*- coding: utf-8 -*-
+
+
+def _preferred_ip(ip_set, preferred=None):
+    '''
+    Returns a function that reacts which ip is prefered
+    :param ip_set:
+    :param private:
+    :return:
+    '''
+
+    def _ip_decider(vm, ips):
+        for ip in ips:
+            if ip in preferred:
+                return ip
+        return False
+
+    return _ip_decider
diff --git a/tests/unit/cloud/clouds/dimensiondata_test.py b/tests/unit/cloud/clouds/dimensiondata_test.py
index b4ea7f57f5..9f92fd7dbe 100644
--- a/tests/unit/cloud/clouds/dimensiondata_test.py
+++ b/tests/unit/cloud/clouds/dimensiondata_test.py
@@ -25,6 +25,7 @@ from salt.exceptions import SaltCloudSystemExit
 from salttesting import TestCase, skipIf
 from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
 from salttesting.helpers import ensure_in_syspath
+from tests.unit.cloud.clouds import _preferred_ip
 
 ensure_in_syspath('../../../')
 
@@ -48,7 +49,7 @@ VM_NAME = 'winterfell'
 try:
     import certifi
     libcloud.security.CA_CERTS_PATH.append(certifi.where())
-except ImportError:
+except (ImportError, NameError):
     pass
 
 
@@ -129,6 +130,7 @@ class DimensionDataTestCase(ExtendedTestCase):
             call='function'
         )
 
+    @skipIf(HAS_LIBCLOUD is False, "Install 'libcloud' to be able to run this unit test.")
     def test_avail_sizes(self):
         '''
         Tests that avail_sizes returns an empty dictionary.
@@ -160,6 +162,30 @@ class DimensionDataTestCase(ExtendedTestCase):
         p = dimensiondata.get_configured_provider()
         self.assertNotEqual(p, None)
 
+    PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']
+
+    @patch('salt.cloud.clouds.dimensiondata.show_instance',
+           MagicMock(return_value={'state': True,
+                                   'name': 'foo',
+                                   'public_ips': [],
+                                   'private_ips': PRIVATE_IPS}))
+    @patch('salt.cloud.clouds.dimensiondata.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
+    @patch('salt.cloud.clouds.dimensiondata.ssh_interface', MagicMock(return_value='private_ips'))
+    def test_query_node_data_filter_preferred_ip_addresses(self):
+        '''
+        Test if query node data is filtering out unpreferred IP addresses.
+        '''
+        dimensiondata.NodeState = MagicMock()
+        dimensiondata.NodeState.RUNNING = True
+        dimensiondata.__opts__ = {}
+
+        vm = {'name': None}
+        data = MagicMock()
+        data.public_ips = []
+
+        assert dimensiondata._query_node_data(vm, data).public_ips == ['0.0.0.0']
+
+
 if __name__ == '__main__':
     from integration import run_tests
     run_tests(DimensionDataTestCase, needs_daemon=False)
diff --git a/tests/unit/cloud/clouds/nova_test.py b/tests/unit/cloud/clouds/nova_test.py
new file mode 100644
index 0000000000..c44c0bd507
--- /dev/null
+++ b/tests/unit/cloud/clouds/nova_test.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
+'''
+
+# Import Python libs
+from __future__ import absolute_import
+
+# Import Salt Testing Libs
+from salttesting import TestCase
+from salt.cloud.clouds import nova
+from salttesting.mock import MagicMock, patch
+from tests.unit.cloud.clouds import _preferred_ip
+
+
+class NovaTestCase(TestCase):
+    '''
+    Test case for openstack
+    '''
+    PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']
+
+    @patch('salt.cloud.clouds.nova.show_instance',
+           MagicMock(return_value={'state': 'ACTIVE',
+                                   'public_ips': [],
+                                   'addresses': [],
+                                   'private_ips': PRIVATE_IPS}))
+    @patch('salt.cloud.clouds.nova.rackconnect', MagicMock(return_value=False))
+    @patch('salt.cloud.clouds.nova.rackconnectv3', MagicMock(return_value={'mynet': ['1.1.1.1']}))
+    @patch('salt.cloud.clouds.nova.cloudnetwork', MagicMock(return_value=False))
+    @patch('salt.cloud.clouds.nova.managedcloud', MagicMock(return_value=False))
+    @patch('salt.cloud.clouds.nova.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
+    @patch('salt.cloud.clouds.nova.ssh_interface', MagicMock(return_value='public_ips'))
+    def test_query_node_data_filter_preferred_ip_addresses(self):
+        '''
+        Test if query node data is filtering out unpreferred IP addresses.
+        '''
+        nova.__opts__ = {}
+
+        vm = {'name': None}
+        data = MagicMock()
+        data.public_ips = []
+
+        assert nova._query_node_data(vm, data, MagicMock()).public_ips == ['0.0.0.0']
diff --git a/tests/unit/cloud/clouds/openstack_test.py b/tests/unit/cloud/clouds/openstack_test.py
new file mode 100644
index 0000000000..9e70e3874a
--- /dev/null
+++ b/tests/unit/cloud/clouds/openstack_test.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+'''
+    :codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
+'''
+
+# Import Python libs
+from __future__ import absolute_import
+
+# Import Salt Testing Libs
+from salttesting import TestCase
+from salt.cloud.clouds import openstack
+from salttesting.mock import MagicMock, patch
+from tests.unit.cloud.clouds import _preferred_ip
+
+
+class OpenstackTestCase(TestCase):
+    '''
+    Test case for openstack
+    '''
+    PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']
+
+    @patch('salt.cloud.clouds.openstack.show_instance',
+           MagicMock(return_value={'state': True,
+                                   'public_ips': [],
+                                   'private_ips': PRIVATE_IPS}))
+    @patch('salt.cloud.clouds.openstack.rackconnect', MagicMock(return_value=False))
+    @patch('salt.cloud.clouds.openstack.managedcloud', MagicMock(return_value=False))
+    @patch('salt.cloud.clouds.openstack.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
+    @patch('salt.cloud.clouds.openstack.ssh_interface', MagicMock(return_value=False))
+    def test_query_node_data_filter_preferred_ip_addresses(self):
+        '''
+        Test if query node data is filtering out unpreferred IP addresses.
+        '''
+        openstack.NodeState = MagicMock()
+        openstack.NodeState.RUNNING = True
+        openstack.__opts__ = {}
+
+        vm = {'name': None}
+        data = MagicMock()
+        data.public_ips = []
+
+        with patch('salt.utils.cloud.is_public_ip', MagicMock(return_value=True)):
+            assert openstack._query_node_data(vm, data, False, MagicMock()).public_ips == ['0.0.0.0']
-- 
2.11.0
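
The patch applies one pattern across all three cloud drivers: the nested __query_node_data closure inside create() is hoisted to a module-level _query_node_data function, so the unit tests can import the module, patch the function's collaborators (show_instance, preferred_ip, ssh_interface, and so on), and call it directly. The _preferred_ip helper added in tests/unit/cloud/clouds/__init__.py is the piece that exercises the "skip false values" behaviour. Below is a minimal, self-contained sketch of that helper in isolation; the helper body mirrors the patch, while the surrounding assert scaffolding is illustrative only and not part of the patch:

    def _preferred_ip(ip_set, preferred=None):
        # Factory mirroring tests/unit/cloud/clouds/__init__.py: builds a
        # fake preferred_ip() that returns the first candidate found in
        # `preferred`, or False so the caller skips that address.
        def _ip_decider(vm, ips):
            for ip in ips:
                if ip in preferred:
                    return ip
            return False
        return _ip_decider

    # Illustrative usage (not part of the patch): the fake stands in for a
    # driver's preferred_ip() during a test run.
    fake = _preferred_ip(['0.0.0.0', '1.1.1.1', '2.2.2.2'], preferred=['0.0.0.0'])
    assert fake(None, ['1.1.1.1']) is False        # unpreferred address is skipped
    assert fake(None, ['0.0.0.0']) == '0.0.0.0'    # preferred address is returned

Because the fake returns False for unpreferred addresses, the tests verify that _query_node_data() treats False as "skip this candidate" rather than appending a bogus entry, which is exactly the filtering each test's final assertion checks (public_ips == ['0.0.0.0']).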