diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 6032a388..e0949177 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -5,9 +5,9 @@ name: Python import test on: push: - branches: [ master ] + branches: [ master, dev ] pull_request: - branches: [ master ] + branches: [ master, dev ] schedule: - cron: '0 8 * * MON' workflow_dispatch: @@ -60,30 +60,6 @@ jobs: - name: Trial imports run: python -c 'import analysator as pt' - turso_system: - - runs-on: carrington - strategy: - fail-fast: false - matrix: - extras: ["none", "vtk", "all", "bvtk"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python - run: | - python3 -m venv test_venv - - name: Install dependencies - run: | - source ./test_venv/bin/activate - python -m pip install --upgrade pip - python -m pip install --editable ../analysator[${{ matrix.extras }}] - - name: Trial imports - run: | - source ./test_venv/bin/activate - python -c 'import analysator as pt' - - lint: runs-on: ubuntu-latest diff --git a/.github/workflows/test_python_turso.yml b/.github/workflows/test_python_turso.yml new file mode 100644 index 00000000..559158d5 --- /dev/null +++ b/.github/workflows/test_python_turso.yml @@ -0,0 +1,42 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Turso import test + +on: + push: + branches: [ master, dev ] + pull_request: + branches: [ master, dev ] + schedule: + - cron: '0 8 * * MON' + workflow_dispatch: + +jobs: + + turso_system: + if: github.repository_owner == 'fmihpc' + runs-on: carrington + strategy: + fail-fast: false + matrix: + extras: ["none", "vtk", "all", "bvtk"] + steps: + - uses: actions/checkout@v4 + - name: Set up Python + run: | + export TMPDIR=$RUNNER_TEMP + python3 -m venv test_venv + - name: Install dependencies + run: 
| + export TMPDIR=$RUNNER_TEMP + source ./test_venv/bin/activate + python -m ensurepip + python -m pip install --upgrade pip + python -m pip install --editable ../analysator[${{ matrix.extras }}] + - name: Trial imports + run: | + export TMPDIR=$RUNNER_TEMP + source ./test_venv/bin/activate + python -c 'import analysator as pt' + \ No newline at end of file diff --git a/CITATION.cff b/CITATION.cff index 9b42cef6..d66eab2a 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -68,7 +68,7 @@ authors: - family-names: "Reimi" given-names: "Johanna" title: "Analysator" -version: 0.9.5 +version: 0.9.6 doi: 10.5281/zenodo.4462514 date-released: 2024-11-26 url: "https://github.com/fmihpc/analysator" diff --git a/Documentation/sphinx/obliqueshock.rst b/Documentation/sphinx/obliqueshock.rst new file mode 100644 index 00000000..3c859aa7 --- /dev/null +++ b/Documentation/sphinx/obliqueshock.rst @@ -0,0 +1,5 @@ +obliqueshock +------------ + +.. automodule:: obliqueshock + :members: \ No newline at end of file diff --git a/Documentation/sphinx/obliqueshock_nif.rst b/Documentation/sphinx/obliqueshock_nif.rst new file mode 100644 index 00000000..002cbe21 --- /dev/null +++ b/Documentation/sphinx/obliqueshock_nif.rst @@ -0,0 +1,5 @@ +obliqueshock_nif +---------------- + +.. automodule:: obliqueshock_nif + :members: \ No newline at end of file diff --git a/Documentation/sphinx/scripts.rst b/Documentation/sphinx/scripts.rst index 6da5198f..080c0f90 100644 --- a/Documentation/sphinx/scripts.rst +++ b/Documentation/sphinx/scripts.rst @@ -39,6 +39,24 @@ magnetopause3d ------------ +obliqueshock +------------ +:doc:`obliqueshock` + +.. automodule:: obliqueshock + :no-index: + +------------ + +obliqueshock_nif +---------------- +:doc:`obliqueshock_nif` + +.. 
automodule:: obliqueshock_nif + :no-index: + +---------------- + shue -------------- :doc:`shue` @@ -65,6 +83,8 @@ tsyganenko gics magnetopause2d magnetopause3d + obliqueshock + obliqueshock_nif shue tsyganenko diff --git a/analysator/pyCalculations/calculations.py b/analysator/pyCalculations/calculations.py index 8cb486db..7f5e940f 100644 --- a/analysator/pyCalculations/calculations.py +++ b/analysator/pyCalculations/calculations.py @@ -44,7 +44,7 @@ from fourier import fourier from spectra import get_spectrum_energy, get_spectrum_alongaxis_vel from variable import VariableInfo -from timeevolution import cell_time_evolution +from timeevolution import cell_time_evolution,point_time_evolution from pitchangle import pitch_angles #from backstream import extract_velocity_cells_sphere, extract_velocity_cells_non_sphere from gyrophaseangle import gyrophase_angles_from_file diff --git a/analysator/pyCalculations/timeevolution.py b/analysator/pyCalculations/timeevolution.py index a94fc801..9006c1bf 100644 --- a/analysator/pyCalculations/timeevolution.py +++ b/analysator/pyCalculations/timeevolution.py @@ -60,8 +60,19 @@ def cell_time_evolution( vlsvReader_list, variables, cellids, units="" ): vlsvReader_list = np.atleast_1d(vlsvReader_list) variables = np.atleast_1d(variables) cellids = np.atleast_1d(cellids) - parameters = ["t","tstep","fileIndex"] - parameter_units=["s","",""] + reader_0 = vlsvReader_list[0] + + # Check against legacy files with tstep instead of timestep: + if reader_0.check_parameter("tstep"): + parameters = ["t","tstep","fileIndex"] + parameter_units=["s","",""] + elif reader_0.check_parameter("timestep"): + parameters = ["t","timestep","fileIndex"] + parameter_units=["s","",""] + else: + logging.warning("Could not obtain tstep or timestep from readers. 
Returning only t and fileIndex.") + parameters = ["t","fileIndex"] + parameter_units=["s",""] #construct empty units, if none are given if (units == "") or (len(units) != len(variables)): units=[ "" for i in range(len(variables))] @@ -86,6 +97,95 @@ def cell_time_evolution( vlsvReader_list, variables, cellids, units="" ): # Save the data into the right slot in the data array: for i in range(len(cellids)): data[len(parameters)+i*len(variables)+j].append(vlsvReader.read_variable( variable, cellids[i] )) + #TODO Use vectorization over cellids! + + # For optimization purposes we are now freeing vlsvReader's memory + # Note: Upon reading data vlsvReader created an internal hash map that takes a lot of memory + vlsvReader.optimize_clear_fileindex_for_cellid() + # Close the vlsv reader's file: + vlsvReader.optimize_close_file() + from output import output_1d + return output_1d( data, + parameters + [variables[(int)(i)%(int)(len(variables))] for i in range(len(data)-len(parameters))], + parameter_units + [units[(int)(i)%(int)(len(units))] for i in range(len(data)-len(parameters))] ) + + +def point_time_evolution( vlsvReader_list, variables, coordinates, units="", method='nearest'): + ''' Returns variable data from a time evolution at the given coordinates + + :param vlsvReader_list: List containing VlsvReaders with a file open + :type vlsvReader_list: :class:`vlsvfile.VlsvReader` + :param variables: Name of the variables + :param coordinates: List of coordinates [n,3] + :param units: List of units for the variables (OPTIONAL) + :param method: name of interpolation method ['nearest','linear'] + :returns: an array containing the data for the time evolution for every coordinate + + .. 
code-block:: python + + import pytools as pt; import pylab as pl + # Example of usage: + time_data = pt.calculations.point_time_evolution( vlsvReader_list=[VlsvReader("bulk.000.vlsv"), VlsvReader("bulk.001.vlsv"), VlsvReader("bulk.002.vlsv")], variables=["rho", "Pressure", "B"], coordinates=[[1e8,0,0],[1.2e8,0,0]], units=["N", "Pascal", "T"] ) + + # Check output + logging.info(time_data) + + # Now plot the results: + time = time_data[0] + rho = time_data[3] + pt.plot.plot_variables(time, rho) + pl.show() + + # Do post processing: + rho_data = rho.data + non_existing_example_function(rho_data) + + + ''' + vlsvReader_list = np.atleast_1d(vlsvReader_list) + variables = np.atleast_1d(variables) + coordinates = np.array(coordinates) + if coordinates.ndim == 1: + coordinates = coordinates[np.newaxis,:] + reader_0 = vlsvReader_list[0] + + # Check against legacy files with tstep instead of timestep: + if reader_0.check_parameter("tstep"): + parameters = ["t","tstep","fileIndex"] + parameter_units=["s","",""] + elif reader_0.check_parameter("timestep"): + parameters = ["t","timestep","fileIndex"] + parameter_units=["s","",""] + else: + logging.warning("Could not obtain tstep or timestep from readers. 
Returning only t and fileIndex.") + parameters = ["t","fileIndex"] + parameter_units=["s",""] + #construct empty units, if none are given + if (units == "") or (len(units) != len(variables)): + units=[ "" for i in range(len(variables))] + #construct data + data = [[] for i in range(len(parameters)+coordinates.shape[0]*len(variables))] + for t in range(len(vlsvReader_list)): + # Get the vlsv reader + vlsvReader = vlsvReader_list[t] + # Open the vlsv reader's file: + vlsvReader.optimize_open_file() + #go through parameters + for j in range(len(parameters)): + # Read the parameter + # Save the data into the right slot in the data array: + data[j].append(vlsvReader.read_parameter( parameters[j])) + + # Go through variables: + for j in range(len(variables)): + variable = variables[j] + # Read the variable for all cell ids + #variables_for_cellids = vlsvReader.read_variables_for_cellids( variable, cellids ) + # Save the data into the right slot in the data array: + for i in range(coordinates.shape[0]): + data[len(parameters)+i*len(variables)+j].append(vlsvReader.read_interpolated_variable( variable, coordinates[i,:], method=method )) + #TODO Use vectorization over coordinates! + # For optimization purposes we are now freeing vlsvReader's memory # Note: Upon reading data vlsvReader created an internal hash map that takes a lot of memory vlsvReader.optimize_clear_fileindex_for_cellid() diff --git a/analysator/pyPlots/plot_ionosphere.py b/analysator/pyPlots/plot_ionosphere.py index ffa4287c..20d090c0 100644 --- a/analysator/pyPlots/plot_ionosphere.py +++ b/analysator/pyPlots/plot_ionosphere.py @@ -142,7 +142,12 @@ def plot_ionosphere(filename=None, fontsize3=8*scale # Colour bar ticks and title # Plot title with time - timeval=f.read_parameter("time") + try: + timeval=f.read_parameter("time") + except: + # The ionosphere solver miniApp writes outputfiles without a valid + # time value. 
+ timeval=None # Plot title with time if title is None or title=="msec" or title=="musec": diff --git a/analysator/pyPlots/plot_vdf.py b/analysator/pyPlots/plot_vdf.py index fba0432a..bb2ef09b 100644 --- a/analysator/pyPlots/plot_vdf.py +++ b/analysator/pyPlots/plot_vdf.py @@ -269,6 +269,8 @@ def vSpaceReducer(vlsvReader, cid, slicetype, normvect, VXBins, VYBins, pop="pro Vpeak = V[peakindex,:] V = V - Vpeak logging.info(peakindex) + VXBins -= 0.5*inputcellsize + VYBins -= 0.5*inputcellsize logging.info("Transforming to frame of peak f-value, travelling at speed "+str(Vpeak)) elif not center is None: # assumes it's a vector, either provided or extracted from bulk velocity @@ -689,7 +691,7 @@ def plot_vdf(filename=None, vysize = int(vysize) vzsize = int(vzsize) [vxmin, vymin, vzmin, vxmax, vymax, vzmax] = vlsvReader.get_velocity_mesh_extent(pop=pop) - inputcellsize=(vxmax-vxmin)/vxsize + # Account for WID3 cells per block widval=4 #default WID=4 @@ -699,6 +701,9 @@ def plot_vdf(filename=None, vysize = widval*vysize vzsize = widval*vzsize + inputcellsize=(vxmax-vxmin)/(vxsize) + # print(inputcellsize) + Re = 6.371e+6 # Earth radius in m # unit of velocity velUnit = 1e3 diff --git a/analysator/pyVlsv/reduction.py b/analysator/pyVlsv/reduction.py index 3c50ad1d..aaa9b34d 100644 --- a/analysator/pyVlsv/reduction.py +++ b/analysator/pyVlsv/reduction.py @@ -31,6 +31,7 @@ import vlsvvariables import sys import math +import analysator as pt mp = 1.672622e-27 elementalcharge = 1.6021773e-19 @@ -795,6 +796,56 @@ def vg_reflevel(variables, reader): cellids = variables[0] return reader.get_amr_level(cellids) +def mlt(variables): + # return MLT values for give coordinates + coords = np.atleast_2d(variables[0]) + # print(coords) + xx, yy = coords[:,0], coords[:,1] + angles = np.arctan2(yy,xx) + + MLT_values = 12 + angles *(12/np.pi) + + return MLT_values + + +def ig_coords(variables, reader): + return reader.get_ionosphere_node_coords() + + +def ig_open_closed(variables, reader): 
+ # open: 2, closed: 1 + positions = np.atleast_2d(variables[0]) + io_rad = float(reader.get_config()['ionosphere']['radius'][0]) + + oc_vals = np.zeros((positions.shape[0])) + + n_idx, s_idx = np.where(positions[:,-1]>0)[0], np.where(positions[:,-1]<0)[0] + + def last_valid(traced_all): + valid_idx = ~np.isnan(traced_all).any(axis = 2) # shape: (N, iter) + last_valid_idx = np.argmax(valid_idx == 0, axis=1) - 1 # shape: (iter,) + N_idx = np.arange(traced_all.shape[0]) + return traced_all[N_idx, last_valid_idx, :] + + if len(n_idx) > 0: + traced_north = pt.calculations.static_field_tracer_3d(reader, positions[n_idx], 40000, 4e4, direction='-' ) + last_north = last_valid(traced_north) + dist_n = np.linalg.norm(last_north, axis=1) + oc_vals[n_idx] = (dist_n > io_rad).astype(int) + 1 + + if len(s_idx) > 0: + traced_south = pt.calculations.static_field_tracer_3d(reader, positions[s_idx], 40000, 4e4, direction='+') + last_south = last_valid(traced_south) + dist_s = np.linalg.norm(last_south, axis=1) + oc_vals[s_idx] = (dist_s > io_rad).astype(int) + 1 + + return oc_vals + + + + + + def _normalize(vec): ''' (private) helper function, normalizes a multidimensinonal array of vectors @@ -802,6 +853,8 @@ def _normalize(vec): ''' return vec / np.linalg.norm(vec, axis = -1)[:, np.newaxis] + + def ig_E( variables, reader ): ''' calculate in-plane ionospheric electric field from the ionospheric potential 'ig_potential' @@ -1130,8 +1183,16 @@ def makelambda(index): # IONOSPHERE ('ig_') v5reducers["ig_inplanecurrent"] = DataReducerVariable(["ig_e"], ig_inplanecurrent, "A/m", 1, latex=r"$\vec{J}$",latexunits=r"$\mathrm{A}\,\mathrm{m}^{-1}$", useReader=True) v5reducers["ig_e"] = DataReducerVariable(["ig_potential"], ig_E, "V/m", 1, latex=r"$\vec{E}$",latexunits=r"$\mathrm{V}\,\mathrm{m}^{-1}$", useReader=True) +v5reducers["ig_node_coordinates"] = DataReducerVariable([],ig_coords,"m",3, latex=r"$\vec{r}$", latexunits=r"$\mathrm{m}$", useReader=True) +v5reducers["ig_mlt"] = 
DataReducerVariable(["ig_node_coordinates"], mlt, "h", 1, latex=r"$\mathrm{MLT}$",latexunits=r"$\mathrm{h}$") +v5reducers["ig_openclosed"] = DataReducerVariable(["ig_upmappednodecoords"], ig_open_closed, "", 1, latex = r"$\mathrm{oc}$", latexunits=r"",useReader = True) + + + + # MAGNETOSPHERE ('vg_') +v5reducers["vg_mlt"] = DataReducerVariable(["vg_coordinates"], mlt, "h", 1, latex=r"$\mathrm{MLT}$",latexunits=r"$\mathrm{h}$") v5reducers["vg_vms"] = DataReducerVariable(["vg_pressure", "vg_rhom", "vg_b_vol"], vms, "m/s", 1, latex=r"$v_\mathrm{ms}$",latexunits=r"$\mathrm{m}\,\mathrm{s}^{-1}$") v5reducers["vg_vs"] = DataReducerVariable(["vg_pressure", "vg_rhom"], vs, "m/s", 1, latex=r"$v_\mathrm{s}$",latexunits=r"$\mathrm{m}\,\mathrm{s}^{-1}$") v5reducers["vg_va"] = DataReducerVariable(["vg_rhom", "vg_b_vol"], va, "m/s", 1, latex=r"$v_\mathrm{A}$",latexunits=r"$\mathrm{m}\,\mathrm{s}^{-1}$") diff --git a/scripts/obliqueshock.py b/scripts/obliqueshock.py index ba5c7fa1..60cf0b9c 100644 --- a/scripts/obliqueshock.py +++ b/scripts/obliqueshock.py @@ -1,18 +1,13 @@ +''' +Calculates shock crossing values from Rankine-Hugoniot relations +in the deHoffmann-Teller (dHT) frame. 
+''' + import numpy as np import math import scipy.optimize import logging -# Script for calculating shock crossing values from Rankine-Hugoniot relations -# Feed it upstream and shock values in given reference frame, outputs the dHT state -# intput example: -# obliqueshock.rankine(5e5,1.0e6,[-750e3,0,0],[3.5355e-9,0,-3.5355e-9],[1,0,0],0) -# where T_upstream = 500 kK -# n_upstream = 1/cc -# inflow plasma speed is 750 km/s in -X -# upstream magnetic field is 5 nT at 45 degree angle -# Shock front points in +X direction and is stationary in input frame - mu0 = 4*math.pi*1.e-7 mp = 1.67e-27 c = 299792458 @@ -20,14 +15,13 @@ Gamma = 5.0/3.0 def polynome(X, theta, V1sq, beta1, vA1sq, Gamma, vs1sq): - return (V1sq-X*vA1sq)**2 * (X*vs1sq + 0.5*V1sq*np.cos(theta)**2*(X*(Gamma-1)-(Gamma+1))) + 0.5*vA1sq*V1sq*X*np.sin(theta)**2 * ((Gamma+X*(2-Gamma))*V1sq - X*vA1sq*((Gamma+1)-X*(Gamma-1))) + return (V1sq-X*vA1sq)**2 * (X*vs1sq + 0.5*V1sq*np.cos(theta)**2*(X*(Gamma-1)-(Gamma+1))) + 0.5*vA1sq*V1sq*X*np.sin(theta)**2 * ((Gamma+X*(2-Gamma))*V1sq - X*vA1sq*((Gamma+1)-X*(Gamma-1))) #from Koskinen's Physics of Space Storms -book def newtonmethod(theta, V1sq, beta1, vA1sq, Gamma, vs1sq): calctemp1 = 1. + 0.5*Gamma*beta1 cos12 = np.cos(theta)**2 sin12 = np.sin(theta)**2 MA2=V1sq/vA1sq - logging.info("MA: " + str(np.sqrt(MA2))) Ztry = max( ((0.5/cos12)*(calctemp1 + np.sqrt(calctemp1**2 - 2.*Gamma*beta1*cos12)) -1.), ((0.5/cos12)*(calctemp1 - np.sqrt(calctemp1**2 - 2.*Gamma*beta1*cos12)) -1.), 0.) # First (root for M**2) -1 @@ -45,16 +39,16 @@ def newtonmethod(theta, V1sq, beta1, vA1sq, Gamma, vs1sq): gform = (1. +Ztry)*((Ztry**2)*2.*cos12 +(3. +Ztry)*sin12) Rtry = fform/gform M2try = (1. +Ztry)*Rtry - + while (abs(M2try - MA2) > 0.0001): fderi = (Ztry**2)*8.*cos12 +(3. -8.*Ztry)*sin12 -10.*Ztry*beta1 +(1. +Ztry)*(16.*Ztry*cos12 -5.*sin12) gderi = (Ztry**2)*2.*cos12 +(3. +Ztry)*sin12 +(1. +Ztry)*(4.*Ztry*cos12 +sin12) rderi = (gform*fderi-fform*gderi)/(gform**2) m2deri = (1. 
+Ztry)*rderi + Rtry - + # Newton step forward Ztry = Ztry + (MA2 - M2try)/m2deri * 0.5*rstep - + # Calculate new Rtry and M2try fform = (1. +Ztry)*((Ztry**2)*8.*cos12 +(3. -5.*Ztry)*sin12) -(Ztry**2)*5.*beta1 gform = (1. +Ztry)*((Ztry**2)*2.*cos12 +(3. +Ztry)*sin12) @@ -67,50 +61,79 @@ def newtonmethod(theta, V1sq, beta1, vA1sq, Gamma, vs1sq): return compr def rankine(Tu, rhou, V, B, n, Vsh): + ''' + Call obliqueshock.rankine(Tu, rhou, V, B, n, Vsh) to compute the shock crossing values + + Inputs: + Tu: upstream proton temperature [K] rhou: upstream proton number density [1/m3] V: 3-element upstream proton inflow velocity vector [m/s] + B: 3-element upstream magnetic field vector [T] n: 3-element shock normal vector Vsh: 3-element shock velocity vector [m/s] + + Returns: + The shock compression ratio + X: Compression ratio from scipy.optimize X2: Compression ratio from the Newton method + + Example: + obliqueshock.rankine(5e5, 1.0e6, [-750e3,0,0], [3.5355e-9,0,-3.5355e-9], [1,0,0], 0)\n + -> Computes the shock crossing for Tu = 500 kK, rhou = 1/cc, V = [-750,0,0]km/s (inflow plasma speed is 750 km/s in -X), + B = [3.5355e,0,-3.5355e]nT (upstream magnetic field is 5 nT at 45 degree angle), + n = [1,0,0], Vsh = 0 (shock front points in +X direction and is stationary in input frame) + ''' + # V, B, n are vectors V = np.array(V) B = np.array(B) n = np.array(n) - + # normalise n n = n/np.linalg.norm(n) Vshvect = n*Vsh - + Vu = V - Vshvect Bu = B - + pu = rhou * k * Tu vHT = np.cross(n, np.cross(Vu, Bu)) / np.dot(n,Bu) - #logging.info("The de Hoffmann Teller transformation velocity is ", vHT) + logging.info("The de Hoffmann Teller transformation velocity is [km/s]: " + str(np.linalg.norm(vHT)/1e3)) VuHT = Vu - vHT - BuHT = Bu #+ 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu)) # Eu = -Vu X Bu + BuHT = Bu #BuHT2 = Bu + 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu)) # Eu = -Vu X Bu - #logging.info("BuHT "+str(BuHT)+" alt "+str(BuHT2)) - + Emotional = -np.cross(VuHT,BuHT) - 
#logging.info("Verify: Motional E-field in HT frame: ", Emotional) + logging.info("Verify motional E-field in dHT frame: " + str(Emotional)) theta = np.arccos(np.dot(BuHT,n)/np.linalg.norm(BuHT)) + logging.info("Theta [degree]: " + str(np.rad2deg(theta))) + + nPlane = np.cross(Vu, Bu) / np.linalg.norm(np.cross(Vu, Bu)) # normal to the plane containing B and V + t = np.cross(n, nPlane) / np.linalg.norm(np.cross(n, nPlane)) # transversal direction, i.e. normal to n and within the (B,V) plane (i.e. in turn normal to nPlane) + # compute the normal and tangential components of upstream V and B VnuHT = np.dot(VuHT, n) - VtuHT = np.linalg.norm(np.cross(VuHT,n)) - # VtuHT = np.sqrt(sum((VuHT - VnuHT .* n).**2)) + VtuHT = np.dot(VuHT, t) + BnuHT = np.dot(BuHT, n) - BtuHT = np.linalg.norm(np.cross(BuHT,n)) - # BtuHT = np.sqrt(sum((BuHT - BnuHT .* n).**2)) + BtuHT = np.dot(BuHT, t) + # upstream plasma state parameters beta1 = 2.0*mu0*pu / np.dot(Bu,Bu) vAusq = np.dot(BuHT,BuHT)/(mu0*mp*rhou) vsusq = Gamma*pu/(mp*rhou) + logging.info("vAu [km/s]: " + str(np.sqrt(vAusq)/1e3)) + logging.info("vSu [km/s]: " + str(np.sqrt(vsusq)/1e3)) + logging.info("Shock MA: " + str(np.linalg.norm(Vu)/np.sqrt(vAusq))) + + # compare the upstream initial speed to dHT speed + logging.info("|Vu| [km/s]: " + str(np.linalg.norm(Vu)/1e3)) + logging.info("|VuHT| [km/s]: " + str(np.linalg.norm(VuHT)/1e3)) # compression ratio - sol = scipy.optimize.root(polynome, 2., args=(theta, np.dot(Vu,Vu), beta1, vAusq, Gamma, vsusq)) + sol = scipy.optimize.root(polynome, 2., args=(theta, np.dot(VuHT,VuHT), beta1, vAusq, Gamma, vsusq)) X = sol.x # Alternative own Newton method - X2 = newtonmethod(theta, np.dot(Vu,Vu), beta1, vAusq, Gamma, vsusq) - logging.info("X " +str(X) + " X2 " + str(X2)) - + X2 = newtonmethod(theta, np.dot(VuHT,VuHT), beta1, vAusq, Gamma, vsusq) + ##logging.info("X " +str(X) + " X2 " + str(X2)) + VuHTsq = np.dot(VuHT,VuHT) VndHT = VnuHT / X VtdHT = VtuHT * (VuHTsq - vAusq)/(VuHTsq-X*vAusq) @@ 
-119,42 +142,40 @@ def rankine(Tu, rhou, V, B, n, Vsh): rhod = rhou * X BndHT = BnuHT BtdHT = BtuHT * (VuHTsq-vAusq)*X/(VuHTsq-X*vAusq) - # BdHT = np.sqrt(BndHT**2+BtdHT**2) - nPlane = np.cross(Vu, Bu) / np.linalg.norm(np.cross(Vu, Bu)) # normal to the plane containing B and V - t = np.cross(n, nPlane) / np.linalg.norm(np.cross(n, nPlane)) - # transversal direction, i.e. normal to n and within the (B,V) plane (i.e. in turn normal to nPlane) - BdHT = (BndHT * n) + (BtdHT * t) VdHT = (VndHT * n) + (VtdHT * t) - - Bd = BdHT #- 1/c**2 * cross(vHT, cross(-Vu,Bu)) + + Bd = BdHT #Bd2 = BdHT - 1/c**2 * np.cross(vHT, np.cross(-Vu,Bu)) #logging.info("Bd "+str(Bd)+" alt "+str(Bd2)) Vd = VdHT + vHT + Vshvect XB = np.linalg.norm(Bd)/np.linalg.norm(Bu) - + #//Td = pd / (Gamma * rhod * k) Td = pd / (rhod * k) - - logging.info("Gas compression ratio " + str(X[0])) - logging.info("Magnetic compression ratio " + str(XB)) + + #print compression ratios and upstream/downstream state + logging.info("Gas compression ratio: " + str(X[0])) + logging.info("Magnetic compression ratio: " + str(XB)) logging.info("") logging.info("This determines the dHT upstream state") - logging.info("Density " + str(rhou)) - logging.info("Temperature " + str(Tu)) - #logging.info("Thermal pressure ",pu[0]) - logging.info(" V " + str(VuHT)) - logging.info(" B " + str(BuHT)) + logging.info("Density [1/cm3]: " + str(rhou/1e6)) + logging.info("Temperature [K]: " + str(Tu)) + logging.info(" V [km/s]: " + str(VuHT/1e3)) + logging.info(" B [nT]: " + str(BuHT*1e9)) + logging.info(" |V| [km/s]: " + str(np.linalg.norm(VuHT)/1e3)) + logging.info(" |B| [nT]: " + str(np.linalg.norm(BuHT)*1e9)) logging.info("") logging.info("This determines the dHT downstream state") - logging.info("Density " + str(rhod[0])) - logging.info("Temperature " + str(Td[0])) - #logging.info("Thermal pressure ",pd[0]) - logging.info(" V " + str(VdHT)) - logging.info(" B " + str(BdHT)) - - return X[0], XB + logging.info("Density [1/cm3]: " + 
str(rhod[0]/1e6)) + logging.info("Temperature [K]: " + str(Td[0])) + logging.info(" V [km/s]: " + str(VdHT/1e3)) + logging.info(" B [nT]: " + str(BdHT*1e9)) + logging.info(" |V| [km/s]: " + str(np.linalg.norm(VdHT)/1e3)) + logging.info(" |B| [nT]: " + str(np.linalg.norm(BdHT)*1e9)) + + return X[0], X2 diff --git a/scripts/obliqueshock_nif.py b/scripts/obliqueshock_nif.py new file mode 100644 index 00000000..346cccf7 --- /dev/null +++ b/scripts/obliqueshock_nif.py @@ -0,0 +1,119 @@ +''' +Script calculates shock crossing values from Rankine-Hugoniot relations +in the normal incidence frame (NIF). +''' + +import numpy as np +import math +import scipy.optimize +import logging + +mu0 = 4*math.pi*1.e-7 +mp = 1.67e-27 +c = 299792458 +k = 1.3806506e-23 +Gamma = 5.0/3.0 + +def polynome(X, theta, beta1, MA1): + return (np.cos(theta)**2)*(2*(MA1**2) + 5*beta1*(np.cos(theta)**2))*(X**3) + (MA1**2)*(MA1**2 - (np.cos(theta)**2)*(5*(MA1**2) + 8 + 10*beta1))*(X**2) + (MA1**4)*(11*(np.cos(theta)**2) + 2*(MA1**2) + 5 + 5*beta1)*X - 8*(MA1**6) + +def rankine(Tu, rhou, V, B): + ''' + Call obliqueshock_nif.rankine(Tu, rhou, V, B) to compute the shock crossing values + + Inputs: + Tu: upstream proton temperature [K] rhou: upstream proton number density [1/m3] V: 3-element upstream proton inflow velocity vector [m/s] + B: 3-element upstream magnetic field vector [T]\n + Note: Give the plasma velocity relative to the shock speed, which is set to zero in the script. 
+ + Returns: + The shock compression ratio + X: Plasma compression ratio XB: Magnetic field compression ratio + + Example: + obliqueshock_nif.rankine(5e5, 1.0e6, [-750e3,0,0], [3.5355e-9,0,-3.5355e-9])\n + -> Computes the shock crossing for Tu = 500 kK, rhou = 1/cc, V = [-750,0,0]km/s (inflow plasma speed is 750 km/s in -X), + B = [3.5355e,0,-3.5355e]nT (upstream magnetic field is 5 nT at 45 degree angle), + ''' + + # V, B, n are vectors + V = np.array(V) + B = np.array(B) + + # normal and tangential vectors + n = -V/np.linalg.norm(V) + nPlane = np.cross(V, B)/np.linalg.norm(np.cross(V, B)); # normal to the plane containing B and V + t = np.cross(n, nPlane)/np.linalg.norm(np.cross(n, nPlane)); + + logging.info("Normal vector n: " + str(n)) + logging.info("Tangential vector t: " + str(t)) + # upstream parameters + Vu = V + Bu = B + pu = rhou * k * Tu + + Vnu = np.dot(Vu, n) + Vtu = np.dot(Vu, t) + + Bnu = np.dot(Bu, n) + Btu = np.dot(Bu, t) + + # compute relevant variables: Upstream beta, vA, MA and shock angle (theta) + theta = np.arccos(np.dot(Bu,n)/np.linalg.norm(Bu)) + logging.info("Theta [degree]: " + str(np.rad2deg(theta))) + + betau = 2.0*mu0*pu / np.dot(Bu,Bu) + vAu = np.sqrt(np.dot(Bu,Bu) / (mu0*mp*rhou)) + MAu = np.linalg.norm(Vu) / vAu + + logging.info("vAu [km/s]: " + str(vAu/1e3)) + logging.info("Shock MA: " + str(np.linalg.norm(Vu)/vAu)) + logging.info("Upstream plasma beta: " + str(betau)) + # compression ratio + sol = scipy.optimize.root(polynome, 2., args=(theta, betau, MAu)) + X = sol.x + + # get downstrean parameters + Z1 = (MAu**2-np.cos(theta)**2) / (MAu**2-X*np.cos(theta)**2) + Z2 = np.sin(theta)*np.cos(theta)*(X-1) / (MAu**2-X*np.cos(theta)**2) + Z3 = (betau/MAu**2 + 2*(X-1)/X + np.sin(theta)**2*(1 - (X*Z1)**2 )/MAu**2) / (2*X) + + rhod = rhou * X + + Vnd = (Vnu/X) + Vnt = Z2*Vnu + Vd = (Vnd * n) + (Vnt * t) + + Bnd = Bnu + Btd = X*Z1*Btu + Bd = (Bnu * n) + (Btd * t) + + vTHd_sq = Z3*Vu[0]**2; + Td = (mp*vTHd_sq/k); + + XB = 
np.linalg.norm(Bd)/np.linalg.norm(Bu) + + # print compression ratios and upstream/downstream state + logging.info("Gas compression ratio: " + str(X[0])) + logging.info("Magnetic compression ratio: " + str(XB)) + + logging.info("") + logging.info("This determines the NIF upstream state") + logging.info("Density [1/cm3]: " + str(rhou/1e6)) + logging.info("Temperature [K]: " + str(Tu)) + logging.info(" V [km/s]: " + str(Vu/1e3)) + logging.info(" B [nT]: " + str(Bu*1e9)) + logging.info(" |V| [km/s]: " + str(np.linalg.norm(Vu)/1e3)) + logging.info(" |B| [nT]: " + str(np.linalg.norm(Bu)*1e9)) + + logging.info("") + logging.info("This determines the NIF downstream state") + logging.info("Density [1/cm3]: " + str(rhod[0]/1e6)) + logging.info("Temperature [K]: " + str(Td[0])) + logging.info(" V [km/s]: " + str(Vd/1e3)) + logging.info(" B [nT]: " + str(Bd*1e9)) + logging.info(" |V| [km/s]: " + str(np.linalg.norm(Vd)/1e3)) + logging.info(" |B| [nT]: " + str(np.linalg.norm(Bd)*1e9)) + + return X[0], XB +