RocketGod
2022-09-22 13:46:47 -07:00
parent f65104c2ab
commit e7667c1d93
565 changed files with 165005 additions and 0 deletions

View File

@@ -0,0 +1,2 @@
*.so
*.pyd

View File

@@ -0,0 +1,14 @@
from subprocess import call, Popen
MODULES = ["path_creator", "signal_functions", "util", "auto_interpretation"]
COMPILER_DIRECTIVES = {'language_level': 3,
'cdivision': True,
'wraparound': False,
'boundscheck': False,
'initializedcheck': False,
}
for module in MODULES:
call(["cython", "-a", "-X", ",".join("{}={}".format(key, val) for key, val in COMPILER_DIRECTIVES.items()),
"--cplus", "-3", module + ".pyx"])
Popen(["firefox", module + ".html"])

View File

@@ -0,0 +1,240 @@
# noinspection PyUnresolvedReferences
cimport numpy as np
import numpy as np
from cpython cimport array
import array
import cython
from cython.parallel import prange
from libc.stdlib cimport malloc, free
from libcpp.algorithm cimport sort
from libc.stdint cimport uint64_t
cpdef tuple k_means(float[:] data, unsigned int k=2):
cdef float[:] centers = np.empty(k, dtype=np.float32)
cdef list clusters = []
cdef set unique = set(data)
cdef unsigned long i
if len(unique) < k:
print("Warning: less different values than k")
k = len(unique)
for i in range(k):
centers[i] = unique.pop()
clusters.append([])
cdef float[:] old_centers = np.array(centers, dtype=np.float32)
cdef float distance, min_distance, error = 1.0
cdef unsigned int j, index = 0, N = len(data)
while error != 0:
for i in range(k):
clusters[i].clear()
for i in range(N):
min_distance = 999999999
for j in range(k):
distance = (centers[j] - data[i]) * (centers[j] - data[i])
if distance < min_distance:
min_distance = distance
index = j
clusters[index].append(data[i])
old_centers = np.array(centers)
for i in range(k):
centers[i] = np.mean(clusters[i])
error = 0.0
for i in range(k):
error += old_centers[i] * old_centers[i] - centers[i] * centers[i]
return centers, clusters
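# Illustrative usage sketch (not part of the original file); assumes this module
# is compiled and importable, e.g. as urh.cythonext.auto_interpretation:
#
#     import numpy as np
#     from urh.cythonext import auto_interpretation as ai
#     data = np.array([0.1, 0.12, 0.9, 0.88, 0.11], dtype=np.float32)
#     centers, clusters = ai.k_means(data, k=2)
#     # centers: one float per cluster, clusters: k lists with the assigned samples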
def segment_messages_from_magnitudes(cython.floating[:] magnitudes, float noise_threshold):
"""
    Get the list of (start, end) indices of messages
    :param magnitudes: Magnitudes of the samples
    :param noise_threshold: Magnitude threshold; samples at or below it are treated as noise
    :return: List of (start, end) index tuples of the detected messages
"""
cdef list result = []
if len(magnitudes) == 0:
return []
cdef unsigned long i, N = len(magnitudes), start = 0
cdef unsigned long summed_message_samples = 0
# tolerance / robustness against outliers
cdef unsigned int outlier_tolerance = 10
cdef unsigned int conseq_above = 0, conseq_below = 0
    # Two states: 1 = above noise, -1 = in noise
cdef int state
state = 1 if magnitudes[0] > noise_threshold else -1
cdef bint is_above_noise
for i in range(N):
is_above_noise = magnitudes[i] > noise_threshold
if state == 1:
if is_above_noise:
conseq_below = 0
else:
conseq_below += 1
elif state == -1:
if is_above_noise:
conseq_above += 1
else:
conseq_above = 0
# Perform state change if necessary
if state == 1 and conseq_below >= outlier_tolerance:
# 1 -> -1
state = -1
result.append((start, i - conseq_below))
summed_message_samples += (i-conseq_below) - start
conseq_below = conseq_above = 0
elif state == -1 and conseq_above >= outlier_tolerance:
# -1 -> 1
state = 1
start = i - conseq_above
conseq_below = conseq_above = 0
# append last message
if state == 1 and start < N - conseq_below:
result.append((start, N - conseq_below))
return result
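# Illustrative example (comment only, not part of the original file): with
# outlier_tolerance = 10, a message is only closed after ten consecutive samples
# below the noise threshold, e.g.
#
#     mags = np.concatenate([np.zeros(50), 0.5 * np.ones(100), np.zeros(50)]).astype(np.float32)
#     segment_messages_from_magnitudes(mags, noise_threshold=0.1)
#     # -> [(49, 149)], i.e. roughly the above-noise region [50, 150)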
cpdef uint64_t[:] get_threshold_divisor_histogram(uint64_t[:] plateau_lengths, float threshold=0.2):
"""
    Get a histogram (i.e. count) of how many times a value is a threshold divisor of other values in the given data.
    A value is a threshold divisor of another value if the fractional part of their quotient is below the threshold (default 0.2).
    :param plateau_lengths: Plateau lengths to build the histogram from
    :return: Histogram indexed by divisor value
"""
cdef uint64_t i, j, x, y, minimum, maximum, num_lengths = len(plateau_lengths)
cdef np.ndarray[np.uint64_t, ndim=1] histogram = np.zeros(int(np.max(plateau_lengths)) + 1, dtype=np.uint64)
for i in range(0, num_lengths):
for j in range(i+1, num_lengths):
x = plateau_lengths[i]
y = plateau_lengths[j]
if x == 0 or y == 0:
continue
if x < y:
minimum = x
maximum = y
else:
minimum = y
maximum = x
if maximum / <double>minimum - (maximum / minimum) < threshold:
histogram[minimum] += 1
return histogram
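# Worked example (illustrative comment, not part of the original file): for
# plateau_lengths = [100, 200, 150] only the pair (100, 200) counts, because
# 200 / 100 = 2.0 has fractional part 0.0 < 0.2, while 150 / 100 = 1.5 and
# 200 / 150 = 1.33 do not; hence histogram[100] == 1 and all other bins stay 0.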
cpdef np.ndarray[np.uint64_t, ndim=1] merge_plateaus(np.ndarray[np.uint64_t, ndim=1] plateaus,
uint64_t tolerance,
uint64_t max_count):
cdef uint64_t j, n, L = len(plateaus), current = 0, i = 1, tmp_sum
if L == 0:
return np.zeros(0, dtype=np.uint64)
cdef np.ndarray[np.uint64_t, ndim=1] result = np.empty(L, dtype=np.uint64)
if plateaus[0] <= tolerance:
result[0] = 0
else:
result[0] = plateaus[0]
while i < L and current < max_count:
if plateaus[i] <= tolerance:
# Look ahead to see whether we need to merge a larger window e.g. for 67, 1, 10, 1, 21
n = 2
while i + n < L and plateaus[i + n] <= tolerance:
n += 2
tmp_sum = 0
for j in range(i - 1, min(L, i + n)):
tmp_sum += plateaus[j]
result[current] = tmp_sum
i += n
else:
current += 1
result[current] = plateaus[i]
i += 1
return result[:current+1]
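# Worked example (illustrative comment, not part of the original file): with
# tolerance = 5, merge_plateaus(np.array([67, 1, 10, 1, 21], dtype=np.uint64), 5, 100)
# treats the short plateaus (1, 1) as glitches and merges the whole run into a
# single plateau of length 67 + 1 + 10 + 1 + 21 = 100, returning [100].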
cpdef np.ndarray[np.uint64_t, ndim=1] get_plateau_lengths(float[:] rect_data, float center, int percentage=25):
if len(rect_data) == 0 or center is None:
return np.array([], dtype=np.uint64)
cdef int state, new_state
state = -1 if rect_data[0] <= center else 1
cdef unsigned long long plateau_length = 0
cdef unsigned long long current_sum = 0
cdef unsigned long long i = 0
cdef unsigned long long len_data = len(rect_data)
cdef float sample
cdef array.array result = array.array('Q', [])
for i in range(0, len_data):
if current_sum >= percentage * len_data / 100:
break
sample = rect_data[i]
new_state = -1 if sample <= center else 1
if state == new_state:
plateau_length += 1
else:
result.append(plateau_length)
current_sum += plateau_length
state = new_state
plateau_length = 1
return np.array(result, dtype=np.uint64)
cdef float median(double[:] data, unsigned long start, unsigned long data_len, unsigned int k=3) nogil:
cdef unsigned long i, j
if start + k > data_len:
k = data_len - start
cdef float* buffer = <float *>malloc(k * sizeof(float))
for i in range(0, k):
buffer[i] = data[start+i]
sort(&buffer[0], (&buffer[0]) + k)
try:
return buffer[k//2]
finally:
free(buffer)
cpdef np.ndarray[np.float32_t, ndim=1] median_filter(double[:] data, unsigned int k=3):
cdef long long start, end, i, n = len(data)
cdef np.ndarray[np.float32_t, ndim=1] result = np.zeros(n, dtype=np.float32)
for i in prange(0, n, nogil=True, schedule='static'):
if i < k // 2:
start = 0
else:
start = i - k // 2
result[i] = median(data, start=i, data_len=n, k=k)
return result
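# Illustrative usage sketch (assumption, not part of the original file):
#
#     data = np.array([1.0, 9.0, 1.0, 1.0, 1.0, 1.0], dtype=np.float64)
#     median_filter(data, k=3)   # -> all ones: the single 9.0 spike is removed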

View File

@@ -0,0 +1,369 @@
# noinspection PyUnresolvedReferences
cimport numpy as np
import numpy as np
from libc.math cimport floor, ceil, pow
from libc.stdlib cimport malloc, free
from libcpp cimport bool
from libc.stdint cimport uint8_t, uint32_t, int32_t, int64_t
from urh.cythonext.util import crc
from urh.cythonext.util cimport bit_array_to_number
cpdef set find_longest_common_sub_sequence_indices(np.uint8_t[::1] seq1, np.uint8_t[::1] seq2):
cdef unsigned int i, j, longest = 0, counter = 0, len_bits1 = len(seq1), len_bits2 = len(seq2)
cdef unsigned short max_results = 10, current_result = 0
cdef unsigned int[:, ::1] m = np.zeros((len_bits1+1, len_bits2+1), dtype=np.uint32, order="C")
cdef unsigned int[:, ::1] result_indices = np.zeros((max_results, 2), dtype=np.uint32, order="C")
for i in range(0, len_bits1):
for j in range(0, len_bits2):
if seq1[i] == seq2[j]:
counter = m[i, j] + 1
m[i+1, j+1] = counter
if counter > longest:
longest = counter
current_result = 0
result_indices[current_result, 0] = i - counter + 1
result_indices[current_result, 1] = i + 1
elif counter == longest:
if current_result < max_results - 1:
current_result += 1
result_indices[current_result, 0] = i - counter + 1
result_indices[current_result, 1] = i + 1
cdef set result = set()
for i in range(current_result+1):
result.add((result_indices[i, 0], result_indices[i, 1]))
return result
cpdef uint32_t find_first_difference(uint8_t[::1] bits1, uint8_t[::1] bits2, uint32_t len_bits1, uint32_t len_bits2) nogil:
cdef uint32_t i, smaller_len = min(len_bits1, len_bits2)
for i in range(0, smaller_len):
if bits1[i] != bits2[i]:
return i
return smaller_len
cpdef np.ndarray[np.uint32_t, ndim=2, mode="c"] get_difference_matrix(list bitvectors):
cdef uint32_t i, j, N = len(bitvectors)
cdef np.ndarray[np.uint32_t, ndim=2, mode="c"] result = np.zeros((N, N), dtype=np.uint32, order="C")
cdef uint8_t[::1] bitvector_i
cdef uint32_t len_bitvector_i
for i in range(N):
bitvector_i = bitvectors[i]
len_bitvector_i = len(bitvector_i)
for j in range(i + 1, N):
result[i, j] = find_first_difference(bitvector_i, bitvectors[j], len_bitvector_i, len(bitvectors[j]))
return result
cpdef list get_hexvectors(list bitvectors):
cdef list result = []
cdef uint8_t[::1] bitvector
cdef size_t i, j, M, N = len(bitvectors)
cdef np.ndarray[np.uint8_t, mode="c"] hexvector
cdef size_t len_bitvector
for i in range(0, N):
bitvector = bitvectors[i]
len_bitvector = len(bitvector)
M = <size_t>ceil(len_bitvector / 4)
hexvector = np.zeros(M, dtype=np.uint8, order="C")
for j in range(0, M):
hexvector[j] = bit_array_to_number(bitvector, min(len_bitvector, 4*j+4), 4*j)
result.append(hexvector)
return result
cdef int lower_multiple_of_n(int number, int n) nogil:
return n * <int>floor(number / n)
cdef int64_t find(uint8_t[:] data, int64_t len_data, uint8_t element, int64_t start=0) nogil:
cdef int64_t i
for i in range(start, len_data):
if data[i] == element:
return i
return -1
cpdef tuple get_raw_preamble_position(uint8_t[:] bitvector):
cdef int64_t N = len(bitvector)
if N == 0:
return 0, 0
cdef int64_t i, j, n, m, start = -1
cdef double k = 0
cdef int64_t lower = 0, upper = 0
cdef uint8_t a, b
cdef uint8_t* preamble_pattern = NULL
cdef int64_t len_preamble_pattern, preamble_end
cdef bool preamble_end_reached
while k < 2 and start < N:
start += 1
a = bitvector[start]
b = 1 if a == 0 else 0
# now we search for the pattern a^n b^m
n = find(bitvector, N, b, start) - start
if n <= 0:
return 0, 0, 0
m = find(bitvector, N, a, start+n) - n - start
if m <= 0:
return 0, 0, 0
#preamble_pattern = a * n + b * m
len_preamble_pattern = n + m
preamble_pattern = <uint8_t*> malloc(len_preamble_pattern * sizeof(uint8_t))
for j in range(0, n):
preamble_pattern[j] = a
for j in range(n, len_preamble_pattern):
preamble_pattern[j] = b
preamble_end = start
preamble_end_reached = False
for i in range(start, N, len_preamble_pattern):
if preamble_end_reached:
break
for j in range(0, len_preamble_pattern):
if bitvector[i+j] != preamble_pattern[j]:
preamble_end_reached = True
preamble_end = i
break
free(preamble_pattern)
upper = start + lower_multiple_of_n(preamble_end + 1 - start, len_preamble_pattern)
lower = upper - len_preamble_pattern
k = (upper - start) / len_preamble_pattern
if k > 2:
return start, lower, upper
else:
# no preamble found
return 0, 0, 0
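# Illustrative example (comment only, not part of the original file): for a
# bitvector starting with 1,0,1,0,... the detected pattern is a^n b^m = "10",
# e.g. [1,0,1,0,1,0,1,0,1,1,0,0] -> (start=0, lower=6, upper=8): the preamble is
# made of full repetitions of "10" and ends somewhere between bit 6 and bit 8.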
cpdef dict find_possible_sync_words(np.ndarray[np.uint32_t, ndim=2, mode="c"] difference_matrix,
np.ndarray[np.uint32_t, ndim=2, mode="c"] raw_preamble_positions,
list bitvectors, int n_gram_length):
cdef dict possible_sync_words = dict()
cdef uint32_t i, j, num_rows = difference_matrix.shape[0], num_cols = difference_matrix.shape[1]
cdef uint32_t sync_len, sync_end, start, index, k, n
cdef bytes sync_word
cdef np.ndarray[np.uint8_t, mode="c"] bitvector
cdef uint8_t ij_ctr = 0
cdef uint32_t* ij_arr = <uint32_t*>malloc(2 * sizeof(uint32_t))
cdef uint8_t* temp = NULL
for i in range(0, num_rows):
for j in range(i + 1, num_cols):
# position of first difference between message i and j
sync_end = difference_matrix[i, j]
if sync_end == 0:
continue
ij_arr[0] = i
ij_arr[1] = j
for k in range(0, 2):
for ij_ctr in range(0, 2):
index = ij_arr[ij_ctr]
start = raw_preamble_positions[index, 0] + raw_preamble_positions[index, k + 1]
                    # We take the next lower multiple of n for the sync length.
                    # When in doubt, it is better to underestimate the sync length to prevent it from
                    # taking needed values from other fields, e.g. leading zeros of a length field.
sync_len = max(0, lower_multiple_of_n(sync_end - start, n_gram_length))
if sync_len >= 2:
bitvector = bitvectors[index]
if sync_len == 2:
                            # A sync word must not be empty or, if it is just two bits long, equal to
                            # "10" or "01", because that would be indistinguishable from the preamble
if bitvector[start] == 0 and bitvector[start+1] == 1:
continue
if bitvector[start] == 1 and bitvector[start+1] == 0:
continue
temp = <uint8_t*>malloc(sync_len * sizeof(uint8_t))
for n in range(0, sync_len):
temp[n] = bitvector[start+n]
sync_word = <bytes> temp[:sync_len]
free(temp)
possible_sync_words.setdefault(sync_word, 0)
if (start + sync_len) % n_gram_length == 0:
# if sync end aligns nicely at n gram length give it a larger score
possible_sync_words[sync_word] += 1
else:
possible_sync_words[sync_word] += 0.5
free(ij_arr)
return possible_sync_words
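# Illustrative comment (not part of the original file): for each message pair the
# sync word candidate is the region between the end of the raw preamble and the
# first differing bit, truncated down to a multiple of n_gram_length; candidates
# whose end falls on the n-gram grid receive a full point, all others half a point.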
cpdef np.ndarray[np.float64_t] create_difference_histogram(list vectors, list active_indices):
"""
    Return a histogram of common ranges. E.g. [1, 1, 0.75, 0.8] means 75% of the values in the third column are equal across all compared vector pairs.
    :param vectors: Vectors from whose pairwise comparisons the histogram is created
    :param active_indices: Active indices of vectors. Vectors whose index is not in this list are ignored.
    :return: Histogram of the relative frequency of equal values per column
"""
cdef unsigned long i,j,k,index_i,index_j, L = len(active_indices)
cdef unsigned long longest = 0, len_vector, len_vector_i
for i in active_indices:
len_vector = len(vectors[i])
if len_vector > longest:
longest = len_vector
cdef np.ndarray[np.float64_t] histogram = np.zeros(longest, dtype=np.float64)
cdef double n = (len(active_indices) * (len(active_indices) - 1)) // 2
cdef np.ndarray[np.uint8_t] bitvector_i, bitvector_j
for i in range(0, L - 1):
index_i = active_indices[i]
bitvector_i = vectors[index_i]
len_vector_i = len(bitvector_i)
for j in range(i+1, L):
index_j = active_indices[j]
bitvector_j = vectors[index_j]
for k in range(0, <size_t>min(len_vector_i, <size_t>len(bitvector_j))):
if bitvector_i[k] == bitvector_j[k]:
histogram[k] += 1 / n
return histogram
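# Illustrative example (comment only, not part of the original file):
#
#     v = [np.array([1, 0, 1, 1], dtype=np.uint8),
#          np.array([1, 0, 0, 1], dtype=np.uint8),
#          np.array([1, 1, 0, 1], dtype=np.uint8)]
#     create_difference_histogram(v, [0, 1, 2])
#     # -> [1.0, 0.33, 0.33, 1.0]: columns 0 and 3 agree in all three pairs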
cpdef list find_occurrences(np.uint8_t[::1] a, np.uint8_t[::1] b,
unsigned long[:] ignore_indices=None, bool return_after_first=False):
"""
Find the indices of occurrences of b in a.
:param a: Larger array
    :param b: Subarray to search for
    :param ignore_indices: Positions in a that must not be part of a match (optional)
    :param return_after_first: If True, stop after the first occurrence and return only its index
    :return: List of start indices of b in a
"""
cdef unsigned long i, j
cdef unsigned long len_a = len(a), len_b = len(b)
cdef bool ignore_indices_present = ignore_indices is not None
if len_b > len_a:
return []
cdef list result = []
cdef bool found
for i in range(0, (len_a-len_b) + 1):
found = True
for j in range(0, len_b):
if ignore_indices_present:
if i+j in ignore_indices:
found = False
break
if a[i+j] != b[j]:
found = False
break
if found:
if return_after_first:
return [i]
else:
result.append(i)
return result
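# Illustrative example (comment only, not part of the original file):
#
#     a = np.array([0, 1, 0, 1, 1, 0, 1, 1], dtype=np.uint8)
#     b = np.array([0, 1, 1], dtype=np.uint8)
#     find_occurrences(a, b)                            # -> [2, 5]
#     find_occurrences(a, b, return_after_first=True)   # -> [2]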
cpdef np.ndarray[np.int32_t, ndim=2, mode="c"] create_seq_number_difference_matrix(list bitvectors, int n_gram_length):
"""
Create the difference matrix e.g.
10 20 0
1 2 3
4 5 6
    means the first eight bits of messages 1 and 2 (row 1) differ by 10 when interpreted as decimal numbers
:type bitvectors: list of np.ndarray
:type n_gram_length: int
:rtype: np.ndarray
"""
cdef size_t max_len = len(max(bitvectors, key=len))
cdef size_t i, j, k, index, N = len(bitvectors), M = <size_t>ceil(max_len / n_gram_length)
cdef uint8_t[::1] bv1, bv2
cdef size_t len_bv1, len_bv2
cdef int32_t diff
cdef int32_t n_gram_power_two = <int32_t>pow(2, n_gram_length)
cdef np.ndarray[np.int32_t, ndim=2, mode="c"] result = np.full((N - 1, M), -1, dtype=np.int32)
for i in range(1, N):
bv1 = bitvectors[i - 1]
bv2 = bitvectors[i]
len_bv1 = len(bv1)
len_bv2 = len(bv2)
k = min(len_bv1, len_bv2)
for j in range(0, k, n_gram_length):
index = j / n_gram_length
if index < M:
diff = bit_array_to_number(bv2, min(len_bv2, j + n_gram_length), j) -\
bit_array_to_number(bv1, min(len_bv1, j+n_gram_length), j)
# add + n_gram_power_two because in C modulo can be negative
result[i - 1, index] = (diff + n_gram_power_two) % n_gram_power_two
return result
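# Illustrative comment (not part of the original file): with n_gram_length = 8
# consecutive messages are compared byte by byte, e.g. if byte 2 of message 1
# decodes to 0x10 and byte 2 of message 2 to 0x13, then result[0, 2] == 3
# (differences are taken modulo 2**n_gram_length); a constant step such as +1
# down a column is a strong hint for a sequence number field.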
cpdef set check_crc_for_messages(list message_indices, list bitvectors,
unsigned long data_start, unsigned long data_stop,
unsigned long crc_start, unsigned long crc_stop,
unsigned char[:] crc_polynomial, unsigned char[:] crc_start_value,
unsigned char[:] crc_final_xor,
bool crc_lsb_first, bool crc_reverse_polynomial,
bool crc_reverse_all, bool crc_little_endian):
"""
    Check a configurable subset of bitvectors for a matching CRC and return the indices of the
    vectors that match the CRC computed with the given parameters
    :return: Set of message indices whose embedded CRC matches the computed one
"""
cdef set result = set()
cdef unsigned long j, index, end = len(message_indices)
cdef np.ndarray[np.uint8_t] bits
cdef unsigned char[:] crc_input
cdef unsigned long long check
for j in range(0, end):
index = message_indices[j]
bits = bitvectors[index]
crc_input = bits[data_start:data_stop]
#check = int("".join(map(str, bits[crc_start:crc_stop])), 2)
check = bit_array_to_number(bits[crc_start:crc_stop], crc_stop - crc_start)
if crc(crc_input, crc_polynomial, crc_start_value, crc_final_xor,
crc_lsb_first, crc_reverse_polynomial,
crc_reverse_all, crc_little_endian) == check:
result.add(index)
return result
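# Illustrative comment (not part of the original file): polynomial, start value
# and final XOR are passed as uint8 bit arrays, e.g. the CCITT polynomial
# x^16 + x^12 + x^5 + 1 would be given as the 17-bit array
# [1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1]; a message index is returned only if the
# CRC over bits[data_start:data_stop] equals the value stored in
# bits[crc_start:crc_stop].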

View File

@@ -0,0 +1,18 @@
import os
import sys
import tempfile
from subprocess import call
build_dir = os.path.join(tempfile.gettempdir(), "build")
def main():
cur_dir = os.path.realpath(__file__)
os.chdir(os.path.realpath(os.path.join(cur_dir, "..", "..", "..", "..")))
# call([sys.executable, "setup.py", "clean", "--all"])
rc = call([sys.executable, "setup.py", "build_ext", "--inplace", "-j{}".format(os.cpu_count())])
return rc
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,125 @@
# noinspection PyUnresolvedReferences
cimport numpy as np
import numpy as np
from PyQt5.QtCore import QByteArray, QDataStream
from PyQt5.QtGui import QPainterPath
# As we do not use any numpy C API functions, we do not call import_array here,
# because it can lead to OS X error: https://github.com/jopohl/urh/issues/273
# np.import_array()
from cython.parallel import prange
from urh.cythonext.util cimport iq
from urh import settings
import cython
import math
import struct
cpdef create_path(iq[:] samples, long long start, long long end, list subpath_ranges=None):
cdef iq[:] values
cdef long long[::1] sample_rng
cdef np.int64_t[::1] x
cdef iq sample, minimum, maximum, tmp
cdef float scale_factor
cdef long long i,j,index, chunk_end, num_samples, pixels_on_path, samples_per_pixel
num_samples = end - start
cdef dict type_lookup = {"char[:]": np.int8, "unsigned char[:]": np.uint8,
"short[:]": np.int16, "unsigned short[:]": np.uint16,
"float[:]": np.float32, "double[:]": np.float64}
subpath_ranges = [(start, end)] if subpath_ranges is None else subpath_ranges
pixels_on_path = settings.PIXELS_PER_PATH
samples_per_pixel = <long long>(num_samples / pixels_on_path)
cdef int num_threads = 0
if samples_per_pixel < 20000:
num_threads = 1
if samples_per_pixel > 1:
sample_rng = np.arange(start, end, samples_per_pixel, dtype=np.int64)
values = np.zeros(2 * len(sample_rng), dtype=type_lookup[cython.typeof(samples)], order="C")
scale_factor = num_samples / (2.0 * len(sample_rng)) # 2.0 is important to make it a float division!
for i in prange(start, end, samples_per_pixel, nogil=True, schedule='static', num_threads=num_threads):
chunk_end = i + samples_per_pixel
if chunk_end >= end:
chunk_end = end
tmp = samples[i]
minimum = tmp
maximum = tmp
for j in range(i + 1, chunk_end):
sample = samples[j]
if sample < minimum:
minimum = sample
elif sample > maximum:
maximum = sample
index = <long long>(2*(i-start)/samples_per_pixel)
values[index] = minimum
values[index + 1] = maximum
x = np.repeat(sample_rng, 2)
else:
x = np.arange(start, end, dtype=np.int64)
values = samples[start:end]
scale_factor = 1.0
cdef list result = []
if scale_factor == 0:
scale_factor = 1 # prevent division by zero
for subpath_range in subpath_ranges:
sub_start = ((((subpath_range[0]-start)/scale_factor) * scale_factor) - 2*scale_factor) / scale_factor
        sub_start = int(max(0, math.floor(sub_start)))
sub_end = ((((subpath_range[1]-start)/scale_factor) * scale_factor) + 2*scale_factor) / scale_factor
sub_end = int(max(0, math.ceil(sub_end)))
result.append(array_to_QPath(x[sub_start:sub_end], values[sub_start:sub_end]))
return result
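# Illustrative comment (not part of the original file): when more samples than
# settings.PIXELS_PER_PATH are visible, every chunk of samples_per_pixel samples
# is reduced to its (min, max) pair, so drawn peaks are preserved while the
# number of path vertices stays bounded at roughly 2 * PIXELS_PER_PATH.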
cpdef create_live_path(iq[:] samples, unsigned int start, unsigned int end):
return array_to_QPath(np.arange(start, end).astype(np.int64), samples)
cpdef array_to_QPath(np.int64_t[:] x, y):
"""
Convert an array of x,y coordinates to QPainterPath as efficiently as possible.
    The packed byte buffer is deserialized via the QDataStream >> operator to speed this up.
Format is:
numVerts(i4) 0(i4)
x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
...
0(i4)
All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
"""
cdef long long n = x.shape[0]
arr = np.zeros(n + 2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
byte_view = arr.view(dtype=np.uint8)
byte_view[:12] = 0
byte_view.data[12:20] = struct.pack('>ii', n, 0)
arr[1:n+1]['x'] = x
arr[1:n+1]['y'] = np.negative(y) # negate y since coordinate system is inverted
arr[1:n+1]['c'] = 1
cdef long long last_index = 20 * (n + 1)
byte_view.data[last_index:last_index + 4] = struct.pack('>i', 0)
try:
buf = QByteArray.fromRawData(byte_view.data[12:last_index + 4])
except TypeError:
buf = QByteArray(byte_view.data[12:last_index + 4])
path = QPainterPath()
ds = QDataStream(buf)
ds >> path
return path
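# Illustrative comment (not part of the original file): the buffer handed to
# QDataStream mirrors QPainterPath's serialization format, e.g. for n = 2 points
# it contains int32 numVerts=2, int32 0, then per vertex float64 x, float64 y,
# int32 connect flag, and a final int32 0; all values big endian.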

View File

@@ -0,0 +1,542 @@
# noinspection PyUnresolvedReferences
cimport numpy as np
import cython
import numpy as np
from libcpp cimport bool
from libc.stdint cimport uint8_t, uint16_t, uint32_t, int64_t
from libc.stdio cimport printf
from libc.stdlib cimport malloc, free
from urh.cythonext.util cimport IQ, iq, bit_array_to_number
from cython.parallel import prange
from libc.math cimport atan2, sqrt, M_PI, abs
cdef extern from "math.h" nogil:
float cosf(float x)
float acosf(float x)
float sinf(float x)
cdef extern from "complex.h" namespace "std" nogil:
float arg(float complex x)
float complex conj(float complex x)
# As we do not use any numpy C API functions, we do not call import_array here,
# because it can lead to OS X error: https://github.com/jopohl/urh/issues/273
# np.import_array()
cdef int64_t PAUSE_STATE = -1
cdef float complex imag_unit = 1j
cdef float NOISE_FSK_PSK = -4.0
cdef float NOISE_ASK = 0.0
cdef float get_noise_for_mod_type(str mod_type):
if mod_type == "ASK":
return NOISE_ASK
elif mod_type == "FSK":
return NOISE_FSK_PSK
elif mod_type == "PSK" or mod_type == "OQPSK":
return NOISE_FSK_PSK
elif mod_type == "QAM":
return NOISE_ASK * NOISE_FSK_PSK
else:
return 0
cdef get_numpy_dtype(iq cython_type):
if str(cython.typeof(cython_type)) == "char":
return np.int8
elif str(cython.typeof(cython_type)) == "short":
return np.int16
elif str(cython.typeof(cython_type)) == "float":
return np.float32
else:
raise ValueError("dtype {} not supported for modulation".format(cython.typeof(cython_type)))
cpdef modulate_c(uint8_t[:] bits, uint32_t samples_per_symbol, str modulation_type,
float[:] parameters, uint16_t bits_per_symbol,
float carrier_amplitude, float carrier_frequency, float carrier_phase, float sample_rate,
uint32_t pause, uint32_t start, dtype=np.float32,
float gauss_bt=0.5, float filter_width=1.0):
if dtype == np.int8:
return __modulate(
bits, samples_per_symbol, modulation_type, parameters, bits_per_symbol, carrier_amplitude,
carrier_frequency, carrier_phase, sample_rate, pause, start, <char>0, gauss_bt, filter_width
)
elif dtype == np.int16:
return __modulate(
bits, samples_per_symbol, modulation_type, parameters, bits_per_symbol, carrier_amplitude,
carrier_frequency, carrier_phase, sample_rate, pause, start, <short>0, gauss_bt, filter_width
)
elif dtype == np.float32:
return __modulate(
bits, samples_per_symbol, modulation_type, parameters, bits_per_symbol, carrier_amplitude,
carrier_frequency, carrier_phase, sample_rate, pause, start, <float>0.0, gauss_bt, filter_width
)
else:
raise ValueError("Unsupported dtype for modulation {}".format(dtype))
cpdef __modulate(uint8_t[:] bits, uint32_t samples_per_symbol, str modulation_type,
float[:] parameters, uint16_t bits_per_symbol,
float carrier_amplitude, float carrier_frequency, float carrier_phase, float sample_rate,
uint32_t pause, uint32_t start, iq iq_type,
float gauss_bt=0.5, float filter_width=1.0):
cdef int64_t i = 0, j = 0, index = 0, prev_index=0, s_i = 0, num_bits = len(bits)
cdef uint32_t total_symbols = int(num_bits // bits_per_symbol)
cdef int64_t total_samples = total_symbols * samples_per_symbol + pause
cdef float a = carrier_amplitude, f = carrier_frequency, phi = carrier_phase
cdef float f_previous = 0, phase_correction = 0
cdef float t = 0, current_arg = 0
result = np.zeros((total_samples, 2), dtype=get_numpy_dtype(iq_type))
if num_bits == 0:
return result
cdef iq[:, ::1] result_view = result
cdef bool is_fsk = modulation_type.lower() == "fsk"
cdef bool is_ask = modulation_type.lower() == "ask"
cdef bool is_psk = modulation_type.lower() == "psk"
cdef bool is_oqpsk = modulation_type.lower() == "oqpsk"
cdef bool is_gfsk = modulation_type.lower() == "gfsk"
assert is_fsk or is_ask or is_psk or is_gfsk or is_oqpsk
cdef uint8_t[:] oqpsk_bits
if is_oqpsk:
assert bits_per_symbol == 2
bits = get_oqpsk_bits(bits)
cdef np.ndarray[np.float32_t, ndim=2] gauss_filtered_freqs_phases
if is_gfsk:
gauss_filtered_freqs_phases = get_gauss_filtered_freqs_phases(bits, parameters, total_symbols,
samples_per_symbol, sample_rate, carrier_phase,
start, gauss_bt, filter_width)
cdef float* phase_corrections = NULL
if is_fsk and total_symbols > 0:
phase_corrections = <float*>malloc(total_symbols * sizeof(float))
phase_corrections[0] = 0.0
for s_i in range(1, total_symbols):
# Add phase correction to FSK modulation in order to prevent spiky jumps
index = bit_array_to_number(bits, end=(s_i+1)*bits_per_symbol, start=s_i*bits_per_symbol)
prev_index = bit_array_to_number(bits, end=s_i*bits_per_symbol, start=(s_i-1)*bits_per_symbol)
f = parameters[index]
f_previous = parameters[prev_index]
if f != f_previous:
t = (s_i*samples_per_symbol+start-1) / sample_rate
phase_corrections[s_i] = (phase_corrections[s_i-1] + 2 * M_PI * (f_previous-f) * t) % (2 * M_PI)
else:
phase_corrections[s_i] = phase_corrections[s_i-1]
for s_i in prange(0, total_symbols, schedule="static", nogil=True):
index = bit_array_to_number(bits, end=(s_i+1)*bits_per_symbol, start=s_i*bits_per_symbol)
a = carrier_amplitude
f = carrier_frequency
phi = carrier_phase
phase_correction = 0
if is_ask:
a = parameters[index]
if a == 0:
continue
elif is_fsk:
f = parameters[index]
phase_correction = phase_corrections[s_i]
elif is_psk or is_oqpsk:
phi = parameters[index]
for i in range(s_i * samples_per_symbol, (s_i+1)*samples_per_symbol):
t = (i+start) / sample_rate
if is_gfsk:
f = gauss_filtered_freqs_phases[i, 0]
phi = gauss_filtered_freqs_phases[i, 1]
current_arg = 2 * M_PI * f * t + phi + phase_correction
result_view[i, 0] = <iq>(a * cosf(current_arg))
result_view[i, 1] = <iq>(a * sinf(current_arg))
if is_oqpsk:
for i in range(0, samples_per_symbol):
result_view[i, 1] = 0
for i in range(total_samples-pause-samples_per_symbol, total_samples-pause):
result_view[i, 0] = 0
if phase_corrections != NULL:
free(phase_corrections)
return result
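# Illustrative comment (not part of the original file): the FSK phase correction
# above picks phase_corrections[s_i] such that
# 2*pi*f_new*t + corr[s_i] == 2*pi*f_prev*t + corr[s_i-1] (mod 2*pi) at the
# symbol boundary t, i.e. the carrier phase stays continuous and no spikes
# appear in the generated signal when the frequency changes.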
cpdef uint8_t[:] get_oqpsk_bits(uint8_t[:] original_bits):
# TODO: This method does not work correctly. Fix it when we have a test signal
cdef int64_t i, num_bits = len(original_bits)
if num_bits == 0:
return np.zeros(0, dtype=np.uint8)
result = np.zeros(num_bits+2, dtype=np.uint8)
result[0] = original_bits[0]
result[num_bits+2-1] = original_bits[num_bits-1]
for i in range(2, num_bits-2, 2):
result[i] = original_bits[i]
result[i+1] = original_bits[i-1]
return result
cdef np.ndarray[np.float32_t, ndim=2] get_gauss_filtered_freqs_phases(uint8_t[:] bits, float[:] parameters,
uint32_t num_symbols, uint32_t samples_per_symbol,
float sample_rate, float phi, uint32_t start,
float gauss_bt, float filter_width):
cdef int64_t i, s_i, index, num_values = num_symbols * samples_per_symbol
cdef np.ndarray[np.float32_t, ndim=1] frequencies = np.empty(num_values, dtype=np.float32)
cdef uint16_t bits_per_symbol = int(len(bits) // num_symbols)
for s_i in range(0, num_symbols):
index = bit_array_to_number(bits, end=(s_i+1)*bits_per_symbol, start=s_i*bits_per_symbol)
for i in range(s_i * samples_per_symbol, (s_i+1)*samples_per_symbol):
frequencies[i] = parameters[index]
cdef np.ndarray[np.float32_t, ndim=1] t = np.arange(start, start + num_values, dtype=np.float32) / sample_rate
cdef np.ndarray[np.float32_t, ndim=1] gfir = gauss_fir(sample_rate, samples_per_symbol,
bt=gauss_bt, filter_width=filter_width)
if len(frequencies) >= len(gfir):
frequencies = np.convolve(frequencies, gfir, mode="same")
else:
        # Prevent a dimension crash later, because the Gaussian finite impulse response is longer than the frequency vector
frequencies = np.convolve(gfir, frequencies, mode="same")[:len(frequencies)]
cdef np.ndarray[np.float32_t, ndim=1] phases = np.zeros(len(frequencies), dtype=np.float32)
phases[0] = phi
for i in range(0, len(phases) - 1):
# Correct the phase to prevent spiky jumps
phases[i + 1] = 2 * M_PI * t[i] * (frequencies[i] - frequencies[i + 1]) + phases[i]
return np.column_stack((frequencies, phases))
cdef np.ndarray[np.float32_t, ndim=1] gauss_fir(float sample_rate, uint32_t samples_per_symbol,
float bt=.5, float filter_width=1.0):
"""
    :param filter_width: Filter width in symbols; the FIR spans +/- filter_width symbol durations
    :param bt: normalized 3-dB bandwidth-symbol time product
    :return: Normalized (unit sum) Gaussian FIR taps
"""
# http://onlinelibrary.wiley.com/doi/10.1002/9780470041956.app2/pdf
cdef np.ndarray[np.float32_t] k = np.arange(-int(filter_width * samples_per_symbol),
int(filter_width * samples_per_symbol) + 1,
dtype=np.float32)
cdef float ts = samples_per_symbol / sample_rate # symbol time
cdef np.ndarray[np.float32_t] h = np.sqrt((2 * np.pi) / (np.log(2))) * bt / ts * np.exp(
-(((np.sqrt(2) * np.pi) / np.sqrt(np.log(2)) * bt * k / samples_per_symbol) ** 2))
return h / h.sum()
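# Illustrative comment (not part of the original file): the taps above implement
# the Gaussian pulse h(k) = sqrt(2*pi/ln(2)) * BT/Ts *
# exp(-((sqrt(2)*pi*BT*k) / (sqrt(ln(2)) * samples_per_symbol))**2), normalized
# to unit sum; convolving it with the per-sample frequency vector yields the
# smoothed GFSK frequency trajectory.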
cdef float clamp(float x) nogil:
if x < -1.0:
x = -1.0
elif x > 1.0:
x = 1.0
return x
cdef float[::1] costa_demod(IQ samples, float noise_sqrd, int loop_order, float bandwidth=0.1, float damping=sqrt(2.0) / 2.0):
cdef float alpha = (4 * damping * bandwidth) / (1.0 + 2.0 * damping * bandwidth + bandwidth * bandwidth)
cdef float beta = (4 * bandwidth * bandwidth) / (1.0 + 2.0 * damping * bandwidth + bandwidth * bandwidth)
cdef long long i = 0, num_samples = len(samples)
cdef float real = 0, imag = 0
cdef float scale, shift, real_float, imag_float, ref_real, ref_imag
cdef float f1, f2, costa_freq = 0, costa_error = 0, costa_phase = 1.5
cdef float complex current_sample, nco_out, nco_times_sample
cdef float[::1] result = np.empty(num_samples, dtype=np.float32)
if str(cython.typeof(samples)) == "char[:, ::1]":
scale = 127.5
shift = 0.5
elif str(cython.typeof(samples)) == "unsigned char[:, ::1]":
scale = 127.5
shift = -127.5
elif str(cython.typeof(samples)) == "short[:, ::1]":
scale = 32767.5
shift = 0.5
elif str(cython.typeof(samples)) == "unsigned short[:, ::1]":
scale = 65535.0
shift = -32767.5
elif str(cython.typeof(samples)) == "float[:, ::1]":
scale = 1.0
shift = 0.0
else:
raise ValueError("Unsupported dtype")
if loop_order > 4:
        # TODO: Adapt this when PSK demodulation with order > 4 is to be supported
loop_order = 4
for i in range(1, num_samples):
real = samples[i, 0]
imag = samples[i, 1]
if real * real + imag * imag <= noise_sqrd:
result[i] = NOISE_FSK_PSK
continue
real_float = (real + shift) / scale
imag_float = (imag + shift) / scale
current_sample = real_float + imag_unit * imag_float
nco_out = cosf(-costa_phase) + imag_unit * sinf(-costa_phase)
nco_times_sample = nco_out * current_sample
if loop_order == 2:
costa_error = nco_times_sample.imag * nco_times_sample.real
elif loop_order == 4:
f1 = 1.0 if nco_times_sample.real > 0.0 else -1.0
f2 = 1.0 if nco_times_sample.imag > 0.0 else -1.0
costa_error = f1 * nco_times_sample.imag - f2 * nco_times_sample.real
costa_error = clamp(costa_error)
# advance the loop
costa_freq += beta * costa_error
costa_phase += costa_freq + alpha * costa_error
# wrap the phase
while costa_phase > (2 * M_PI):
costa_phase -= 2 * M_PI
while costa_phase < (-2 * M_PI):
costa_phase += 2 * M_PI
costa_freq = clamp(costa_freq)
if loop_order == 2:
result[i] = nco_times_sample.real
elif loop_order == 4:
result[i] = 2 * nco_times_sample.real + nco_times_sample.imag
return result
cpdef np.ndarray[np.float32_t, ndim=1] afp_demod(IQ samples, float noise_mag,
str mod_type, int mod_order, float costas_loop_bandwidth=0.1):
if len(samples) <= 2:
return np.zeros(len(samples), dtype=np.float32)
cdef long long i = 0, ns = len(samples)
cdef float NOISE = get_noise_for_mod_type(mod_type)
cdef float noise_sqrd = noise_mag * noise_mag, real = 0, imag = 0, magnitude = 0, max_magnitude
cdef float complex tmp
if str(cython.typeof(samples)) == "char[:, ::1]":
max_magnitude = sqrt(127*127 + 128*128)
elif str(cython.typeof(samples)) == "unsigned char[:, ::1]":
max_magnitude = sqrt(255*255)
elif str(cython.typeof(samples)) == "short[:, ::1]":
max_magnitude = sqrt(32768*32768 + 32767*32767)
elif str(cython.typeof(samples)) == "unsigned short[:, ::1]":
max_magnitude = sqrt(65535*65535)
elif str(cython.typeof(samples)) == "float[:, ::1]":
max_magnitude = sqrt(2)
else:
raise ValueError("Unsupported dtype")
if mod_type == "PSK":
return np.asarray(costa_demod(samples, noise_sqrd, mod_order, bandwidth=costas_loop_bandwidth))
cdef float[::1] result = np.zeros(ns, dtype=np.float32, order="C")
result[0] = NOISE
for i in prange(1, ns, nogil=True, schedule="static"):
real = samples[i, 0]
imag = samples[i, 1]
magnitude = real * real + imag * imag
        if magnitude <= noise_sqrd:  # |c|^2 <= noise_sqrd, i.e. the sample is considered noise
result[i] = NOISE
continue
if mod_type == "ASK":
result[i] = sqrt(magnitude) / max_magnitude
elif mod_type == "FSK":
#tmp = samples[i - 1].conjugate() * c
tmp = (samples[i-1, 0] - imag_unit * samples[i-1, 1]) * (real + imag_unit * imag)
result[i] = atan2(tmp.imag, tmp.real) # Freq
return np.asarray(result)
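# Illustrative comment (not part of the original file): for FSK the instantaneous
# frequency is estimated as atan2(imag, real) of conj(s[i-1]) * s[i], i.e. the
# phase difference of consecutive samples; for ASK the normalized magnitude
# sqrt(I*I + Q*Q) / max_magnitude is used, and PSK is delegated to the Costas
# loop demodulator costa_demod() above.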
cpdef np.ndarray[np.float32_t, ndim=1] get_center_thresholds(float center, float spacing, int modulation_order):
cdef np.ndarray[np.float32_t, ndim=1] result = np.empty(modulation_order-1, dtype=np.float32)
cdef int i, n = modulation_order // 2
for i in range(0, n):
result[i] = center - (n-(i+1)) * spacing
for i in range(n, modulation_order-1):
result[i] = center + (i+1-n) * spacing
return result
cpdef int64_t[:, ::1] grab_pulse_lens(float[::1] samples, float center, uint16_t tolerance,
str modulation_type, uint32_t samples_per_symbol,
uint8_t bits_per_symbol=1, float center_spacing=0.1):
"""
    Get the pulse lengths after quadrature demodulation
    arr[i][0] gives the type of the symbol, e.g. arr[i][0] = 1 and arr[i][0] = 0 for binary modulation
    A pause is marked with arr[i][0] = -1
    arr[i][1] gives the length of the pulse in samples
"""
cdef bool is_ask = modulation_type == "ASK"
cdef int64_t i, j, pulse_length = 0, num_samples = len(samples)
cdef int64_t cur_index = 0, consecutive_ones = 0, consecutive_zeros = 0, consecutive_pause = 0
cdef float s = 0, s_prev = 0
cdef int cur_state = 0, new_state = 0, tmp_state = 0
cdef float NOISE = get_noise_for_mod_type(modulation_type)
cdef int modulation_order = 2**bits_per_symbol
cdef int k
cdef np.ndarray[np.float32_t, ndim=1] thresholds = get_center_thresholds(center, center_spacing, modulation_order)
cdef int64_t[:, ::1] result = np.zeros((num_samples, 2), dtype=np.int64, order="C")
if num_samples == 0:
return result
cdef int64_t[:] state_count = np.zeros(modulation_order, dtype=np.int64)
s_prev = samples[0]
if s_prev == NOISE:
cur_state = PAUSE_STATE
else:
cur_state = modulation_order - 1
for k in range(modulation_order - 1):
if s <= thresholds[k]:
cur_state = k
break
for i in range(num_samples):
pulse_length += 1
s = samples[i]
if s == NOISE:
tmp_state = PAUSE_STATE
else:
tmp_state = modulation_order - 1
for k in range(modulation_order - 1):
if s <= thresholds[k]:
tmp_state = k
break
if tmp_state == PAUSE_STATE:
consecutive_pause += 1
else:
consecutive_pause = 0
for j in range(0, modulation_order):
if j == tmp_state:
state_count[j] += 1
else:
state_count[j] = 0
if cur_state == tmp_state:
continue
new_state = -42
if consecutive_pause > tolerance:
new_state = PAUSE_STATE
else:
for j in range(0, modulation_order):
if state_count[j] > tolerance:
new_state = j
break
if new_state == -42:
continue
if is_ask and cur_state == PAUSE_STATE and (pulse_length - tolerance) < samples_per_symbol:
# Aggregate short pauses for ASK
cur_state = 0
if cur_index > 0 and result[cur_index - 1, 0] == cur_state:
result[cur_index - 1, 1] += pulse_length - tolerance
else:
result[cur_index, 0] = cur_state
result[cur_index, 1] = pulse_length - tolerance
cur_index += 1
pulse_length = tolerance
cur_state = new_state
# Append last one
cdef int64_t len_result = len(result)
if cur_index < len_result:
if cur_index > 0 and result[cur_index - 1, 0] == cur_state:
result[cur_index - 1, 1] += pulse_length - tolerance
else:
result[cur_index, 0] = cur_state
result[cur_index, 1] = pulse_length - tolerance
cur_index += 1
return result[:cur_index]
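# Illustrative comment (not part of the original file): for a binary FSK signal
# demodulated to 100 samples around -0.4 followed by 200 samples around +0.4
# with center 0.0, the result is roughly [[0, 100], [1, 200]] (symbol value,
# pulse length in samples); state changes shorter than `tolerance` samples are
# treated as glitches and do not start a new pulse.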
cpdef int find_nearest_center(float sample, float[::1] centers, int num_centers) nogil:
cdef int i = 0
cdef float center = 0
cdef int result = 0
cdef float min_diff = 99999
cdef float cur_diff = 0
for i in range(0, num_centers):
center = centers[i]
cur_diff = (sample - center) * (sample - center)
if cur_diff < min_diff:
min_diff = cur_diff
result = i
return result
cpdef np.ndarray[np.complex64_t, ndim=1] fir_filter(float complex[::1] input_samples, float complex[::1] filter_taps):
cdef int i = 0, j = 0
cdef int N = len(input_samples)
cdef int M = len(filter_taps)
cdef np.ndarray[np.complex64_t, ndim=1] output = np.zeros(N+M-1, dtype=np.complex64)
for i in range(N):
for j in range(M):
output[i+j] += input_samples[i] * filter_taps[j]
return output[:N]
cpdef np.ndarray[np.complex64_t, ndim=1] iir_filter(np.ndarray[np.float64_t, ndim=1] a,
np.ndarray[np.float64_t, ndim=1] b,
np.ndarray[np.complex64_t, ndim=1] signal):
cdef np.ndarray[np.complex64_t, ndim=1] result = np.zeros(len(signal), dtype=np.complex64)
cdef long n = 0, j = 0, k = 0
cdef long M = len(a)
cdef long N = len(b)
    for n in range(max(M, N+1), len(signal)):
for j in range(M):
result[n] += a[j] * signal[n-j]
for k in range(N):
result[n] += b[k] * result[n-1-k]
return result

View File

@@ -0,0 +1,12 @@
ctypedef fused iq:
char
unsigned char
short
unsigned short
float
ctypedef iq[:, ::1] IQ
from libc.stdint cimport uint64_t, uint8_t, int64_t
cpdef uint64_t bit_array_to_number(uint8_t[:] bits, int64_t end, int64_t start=*) nogil

View File

@@ -0,0 +1,340 @@
# noinspection PyUnresolvedReferences
cimport numpy as np
import numpy as np
# As we do not use any numpy C API functions, we do not call import_array here,
# because it can lead to OS X error: https://github.com/jopohl/urh/issues/273
# np.import_array()
from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t, int64_t
from libc.stdlib cimport malloc, calloc, free
from cython.parallel import prange
from libc.math cimport log10,pow,sqrt
from libcpp cimport bool
from cpython cimport array
import array
from urh.cythonext.util cimport iq
cpdef tuple minmax(iq[:] arr):
cdef long long i, ns = len(arr)
if ns == 0:
return 0, 0
cdef iq maximum = arr[0]
cdef iq minimum = arr[0]
cdef iq e
for i in range(1, ns):
e = arr[i]
if e > maximum:
maximum = e
if e < minimum:
minimum = e
return minimum, maximum
cpdef np.ndarray[np.float32_t, ndim=2] arr2decibel(np.ndarray[np.complex64_t, ndim=2] arr):
cdef long long x = arr.shape[0]
cdef long long y = arr.shape[1]
cdef long long i, j = 0
cdef np.ndarray[np.float32_t, ndim=2] result = np.empty((x,y), dtype=np.float32)
cdef np.float32_t factor = 10.0
for i in prange(x, nogil=True, schedule='static'):
for j in range(y):
result[i, j] = factor * log10(arr[i, j].real * arr[i, j].real + arr[i, j].imag * arr[i, j].imag)
return result
cpdef uint64_t bit_array_to_number(uint8_t[:] bits, int64_t end, int64_t start=0) nogil:
if end < 1:
return 0
cdef long long i, acc = 1
cdef unsigned long long result = 0
for i in range(start, end):
result += bits[end-1-i+start] * acc
acc *= 2
return result
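# Worked example (illustrative comment, not part of the original file):
# bit_array_to_number([1, 0, 1, 1], end=4) reads the bits MSB first and returns
# 11; bit_array_to_number([1, 0, 1, 1], end=4, start=2) uses only the last two
# bits and returns 3.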
cpdef uint64_t arr_to_number(uint8_t[:] inpt, bool reverse = False, unsigned int start = 0):
cdef uint64_t result = 0
cdef unsigned int i, len_inpt = len(inpt)
for i in range(start, len_inpt):
if not reverse:
if inpt[len_inpt - 1 - i + start]:
result |= (1 << (i-start))
else:
if inpt[i]:
result |= (1 << (i-start))
return result
cpdef uint64_t crc(uint8_t[:] inpt, uint8_t[:] polynomial, uint8_t[:] start_value, uint8_t[:] final_xor, bool lsb_first, bool reverse_polynomial, bool reverse_all, bool little_endian):
cdef unsigned int len_inpt = len(inpt)
cdef unsigned int i, idx, poly_order = len(polynomial)
cdef uint64_t crc_mask = <uint64_t> pow(2, poly_order - 1) - 1
cdef uint64_t poly_mask = (crc_mask + 1) >> 1
cdef uint64_t poly_int = arr_to_number(polynomial, reverse_polynomial, 1) & crc_mask
cdef unsigned short j, x
# start value
cdef uint64_t temp, crc = arr_to_number(start_value, False, 0) & crc_mask
for i in range(0, len_inpt+7, 8):
for j in range(0, 8):
if lsb_first:
idx = i + (7 - j)
else:
idx = i + j
# generic crc algorithm
if idx >= len_inpt:
break
if (crc & poly_mask > 0) != inpt[idx]:
crc = (crc << 1) & crc_mask
crc ^= poly_int
else:
crc = (crc << 1) & crc_mask
# final XOR
crc ^= arr_to_number(final_xor, False, 0) & crc_mask
# reverse all bits
if reverse_all:
temp = 0
for i in range(0, poly_order - 1):
if crc & (1 << i):
temp |= (1 << (poly_order - 2 - i))
crc = temp & crc_mask
# little endian encoding, different for 16, 32, 64 bit
if poly_order - 1 == 16 and little_endian:
crc = ((crc << 8) & 0xFF00) | (crc >> 8)
elif poly_order - 1 == 32 and little_endian:
crc = ((crc << 24) & <uint64_t>0xFF000000) | ((crc << 8) & 0x00FF0000) | ((crc >> 8) & 0x0000FF00) | (crc >> 24)
elif poly_order - 1 == 64 and little_endian:
crc = ((crc << 56) & <uint64_t>0xFF00000000000000) | (crc >> 56) \
| ((crc >> 40) & <uint64_t>0x000000000000FF00) | ((crc << 40) & <uint64_t>0x00FF000000000000) \
| ((crc << 24) & <uint64_t>0x0000FF0000000000) | ((crc >> 24) & <uint64_t>0x0000000000FF0000) \
| ((crc << 8) & <uint64_t>0x000000FF00000000) | ((crc >> 8) & <uint64_t>0x00000000FF000000)
return crc & crc_mask
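# Illustrative usage sketch (assumption, not part of the original file): all
# parameters are uint8 bit arrays, e.g. an 8-bit CRC with polynomial
# x^8 + x^2 + x + 1 is described by polynomial = [1,0,0,0,0,0,1,1,1] (nine bits,
# leading coefficient included); start_value and final_xor are then 8-bit
# arrays, and the return value is the CRC masked to poly_order - 1 bits.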
cpdef np.ndarray[np.double_t, ndim=1] get_magnitudes(IQ arr):
cdef uint64_t i, n = len(arr)
cdef np.ndarray[np.double_t, ndim=1] result = np.zeros(n, dtype = np.double)
for i in range(0, n):
result[i] = sqrt(arr[i][0] * arr[i][0] + arr[i][1] * arr[i][1])
return result
cpdef np.ndarray[np.uint64_t, ndim=1] calculate_cache(uint8_t[:] polynomial, bool reverse_polynomial=False, uint8_t bits=8):
cdef uint8_t j, poly_order = len(polynomial)
cdef uint64_t crc_mask = <uint64_t> pow(2, poly_order - 1) - 1
cdef uint64_t poly_mask = (crc_mask + 1) >> 1
cdef uint64_t poly_int = arr_to_number(polynomial, reverse_polynomial, 1) & crc_mask
cdef uint64_t crcv, i
cdef np.ndarray[np.uint64_t, ndim=1] cache = np.zeros(<uint64_t> pow(2, bits), dtype = np.uint64)
# Caching
for i in range(0, <uint32_t> len(cache)):
crcv = i << (poly_order - 1 - bits)
for _ in range(0, bits):
if (crcv & poly_mask) > 0:
crcv = (crcv << 1) & crc_mask
crcv ^= poly_int
else:
crcv = (crcv << 1) & crc_mask
cache[i] = crcv
return cache
cpdef uint64_t cached_crc(uint64_t[:] cache, uint8_t bits, uint8_t[:] inpt, uint8_t[:] polynomial, uint8_t[:] start_value, uint8_t[:] final_xor, bool lsb_first, bool reverse_polynomial, bool reverse_all, bool little_endian):
cdef unsigned int len_inpt = len(inpt)
cdef unsigned int i, poly_order = len(polynomial)
cdef uint64_t crc_mask = <uint64_t> pow(2, poly_order - 1) - 1
cdef uint64_t poly_mask = (crc_mask + 1) >> 1
cdef uint64_t poly_int = arr_to_number(polynomial, reverse_polynomial, 1) & crc_mask
cdef uint64_t temp, crcv, data, pos
cdef uint8_t j
    # For inputs shorter than the cache chunk size (bits), fall back to the plain crc() function
if len_inpt < bits:
return crc(inpt, polynomial, start_value, final_xor, lsb_first, reverse_polynomial, reverse_all, little_endian)
# CRC
crcv = arr_to_number(start_value, False, 0) & crc_mask
for i in range(0, len_inpt - bits + 1, bits):
data = 0
if lsb_first:
for j in range(0, bits):
if inpt[i + j]:
data |= (1 << j)
else:
for j in range(0, bits):
if inpt[i + bits - 1 - j]:
data |= (1 << j)
pos = (crcv >> (poly_order - bits - 1)) ^ data
crcv = ((crcv << bits) ^ cache[pos]) & crc_mask
# Are we done?
if len_inpt % bits > 0:
        # compute the CRC of the remaining bits, inpt[-(len_inpt % bits):], with the plain crc() function
# Set start_value to current crc value
for i in range(0, len(start_value)):
start_value[len(start_value) - 1 - i] = True if (crcv & (1 << i)) > 0 else False
crcv = crc(inpt[len_inpt-(len_inpt%bits):len_inpt], polynomial, start_value, final_xor, lsb_first, reverse_polynomial, reverse_all, little_endian)
else:
# final XOR
crcv ^= arr_to_number(final_xor, False, 0) & crc_mask
# reverse all bits
if reverse_all:
temp = 0
for i in range(0, poly_order - 1):
if crcv & (1 << i):
temp |= (1 << (poly_order - 2 - i))
crcv = temp & crc_mask
# little endian encoding, different for 16, 32, 64 bit
if poly_order - 1 == 16 and little_endian:
crcv = ((crcv << 8) & 0xFF00) | (crcv >> 8)
elif poly_order - 1 == 32 and little_endian:
crcv = ((crcv << 24) & <uint64_t>0xFF000000) | ((crcv << 8) & 0x00FF0000) | ((crcv >> 8) & 0x0000FF00) | (crcv >> 24)
elif poly_order - 1 == 64 and little_endian:
crcv = ((crcv << 56) & <uint64_t>0xFF00000000000000) | (crcv >> 56) \
| ((crcv >> 40) & <uint64_t>0x000000000000FF00) | ((crcv << 40) & <uint64_t>0x00FF000000000000) \
| ((crcv << 24) & <uint64_t>0x0000FF0000000000) | ((crcv >> 24) & <uint64_t>0x0000000000FF0000) \
| ((crcv << 8) & <uint64_t>0x000000FF00000000) | ((crcv >> 8) & <uint64_t>0x00000000FF000000)
return crcv & crc_mask
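# Illustrative comment (not part of the original file): cached_crc consumes the
# input in chunks of `bits` (typically 8) and advances the register with one
# table lookup from calculate_cache() per chunk, falling back to the bit-wise
# crc() for any leftover bits, so its result is identical to crc().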
cpdef tuple get_crc_datarange(uint8_t[:] inpt, uint8_t[:] polynomial, uint64_t vrfy_crc_start, uint8_t[:] start_value, uint8_t[:] final_xor, bool lsb_first, bool reverse_polynomial, bool reverse_all, bool little_endian):
cdef uint32_t len_inpt = len(inpt), poly_order = len(polynomial)
cdef uint8_t j = 0, len_crc = poly_order - 1
if vrfy_crc_start-1+len_crc >= len_inpt or vrfy_crc_start < 2:
return 0, 0
cdef uint64_t* steps = <uint64_t*>calloc(len_inpt+2, sizeof(uint64_t))
cdef uint64_t temp
cdef uint64_t crc_mask = <uint64_t> pow(2, poly_order - 1) - 1
cdef uint64_t poly_mask = (crc_mask + 1) >> 1
cdef uint64_t poly_int = arr_to_number(polynomial, reverse_polynomial, 1) & crc_mask
cdef uint64_t final_xor_int = arr_to_number(final_xor, False, 0) & crc_mask
cdef uint64_t vrfy_crc_int = arr_to_number(inpt[vrfy_crc_start:vrfy_crc_start+len_crc], False, 0) & crc_mask
cdef uint64_t crcvalue = arr_to_number(start_value, False, 0) & crc_mask
cdef bool found
cdef uint32_t i, idx, offset, data_end = vrfy_crc_start
cdef uint8_t* step = <uint8_t*>calloc(len_inpt, sizeof(uint8_t))
step[0] = 1
# crcvalue is initialized with start_value
for i in range(0, data_end+7, 8):
for j in range(0, 8):
if lsb_first:
idx = i + (7 - j)
else:
idx = i + j
# generic crc algorithm
if idx >= data_end:
break
if (crcvalue & poly_mask > 0) != step[idx]:
crcvalue = (crcvalue << 1) & crc_mask
crcvalue ^= poly_int
else:
crcvalue = (crcvalue << 1) & crc_mask
# Save steps XORed with final_xor
steps[idx] = crcvalue ^ final_xor_int
free(step)
# Reverse and little endian
if reverse_all or little_endian:
for i in range(0, data_end):
# reverse all bits
if reverse_all:
temp = 0
for j in range(0, poly_order - 1):
if steps[i] & (1 << j):
temp |= (1 << (poly_order -2 - j))
steps[j] = temp & crc_mask
# little endian encoding, different for 16, 32, 64 bit
if poly_order - 1 == 16 and little_endian:
steps[i] = ((steps[i] << 8) & <uint64_t> 0xFF00) | (steps[i] >> 8)
elif poly_order - 1 == 32 and little_endian:
steps[i] = ((steps[i] << 24) & <uint64_t> 0xFF000000) | ((steps[i] << 8) & <uint64_t> 0x00FF0000) | ((steps[i] >> 8) & <uint64_t> 0x0000FF00) | (steps[i] >> 24)
elif poly_order - 1 == 64 and little_endian:
steps[i] = ((steps[i] << 56) & <uint64_t> 0xFF00000000000000) | (steps[i] >> 56) \
| ((steps[i] >> 40) & <uint64_t> 0x000000000000FF00) | ((steps[i] << 40) & <uint64_t> 0x00FF000000000000) \
| ((steps[i] << 24) & <uint64_t> 0x0000FF0000000000) | ((steps[i] >> 24) & <uint64_t> 0x0000000000FF0000) \
| ((steps[i] << 8) & <uint64_t> 0x000000FF00000000) | ((steps[i] >> 8) & <uint64_t> 0x00000000FF000000)
# Test data range from 0...start_crc until start_crc-1...start_crc
# Compute start value
crcvalue = crc(inpt[:data_end], polynomial, start_value, final_xor, lsb_first, reverse_polynomial, reverse_all, little_endian)
try:
if vrfy_crc_int == crcvalue:
return 0, data_end
found = False
i = 0
while i < data_end - 1:
offset = 0
while inpt[i + offset] == False and i+offset < data_end - 1: # skip leading 0s in data (doesn't change crc...)
offset += 1
# XOR delta=crc(10000...) to last crc value to create next crc value
crcvalue ^= steps[data_end-i-offset-1]
if found:
return i, data_end # Return start_data, end_data
if vrfy_crc_int == crcvalue:
found = True
i += 1 + offset
# No beginning found
return 0, 0
finally:
free(steps)
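# Illustrative comment (not part of the original file): steps[idx] caches the CRC
# register state produced by a single 1-bit at position idx; since the CRC is
# linear over GF(2) and leading zeros do not change it, shifting the assumed data
# start past a 1-bit only requires XORing the cached value onto the current CRC
# instead of recomputing it for every candidate start position.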
cdef db(unsigned int t, unsigned int p, unsigned int k, unsigned int n,
uint8_t* a, uint8_t* sequence, uint64_t* current_index):
cdef unsigned int i,j
if t > n:
if n % p == 0:
for i in range(1, p+1):
sequence[current_index[0]] = a[i]
current_index[0] += 1
else:
a[t] = a[t - p]
db(t + 1, p, k, n, a, sequence, current_index)
for j in range(a[t - p] + 1, k):
a[t] = j
db(t+1, t, k, n, a, sequence, current_index)
cpdef array.array de_bruijn(unsigned int n):
cdef unsigned int k = 2 # Alphabet size is 2 because our alphabet is [0, 1]
cdef uint64_t len_sequence = k ** n
cdef uint8_t* a = <uint8_t*>calloc(k*n, sizeof(uint8_t))
cdef array.array array_template = array.array('B', [])
cdef array.array sequence
sequence = array.clone(array_template, len_sequence, zero=False)
cdef uint64_t* current_index = <uint64_t*>calloc(1, sizeof(uint64_t))
db(1, 1, k, n, a, sequence.data.as_uchars, current_index)
try:
return sequence
finally:
free(a)
free(current_index)
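# Illustrative comment (not part of the original file): de_bruijn(n) returns a
# binary De Bruijn sequence B(2, n) of length 2**n in which every n-bit pattern
# occurs exactly once when read cyclically, e.g. de_bruijn(3) should yield
# array('B', [0, 0, 0, 1, 0, 1, 1, 1]).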