Python-Math-Algorithms/matrix/__init__.py

import math


class Matrix:
    def __init__(self, rows, cols):
        self.rows = rows
        self.cols = cols
        self.fields = []
        # initialize to the identity matrix (1.0 on the diagonal, 0.0 elsewhere)
        for x in range(rows):
            row = []
            for y in range(cols):
                row.append(float(x == y))
            self.fields.append(row)

    def __str__(self):
        return self.fields.__str__()

    # convert this matrix to the identity matrix
    def to_identity(self):
        for x in range(self.rows):
            for y in range(self.cols):
                self.fields[x][y] = float(x == y)

    def transpose(self):
        result = Matrix(self.cols, self.rows)
        for x in range(self.rows):
            for y in range(self.cols):
                result.fields[y][x] = self.fields[x][y]
        return result

    def __mul__(self, other):
        result = Matrix(self.rows, other.cols)
        for x in range(self.rows):
            for y in range(other.cols):
                total = 0
                for i in range(self.cols):
                    total += self.fields[x][i] * other.fields[i][y]
                result.fields[x][y] = total
        return result

    def scale(self, other):
        # multiply every entry by the scalar `other`
        result = Matrix(self.rows, self.cols)
        for x in range(self.rows):
            for y in range(self.cols):
                result.fields[x][y] = self.fields[x][y] * other
        return result

    def __add__(self, other):
        result = Matrix(self.rows, self.cols)
        for x in range(self.rows):
            for y in range(self.cols):
                result.fields[x][y] = self.fields[x][y] + other.fields[x][y]
        return result

    def __sub__(self, other):
        result = Matrix(self.rows, self.cols)
        for x in range(self.rows):
            for y in range(self.cols):
                result.fields[x][y] = self.fields[x][y] - other.fields[x][y]
        return result

    def slice(self, row_min, row_max, col_min, col_max):
        rows = row_max - row_min
        cols = col_max - col_min
        result = Matrix(rows, cols)
        for x in range(row_min, row_max):
            for y in range(col_min, col_max):
                result.fields[x - row_min][y - col_min] = self.fields[x][y]
        return result

    def exclude_row_and_col(self, row, col):
        # build the minor matrix obtained by deleting row `row` and column `col`
        result = Matrix(self.rows - 1, self.cols - 1)
        rx = 0
        for x in range(self.rows):
            if x == row:
                continue
            ry = 0
            for y in range(self.cols):
                if y == col:
                    continue
                result.fields[rx][ry] = self.fields[x][y]
                ry += 1
            rx += 1
        return result
def determinant(self):
if self.rows == 2 and self.cols == 2:
return self.fields[0][0] * self.fields[1][1] - self.fields[0][1] * self.fields[1][0]
# Satz von Sarrus
if self.rows == 3 and self.cols == 3:
aei = self.fields[0][0] * self.fields[1][1] * self.fields[2][2]
bfg = self.fields[0][1] * self.fields[1][2] * self.fields[2][0]
cdh = self.fields[0][2] * self.fields[1][0] * self.fields[2][1]
ceg = self.fields[0][2] * self.fields[1][1] * self.fields[2][0]
afh = self.fields[0][0] * self.fields[1][2] * self.fields[2][1]
bdi = self.fields[0][1] * self.fields[1][0] * self.fields[2][2]
return aei + bfg + cdh - ceg - afh - bdi
# Laplace
if self.rows == self.cols:
# perform laplace on 2nd row
self.__laplace_row(1)
def __laplace_row(self, i):
sum = 0
for i in range(self.rows):
sum += math.pow(-1, i + 1) * self.exclude_row_and_col(i).determinant()
return sum

    def cramer(self, vector, order):
        # Cramer's rule: x_order = det(A with column `order` replaced by `vector`) / det(A)
        replaced = Matrix.from_array(self.fields)
        for row in range(self.rows):
            replaced.fields[row][order] = vector[row]
        return replaced.determinant() / self.determinant()

    @classmethod
    def from_array(cls, array):
        matrix = cls(rows=len(array), cols=len(array[0]))
        for x in range(matrix.rows):
            for y in range(matrix.cols):
                matrix.fields[x][y] = array[x][y]
        return matrix

    @classmethod
    def from_vector(cls, vector, column):
        # identity matrix of size len(vector) with column `column` replaced by `vector`
        matrix = cls(rows=len(vector), cols=len(vector))
        for y in range(matrix.rows):
            matrix.fields[y][column] = vector[y]
        return matrix


def test():
    mat1 = Matrix.from_array([
        [2, -4],
        [3, 1],
        [-2, 1]
    ])
    mat2 = Matrix.from_array([
        [2, -2, 3],
        [-3, 4, -4]
    ])
    # (3x2) * (2x3) product -> 3x3 matrix
    print(mat1 * mat2)
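

# Optional, minimal usage sketch (not part of the original module): it assumes the
# determinant()/cramer() fixes above, and the names introduced here
# (demo_determinant, the sample matrices a, b, a2) are illustrative only.
def demo_determinant():
    a = Matrix.from_array([
        [4, 3],
        [6, 3]
    ])
    print(a.determinant())  # 4*3 - 3*6 = -6

    b = Matrix.from_array([
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 10]
    ])
    print(b.determinant())  # Rule of Sarrus gives -3

    # Cramer's rule on the 2x2 system  2x + y = 5,  x + 3y = 10
    a2 = Matrix.from_array([
        [2, 1],
        [1, 3]
    ])
    print(a2.cramer([5, 10], 0))  # x = 1.0
    print(a2.cramer([5, 10], 1))  # y = 3.0


if __name__ == "__main__":
    test()
    demo_determinant()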