Compare commits

..

39 Commits

Author SHA1 Message Date
9aa172779d Ajouter LICENSE.txt 2026-03-23 11:15:02 +01:00
37bc000a8e A pu les boutons 2026-03-22 09:37:37 +01:00
f76307ecc1 Ajout du OUT 2026-03-22 09:36:59 +01:00
20ed1e558e Merge branch 'master' of https://git.serveurtom.fr/Tom/24H_du_code_2026 2026-03-22 09:28:47 +01:00
6c3ba0206b PDF 2026-03-22 09:28:39 +01:00
BalkisJerad
5e6f7474b0 Retirer les boutons 2026-03-22 08:30:57 +01:00
BalkisJerad
1a1ea75df4 Revert "tentative fonctionnement boutons"
This reverts commit e109b986733f8583da07b8f498f297a6872d3110.
2026-03-22 08:28:46 +01:00
e109b98673 tentative fonctionnement boutons 2026-03-22 08:13:40 +01:00
8113bf74d5 Merge branch 'master' of https://git.serveurtom.fr/Tom/24H_du_code_2026 2026-03-22 07:42:42 +01:00
5327eea72f Revert Epreuve 3 2026-03-22 07:42:35 +01:00
a1eb9116cc Fibbo 16 bits again 2026-03-22 07:41:02 +01:00
a526b26636 image capture app 2026-03-22 07:40:23 +01:00
a882a07bc7 Ajustements programme Path 2026-03-22 07:34:39 +01:00
667fd60bda Merge branch 'master' of https://git.serveurtom.fr/Tom/24H_du_code_2026 2026-03-22 07:19:02 +01:00
7069455fa6 Fibbo16bits 2026-03-22 07:18:37 +01:00
d85f8af745 ajout boutons et logo 2026-03-22 06:54:46 +01:00
BalkisJerad
4436c441a1 Cleanup 2026-03-22 05:33:35 +01:00
BalkisJerad
bda662116e gitignore 2026-03-22 05:27:42 +01:00
BalkisJerad
6208721dcf Dump Ram - backend 2026-03-22 05:10:11 +01:00
789c2ad1b9 Merge branch 'Epreuve-5' 2026-03-22 04:50:11 +01:00
6a3fb3f502 Epreuve 5 mk2 2026-03-22 04:49:01 +01:00
4aca719414 C++ 2026-03-22 04:48:32 +01:00
71453fbc3a ajout affichage ram et out 2026-03-22 04:38:20 +01:00
BalkisJerad
d0038394da Ajout .bin comme param à simulateur_front 2026-03-22 03:23:43 +01:00
5359c5178d ajout fonctionnalité affichage stack 2026-03-22 03:16:02 +01:00
af122fc3eb Avancement Epreuve 5 2026-03-22 02:00:04 +01:00
422289ee55 Merge branch 'master' of https://git.serveurtom.fr/Tom/24H_du_code_2026 2026-03-22 01:53:30 +01:00
c2aa1738f3 return des valeurs à récupérer pour affichage dynamique 2026-03-22 01:53:25 +01:00
b6e0498dc3 MAJ gitlab 2026-03-22 00:26:24 +01:00
BalkisJerad
ea5f707d05 Epreuve2 Version 2 (Gestion des DB) 2026-03-22 00:16:34 +01:00
BalkisJerad
f28819d288 Gestion DT 2026-03-21 20:54:39 +01:00
eb834b8f72 Ajout fichiers 2026-03-21 19:53:38 +01:00
fe9ca66500 Assembleur final 2026-03-21 19:52:18 +01:00
a4cc3b36a6 Merge branch 'master' of https://git.serveurtom.fr/Tom/24H_du_code_2026 2026-03-21 18:50:57 +01:00
2ae5ff094d Création interface et éléments 2026-03-21 18:46:24 +01:00
b3ca58576d Epreuve 0 - rev 1 2026-03-21 18:41:03 +01:00
BalkisJerad
6ae130c794 Epreuve 3 2026-03-21 18:38:18 +01:00
c05b92a03b Dictionnaire 2026-03-21 13:59:01 +01:00
59126cf65f Fix 2026-03-21 13:57:26 +01:00
76 changed files with 9382 additions and 96 deletions

3
.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
__*
.metadata
*.asm.bin

View File

@ -13,9 +13,10 @@ _loop:
MOV R0 R1 ; a = b
MOV R1 R3 ; b = c
CMP R0 R1
CMP R1 R0
JLT _end ; end si b < a
JMP _loop
_end:
JMP _end
MOV R0 'A' ;11100000 01000001
RET

27
Epreuve2.asm Normal file
View File

@ -0,0 +1,27 @@
; Epreuve 2: print a Fibonacci-style sequence using only SUB
; (a + b is computed as a - (0 - b)), then load a data byte at the end.
_main:
    MOV R0 0            ; a
    MOV R1 1            ; b
    OUT R0              ; print 0
_data1:
    DB 0                ; 00000000
    DB 'C'              ; 01000011
_loop:
    OUT R1              ; print b
    MOV R2 0            ; 0
    MOV R3 R0           ; c = a
    SUB R2 R1           ; 0 - b
    SUB R3 R2           ; a - (0 - b) = a - -b = a + b
    MOV R0 R1           ; a = b
    MOV R1 R3           ; b = c
    CMP R1 R0
    JLT _end            ; end if b < a
    JMP _loop
_end:
    LDR R2 R3 _data1
    RET

178
Epreuve2.py Normal file
View File

@ -0,0 +1,178 @@
import sys
# --- Instruction set configuration (unchanged) ---
# Maps each mnemonic to the argument kinds the parser must validate:
# register (R0..R3), immediate value (0..255 or 'c' char literal), or label.
instructions = {
    "DB": {"ins": "DB", "args": [{"isRegister": False, "isValue": True, "isLabel": False}]},
    "CALL": {"ins": "CALL", "args": [{"isRegister": False, "isValue": False, "isLabel": True}]},
    "RET": {"ins": "RET", "args": []},
    "JMP": {"ins": "JMP", "args": [{"isRegister": False, "isValue": False, "isLabel": True}]},
    "JLT": {"ins": "JLT", "args": [{"isRegister": False, "isValue": False, "isLabel": True}]},
    "JEQ": {"ins": "JEQ", "args": [{"isRegister": False, "isValue": False, "isLabel": True}]},
    "PUSH": {"ins": "PUSH", "args": [{"isRegister": True, "isValue": False, "isLabel": False}]},
    "POP": {"ins": "POP", "args": [{"isRegister": True, "isValue": False, "isLabel": False}]},
    "MOV": {"ins": "MOV", "args": [{"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": True, "isValue": True, "isLabel": False}]},
    "SUB": {"ins": "SUB", "args": [{"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": True, "isValue": True, "isLabel": False}]},
    "CMP": {"ins": "CMP", "args": [{"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": True, "isValue": True, "isLabel": False}]},
    "LDR": {"ins": "LDR", "args": [{"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": False, "isValue": False, "isLabel": True}]},
    "STR": {"ins": "STR", "args": [{"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": True, "isValue": False, "isLabel": False}, {"isRegister": False, "isValue": False, "isLabel": True}]},
    "OUT": {"ins": "OUT", "args": [{"isRegister": True, "isValue": False, "isLabel": False}]},
    "TIM": {"ins": "TIM", "args": [{"isRegister": False, "isValue": True, "isLabel": False}]}
}
# --- Utility helpers ---
def valueToInt(arg):
    """Convert an operand token to an int.

    Accepts a decimal literal ("42") or a quoted character literal ("'A'",
    in which case the ASCII code of the character is returned).
    """
    try:
        return int(arg)
    except ValueError:
        # Not a decimal number: assume a character literal like 'A'
        # (was a bare `except`, which also hid TypeError and friends).
        return ord(arg[1])
def registerToDec(reg):
    """Map a register token of the form "RN" to its numeric index N."""
    return int(reg[1])
def testArgIsRegister(arg):
if len(arg) != 2 or arg[0] != "R": return False
try: return 0 <= int(arg[1]) <= 3
except: return False
def testArgIsValue(arg):
try:
if 0 <= int(arg) <= 255: return True
except: pass
if len(arg) == 3 and arg[0] == arg[2] == "'": return True
return False
def testArgIsLabel(arg, twoDotsIncluded=False):
if not arg or arg[0] != "_": return False
body = arg[1:-1] if twoDotsIncluded else arg[1:]
if twoDotsIncluded and arg[-1] != ":": return False
return all(c in "abcdefghijklmnopqrstuvwxyz0123456789_" for c in body)
# --- Conversion helpers (unchanged, one per mnemonic) ---
# Each returns {"opcode": [...]} with the encoded byte(s). The placeholder
# string "label" is patched to a real address by assemble() using the
# "label" entry. DB results carry "is_db": True so they are emitted after
# the code section.
def convertInsDB(args): return {"opcode": [valueToInt(args[0])], "is_db": True}
def convertInsCALL(args): return {"opcode": [0x00, "label"], "label": args[0]}
def convertInsRET(args): return {"opcode": [0x80]}
def convertInsJMP(args): return {"opcode": [0x40, "label"], "label": args[0]}
def convertInsJLT(args): return {"opcode": [0xC0, "label"], "label": args[0]}
def convertInsJEQ(args): return {"opcode": [0x20, "label"], "label": args[0]}
def convertInsPUSH(args): return {"opcode": [0xA0 | registerToDec(args[0])]}
def convertInsPOP(args): return {"opcode": [0x60 | registerToDec(args[0])]}
def convertInsMOV(args):
    # Register-register form packs both register indices into one byte;
    # the immediate form uses a second byte for the value.
    idReg0 = registerToDec(args[0])
    if testArgIsRegister(args[1]): return {"opcode": [0x50 | (idReg0 << 2) | registerToDec(args[1])]}
    return {"opcode": [0xE0 | idReg0, valueToInt(args[1])]}
def convertInsSUB(args):
    idReg0 = registerToDec(args[0])
    if testArgIsRegister(args[1]): return {"opcode": [0xD0 | (idReg0 << 2) | registerToDec(args[1])]}
    return {"opcode": [0x10 | idReg0, valueToInt(args[1])]}
def convertInsCMP(args):
    idReg0 = registerToDec(args[0])
    if testArgIsRegister(args[1]): return {"opcode": [0x30 | (idReg0 << 2) | registerToDec(args[1])]}
    return {"opcode": [0x90 | idReg0, valueToInt(args[1])]}
def convertInsLDR(args):
    # dst in bits 3-2, index register in bits 1-0, label address in byte 2.
    return {"opcode": [0xB0 | (registerToDec(args[0]) << 2) | registerToDec(args[1]), "label"], "label": args[2]}
def convertInsSTR(args):
    return {"opcode": [0x70 | (registerToDec(args[0]) << 2) | registerToDec(args[1]), "label"], "label": args[2]}
def convertInsOUT(args): return {"opcode": [0xF0 | registerToDec(args[0])]}
def convertInsTIM(args): return {"opcode": [0xF8, valueToInt(args[0])]}
# --- Assembler ---
def assemble(path):
    """Two-pass assembler.

    Parses the .asm file at *path*, lays out the CODE section first and the
    DATA (DB) section after it, resolves label placeholders, and returns
    (final_bytecode, final_labels) where final_labels maps label -> address.
    Exits the process with status 1 on any parse or label error.
    """
    labels = {}  # NOTE(review): unused — final_labels below is the real map
    code_elements = []
    data_elements = []
    with open(path, "r") as f:
        lines = f.readlines()
    current_pending_labels = []
    for line_num, line in enumerate(lines, 1):
        # Strip the ';' comment and surrounding whitespace.
        line = line.split(";")[0].strip()
        if not line: continue
        parts = line.split()
        # Label handling (several labels may appear in a row).
        while parts and testArgIsLabel(parts[0], True):
            label_name = parts[0][:-1]
            current_pending_labels.append(label_name)
            parts = parts[1:]  # drop the label and keep parsing the same line
        if not parts: continue  # the line contained only labels
        instr_name = parts[0]
        args = parts[1:]
        try:
            # Dispatch to convertIns<NAME>; an unknown mnemonic raises KeyError.
            res = globals()[f"convertIns{instr_name}"](args)
            res["attached_labels"] = current_pending_labels
            current_pending_labels = []
            if res.get("is_db"):
                data_elements.append(res)
            else:
                code_elements.append(res)
        except Exception as e:
            print(f"ERROR Line {line_num}: {instr_name} -> {e}")
            sys.exit(1)
    # Labels left dangling at the very end of the file
    if current_pending_labels:
        # Attach them to a dummy empty element at the end of the data section.
        data_elements.append({"opcode": [], "attached_labels": current_pending_labels, "is_db": True})
    # FINAL ADDRESS COMPUTATION
    final_labels = {}
    current_pc = 0
    # 1. CODE section first
    for item in code_elements:
        for lbl in item["attached_labels"]:
            final_labels[lbl] = current_pc
        current_pc += len(item["opcode"])
    # 2. DATA section afterwards
    for item in data_elements:
        for lbl in item["attached_labels"]:
            final_labels[lbl] = current_pc
        current_pc += len(item["opcode"])
    # BYTECODE GENERATION
    final_bytecode = []
    # Emission order: code then data
    for item in code_elements + data_elements:
        for op in item["opcode"]:
            if op == "label":
                label_target = item["label"]
                if label_target not in final_labels:
                    print(f"ERROR: Label '{label_target}' missing!")
                    sys.exit(1)
                final_bytecode.append(final_labels[label_target])
            else:
                final_bytecode.append(op)
    return final_bytecode, final_labels
if __name__ == "__main__":
    # CLI entry point: assemble the given .asm file, print a listing,
    # and write the bytecode next to it as <file>.asm.bin.
    if len(sys.argv) < 2:
        print("Usage: python asm.py <file.asm>")
        sys.exit(1)
    path = sys.argv[1]
    bytecode, labels_map = assemble(path)
    # Pretty-printed listing
    print("\n" + "="*50)
    print(f" ASSEMBLY PREVIEW: {path}")
    print("="*50)
    print(f"{'ADDR':<7} | {'HEX':<5} | {'BINARY':<10}")
    print("-" * 30)
    for i, b in enumerate(bytecode):
        # Look for labels pointing at this address so they can be shown.
        lbl_str = ""
        for name, addr in labels_map.items():
            if addr == i: lbl_str += f" ({name})"
        print(f"0x{i:02X} | {b:02x} | {b:08b} {lbl_str}")
    print("="*50 + "\n")
    with open(path + ".bin", "wb") as file:
        file.write(bytes(bytecode))
    print(f"Success: {path}.bin generated.")

282
Epreuve3.py Normal file
View File

@ -0,0 +1,282 @@
# ---------------------------------------------------------
# Simulateur
# ---------------------------------------------------------
# - Bus données : 8 bits
# - 4 registres R0..R3 (8 bits)
# - Bus adresse : 8 bits
# - RAM : 256 octets
# - Instructions : 1 ou 2 octets
# - Cycles : 1 octet -> 1, 2 octets -> 2, LDR/STR -> 3
# - PC démarre à 0
# - Pile descendante, SP=255
# ---------------------------------------------------------
from dataclasses import dataclass, field
import sys
@dataclass
class CPU:
    """Architectural state of the simulated 8-bit CPU."""
    pc: int = 0               # program counter
    sp: int = 255             # stack pointer (stack grows downward from 255)
    regs: list = field(default_factory=list)  # R0..R3
    lt: int = 0               # "less than" flag, set by CMP
    eq: int = 0               # "equal" flag, set by CMP
    cycles: int = 0           # accumulated cycle count
    running: bool = True      # cleared when the final RET executes
    after_ret: bool = False   # True once the top-level RET has fired
    # Last value emitted by OUT, cleared by Simulator.report().
    # NOTE(review): no annotation, so this is a class attribute, not a
    # dataclass field — harmless because None is immutable and it is only
    # ever reassigned per-instance, but confirm this is intentional.
    last_out = None
    def __post_init__(self):
        # An explicitly-passed empty list is also replaced by fresh zeros.
        if not self.regs:
            self.regs = [0, 0, 0, 0]
class Simulator:
    """Executes a program for the 8-bit CPU described in the file header:
    256 bytes of RAM shared by code, data and the descending stack."""

    # NOTE(review): unused class attribute — step() assigns a *local*
    # cycles_added; confirm this can be removed.
    cycles_added = 0

    def __init__(self, program: bytes):
        # Load the program image at address 0; anything past 256 bytes
        # is silently truncated.
        self.ram = bytearray(256)
        for i, b in enumerate(program[:256]):
            self.ram[i] = b
        self.cpu = CPU()
        self.program_size = len(program)

    # ----------------- memory / stack utilities -----------------
    def fetch_byte(self) -> int:
        """Read the byte at PC and advance PC (wrapping at 256)."""
        b = self.ram[self.cpu.pc]
        self.cpu.pc = (self.cpu.pc + 1) & 0xFF
        return b

    def push(self, value: int):
        """Push one byte onto the stack (grows downward from 255)."""
        if self.cpu.sp < 0:
            raise RuntimeError("STACK OVERFLOW")
        self.ram[self.cpu.sp] = value & 0xFF
        self.cpu.sp -= 1

    def pop(self) -> int:
        """Pop one byte; an empty stack yields 0 instead of raising."""
        if self.cpu.sp >= 255:
            return 0
        self.cpu.sp += 1
        return self.ram[self.cpu.sp]

    # ----------------- execution of one instruction -----------------
    def step(self):
        """Decode and execute the instruction at PC; return the state
        snapshot produced by report()."""
        c = self.cpu
        pc_before = c.pc
        b = self.fetch_byte()
        instr = ""
        size = 1          # instruction size in bytes (1 or 2)
        extra_cycles = 0  # for LDR/STR/TIM  (NOTE(review): never read)
        if b == 0x00:  # CALL _label
            addr = self.fetch_byte()
            size = 2
            instr = f"CALL {addr}"
            self.push(c.pc)
            c.pc = addr
        elif b == 0x40:  # JMP _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JMP {addr}"
            c.pc = addr
        elif b == 0xC0:  # JLT _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JLT {addr}"
            if c.lt == 1:
                c.pc = addr
        elif b == 0x20:  # JEQ _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JEQ {addr}"
            if c.eq == 1:
                c.pc = addr
        elif b == 0x80:  # RET
            instr = "RET"
            ret = self.pop()
            # A RET on an empty stack (pop() returned its 0 sentinel) is
            # treated as the end of the program.
            if c.sp >= 255 and ret == 0:
                c.after_ret = True
                c.running = False
            else:
                c.pc = ret
        # --- PUSH / POP ---
        elif (b & 0b11111100) == 0b10100000:  # PUSH Rx
            r = b & 0b11
            instr = f"PUSH R{r}"
            self.push(c.regs[r])
        elif (b & 0b11111100) == 0b01100000:  # POP Rx
            r = b & 0b11
            instr = f"POP R{r}"
            c.regs[r] = self.pop()
        # --- MOV Rx value / SUB Rx value / CMP Rx value ---
        elif (b & 0b11111100) == 0b11100000:  # MOV Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"MOV R{r}, {imm}"
            c.regs[r] = imm
        elif (b & 0b11111100) == 0b00010000:  # SUB Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"SUB R{r}, {imm}"
            c.regs[r] = (c.regs[r] - imm) & 0xFF
        elif (b & 0b11111100) == 0b10010000:  # CMP Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"CMP R{r}, {imm}"
            v = c.regs[r]
            c.lt = 1 if v < imm else 0
            c.eq = 1 if v == imm else 0
        # --- MOV / SUB / CMP register-register ---
        elif (b & 0b11110000) == 0b01010000:  # MOV Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"MOV R{dst}, R{src}"
            c.regs[dst] = c.regs[src]
        elif (b & 0b11110000) == 0b11010000:  # SUB Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"SUB R{dst}, R{src}"
            c.regs[dst] = (c.regs[dst] - c.regs[src]) & 0xFF
        elif (b & 0b11110000) == 0b00110000:  # CMP Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"CMP R{dst}, R{src}"
            v1 = c.regs[dst]
            v2 = c.regs[src]
            c.lt = 1 if v1 < v2 else 0
            c.eq = 1 if v1 == v2 else 0
        # --- LDR / STR (2 bytes, 3 cycles) ---
        elif (b & 0b11110000) == 0b10110000:  # LDR Rx Ry _label
            dst = (b >> 2) & 0b11
            src = b & 0b11
            addr = self.fetch_byte()
            size = 2
            instr = f"LDR R{dst}, R{src}, {addr}"
            eff = (addr + c.regs[src]) & 0xFF  # label base + index register
            c.regs[dst] = self.ram[eff]
            extra_cycles = 1  # 2 bytes -> 2 cycles, +1 = 3
        elif (b & 0b11110000) == 0b01110000:  # STR Rx Ry _label
            dst = (b >> 2) & 0b11
            src = b & 0b11
            addr = self.fetch_byte()
            size = 2
            instr = f"STR R{dst}, R{src}, {addr}"
            eff = (addr + c.regs[src]) & 0xFF
            self.ram[eff] = c.regs[dst] & 0xFF
            extra_cycles = 1
        # --- OUT Rx ---
        elif (b & 0b11111100) == 0b11110000:  # OUT Rx
            r = b & 0b11
            instr = f"OUT R{r}"
            print(f"[OUT] R{r} = {c.regs[r]}")
            self.cpu.last_out = c.regs[r]
        # --- TIM value ---
        elif b == 0xF8:  # TIM
            second = self.fetch_byte()
            size = 2
            m = (second >> 7) & 0x1  # multiplier bit: 0 -> x1, 1 -> x100
            v = second & 0x7F
            instr = f"TIM m={m}, v={v}"
            mult = 1 if m == 0 else 100
            pause_ms = mult * (v + 1)
            c.cycles += pause_ms  # model the pause as extra cycles
        else:  # If DB bytes sit after RET we never reach this branch
            instr = f"DB 0x{b:02X}"
            size = 0  # DB is not an instruction
            # instr = f"UNKNOWN 0x{b:02X}"
            # c.running = False
        # cycle accounting
        if (b & 0b11110000) in (0xB0, 0x70):  # LDR / STR
            c.cycles += 3
            cycles_added = 3
        else:
            c.cycles += size
            cycles_added = size
        return self.report(pc_before, instr, cycles_added)

    # ----------------- execution report -----------------
    def report(self, pc_before: int, instr: str, cycles_added: int):
        """Print a trace line and the RAM dump, then return a snapshot dict
        of the machine state (consumed by the front-end)."""
        c = self.cpu
        regs_str = " ".join(f"R{i}={c.regs[i]:02X}" for i in range(4))
        print(f"PC={pc_before:02X} {instr:20s} +Cycles={cycles_added:3d} Total={c.cycles}")
        print(f" {regs_str} LT={c.lt} EQ={c.eq} SP={c.sp}")
        print("-" * 60)
        ram = self.dump_ram()
        out = self.cpu.last_out
        self.cpu.last_out = None  # one-shot: each OUT is reported only once
        return {
            "pc": pc_before,
            "instr": instr,
            "cycles_added": cycles_added,
            "regs": c.regs.copy(),
            "lt": c.lt,
            "eq": c.eq,
            "sp": c.sp,
            "ram": ram,
            "out": out
        }

    # ----------------- main loop -----------------
    def run(self, max_steps: int = 100000):
        """Generator: execute until halt or *max_steps*, yielding one
        report dict per executed instruction."""
        steps = 0
        while self.cpu.running and steps < max_steps:
            result = self.step()
            yield result
            steps += 1

    def dump_ram(self):
        """Print the full 256-byte RAM, 8 bytes per row, and return it."""
        print("\n========= RAM DUMP ========")
        for addr in range(0, 256, 8):
            chunk = self.ram[addr:addr+8]
            hex_values = " ".join(f"{b:02X}" for b in chunk)
            print(f"{addr:02X}: {hex_values}")
        return self.ram
        print("===========================\n")  # NOTE(review): unreachable — placed after return
# ---------------------------------------------------------
# READ A .bin FILE AND RUN IT
# ---------------------------------------------------------
if __name__ == "__main__":
    args = sys.argv
    if len(args) > 1:
        filename = args[1]
        print("filename: " + filename)
        with open(filename, "rb") as f:
            program = f.read()
        sim = Simulator(program)
        # BUG FIX: Simulator.run() is a generator (it yields one report
        # per executed instruction), so calling it without iterating
        # executed nothing. Drain it so the program actually runs.
        for _ in sim.run():
            pass
    else:
        print("Needs *.bin as parameter")

46
Fibbo16b.asm Normal file
View File

@ -0,0 +1,46 @@
; 16-bit Fibonacci on an 8-bit machine: b lives in R0 (low) / R1 (high),
; a lives in R2 (low) / R3 (high). Addition is built from SUB only, with
; a manual carry from the low byte into the high byte.
_main:
    MOV R0 1            ; b
    SUB R1 R1           ; b
    SUB R2 R2           ; a
    SUB R3 R3           ; a
_loop:
    OUT R2
    OUT R3
    PUSH R0             ; save b
    PUSH R1
    PUSH R1             ; R0 R1 => b, R2 R3 => a. Returns b = b + a = c
    SUB R1 R1
    SUB R1 R2
    SUB R0 R1
    POP R1              ; R0 = R0 + R2
    CMP R0 R2           ; on overflow, +1 must go into R3 (high byte of a)
    JLT _add16cr1debut
    JMP _add16cr1fin
_add16cr1debut:
    SUB R3 255
_add16cr1fin:
    PUSH R0
    SUB R0 R0
    SUB R0 R3
    SUB R1 R0
    POP R0              ; R1 = R1 + R3
    CMP R1 R3
    JLT _add16cr2debut
    JMP _add16cr2fin
_add16cr2debut:
    JMP _end
_add16cr2fin:
    POP R3              ; restore b
    POP R2
    JMP _loop
_end:
    POP R3
    POP R2
    RET

BIN
Fibbonacci 16Bits.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

300
Interpreteur.py Normal file
View File

@ -0,0 +1,300 @@
# ---------------------------------------------------------
# Simulateur
# ---------------------------------------------------------
# - Bus données : 8 bits
# - 4 registres R0..R3 (8 bits)
# - Bus adresse : 8 bits
# - RAM : 256 octets
# - Instructions : 1 ou 2 octets
# - Cycles : 1 octet -> 1, 2 octets -> 2, LDR/STR -> 3
# - PC démarre à 0
# - Pile descendante, SP=255
# ---------------------------------------------------------
import sys, time
import uasyncio as asyncio
class CPU:
    """Architectural state of the simulated CPU.

    Plain class (no `dataclasses` dependency — this file targets
    MicroPython, see the `uasyncio` import).
    """

    def __init__(self):
        # BUG FIX: the original declared these as *class* attributes plus a
        # __post_init__ that was never invoked (this is not a dataclass),
        # so `regs` was one mutable list shared by every CPU instance.
        # Initialize everything per-instance instead.
        self.pc = 0                # program counter
        self.sp = 255              # stack pointer (stack grows downward)
        self.regs = [0, 0, 0, 0]   # R0..R3
        self.lt = 0                # "less than" flag, set by CMP
        self.eq = 0                # "equal" flag, set by CMP
        self.cycles = 0            # accumulated cycle count
        self.running = True        # cleared on final RET / unknown opcode
        self.after_ret = False     # True once the top-level RET has fired
class Simulator:
    """Executes a program for the 8-bit CPU described in the file header.

    Robot-targeted variant: OUT drives the motors through motorCallback
    (one nibble per motor) and every register write is mirrored to
    registerCallback for the display.
    """

    def __init__(self, program: bytes):
        # Load the program image at address 0; anything past 256 bytes
        # is silently truncated.
        self.ram = bytearray(256)
        for i, b in enumerate(program[:256]):
            self.ram[i] = b
        self.cpu = CPU()
        self.program_size = len(program)
        self.motorCallback = None
        self.registerCallback = None

    # ---- Setters used to interface with the robot
    def setMotorCallback(self, callback):
        # callback(motG, motD) is invoked on every OUT instruction.
        self.motorCallback = callback

    def setRegisterCallback(self, callback):
        # callback(reg_index, value) is invoked on every register write.
        self.registerCallback = callback

    # ----------------- memory / stack utilities -----------------
    def fetch_byte(self) -> int:
        """Read the byte at PC and advance PC (wrapping at 256)."""
        b = self.ram[self.cpu.pc]
        self.cpu.pc = (self.cpu.pc + 1) & 0xFF
        return b

    def push(self, value: int):
        """Push one byte onto the stack (grows downward from 255)."""
        if self.cpu.sp < 0:
            raise RuntimeError("STACK OVERFLOW")
        self.ram[self.cpu.sp] = value & 0xFF
        self.cpu.sp -= 1

    def pop(self) -> int:
        """Pop one byte; an empty stack yields 0 instead of raising."""
        if self.cpu.sp >= 255:
            return 0
        self.cpu.sp += 1
        return self.ram[self.cpu.sp]

    # ----------------- execution of one instruction -----------------
    def step(self):
        """Decode and execute the instruction at PC."""
        c = self.cpu
        pc_before = c.pc
        b = self.fetch_byte()
        instr = ""
        size = 1          # instruction size in bytes (1 or 2)
        extra_cycles = 0  # for LDR/STR/TIM  (NOTE(review): never read)
        # --- fixed-opcode 2-byte instructions ---
        #print(pc_before)
        #print(self.program_size)
        if c.after_ret:
            # Everything after the final RET is treated as data.
            instr = f"DB 0x{b:02X}"
        elif b == 0x00:  # CALL _label
            addr = self.fetch_byte()
            size = 2
            instr = f"CALL {addr}"
            self.push(c.pc)
            c.pc = addr
        elif b == 0x40:  # JMP _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JMP {addr}"
            c.pc = addr
        elif b == 0xC0:  # JLT _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JLT {addr}"
            if c.lt == 1:
                c.pc = addr
        elif b == 0x20:  # JEQ _label
            addr = self.fetch_byte()
            size = 2
            instr = f"JEQ {addr}"
            if c.eq == 1:
                c.pc = addr
        elif b == 0x80:  # RET
            instr = "RET"
            ret = self.pop()
            # A RET on an empty stack (pop() returned its 0 sentinel) is
            # treated as the end of the program.
            if c.sp >= 255 and ret == 0:
                c.after_ret = True
                c.running = False
            else:
                c.pc = ret
        # --- PUSH / POP ---
        elif (b & 0b11111100) == 0b10100000:  # PUSH Rx
            r = b & 0b11
            instr = f"PUSH R{r}"
            self.push(c.regs[r])
        elif (b & 0b11111100) == 0b01100000:  # POP Rx
            r = b & 0b11
            instr = f"POP R{r}"
            c.regs[r] = self.pop()
            if (self.registerCallback != None):
                self.registerCallback(r, c.regs[r])
        # --- MOV Rx value / SUB Rx value / CMP Rx value ---
        elif (b & 0b11111100) == 0b11100000:  # MOV Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"MOV R{r}, {imm}"
            c.regs[r] = imm
            if (self.registerCallback != None):
                self.registerCallback(r, c.regs[r])
        elif (b & 0b11111100) == 0b00010000:  # SUB Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"SUB R{r}, {imm}"
            c.regs[r] = (c.regs[r] - imm) & 0xFF
            if (self.registerCallback != None):
                self.registerCallback(r, c.regs[r])
        elif (b & 0b11111100) == 0b10010000:  # CMP Rx value
            r = b & 0b11
            imm = self.fetch_byte()
            size = 2
            instr = f"CMP R{r}, {imm}"
            v = c.regs[r]
            c.lt = 1 if v < imm else 0
            c.eq = 1 if v == imm else 0
        # --- MOV / SUB / CMP register-register ---
        elif (b & 0b11110000) == 0b01010000:  # MOV Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"MOV R{dst}, R{src}"
            c.regs[dst] = c.regs[src]
            if (self.registerCallback != None):
                self.registerCallback(dst, c.regs[dst])
        elif (b & 0b11110000) == 0b11010000:  # SUB Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"SUB R{dst}, R{src}"
            c.regs[dst] = (c.regs[dst] - c.regs[src]) & 0xFF
            if (self.registerCallback != None):
                self.registerCallback(dst, c.regs[dst])
        elif (b & 0b11110000) == 0b00110000:  # CMP Rx Ry
            dst = (b >> 2) & 0b11
            src = b & 0b11
            instr = f"CMP R{dst}, R{src}"
            v1 = c.regs[dst]
            v2 = c.regs[src]
            c.lt = 1 if v1 < v2 else 0
            c.eq = 1 if v1 == v2 else 0
        # --- LDR / STR (2 bytes, 3 cycles) ---
        elif (b & 0b11110000) == 0b10110000:  # LDR Rx Ry _label
            dst = (b >> 2) & 0b11
            src = b & 0b11
            addr = self.fetch_byte()
            size = 2
            instr = f"LDR R{dst}, R{src}, {addr}"
            eff = (addr + c.regs[src]) & 0xFF  # label base + index register
            c.regs[dst] = self.ram[eff]
            extra_cycles = 1  # 2 bytes -> 2 cycles, +1 = 3
            if (self.registerCallback != None):
                self.registerCallback(dst, c.regs[dst])
        elif (b & 0b11110000) == 0b01110000:  # STR Rx Ry _label
            dst = (b >> 2) & 0b11
            src = b & 0b11
            addr = self.fetch_byte()
            size = 2
            instr = f"STR R{dst}, R{src}, {addr}"
            eff = (addr + c.regs[src]) & 0xFF
            self.ram[eff] = c.regs[dst] & 0xFF
            extra_cycles = 1
        # --- OUT Rx ---
        elif (b & 0b11111100) == 0b11110000:  # OUT Rx
            r = b & 0b11
            instr = f"OUT R{r}"
            registre = c.regs[r]
            print(f"[OUT] R{r} = {registre}")
            if (self.motorCallback != None):
                # High nibble -> left motor (motG), low nibble -> right (motD).
                motG = (registre >> 4) & 0b1111
                motD = (registre) & 0b1111
                self.motorCallback(motG, motD)
        # --- TIM value ---
        elif b == 0xF8:  # TIM
            second = self.fetch_byte()
            size = 2
            m = (second >> 7) & 0x1  # multiplier bit: 0 -> x1, 1 -> x100
            v = second & 0x7F
            instr = f"TIM m={m}, v={v}"
            mult = 1 if m == 0 else 100
            pause_ms = mult * (v + 1)
            c.cycles += pause_ms  # model the pause as extra cycles
            # print(f"Sleep {pause_ms}ms...")
            time.sleep(pause_ms/1000)
            # print("BIPBIP")
        # if pc_before >= self.program_size:
        #     if 32 <= b <= 126:
        #         instr = f"DB 0x{b:02X} ('{chr(b)}')"
        #     else:
        #         instr = f"DB 0x{b:02X}"
        else:
            instr = f"UNKNOWN 0x{b:02X}"
            c.running = False
        # cycle accounting
        if (b & 0b11110000) in (0xB0, 0x70):  # LDR / STR
            c.cycles += 3
            cycles_added = 3
        else:
            c.cycles += size
            cycles_added = size
        self.report(pc_before, instr, cycles_added)

    # ----------------- execution report -----------------
    def report(self, pc_before: int, instr: str, cycles_added: int):
        """Trace hook; the console prints are disabled on the robot build."""
        c = self.cpu
        regs_str = " ".join(f"R{i}={c.regs[i]:02X}" for i in range(4))
        # print(f"PC={pc_before:02X} {instr:20s} +Cycles={cycles_added:3d} Total={c.cycles}")
        # print(f" {regs_str} LT={c.lt} EQ={c.eq} SP={c.sp}")
        # print("-" * 60)

    # ----------------- main loop -----------------
    def run(self, max_steps: int = 100000):
        """Execute until halt or at most *max_steps* instructions."""
        steps = 0
        while self.cpu.running and steps < max_steps:
            self.step()
            steps += 1
def StartCPU(program, callback, registerCallback):
    """Build a Simulator for *program*, wire up the motor and register
    callbacks, and execute instructions until the CPU halts."""
    simulator = Simulator(program)
    simulator.setMotorCallback(callback)
    simulator.setRegisterCallback(registerCallback)
    # Single-step: each loop iteration runs exactly one instruction, so
    # the halt condition is re-checked between instructions.
    while simulator.cpu.running:
        simulator.run(max_steps=1)
import time
# ---------------------------------------------------------
# READ A .bin FILE AND RUN IT
# ---------------------------------------------------------
if __name__ == "__main__":
    # Name of the binary file to execute
    path = ""  # NOTE(review): unused — filename below is what is opened
    args = sys.argv
    if (len(args) > 1):
        filename = args[1]
        print("filename: " + filename)
        with open(filename, "rb") as f:
            program = f.read()
        # Run headless: no motor or register callbacks attached.
        StartCPU(program, None, None)
    else:
        print("Needs *.bin as parameter")

32
LED.asm Normal file
View File

@ -0,0 +1,32 @@
; Ramp R0..R3 one after another: each SUB Rx 245 wraps modulo 256
; (equivalent to adding 11), with a TIM delay between steps, until the
; register value reaches 220 or more.
_main:
    SUB R0 R0           ; clear R0..R3
    SUB R1 R1
    SUB R2 R2
    SUB R3 R3
_loop1:
    SUB R0 245          ; R0 - 245 wraps to R0 + 11
    TIM 1
    CMP R0 220
    JLT _loop1          ; keep ramping while R0 < 220
_loop2:
    SUB R1 245
    TIM 1
    CMP R1 220
    JLT _loop2
_loop3:
    SUB R2 245
    TIM 1
    CMP R2 220
    JLT _loop3
_loop4:
    SUB R3 245
    TIM 1
    CMP R3 220
    JLT _loop4
    RET

674
LICENSE.txt Normal file
View File

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

75
Path.asm Normal file
View File

@ -0,0 +1,75 @@
; Pre-programmed drive sequence for the path challenge.
; Pattern: load a motor-command byte into R0, OUT it to the motor
; controller, then TIM to hold that command for a while.
; NOTE(review): exact meaning of each motor byte (wheel speed/direction
; bit fields) is inferred from the comments — confirm against the robot spec.
_main:
    ; Start
    MOV R0 17
    OUT R0
    TIM 130 ; Slow start OK
    MOV R0 34
    OUT R0
    TIM 130 ; Slow start OK
    MOV R0 51
    OUT R0
    TIM 164 ; Straight line OK
    MOV R0 49 ; 0b 0011 0001
    OUT R0
    TIM 131 ; Right turn OK
    MOV R0 51
    OUT R0
    TIM 147 ; Straight line
    MOV R0 19 ; 0b 0001 0011
    OUT R0
    TIM 131 ; Left turn
    ;TIM 50
    MOV R0 51
    OUT R0
    TIM 138 ; Straight line
    MOV R0 0
    OUT R0 ; STOP
    TIM 160
    MOV R0 162
    OUT R0
    TIM 135 ; Left turn
    MOV R0 0
    OUT R0 ; STOP
    TIM 140
    MOV R0 42
    OUT R0
    TIM 135 ; Right turn
    MOV R0 0
    OUT R0 ; STOP
    TIM 160
    MOV R0 247
    OUT R0
    TIM 130 ; Left turn
    MOV R0 127
    OUT R0
    TIM 130 ; Right turn
    MOV R0 247
    OUT R0
    TIM 180 ; Left turn
    MOV R0 0
    OUT R0 ; STOP
    RET
; Helper routine: stop the motors (command byte 0) and wait a while.
_sleep:
    MOV R0 0
    OUT R0
    TIM 100 ; Short pause
    RET

BIN
Presentation.pdf Normal file

Binary file not shown.

BIN
SII++.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 382 KiB

442
assembleur.py Normal file
View File

@ -0,0 +1,442 @@
import sys
# Instruction table for the target ISA: maps each mnemonic to a
# descriptor of its expected operands.  For every operand slot, the
# flags state which operand kinds are accepted:
#   isRegister - a register token "R0".."R3"
#   isValue    - an immediate (decimal 0-255 or quoted character 'a')
#   isLabel    - a label reference such as "_main"
instructions = {
    "DB": {
        "ins": "DB",
        "args": [{
            "isRegister": False,
            "isValue": True,
            "isLabel": False,
        }]
    },
    "CALL": {
        "ins": "CALL",
        "args": [{
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "RET": {
        "ins": "RET",
        "args": []
    },
    "JMP": {
        "ins": "JMP",
        "args": [{
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "JLT": {
        "ins": "JLT",
        "args": [{
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "JEQ": {
        "ins": "JEQ",
        "args": [{
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "PUSH": {
        "ins": "PUSH",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        }]
    },
    "POP": {
        "ins": "POP",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        }]
    },
    # Two-operand ALU-style instructions: first operand must be a
    # register, second may be a register or an immediate.
    "MOV": {
        "ins": "MOV",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": True,
            "isValue": True,
            "isLabel": False
        }]
    },
    "SUB": {
        "ins": "SUB",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": True,
            "isValue": True,
            "isLabel": False
        }]
    },
    "CMP": {
        "ins": "CMP",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": True,
            "isValue": True,
            "isLabel": False
        }]
    },
    # Memory access: two registers plus a label naming the base address.
    "LDR": {
        "ins": "LDR",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "STR": {
        "ins": "STR",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        },
        {
            "isRegister": False,
            "isValue": False,
            "isLabel": True
        }]
    },
    "OUT": {
        "ins": "OUT",
        "args": [{
            "isRegister": True,
            "isValue": False,
            "isLabel": False
        }]
    },
    "TIM": {
        "ins": "TIM",
        "args": [{
            "isRegister": False,
            "isValue": True,
            "isLabel": False
        }]
    }
}
# Symbol table filled during the first pass: label name -> program
# counter (byte offset) where the label was defined.
labels = {}
# Name of the most recently parsed label (without the trailing ":").
lastLabel = ""
def valueToInt(arg):
    """Convert an immediate operand to its integer value.

    Accepts either a decimal literal such as "42" or a quoted character
    literal such as "'a'", for which the character's code point is
    returned.
    """
    try:
        return int(arg)
    except ValueError:
        # Not a plain integer: treat it as a quoted character literal
        # and return the code of the character between the quotes.
        # (Was a bare except, which would also have hidden TypeError
        # and other genuine bugs.)
        return ord(arg[1])
def registerToDec(reg):
    """Return the numeric index of a register token such as "R2"."""
    index_char = reg[1]  # character after the leading "R"
    return int(index_char)
def testArgIsRegister(arg):
if (len(arg) != 2):
return False
if (arg[0] != "R"):
return False
try:
val = int(arg[1])
if (0 <= val <= 3):
return True
except:
pass
return False
def testArgIsValue(arg):
# Test 0 - 255
try:
val = int(arg)
if (0 <= val <= 255):
return True
except:
pass
# Test 'a' 'A' '0'
if (len(arg) == 3):
if (arg[0] == arg[2] == "'"):
if ((ord('a') <= ord(arg[1]) <= ord('z')) or (ord('A') <= ord(arg[1]) <= ord('Z')) or (ord('0') <= ord(arg[1]) <= ord('9'))):
return True
return False
def testArgIsLabel(arg, twoDotsIncluded = False):
if (len(arg) == 0):
return False
if (arg[0] != "_"):
return False
if (twoDotsIncluded):
if (arg[-1] != ":"):
return False
if (set(arg[1:-1]) <= set("abcdefghijklmnopqrstuvwxyz0123456789")):
return True
else:
if (set(arg[1:]) <= set("abcdefghijklmnopqrstuvwxyz0123456789")):
return True
return False
def convertInsDB(args):
    # DB emits a raw data byte; the "DB": True flag marks this entry as
    # data rather than an instruction.
    value = valueToInt(args[0])
    return {"opcode": [value], "DB": True}

def convertInsCALL(args):
    # CALL: one opcode byte followed by a "label" placeholder that the
    # second assembly pass replaces with the resolved address.
    return {"opcode": [0b00000000, "label"], "label": args[0], "offset": 0}

def convertInsRET(args):
    # RET: single-byte opcode, no operands.
    return {"opcode": [0b10000000]}

def convertInsJMP(args):
    # JMP: unconditional jump to a label (resolved in the second pass).
    return {"opcode": [0b01000000, "label"], "label": args[0], "offset": 0}

def convertInsJLT(args):
    # JLT: conditional jump — presumably "jump if less than" after a
    # CMP; name-based inference, confirm against the CPU spec.
    return {"opcode": [0b11000000, "label"], "label": args[0], "offset": 0}

def convertInsJEQ(args):
    # JEQ: conditional jump — presumably "jump if equal" after a CMP;
    # name-based inference, confirm against the CPU spec.
    return {"opcode": [0b00100000, "label"], "label": args[0], "offset": 0}

def convertInsPUSH(args):
    # PUSH Rn: the register index is OR-ed into the low opcode bits.
    idReg0 = registerToDec(args[0])
    return {"opcode": [0b10100000 | idReg0]}

def convertInsPOP(args):
    # POP Rn: the register index is OR-ed into the low opcode bits.
    idReg0 = registerToDec(args[0])
    return {"opcode": [0b01100000 | idReg0]}
def convertInsMOV(args):
    """Encode MOV: register-to-register or immediate-to-register.

    Returns {"opcode": [...]} with one byte for the register form or
    two bytes (opcode + immediate) for the immediate form.
    """
    idReg0 = registerToDec(args[0])
    print("idReg0", idReg0)
    if testArgIsRegister(args[1]):
        idReg1 = registerToDec(args[1])
        # Fix: debug print was mislabelled "idReg0" while showing idReg1.
        print("idReg1", idReg1)
        # MOV Rd, Rs: destination index in bits 3-2, source in bits 1-0.
        return {"opcode": [0b01010000 | (idReg0 << 2) | (idReg1)]}
    # MOV Rd, imm: opcode byte followed by the immediate value byte.
    value = valueToInt(args[1])
    return {"opcode": [0b11100000 | (idReg0), value]}
def convertInsSUB(args):
    """Encode SUB: register-from-register or immediate-from-register.

    Returns {"opcode": [...]} with one byte for the register form or
    two bytes (opcode + immediate) for the immediate form.
    """
    idReg0 = registerToDec(args[0])
    print("idReg0", idReg0)
    if testArgIsRegister(args[1]):
        idReg1 = registerToDec(args[1])
        # Fix: debug print was mislabelled "idReg0" while showing idReg1.
        print("idReg1", idReg1)
        # SUB Rd, Rs: destination index in bits 3-2, source in bits 1-0.
        return {"opcode": [0b11010000 | (idReg0 << 2) | (idReg1)]}
    # SUB Rd, imm: opcode byte followed by the immediate value byte.
    value = valueToInt(args[1])
    return {"opcode": [0b00010000 | (idReg0), value]}
def convertInsCMP(args):
    """Encode CMP: register-with-register or register-with-immediate.

    Returns {"opcode": [...]} with one byte for the register form or
    two bytes (opcode + immediate) for the immediate form.
    """
    idReg0 = registerToDec(args[0])
    print("idReg0", idReg0)
    if testArgIsRegister(args[1]):
        idReg1 = registerToDec(args[1])
        # Fix: debug print was mislabelled "idReg0" while showing idReg1.
        print("idReg1", idReg1)
        # CMP Rd, Rs: first register in bits 3-2, second in bits 1-0.
        return {"opcode": [0b00110000 | (idReg0 << 2) | (idReg1)]}
    # CMP Rd, imm: opcode byte followed by the immediate value byte.
    value = valueToInt(args[1])
    return {"opcode": [0b10010000 | (idReg0), value]}
def convertInsLDR(args):
    # LDR Rd, Ri, _label: memory load addressed via a label.
    # NOTE(review): "DB_Update" is set here but never consumed by
    # assemble() in this file — possibly reserved for a later patching
    # step; confirm before relying on it.
    idReg0 = registerToDec(args[0])
    idReg1 = registerToDec(args[1])
    return {"opcode": [0b10110000 | (idReg0 << 2) | (idReg1), "label"], "label": args[2], "offset": 0, "DB_Update": True}

def convertInsSTR(args):
    # STR Rd, Ri, _label: memory store addressed via a label.
    # Same "DB_Update" caveat as convertInsLDR above.
    idReg0 = registerToDec(args[0])
    idReg1 = registerToDec(args[1])
    return {"opcode": [0b01110000 | (idReg0 << 2) | (idReg1), "label"], "label": args[2], "offset": 0, "DB_Update": True}

def convertInsOUT(args):
    # OUT Rn: write a register to the output port.
    idReg0 = registerToDec(args[0])
    return {"opcode": [0b11110000 | idReg0]}

def convertInsTIM(args):
    # TIM n: opcode byte followed by the delay value byte.
    value = valueToInt(args[0])
    return {"opcode": [0b11111000, value]}
def testArg(arg, insArg):
    """Check one operand against an instruction's argument descriptor.

    *insArg* is one entry from instructions[...]["args"].  Exits the
    assembler with status 1 when the operand matches none of the
    operand kinds the descriptor allows.
    """
    # The operand is valid if it matches any of the allowed kinds.
    # (Rewrote the three-flag accumulator; also dropped the dead
    # trailing `pass` statement.)
    valid = (
        (insArg["isRegister"] and testArgIsRegister(arg))
        or (insArg["isValue"] and testArgIsValue(arg))
        or (insArg["isLabel"] and testArgIsLabel(arg))
    )
    if not valid:
        print(f"ERROR : Arg {arg} not valid !")
        exit(1)
def decodeInstruction(args, ins):
    """Validate the operands of one instruction, then encode it.

    Returns the encoder's dict ({"opcode": [...], ...}), or None for an
    unknown mnemonic — matching the original if/elif chain, which fell
    through to an implicit None.  Exits (via testArg) on a bad operand.
    """
    for arg, descriptor in zip(args, ins["args"]):
        testArg(arg, descriptor)
    # Dispatch table replaces a 15-branch if/elif chain.
    converters = {
        "DB": convertInsDB,
        "CALL": convertInsCALL,
        "RET": convertInsRET,
        "JMP": convertInsJMP,
        "JLT": convertInsJLT,
        "JEQ": convertInsJEQ,
        "PUSH": convertInsPUSH,
        "POP": convertInsPOP,
        "MOV": convertInsMOV,
        "SUB": convertInsSUB,
        "CMP": convertInsCMP,
        "LDR": convertInsLDR,
        "STR": convertInsSTR,
        "OUT": convertInsOUT,
        "TIM": convertInsTIM,
    }
    converter = converters.get(ins["ins"])
    if converter is not None:
        return converter(args)
    # Unknown mnemonic: preserve the original implicit-None fall-through.
    return None
def decodeLine(line, PC):
    """Parse one assembly source line.

    Side effect: records label definitions ("_name:") in the global
    `labels` table as label -> current PC.  Returns the encoded
    instruction dict, or None for blank / comment-only / label-only
    lines.  Exits the process on an unknown mnemonic or a wrong
    operand count.
    """
    global lastLabel, labels
    # Strip an inline comment introduced by ";".
    commentPos = line.find(";")
    if (commentPos != -1):
        line = line[:line.find(";")]
    line = line.strip()
    #print(">" + line + "<")
    args = line.split(" ")
    args = [i for i in args if i]  # drop empty tokens from repeated spaces
    if (len(args) == 0):
        return
    INS = args[0]
    args = args[1:]
    #print(args)
    # Label definition line: remember its address, emit no bytes.
    if (testArgIsLabel(INS, twoDotsIncluded=True)):
        lastLabel = INS[:-1]
        labels[lastLabel] = PC
        return
    instruction = None
    try:
        instruction = instructions[INS]
    except:
        print("ERROR : Bad instruction :", INS)
        exit(1)
    #print(instruction)
    if (len(args) != len(instruction["args"])):
        print(f"ERROR : Bad argument count. Excpected {len(instruction['args'])}, got {len(args)}")
        exit(1)
    return decodeInstruction(args, instruction)
def assemble(path):
    """Two-pass assembly of the source file at *path*.

    Pass 1 decodes every line into opcode fragments while collecting
    label addresses into the global `labels` table.  Pass 2 replaces
    every "label" placeholder byte with the resolved address and
    flattens everything into the returned list of byte values.
    """
    global lastLabel, labels
    PC = 0  # program counter: byte offset of the next emitted opcode
    assemble1st = []  # per-instruction dicts produced by the first pass
    bytecode = []
    with open(path, "r") as file:
        # First read: pre-compilation (decode + collect label addresses).
        for line in file:
            print(line, end="")
            ret = decodeLine(line, PC)
            if (ret != None):
                PC += len(ret["opcode"])
                assemble1st.append(ret)
                print(" ==> ", ret)
    print("\n\n\n\n\n\n")
    print(assemble1st)
    print("Labels : ", labels)
    # Label expansion: patch placeholder bytes with resolved addresses.
    for item in assemble1st:
        if ("label" in item):
            labelIndex = labels[item["label"]]
            for index in range(len(item["opcode"])):
                if (item["opcode"][index] == "label"):
                    item["opcode"][index] = labelIndex
        bytecode.extend(item["opcode"])
    print("\n\n\n\n\n\n")
    print(assemble1st)
    print(bytecode)
    return bytecode
# Command-line entry point: assemble the .asm file given as argv[1]
# and write the resulting machine code to "out.bin".
if (__name__ == "__main__"):
    path = ""
    args = sys.argv
    if (len(args) > 1):
        path = args[1]
    else:
        print("NEED PATH !!!")
        exit(0)
    print(path)
    code = assemble(path)
    with open("out.bin", "wb") as file:
        file.write(bytes(code))
    exit(0)

95
ble/ComWithDongle.py Normal file
View File

@ -0,0 +1,95 @@
# python -m serial.tools.list_ports
import sys
import time
#import binascii
import base64
import serial
import threading
import json
# Control characters sent to soft-reset the dongle's MicroPython REPL:
# Ctrl-C interrupts the running program, Ctrl-D soft-reboots the board.
ctrlC = bytes.fromhex("03")
ctrlD = bytes.fromhex("04")

class ComWithDongle:
    """Class to manage communication with dongle, over virtual COM port"""

    def __init__(self, comPort:str, peripheralName:str, onMsgReceived, debug=False):
        """:param comPort: name of COM port used by dongle
        :param peripheralName: name of BLE peripheral
        :param onMsgReceived: function to call when a message from peripheral is received
        :param debug: when True, print debug messages received from dongle"""
        try:
            self.ser = serial.Serial(port=comPort, baudrate=115200, timeout=2)
        except serial.SerialException:
            exit(f"no device on port {comPort}")
        # Semaphores released by the reader thread when the dongle reports
        # the corresponding events.
        self.bleConnected = threading.Semaphore(0)
        self.messageSent = threading.Semaphore(0)
        self.onMsgReceived = onMsgReceived
        self.debug = debug
        self.resetDongle()
        threading.Thread(name='readComPort', target=self.readFromComPort, daemon=True).start()
        # first message over COM port to dongle is to define BLE peripheral to connect on
        self.sendDict({'type':'connect','name':peripheralName})
        timeoutNotReached = self.bleConnected.acquire(timeout=5)
        if not timeoutNotReached:
            exit(f'unable to connect to peripheral "{peripheralName}"')

    def resetDongle(self):
        # Interrupt and soft-reboot the dongle firmware so it starts from
        # a clean state; the 2 s sleep gives it time to finish booting.
        self.ser.write(ctrlC)
        self.ser.write(ctrlD)
        self.ser.flush()
        time.sleep(2)

    def sendDict(self, msg:dict):
        # Dongle line protocol: one JSON object per CR-terminated line.
        self.ser.write(json.dumps(msg).encode("utf-8") + b'\r')

    def sendMsg(self, msg:str|bytes):
        # Strings go through as-is; raw bytes are base64-encoded because
        # the JSON line protocol is text-only.
        if isinstance(msg, str):
            self.sendDict({'type':'msg', 'format':'str', 'string':msg})
        else:
            #self.sendDict({'type':'msg', 'base64':binascii.b2a_base64(msg).decode("utf-8")})
            #b = binascii.b2a_base64(msg).decode("utf-8").rstrip()
            #b = base64.b64encode(msg).decode("utf-8").rstrip()
            b = base64.b64encode(msg).decode("utf-8").rstrip()
            if self.debug: print('sendMsg', msg, '=>', b, flush=True)
            self.sendDict({'type':'msg', 'format':'base64', 'string':b})
        # Block until the dongle confirms transmission (2 s timeout).
        self.messageSent.acquire(timeout=2)

    def disconnect(self):
        self.sendDict({'type': 'disconnect'})

    def readFromComPort(self):
        # Reader loop, run in a daemon thread: parse each JSON line from
        # the dongle and dispatch it by message type.
        while True:
            line = self.ser.readline().rstrip()
            # valid message can't be empty
            if type(line) is not bytes or line == b'':
                # empty message received after a timeout on serial connection, to ignore
                continue
            line = line.decode("utf-8")
            try:
                receivedMsgDict = json.loads(line)
            except json.decoder.JSONDecodeError:
                # this is not a dictionary, just a debug message
                if self.debug: print('from COM:', line, flush=True)
                continue
            msgType = receivedMsgDict['type']
            if msgType == 'connected':
                self.bleConnected.release()
            elif msgType == 'sentMessage':
                self.messageSent.release()
            elif msgType == 'msgFromBle':
                if receivedMsgDict['format'] == 'str':
                    self.onMsgReceived(receivedMsgDict['string'])
                else:
                    if self.debug: print('base64 msg from BLE:', len(receivedMsgDict['string']), receivedMsgDict['string'])
                    #self.onMsgReceived(binascii.a2b_base64(receivedMsgDict['string']))
                    self.onMsgReceived(base64.b64decode(receivedMsgDict['string']))
            elif msgType == 'debug':
                if self.debug:
                    del(receivedMsgDict['type'])
                    print('debug COM:', receivedMsgDict)
            elif msgType in ['connect', 'msg']:
                # Echo of our own outgoing message types; nothing to do.
                pass
            else:
                print('unknown msg type', receivedMsgDict)

48
ble/README.md Normal file
View File

@ -0,0 +1,48 @@
# BLE example
Here is an example of driver to send messages using BLE
# Setup on robot (or other BLE advertiser)
Copy following files to robot
- aioble/\*
- RobotBleServer.py
- mainRobotTestBLE.py (to rename as main.py)
You can use script toRobot.sh for that, for example when run from a Windows git bash,
if robot is connected on drive D:, you can run
> ./toRobot.sh /d
# Setup on USB dongle
Copy the following files to the dongle
- aioble/\*
- mainDongle.py (to rename as main.py)
You can use script toDongle.sh for that, for example when run from a Windows git bash,
if dongle is connected on drive E:, you can run
> ./toDongle.sh /e
# Setup on computer
You need pyserial module for python. You can install it using command
> python -m pip install pyserial
or if a proxy is required
> python -m pip install --proxy \<http proxy parameter\> pyserial
Then run following command
> python mainPcTestBLE.py --portcom \<com port used by dongle\>
To know COM port to use as argument, run following command before and after dongle connection:
> python -m serial.tools.list_ports
Port in second result but not in first result is port used by dongle.
# Connect to the right robot
When several robots are started at the same time, each shall have a unique identifier so you can connect over BLE to the right robot.
For that, you shall replace "myTeamName" with a unique identifier (for example the name of your team) in the following files:
- mainRobotTestBLE.py
- mainPcTestBLE.py
# Note relative to BLE
Bluetooth is a connection with a limited transfer rate. If you send many messages in a short period of time, or transfer long messages, the BLE driver will do its best to transfer all the data, but expect delays before messages are received on the other side.

113
ble/RobotBleServer.py Normal file
View File

@ -0,0 +1,113 @@
# to know COM port used when connected on PC:
# python -m serial.tools.list_ports
import binascii
import sys
sys.path.append("")
from micropython import const
import aioble
import bluetooth
import struct
# GATT identifiers of the robot's messaging service and characteristic.
_SERVICE_UUID = bluetooth.UUID(0x1234)
_CHAR_UUID = bluetooth.UUID(0x1235)
# How frequently to send advertising beacons.
_ADV_INTERVAL_MS = 250_000
# Maximum payload bytes per BLE message (after the 1-byte command header).
MAX_MSG_DATA_LENGTH = const(18)
# Command byte values of the chunked-message protocol.
_COMMAND_DONE = const(0)
_COMMAND_SENDDATA = const(1)
_COMMAND_SENDCHUNK = const(2) # send chunk of string, use _COMMAND_SENDDATA for last chunk
_COMMAND_SENDBYTESDATA = const(3)
_COMMAND_SENDBYTESCHUNK = const(4) # send chunk of string base64 formatted, use _COMMAND_SENDBYTESDATA for last chunk

class RobotBleServer:
    """Class to manage connection with BLE"""

    def __init__(self, robotName:str, onMsgReceived):
        """:param robotName: name to use in advertising
        :param onMsgReceived: function to call when a message is received"""
        self.robotName = robotName
        self.onMsgReceived = onMsgReceived
        # Register GATT server.
        service = aioble.Service(_SERVICE_UUID)
        self.characteristic = aioble.Characteristic(service, _CHAR_UUID, write=True, notify=True)
        aioble.register_services(service)
        self.connection = None

    def sendMessage(self, msg:str|bytes):
        """Send a message over BLE
        Message can be a string or a bytes sequence (maximum 18 characters/bytes per message)
        :param msg: message to send"""
        if type(msg) == str:
            encodedMsg = msg.encode()
            sendMsgType, sendChunkMsgType = _COMMAND_SENDDATA, _COMMAND_SENDCHUNK
        elif type(msg) == bytes:
            # Raw bytes travel base64-encoded so the payload stays text.
            #msg = binascii.b2a_base64(msg).encode()
            encodedMsg = binascii.b2a_base64(msg).rstrip()
            sendMsgType, sendChunkMsgType = _COMMAND_SENDBYTESDATA, _COMMAND_SENDBYTESCHUNK
        else:
            raise Exception('unsupported message type', type(msg))
        print('encode', type(msg), msg, '=>', encodedMsg)
        # Every chunk but the last is sent with the "chunk" command; the
        # final part uses the "data" command so the receiver knows the
        # message is complete.
        while len(encodedMsg) > MAX_MSG_DATA_LENGTH:
            chunk = encodedMsg[:MAX_MSG_DATA_LENGTH]
            self.characteristic.notify(self.connection, struct.pack("<B", sendChunkMsgType) + chunk)
            encodedMsg = encodedMsg[MAX_MSG_DATA_LENGTH:]
            print('sent chunk', chunk)
        self.characteristic.notify(self.connection, struct.pack("<B", sendMsgType) + encodedMsg)
        print('sent last', encodedMsg)

    async def bleTask(self):
        """Loop to wait for incoming messages over BLE.
        When a received message is complete, call function defined in self.onMsgReceived
        When BLE connection is closed, stop this function"""
        try:
            with self.connection.timeout(None):
                dataChunk = ''
                msgId = 0
                while True:
                    await self.characteristic.written()
                    msg = self.characteristic.read()
                    #self.characteristic.write(b"")
                    if len(msg) < 3:
                        continue
                    # Message is <command><seq><data>.
                    command = msg[0]
                    op_seq = int(msg[1])
                    msgData = msg[2:].decode()
                    #print('MSG=', msg)
                    if command in (_COMMAND_SENDCHUNK, _COMMAND_SENDBYTESCHUNK):
                        # Intermediate chunk: accumulate until the final part.
                        dataChunk += msgData
                        print('received chunk', msgData, '=>', dataChunk)
                    elif command in (_COMMAND_SENDDATA, _COMMAND_SENDBYTESDATA):
                        data = dataChunk + msgData
                        dataChunk = ''
                        if command == _COMMAND_SENDBYTESDATA:
                            # Bytes payloads arrive base64-encoded; decode back.
                            data = binascii.a2b_base64(data)
                        #print('received data:', data)
                        print('received:', len(data), msgId, type(data), data)
                        self.onMsgReceived(data)
                        msgId += 1
        except aioble.DeviceDisconnectedError:
            print('disconnected BLE')
            return

    async def communicationTask(self):
        """Loop to advertise and wait for connection.
        When connection is established, start task to read incoming messages"""
        while True:
            print("Waiting for connection")
            self.connection = await aioble.advertise(
                _ADV_INTERVAL_MS,
                name=self.robotName,
                services=[_SERVICE_UUID],
            )
            print("Connection from", self.connection.device)
            await self.bleTask()
            await self.connection.disconnected()
            self.connection = None

32
ble/aioble/__init__.py Normal file
View File

@ -0,0 +1,32 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
from .device import Device, DeviceDisconnectedError
from .core import log_info, log_warn, log_error, GattError, config, stop
# Optional subsystems: each role (peripheral / central / GATT server) is
# available only if its module could be imported on this port/build.
# NOTE(review): the bare "except:" also hides genuine errors raised while
# importing these modules (not only ImportError); kept as-is to preserve
# the vendored code's behaviour.
try:
    from .peripheral import advertise
except:
    log_info("Peripheral support disabled")
try:
    from .central import scan
except:
    log_info("Central support disabled")
try:
    from .server import (
        Service,
        Characteristic,
        BufferedCharacteristic,
        Descriptor,
        register_services,
    )
except:
    log_info("GATT server support disabled")
# BLE device address types.
ADDR_PUBLIC = const(0)
ADDR_RANDOM = const(1)

297
ble/aioble/central.py Normal file
View File

@ -0,0 +1,297 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import bluetooth
import struct
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
)
from .device import Device, DeviceConnection, DeviceTimeout
# IRQ event codes delivered by the bluetooth module (central role).
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
# Advertising PDU types reported in scan results.
_ADV_IND = const(0)
_ADV_DIRECT_IND = const(1)
_ADV_SCAN_IND = const(2)
_ADV_NONCONN_IND = const(3)
_SCAN_RSP = const(4)
# Advertising-data field type codes (Bluetooth assigned numbers).
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)
_ADV_TYPE_SHORT_NAME = const(0x08)
_ADV_TYPE_UUID16_INCOMPLETE = const(0x2)
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_INCOMPLETE = const(0x4)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_INCOMPLETE = const(0x6)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_APPEARANCE = const(0x19)
_ADV_TYPE_MANUFACTURER = const(0xFF)
# Keep track of the active scanner so IRQs can be delivered to it.
_active_scanner = None
# Set of devices that are waiting for the peripheral connect IRQ.
_connecting = set()
def _central_irq(event, data):
    # BLE IRQ handler for central-role events; dispatches to the active
    # scanner or the matching pending/established connection.
    # Send results and done events to the active scanner instance.
    if event == _IRQ_SCAN_RESULT:
        addr_type, addr, adv_type, rssi, adv_data = data
        if not _active_scanner:
            return
        # Copy addr/adv_data: the underlying IRQ buffers are reused by
        # the BLE stack after this handler returns.
        _active_scanner._queue.append((addr_type, bytes(addr), adv_type, rssi, bytes(adv_data)))
        _active_scanner._event.set()
    elif event == _IRQ_SCAN_DONE:
        if not _active_scanner:
            return
        _active_scanner._done = True
        _active_scanner._event.set()
    # Peripheral connect must be in response to a pending connection, so find
    # it in the pending connection set.
    elif event == _IRQ_PERIPHERAL_CONNECT:
        conn_handle, addr_type, addr = data
        for d in _connecting:
            if d.addr_type == addr_type and d.addr == addr:
                # Allow connect() to complete.
                connection = d._connection
                connection._conn_handle = conn_handle
                connection._event.set()
                break
    # Find the active device connection for this connection handle.
    elif event == _IRQ_PERIPHERAL_DISCONNECT:
        conn_handle, _, _ = data
        if connection := DeviceConnection._connected.get(conn_handle, None):
            # Tell the device_task that it should terminate.
            connection._event.set()
def _central_shutdown():
    # Reset module-level scan/connect state when the BLE stack shuts down.
    global _active_scanner, _connecting
    _active_scanner = None
    _connecting = set()

register_irq_handler(_central_irq, _central_shutdown)
# Cancel an in-progress scan (the controller supports only one scan at
# a time, so any other operation must stop the current one first).
async def _cancel_pending():
    if _active_scanner:
        await _active_scanner.cancel()
# Start connecting to a peripheral.
# Call device.connect() rather than using method directly.
async def _connect(connection, timeout_ms):
    device = connection.device
    if device in _connecting:
        # A connect for this device is already in flight; nothing to do.
        return
    # Enable BLE and cancel in-progress scans.
    ensure_active()
    await _cancel_pending()
    # Allow the connected IRQ to find the device by address.
    _connecting.add(device)
    # Event will be set in the connected IRQ, and then later
    # re-used to notify disconnection.
    connection._event = connection._event or asyncio.ThreadSafeFlag()
    try:
        with DeviceTimeout(None, timeout_ms):
            ble.gap_connect(device.addr_type, device.addr)
            # Wait for the connected IRQ.
            await connection._event.wait()
            assert connection._conn_handle is not None
            # Register connection handle -> device.
            DeviceConnection._connected[connection._conn_handle] = connection
    finally:
        # After timeout, don't hold a reference and ignore future events.
        _connecting.remove(device)
# Represents a single device that has been found during a scan. The scan
# iterator will return the same ScanResult instance multiple times as its data
# changes (i.e. changing RSSI or advertising data).
class ScanResult:
    def __init__(self, device):
        self.device = device       # Device this result belongs to
        self.adv_data = None       # latest advertising payload
        self.resp_data = None      # latest scan-response payload
        self.rssi = None           # latest signal strength
        self.connectable = False   # True once an _ADV_IND was seen

    # New scan result available, return true if it changes our state.
    def _update(self, adv_type, rssi, adv_data):
        updated = False
        if rssi != self.rssi:
            self.rssi = rssi
            updated = True
        if adv_type in (_ADV_IND, _ADV_NONCONN_IND):
            if adv_data != self.adv_data:
                self.adv_data = adv_data
                self.connectable = adv_type == _ADV_IND
                updated = True
        elif adv_type == _ADV_SCAN_IND:
            if adv_data != self.adv_data and self.resp_data:
                updated = True
            self.adv_data = adv_data
        elif adv_type == _SCAN_RSP and adv_data:
            if adv_data != self.resp_data:
                self.resp_data = adv_data
                updated = True
        return updated

    def __str__(self):
        return "Scan result: {} {}".format(self.device, self.rssi)

    # Gets all the fields for the specified types.
    def _decode_field(self, *adv_type):
        # Advertising payloads are repeated packets of the following form:
        #   1 byte data length (N + 1)
        #   1 byte type (see the _ADV_TYPE_* constants)
        #   N bytes type-specific data
        for payload in (self.adv_data, self.resp_data):
            if not payload:
                continue
            i = 0
            while i + 1 < len(payload):
                if payload[i + 1] in adv_type:
                    yield payload[i + 2 : i + payload[i] + 1]
                i += 1 + payload[i]

    # Returns the value of the complete (or shortened) advertised name, if available.
    def name(self):
        for n in self._decode_field(_ADV_TYPE_NAME, _ADV_TYPE_SHORT_NAME):
            return str(n, "utf-8") if n else ""

    # Generator that enumerates the service UUIDs that are advertised.
    def services(self):
        for u in self._decode_field(_ADV_TYPE_UUID16_INCOMPLETE, _ADV_TYPE_UUID16_COMPLETE):
            yield bluetooth.UUID(struct.unpack("<H", u)[0])
        for u in self._decode_field(_ADV_TYPE_UUID32_INCOMPLETE, _ADV_TYPE_UUID32_COMPLETE):
            yield bluetooth.UUID(struct.unpack("<I", u)[0])
        for u in self._decode_field(_ADV_TYPE_UUID128_INCOMPLETE, _ADV_TYPE_UUID128_COMPLETE):
            yield bluetooth.UUID(u)

    # Generator that returns (manufacturer_id, data) tuples.
    def manufacturer(self, filter=None):
        for u in self._decode_field(_ADV_TYPE_MANUFACTURER):
            if len(u) < 2:
                continue
            m = struct.unpack("<H", u[0:2])[0]
            if filter is None or m == filter:
                yield (m, u[2:])
# Use with:
#   async with aioble.scan(...) as scanner:
#       async for result in scanner:
#           ...
class scan:
    def __init__(self, duration_ms, interval_us=None, window_us=None, active=False):
        self._queue = []     # raw (addr_type, addr, adv_type, rssi, adv_data) events from the IRQ
        self._event = asyncio.ThreadSafeFlag()
        self._done = False
        # Keep track of what we've already seen.
        self._results = set()
        # Ideally we'd start the scan here and avoid having to save these
        # values, but we need to stop any previous scan first via awaiting
        # _cancel_pending(), but __init__ isn't async.
        self._duration_ms = duration_ms
        self._interval_us = interval_us or 1280000
        self._window_us = window_us or 11250
        self._active = active

    async def __aenter__(self):
        global _active_scanner
        ensure_active()
        await _cancel_pending()
        _active_scanner = self
        ble.gap_scan(self._duration_ms, self._interval_us, self._window_us, self._active)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        # Cancel the current scan if we're still the active scanner. This will
        # happen if the loop breaks early before the scan duration completes.
        if _active_scanner == self:
            await self.cancel()

    def __aiter__(self):
        assert _active_scanner == self
        return self

    async def __anext__(self):
        global _active_scanner
        if _active_scanner != self:
            # The scan has been canceled (e.g. a connection was initiated).
            raise StopAsyncIteration
        while True:
            while self._queue:
                addr_type, addr, adv_type, rssi, adv_data = self._queue.pop()
                # Try to find an existing ScanResult for this device.
                for r in self._results:
                    if r.device.addr_type == addr_type and r.device.addr == addr:
                        result = r
                        break
                else:
                    # New device, create a new Device & ScanResult.
                    device = Device(addr_type, addr)
                    result = ScanResult(device)
                    self._results.add(result)
                # Add the new information from this event.
                if result._update(adv_type, rssi, adv_data):
                    # It's new information, so re-yield this result.
                    return result
            if self._done:
                # _IRQ_SCAN_DONE event was fired.
                _active_scanner = None
                raise StopAsyncIteration
            # Wait for either done or result IRQ.
            await self._event.wait()

    # Cancel any in-progress scan. We need to do this before starting any other operation.
    async def cancel(self):
        if self._done:
            return
        ble.gap_scan(None)
        while not self._done:
            await self._event.wait()
        global _active_scanner
        _active_scanner = None

456
ble/aioble/client.py Normal file
View File

@ -0,0 +1,456 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
from collections import deque
import uasyncio as asyncio
import struct
import bluetooth
from .core import ble, GattError, register_irq_handler
from .device import DeviceConnection
# IRQ event codes delivered by the bluetooth module (GATT client role).
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)
_IRQ_GATTC_DESCRIPTOR_DONE = const(14)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
_IRQ_GATTC_WRITE_DONE = const(17)
_IRQ_GATTC_NOTIFY = const(18)
_IRQ_GATTC_INDICATE = const(19)
# Client Characteristic Configuration Descriptor UUID and its
# subscription bit values.
_CCCD_UUID = const(0x2902)
_CCCD_NOTIFY = const(1)
_CCCD_INDICATE = const(2)
# Characteristic property flags.
_FLAG_READ = const(0x0002)
_FLAG_WRITE_NO_RESPONSE = const(0x0004)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
_FLAG_INDICATE = const(0x0020)
# Forward IRQs directly to static methods on the type that handles them and
# knows how to map handles to instances. Note: We copy all uuid and data
# params here for safety, but a future optimisation might be able to avoid
# these copies in a few places.
def _client_irq(event, data):
    if event == _IRQ_GATTC_SERVICE_RESULT:
        conn_handle, start_handle, end_handle, uuid = data
        ClientDiscover._discover_result(
            conn_handle, start_handle, end_handle, bluetooth.UUID(uuid)
        )
    elif event == _IRQ_GATTC_SERVICE_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
        conn_handle, end_handle, value_handle, properties, uuid = data
        ClientDiscover._discover_result(
            conn_handle, end_handle, value_handle, properties, bluetooth.UUID(uuid)
        )
    elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_DESCRIPTOR_RESULT:
        conn_handle, dsc_handle, uuid = data
        ClientDiscover._discover_result(conn_handle, dsc_handle, bluetooth.UUID(uuid))
    elif event == _IRQ_GATTC_DESCRIPTOR_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_READ_RESULT:
        conn_handle, value_handle, char_data = data
        # bytes(...) copies the transient IRQ buffer before it is reused.
        ClientCharacteristic._read_result(conn_handle, value_handle, bytes(char_data))
    elif event == _IRQ_GATTC_READ_DONE:
        conn_handle, value_handle, status = data
        ClientCharacteristic._read_done(conn_handle, value_handle, status)
    elif event == _IRQ_GATTC_WRITE_DONE:
        conn_handle, value_handle, status = data
        ClientCharacteristic._write_done(conn_handle, value_handle, status)
    elif event == _IRQ_GATTC_NOTIFY:
        conn_handle, value_handle, notify_data = data
        ClientCharacteristic._on_notify(conn_handle, value_handle, bytes(notify_data))
    elif event == _IRQ_GATTC_INDICATE:
        conn_handle, value_handle, indicate_data = data
        ClientCharacteristic._on_indicate(conn_handle, value_handle, bytes(indicate_data))

register_irq_handler(_client_irq, None)
# Async generator for discovering services, characteristics, descriptors.
class ClientDiscover:
    """Async iterator over GATT discovery results.

    Each yielded item is an instance of `disc_type` (ClientService,
    ClientCharacteristic or ClientDescriptor). Result IRQs append raw
    tuples to a queue; __anext__ drains the queue and stops once the
    done IRQ has set the status.
    """

    def __init__(self, connection, disc_type, parent, timeout_ms, *args):
        self._connection = connection
        # Each result IRQ will append to this.
        self._queue = []
        # This will be set by the done IRQ.
        self._status = None
        # Tell the generator to process new events.
        self._event = asyncio.ThreadSafeFlag()
        # Must implement the _start_discovery static method. Instances of this
        # type are returned by __anext__.
        self._disc_type = disc_type
        # This will be the connection for a service discovery, and the service for a characteristic discovery.
        self._parent = parent
        # Timeout for the discovery process.
        # TODO: Not implemented.
        self._timeout_ms = timeout_ms
        # Additional arguments to pass to the _start_discovery method on disc_type.
        self._args = args

    async def _start(self):
        # Claim the connection's single discovery slot and kick off discovery.
        if self._connection._discover:
            # TODO: cancel existing? (e.g. perhaps they didn't let the loop run to completion)
            raise ValueError("Discovery in progress")
        # Tell the connection that we're the active discovery operation (the IRQ only gives us conn_handle).
        self._connection._discover = self
        # Call the appropriate ubluetooth.BLE method.
        self._disc_type._start_discovery(self._parent, *self._args)

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self._connection._discover != self:
            # Start the discovery if necessary.
            await self._start()
        # Keep returning items from the queue until the status is set by the
        # done IRQ.
        while True:
            while self._queue:
                return self._disc_type(self._parent, *self._queue.pop())
            if self._status is not None:
                # Discovery complete; release the connection's discovery slot.
                self._connection._discover = None
                raise StopAsyncIteration
            # Wait for more results to be added to the queue.
            await self._event.wait()

    # Tell the active discovery instance for this connection to add a new result
    # to the queue. NOTE: invoked via the class without self (from _client_irq).
    def _discover_result(conn_handle, *args):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if discover := connection._discover:
                discover._queue.append(args)
                discover._event.set()

    # Tell the active discovery instance for this connection that it is complete.
    # NOTE: invoked via the class without self (from _client_irq).
    def _discover_done(conn_handle, status):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if discover := connection._discover:
                discover._status = status
                discover._event.set()
# Represents a single service supported by a connection. Do not construct this
# class directly, instead use `async for service in connection.services([uuid])` or
# `await connection.service(uuid)`.
class ClientService:
    """A GATT service discovered on a remote device."""

    def __init__(self, connection, start_handle, end_handle, uuid):
        self.connection = connection
        # Handle range, used when discovering this service's characteristics.
        self._start_handle = start_handle
        self._end_handle = end_handle
        # Allows comparison to a known uuid.
        self.uuid = uuid

    def __str__(self):
        return "Service: {} {} {}".format(self._start_handle, self._end_handle, self.uuid)

    async def characteristic(self, uuid, timeout_ms=2000):
        """Return the first characteristic matching uuid, or None.

        Always lets the discovery loop run to completion.
        """
        found = None
        async for candidate in self.characteristics(uuid, timeout_ms):
            if found is None and candidate.uuid == uuid:
                found = candidate
        return found

    def characteristics(self, uuid=None, timeout_ms=2000):
        """Async-iterate all characteristics of this service (optionally by uuid).

        Use with `async for`; the loop must be allowed to run to completion.
        """
        return ClientDiscover(self.connection, ClientCharacteristic, self, timeout_ms, uuid)

    # For ClientDiscover -- invoked without self; `connection` is the parent.
    def _start_discovery(connection, uuid=None):
        ble.gattc_discover_services(connection._conn_handle, uuid)
class BaseClientCharacteristic:
    """Shared read/write machinery for ClientCharacteristic and ClientDescriptor.

    Subclasses must implement _connection() returning the owning
    DeviceConnection.
    """

    def __init__(self, value_handle, properties, uuid):
        # Used for read/write/notify ops.
        self._value_handle = value_handle
        # Which operations are supported.
        self.properties = properties
        # Allows comparison to a known uuid.
        self.uuid = uuid
        if properties & _FLAG_READ:
            # Fired for each read result and read done IRQ.
            self._read_event = None
            self._read_data = None
            # Used to indicate that the read is complete.
            self._read_status = None
        if (properties & _FLAG_WRITE) or (properties & _FLAG_WRITE_NO_RESPONSE):
            # Fired for the write done IRQ.
            self._write_event = None
            # Used to indicate that the write is complete.
            self._write_status = None

    # Register this value handle so events can find us.
    def _register_with_connection(self):
        self._connection()._characteristics[self._value_handle] = self

    # Map an incoming IRQ to an registered characteristic.
    # NOTE: invoked via the class without self.
    def _find(conn_handle, value_handle):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if characteristic := connection._characteristics.get(value_handle, None):
                return characteristic
            else:
                # IRQ for a characteristic that we weren't expecting. e.g.
                # notification when we're not waiting on notified().
                # TODO: This will happen on btstack, which doesn't give us
                # value handle for the done event.
                return None

    def _check(self, flag):
        # Raise if this characteristic doesn't support the requested operation.
        if not (self.properties & flag):
            raise ValueError("Unsupported")

    # Issue a read to the characteristic.
    async def read(self, timeout_ms=1000):
        """Read the remote value; returns the data, raises GattError on failure."""
        self._check(_FLAG_READ)
        # Make sure this conn_handle/value_handle is known.
        self._register_with_connection()
        # This will be set by the done IRQ.
        self._read_status = None
        # This will be set by the result and done IRQs. Re-use if possible.
        self._read_event = self._read_event or asyncio.ThreadSafeFlag()
        # Issue the read.
        ble.gattc_read(self._connection()._conn_handle, self._value_handle)
        with self._connection().timeout(timeout_ms):
            # The event will be set for each read result, then a final time for done.
            while self._read_status is None:
                await self._read_event.wait()
            if self._read_status != 0:
                raise GattError(self._read_status)
            return self._read_data

    # Map an incoming result IRQ to a registered characteristic.
    # NOTE: invoked via the class without self.
    def _read_result(conn_handle, value_handle, data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._read_data = data
            characteristic._read_event.set()

    # Map an incoming read done IRQ to a registered characteristic.
    # NOTE: invoked via the class without self.
    def _read_done(conn_handle, value_handle, status):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._read_status = status
            characteristic._read_event.set()

    async def write(self, data, response=None, timeout_ms=1000):
        """Write to the remote value; waits for completion when response is truthy."""
        self._check(_FLAG_WRITE | _FLAG_WRITE_NO_RESPONSE)
        # If the response arg is unset, then default it to true if we only support write-with-response.
        if response is None:
            p = self.properties
            response = (p & _FLAG_WRITE) and not (p & _FLAG_WRITE_NO_RESPONSE)
        if response:
            # Same as read.
            self._register_with_connection()
            self._write_status = None
            self._write_event = self._write_event or asyncio.ThreadSafeFlag()
        # Issue the write.
        ble.gattc_write(self._connection()._conn_handle, self._value_handle, data, response)
        if response:
            with self._connection().timeout(timeout_ms):
                # The event will be set for the write done IRQ.
                await self._write_event.wait()
            if self._write_status != 0:
                raise GattError(self._write_status)

    # Map an incoming write done IRQ to a registered characteristic.
    # NOTE: invoked via the class without self.
    def _write_done(conn_handle, value_handle, status):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._write_status = status
            characteristic._write_event.set()
# Represents a single characteristic supported by a service. Do not construct
# this class directly, instead use `async for characteristic in
# service.characteristics([uuid])` or `await service.characteristic(uuid)`.
class ClientCharacteristic(BaseClientCharacteristic):
    """A GATT characteristic discovered on a remote service."""

    def __init__(self, service, end_handle, value_handle, properties, uuid):
        self.service = service
        self.connection = service.connection
        # Used for descriptor discovery. If available, otherwise assume just
        # past the value handle (enough for two descriptors without risking
        # going into the next characteristic).
        self._end_handle = end_handle if end_handle > value_handle else value_handle + 2
        super().__init__(value_handle, properties, uuid)
        if properties & _FLAG_NOTIFY:
            # Fired when a notification arrives.
            self._notify_event = asyncio.ThreadSafeFlag()
            # Data for the most recent notification.
            self._notify_queue = deque((), 1)
        if properties & _FLAG_INDICATE:
            # Same for indications.
            self._indicate_event = asyncio.ThreadSafeFlag()
            self._indicate_queue = deque((), 1)

    def __str__(self):
        return "Characteristic: {} {} {} {}".format(
            self._end_handle, self._value_handle, self.properties, self.uuid
        )

    def _connection(self):
        # Used by BaseClientCharacteristic to reach the owning connection.
        return self.service.connection

    # Search for a specific descriptor by uuid.
    async def descriptor(self, uuid, timeout_ms=2000):
        """Return the first descriptor matching uuid, or None."""
        result = None
        # Make sure loop runs to completion.
        async for descriptor in self.descriptors(timeout_ms):
            if not result and descriptor.uuid == uuid:
                # Keep first result.
                result = descriptor
        return result

    # Search for all descriptors.
    # Use with `async for`, e.g.
    #   async for descriptor in characteristic.descriptors():
    # Note: must allow the loop to run to completion.
    def descriptors(self, timeout_ms=2000):
        return ClientDiscover(self.connection, ClientDescriptor, self, timeout_ms)

    # For ClientDiscover -- invoked without self; `service` is the parent.
    def _start_discovery(service, uuid=None):
        ble.gattc_discover_characteristics(
            service.connection._conn_handle,
            service._start_handle,
            service._end_handle,
            uuid,
        )

    # Helper for notified() and indicated().
    async def _notified_indicated(self, queue, event, timeout_ms):
        # Ensure that events for this connection can route to this characteristic.
        self._register_with_connection()
        # If the queue is empty, then we need to wait. However, if the queue
        # has a single item, we also need to do a no-op wait in order to
        # clear the event flag (because the queue will become empty and
        # therefore the event should be cleared).
        if len(queue) <= 1:
            with self._connection().timeout(timeout_ms):
                await event.wait()
        # Either we started > 1 item, or the wait completed successfully, return
        # the front of the queue.
        return queue.popleft()

    # Wait for the next notification.
    # Will return immediately if a notification has already been received.
    async def notified(self, timeout_ms=None):
        self._check(_FLAG_NOTIFY)
        return await self._notified_indicated(self._notify_queue, self._notify_event, timeout_ms)

    def _on_notify_indicate(self, queue, event, data):
        # If we've gone from empty to one item, then wake something
        # blocking on `await char.notified()` (or `await char.indicated()`).
        wake = len(queue) == 0
        # Append the data. By default this is a deque with max-length==1, so it
        # replaces. But if capture is enabled then it will append.
        queue.append(data)
        if wake:
            # Queue is now non-empty. If something is waiting, it will be
            # woken. If something isn't waiting right now, then a future
            # caller to `await char.notified()` will see the queue is
            # non-empty, and wait on the event if it's going to empty the
            # queue.
            event.set()

    # Map an incoming notify IRQ to a registered characteristic.
    # NOTE: invoked via the class without self.
    def _on_notify(conn_handle, value_handle, notify_data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._on_notify_indicate(
                characteristic._notify_queue, characteristic._notify_event, notify_data
            )

    # Wait for the next indication.
    # Will return immediately if an indication has already been received.
    async def indicated(self, timeout_ms=None):
        self._check(_FLAG_INDICATE)
        return await self._notified_indicated(
            self._indicate_queue, self._indicate_event, timeout_ms
        )

    # Map an incoming indicate IRQ to a registered characteristic.
    # NOTE: invoked via the class without self.
    def _on_indicate(conn_handle, value_handle, indicate_data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._on_notify_indicate(
                characteristic._indicate_queue, characteristic._indicate_event, indicate_data
            )

    # Write to the Client Characteristic Configuration to subscribe to
    # notify/indications for this characteristic.
    async def subscribe(self, notify=True, indicate=False):
        """Enable (or disable) notifications/indications via the CCCD."""
        # Ensure that the generated notifications are dispatched in case the app
        # hasn't awaited on notified/indicated yet.
        self._register_with_connection()
        if cccd := await self.descriptor(bluetooth.UUID(_CCCD_UUID)):
            await cccd.write(struct.pack("<H", _CCCD_NOTIFY * notify + _CCCD_INDICATE * indicate))
        else:
            raise ValueError("CCCD not found")
# Represents a single descriptor supported by a characteristic. Do not construct
# this class directly, instead use `async for descriptors in
# characteristic.descriptors([uuid])` or `await characteristic.descriptor(uuid)`.
class ClientDescriptor(BaseClientCharacteristic):
    """A GATT descriptor discovered on a remote characteristic."""

    def __init__(self, characteristic, dsc_handle, uuid):
        self.characteristic = characteristic
        # Descriptors are always treated as readable + write-without-response.
        super().__init__(dsc_handle, _FLAG_READ | _FLAG_WRITE_NO_RESPONSE, uuid)

    def __str__(self):
        return "Descriptor: {} {} {}".format(self._value_handle, self.properties, self.uuid)

    def _connection(self):
        # Reach the connection via the owning characteristic's service.
        return self.characteristic.service.connection

    # For ClientDiscover -- invoked without self; `characteristic` is the parent.
    def _start_discovery(characteristic, uuid=None):
        owner = characteristic._connection()
        ble.gattc_discover_descriptors(
            owner._conn_handle,
            characteristic._value_handle,
            characteristic._end_handle,
        )

78
ble/aioble/core.py Normal file
View File

@ -0,0 +1,78 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
import bluetooth
# Global verbosity: 0 = silent, 1 = errors, 2 = +warnings, 3 = +info.
log_level = 1


def log_error(*args):
    """Print an error-level message (enabled when log_level >= 1)."""
    if log_level <= 0:
        return
    print("[aioble] E:", *args)


def log_warn(*args):
    """Print a warning-level message (enabled when log_level >= 2)."""
    if log_level <= 1:
        return
    print("[aioble] W:", *args)


def log_info(*args):
    """Print an info-level message (enabled when log_level >= 3)."""
    if log_level <= 2:
        return
    print("[aioble] I:", *args)
class GattError(Exception):
    """Raised when a GATT operation completes with a non-zero status.

    The raw status code is available as ``exc._status`` (and ``exc.args[0]``).
    """

    def __init__(self, status):
        # Forward the status to Exception so str()/repr() show the failure
        # code; previously the exception rendered with no detail at all.
        super().__init__(status)
        self._status = status
def ensure_active():
    """Activate the BLE stack if it isn't already.

    On first activation, best-effort load any persisted bond secrets so
    encrypted reconnections can resume. Failure to load (module absent,
    no stored secrets) is deliberately non-fatal.
    """
    if not ble.active():
        try:
            from .security import load_secrets

            load_secrets()
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; best-effort loading should
            # only ignore ordinary errors.
            pass
        ble.active(True)
def config(*args, **kwargs):
    """Proxy to BLE.config(), making sure the stack is active first."""
    ensure_active()
    result = ble.config(*args, **kwargs)
    return result
# Because different functionality is enabled by which files are available the
# different modules can register their IRQ handlers and shutdown handlers
# dynamically.
_irq_handlers = []
_shutdown_handlers = []


def register_irq_handler(irq, shutdown):
    """Register a per-module IRQ handler and/or shutdown hook (either may be None)."""
    if irq:
        _irq_handlers.append(irq)
    if shutdown:
        _shutdown_handlers.append(shutdown)


def stop():
    """Deactivate the BLE stack, then run every registered shutdown hook."""
    ble.active(False)
    for hook in _shutdown_handlers:
        hook()


# Dispatch IRQs to the registered sub-modules.
def ble_irq(event, data):
    """Offer the IRQ to each registered handler; the first non-None result wins."""
    log_info(event, data)
    for handler in _irq_handlers:
        outcome = handler(event, data)
        if outcome is not None:
            return outcome
# Singleton BLE instance shared by all aioble modules.
# TODO: Allow this to be injected.
ble = bluetooth.BLE()
# Route every BLE IRQ through the dispatcher above.
ble.irq(ble_irq)

295
ble/aioble/device.py Normal file
View File

@ -0,0 +1,295 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import uasyncio as asyncio
import binascii
from .core import ble, register_irq_handler, log_error
_IRQ_MTU_EXCHANGED = const(21)
# Raised by `with device.timeout()`.
class DeviceDisconnectedError(Exception):
    """The peer disconnected while an operation guarded by a device timeout was in flight."""

    pass
def _device_irq(event, data):
    """Handle device-level IRQs (currently only the MTU exchange event)."""
    if event != _IRQ_MTU_EXCHANGED:
        return
    conn_handle, mtu = data
    connection = DeviceConnection._connected.get(conn_handle, None)
    if connection:
        # Record the negotiated MTU and wake any pending exchange_mtu().
        connection.mtu = mtu
        if connection._mtu_event:
            connection._mtu_event.set()


register_irq_handler(_device_irq, None)
# Context manager to allow an operation to be cancelled by timeout or device
# disconnection. Don't use this directly -- use `with connection.timeout(ms):`
# instead.
class DeviceTimeout:
    """Cancels the current task on timeout or device disconnection.

    Inside the `with` body a timeout surfaces as asyncio.TimeoutError and
    a disconnection as DeviceDisconnectedError.
    """

    def __init__(self, connection, timeout_ms):
        self._connection = connection
        self._timeout_ms = timeout_ms
        # We allow either (or both) connection and timeout_ms to be None. This
        # allows this to be used either as a just-disconnect, just-timeout, or
        # no-op.
        # This task is active while the operation is in progress. It sleeps
        # until the timeout, and then cancels the working task. If the working
        # task completes, __exit__ will cancel the sleep.
        self._timeout_task = None
        # This is the task waiting for the actual operation to complete.
        # Usually this is waiting on an event that will be set() by an IRQ
        # handler.
        self._task = asyncio.current_task()
        # Tell the connection that if it disconnects, it should cancel this
        # operation (by cancelling self._task).
        if connection:
            connection._timeouts.append(self)

    async def _timeout_sleep(self):
        try:
            await asyncio.sleep_ms(self._timeout_ms)
        except asyncio.CancelledError:
            # The operation completed successfully and this timeout task was
            # cancelled by __exit__.
            return
        # The sleep completed, so we should trigger the timeout. Set
        # self._timeout_task to None so that we can tell the difference
        # between a disconnect and a timeout in __exit__.
        self._timeout_task = None
        self._task.cancel()

    def __enter__(self):
        if self._timeout_ms:
            # Schedule the timeout waiter.
            self._timeout_task = asyncio.create_task(self._timeout_sleep())

    def __exit__(self, exc_type, exc_val, exc_traceback):
        # One of five things happened:
        #   1 - The operation completed successfully.
        #   2 - The operation timed out.
        #   3 - The device disconnected.
        #   4 - The operation failed for a different exception.
        #   5 - The task was cancelled by something else.
        # Don't need the connection to tell us about disconnection anymore.
        if self._connection:
            self._connection._timeouts.remove(self)
        try:
            if exc_type == asyncio.CancelledError:
                # Case 2, we started a timeout and it's completed.
                if self._timeout_ms and self._timeout_task is None:
                    raise asyncio.TimeoutError
                # Case 3, we have a disconnected device.
                if self._connection and self._connection._conn_handle is None:
                    raise DeviceDisconnectedError
                # Case 5, something else cancelled us.
                # Allow the cancellation to propagate.
                return
            # Case 1 & 4. Either way, just stop the timeout task and let the
            # exception (if case 4) propagate.
        finally:
            # In all cases, if the timeout is still running, cancel it.
            if self._timeout_task:
                self._timeout_task.cancel()
class Device:
    """A remote BLE device identified by (addr_type, addr).

    The address may be given as 6 raw bytes or as a hex string with or
    without ":" separators.
    """

    def __init__(self, addr_type, addr):
        # Public properties
        self.addr_type = addr_type
        if len(addr) == 6:
            self.addr = addr
        else:
            # Hex string form -- strip separators and convert to bytes.
            self.addr = binascii.unhexlify(addr.replace(":", ""))
        self._connection = None

    def __eq__(self, rhs):
        return self.addr_type == rhs.addr_type and self.addr == rhs.addr

    def __hash__(self):
        return hash((self.addr_type, self.addr))

    def __str__(self):
        kind = "ADDR_PUBLIC" if self.addr_type == 0 else "ADDR_RANDOM"
        suffix = ", CONNECTED" if self._connection else ""
        return "Device({}, {}{})".format(kind, self.addr_hex(), suffix)

    def addr_hex(self):
        """Return the address as a ":"-separated hex string."""
        return binascii.hexlify(self.addr, ":").decode()

    async def connect(self, timeout_ms=10000):
        """Connect (as central) to this device; returns the DeviceConnection."""
        if self._connection:
            return self._connection
        # Forward to implementation in central.py.
        from .central import _connect

        await _connect(DeviceConnection(self), timeout_ms)
        # Start the device task that will clean up after disconnection.
        self._connection._run_task()
        return self._connection
class DeviceConnection:
    """An active connection to a Device (central or peripheral role).

    Tracks GATT client state, in-flight timeouts and the L2CAP channel so
    that IRQ handlers can route events back to the right objects.
    """

    # Global map of connection handle to active devices (for IRQ mapping).
    _connected = {}

    def __init__(self, device):
        self.device = device
        device._connection = self
        # Security state (updated by the encryption update IRQ elsewhere).
        self.encrypted = False
        self.authenticated = False
        self.bonded = False
        self.key_size = False
        # Negotiated MTU, set by the MTU exchange IRQ.
        self.mtu = None
        self._conn_handle = None
        # This event is fired by the IRQ both for connection and disconnection
        # and controls the device_task.
        self._event = None
        # If we're waiting for a pending MTU exchange.
        self._mtu_event = None
        # In-progress client discovery instance (e.g. services, chars,
        # descriptors) used for IRQ mapping.
        self._discover = None
        # Map of value handle to characteristic (so that IRQs with
        # conn_handle,value_handle can route to them). See
        # ClientCharacteristic._find for where this is used.
        self._characteristics = {}
        self._task = None
        # DeviceTimeout instances that are currently waiting on this device
        # and need to be notified if disconnection occurs.
        self._timeouts = []
        # Fired by the encryption update event.
        self._pair_event = None
        # Active L2CAP channel for this device.
        # TODO: Support more than one concurrent channel.
        self._l2cap_channel = None

    # While connected, this task waits for disconnection then cleans up.
    async def device_task(self):
        assert self._conn_handle is not None
        # Wait for the (either central or peripheral) disconnected irq.
        await self._event.wait()
        # Mark the device as disconnected.
        del DeviceConnection._connected[self._conn_handle]
        self._conn_handle = None
        self.device._connection = None
        # Cancel any in-progress operations on this device.
        for t in self._timeouts:
            t._task.cancel()

    def _run_task(self):
        # The event will already exist if we initiated the connection.
        self._event = self._event or asyncio.ThreadSafeFlag()
        self._task = asyncio.create_task(self.device_task())

    async def disconnect(self, timeout_ms=2000):
        """Actively disconnect and wait for cleanup to complete."""
        await self.disconnected(timeout_ms, disconnect=True)

    async def disconnected(self, timeout_ms=60000, disconnect=False):
        """Wait for disconnection, optionally initiating it first."""
        if not self.is_connected():
            return
        # The task must have been created after successful connection.
        assert self._task
        if disconnect:
            try:
                ble.gap_disconnect(self._conn_handle)
            except OSError as e:
                log_error("Disconnect", e)
        with DeviceTimeout(None, timeout_ms):
            await self._task

    # Retrieve a single service matching this uuid.
    async def service(self, uuid, timeout_ms=2000):
        """Return the first service matching uuid, or None."""
        result = None
        # Make sure loop runs to completion.
        async for service in self.services(uuid, timeout_ms):
            if not result and service.uuid == uuid:
                result = service
        return result

    # Search for all services (optionally by uuid).
    # Use with `async for`, e.g.
    #   async for service in device.services():
    # Note: must allow the loop to run to completion.
    # TODO: disconnection / timeout
    def services(self, uuid=None, timeout_ms=2000):
        from .client import ClientDiscover, ClientService

        return ClientDiscover(self, ClientService, self, timeout_ms, uuid)

    async def pair(self, *args, **kwargs):
        """Initiate pairing -- see security.pair for accepted arguments."""
        from .security import pair

        await pair(self, *args, **kwargs)

    def is_connected(self):
        return self._conn_handle is not None

    # Use with `with` to simplify disconnection and timeout handling.
    def timeout(self, timeout_ms):
        return DeviceTimeout(self, timeout_ms)

    async def exchange_mtu(self, mtu=None, timeout_ms=1000):
        """Initiate MTU exchange (optionally configuring our preferred MTU first)."""
        if not self.is_connected():
            raise ValueError("Not connected")
        if mtu:
            ble.config(mtu=mtu)
        self._mtu_event = self._mtu_event or asyncio.ThreadSafeFlag()
        ble.gattc_exchange_mtu(self._conn_handle)
        with self.timeout(timeout_ms):
            await self._mtu_event.wait()
        return self.mtu

    # Wait for a connection on an L2CAP connection-oriented-channel.
    async def l2cap_accept(self, psm, mtu, timeout_ms=None):
        from .l2cap import accept

        return await accept(self, psm, mtu, timeout_ms)

    # Attempt to connect to a listening device.
    async def l2cap_connect(self, psm, mtu, timeout_ms=1000):
        from .l2cap import connect

        return await connect(self, psm, mtu, timeout_ms)

    # Context manager -- automatically disconnect.
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        await self.disconnect()

214
ble/aioble/l2cap.py Normal file
View File

@ -0,0 +1,214 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import uasyncio as asyncio
from .core import ble, log_error, register_irq_handler
from .device import DeviceConnection
# BLE IRQ event codes for L2CAP connection-oriented channels.
_IRQ_L2CAP_ACCEPT = const(22)
_IRQ_L2CAP_CONNECT = const(23)
_IRQ_L2CAP_DISCONNECT = const(24)
_IRQ_L2CAP_RECV = const(25)
_IRQ_L2CAP_SEND_READY = const(26)

# Once we start listening we're listening forever. (Limitation in NimBLE)
_listening = False


def _l2cap_irq(event, data):
    """Route L2CAP IRQs to the (single) active channel on the connection."""
    if event not in (
        _IRQ_L2CAP_CONNECT,
        _IRQ_L2CAP_DISCONNECT,
        _IRQ_L2CAP_RECV,
        _IRQ_L2CAP_SEND_READY,
    ):
        return
    # All the L2CAP events start with (conn_handle, cid, ...)
    if connection := DeviceConnection._connected.get(data[0], None):
        if channel := connection._l2cap_channel:
            # Expect to match the cid for this conn handle (unless we're
            # waiting for connection in which case channel._cid is None).
            if channel._cid is not None and channel._cid != data[1]:
                return
            # Update the channel object with new information.
            if event == _IRQ_L2CAP_CONNECT:
                _, channel._cid, _, channel.our_mtu, channel.peer_mtu = data
            elif event == _IRQ_L2CAP_DISCONNECT:
                _, _, psm, status = data
                channel._status = status
                channel._cid = None
                connection._l2cap_channel = None
            elif event == _IRQ_L2CAP_RECV:
                channel._data_ready = True
            elif event == _IRQ_L2CAP_SEND_READY:
                channel._stalled = False
            # Notify channel.
            channel._event.set()


def _l2cap_shutdown():
    # Reset module state when aioble is stopped.
    global _listening
    _listening = False


register_irq_handler(_l2cap_irq, _l2cap_shutdown)
# The channel was disconnected during a send/recvinto/flush.
class L2CAPDisconnectedError(Exception):
    """The L2CAP channel went away while an operation was in progress."""

    pass


# Failed to connect to connection (argument is status).
class L2CAPConnectionError(Exception):
    """Opening the L2CAP channel failed; args[0] is the disconnect status."""

    pass
class L2CAPChannel:
    """A connection-oriented L2CAP channel on a DeviceConnection.

    Only one channel per connection is currently supported.
    """

    def __init__(self, connection):
        if not connection.is_connected():
            raise ValueError("Not connected")
        if connection._l2cap_channel:
            raise ValueError("Already has channel")
        connection._l2cap_channel = self
        self._connection = connection
        # Maximum size that the other side can send to us.
        self.our_mtu = 0
        # Maximum size that we can send.
        self.peer_mtu = 0
        # Set back to None on disconnection.
        self._cid = None
        # Set during disconnection.
        self._status = 0
        # If true, must wait for _IRQ_L2CAP_SEND_READY IRQ before sending.
        self._stalled = False
        # Has received a _IRQ_L2CAP_RECV since the buffer was last emptied.
        self._data_ready = False
        self._event = asyncio.ThreadSafeFlag()

    def _assert_connected(self):
        # Raise if the channel has been torn down by the disconnect IRQ.
        if self._cid is None:
            raise L2CAPDisconnectedError

    async def recvinto(self, buf, timeout_ms=None):
        """Receive into buf; returns the number of bytes read."""
        self._assert_connected()
        # Wait until the data_ready flag is set. This flag is only ever set by
        # the event and cleared by this function.
        with self._connection.timeout(timeout_ms):
            while not self._data_ready:
                await self._event.wait()
                self._assert_connected()
        self._assert_connected()
        # Extract up to len(buf) bytes from the channel buffer.
        n = ble.l2cap_recvinto(self._connection._conn_handle, self._cid, buf)
        # Check if there's still remaining data in the channel buffers.
        self._data_ready = ble.l2cap_recvinto(self._connection._conn_handle, self._cid, None) > 0
        return n

    # Synchronously see if there's data ready.
    def available(self):
        self._assert_connected()
        return self._data_ready

    # Waits until the channel is free and then sends buf.
    # If the buffer is larger than the MTU it will be sent in chunks.
    async def send(self, buf, timeout_ms=None, chunk_size=None):
        """Send buf in flow-controlled chunks."""
        self._assert_connected()
        offset = 0
        chunk_size = min(self.our_mtu * 2, self.peer_mtu, chunk_size or self.peer_mtu)
        mv = memoryview(buf)
        while offset < len(buf):
            if self._stalled:
                await self.flush(timeout_ms)
            # l2cap_send returns True if you can send immediately.
            self._stalled = not ble.l2cap_send(
                self._connection._conn_handle,
                self._cid,
                mv[offset : offset + chunk_size],
            )
            offset += chunk_size

    async def flush(self, timeout_ms=None):
        """Wait until a stalled send has been accepted by the stack."""
        self._assert_connected()
        # Wait for the _stalled flag to be cleared by the IRQ.
        with self._connection.timeout(timeout_ms):
            while self._stalled:
                await self._event.wait()
                self._assert_connected()

    async def disconnect(self, timeout_ms=1000):
        """Close the channel and wait for the disconnect IRQ."""
        if self._cid is None:
            return
        # Wait for the cid to be cleared by the disconnect IRQ.
        ble.l2cap_disconnect(self._connection._conn_handle, self._cid)
        await self.disconnected(timeout_ms)

    async def disconnected(self, timeout_ms=1000):
        """Wait for the channel to be torn down (no-op if already gone)."""
        with self._connection.timeout(timeout_ms):
            while self._cid is not None:
                await self._event.wait()

    # Context manager -- automatically disconnect.
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        await self.disconnect()
# Use connection.l2cap_accept() instead of calling this directly.
async def accept(connection, psm, mtu, timeout_ms):
    """Wait for the remote side to open an L2CAP channel to us."""
    global _listening
    ch = L2CAPChannel(connection)
    # Start the stack listening if necessary.
    if not _listening:
        ble.l2cap_listen(psm, mtu)
        _listening = True
    # Wait for the connect irq from the remote connection.
    with connection.timeout(timeout_ms):
        await ch._event.wait()
    return ch
# Use connection.l2cap_connect() instead of calling this directly.
async def connect(connection, psm, mtu, timeout_ms):
    """Open an L2CAP channel to a listening device.

    Raises L2CAPConnectionError (with the status) on failure.
    """
    if _listening:
        raise ValueError("Can't connect while listening")
    ch = L2CAPChannel(connection)
    with connection.timeout(timeout_ms):
        ble.l2cap_connect(connection._conn_handle, psm, mtu)
        # Wait for the connect irq from the remote connection.
        # If the connection fails, we get a disconnect event (with status) instead.
        await ch._event.wait()
    if ch._cid is None:
        raise L2CAPConnectionError(ch._status)
    return ch

179
ble/aioble/peripheral.py Normal file
View File

@ -0,0 +1,179 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import bluetooth
import struct
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
)
from .device import Device, DeviceConnection, DeviceTimeout
# BLE IRQ event codes for the peripheral (advertising) role.
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)

# Advertising payload field (AD) type codes.
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_UUID16_MORE = const(0x2)
_ADV_TYPE_UUID32_MORE = const(0x4)
_ADV_TYPE_UUID128_MORE = const(0x6)
_ADV_TYPE_APPEARANCE = const(0x19)
_ADV_TYPE_MANUFACTURER = const(0xFF)

# Maximum size of a legacy advertising payload.
_ADV_PAYLOAD_MAX_LEN = const(31)

# Shared state between advertise() and the IRQ handler: the connection
# created by the most recent central connect, and the flag used to signal it.
_incoming_connection = None
_connect_event = None
def _peripheral_irq(event, data):
    """Handle central connect/disconnect IRQs while advertising."""
    global _incoming_connection
    if event == _IRQ_CENTRAL_CONNECT:
        conn_handle, addr_type, addr = data
        # Create, initialise, and register the device. bytes(addr) takes a
        # copy of the IRQ-provided buffer.
        remote = Device(addr_type, bytes(addr))
        connection = DeviceConnection(remote)
        connection._conn_handle = conn_handle
        DeviceConnection._connected[conn_handle] = connection
        _incoming_connection = connection
        # Signal advertise() to return the connected device.
        _connect_event.set()
    elif event == _IRQ_CENTRAL_DISCONNECT:
        conn_handle = data[0]
        connection = DeviceConnection._connected.get(conn_handle, None)
        if connection:
            # Tell the device_task that it should terminate.
            connection._event.set()


def _peripheral_shutdown():
    """Reset module state (registered as the shutdown hook)."""
    global _incoming_connection, _connect_event
    _incoming_connection = None
    _connect_event = None


register_irq_handler(_peripheral_irq, _peripheral_shutdown)
# Advertising payloads are repeated packets of the following form:
# 1 byte data length (N + 1)
# 1 byte type (see constants below)
# N bytes type-specific data
def _append(adv_data, resp_data, adv_type, value):
    """Append one advertising field (length, type, value) to the payload.

    Tries to fit the field into the advertising payload first, overflowing
    into the scan-response payload if necessary. Mutates adv_data (and
    resp_data, when it exists) in place and returns the possibly
    newly-created resp_data.

    :raises ValueError: if the field fits in neither payload.
    """
    data = struct.pack("BB", len(value) + 1, adv_type) + value
    # A legacy advertising (or scan response) payload may carry up to
    # _ADV_PAYLOAD_MAX_LEN (31) bytes, so a field that fills the payload
    # exactly is still valid — hence <=, not < (the original `<` wasted
    # one byte of each payload).
    if len(data) + len(adv_data) <= _ADV_PAYLOAD_MAX_LEN:
        adv_data += data
        return resp_data
    if len(data) + (len(resp_data) if resp_data else 0) <= _ADV_PAYLOAD_MAX_LEN:
        if not resp_data:
            # Overflow into resp_data for the first time.
            resp_data = bytearray()
        resp_data += data
        return resp_data
    raise ValueError("Advertising payload too long")
async def advertise(
    interval_us,
    adv_data=None,
    resp_data=None,
    connectable=True,
    limited_disc=False,
    br_edr=False,
    name=None,
    services=None,
    appearance=0,
    manufacturer=None,
    timeout_ms=None,
):
    """Start advertising and wait for a central to connect.

    :param interval_us: advertising interval in microseconds.
    :param adv_data: raw advertising payload; when omitted it is built from
        the keyword arguments below.
    :param resp_data: raw scan-response payload (overflow target).
    :param connectable: advertise as connectable.
    :param limited_disc: use limited- instead of general-discoverable flags.
    :param br_edr: advertise BR/EDR (classic) support in the flags field.
    :param name: device name for the payload.
    :param services: iterable of bluetooth.UUID to include.
    :param appearance: GAP appearance value (0 = omit).
    :param manufacturer: (company_id, bytes) manufacturer-specific data.
    :param timeout_ms: optional time to wait for a connection; raises
        asyncio.TimeoutError on expiry (via DeviceTimeout).
    :return: the DeviceConnection for the connected central, or None if
        the task was cancelled to stop advertising.
    """
    global _incoming_connection, _connect_event
    ensure_active()
    if not adv_data and not resp_data:
        # If the user didn't manually specify adv_data / resp_data then
        # construct them from the kwargs. Keep adding fields to adv_data,
        # overflowing to resp_data if necessary.
        # TODO: Try and do better bin-packing than just concatenating in
        # order?
        adv_data = bytearray()
        resp_data = _append(
            adv_data,
            resp_data,
            _ADV_TYPE_FLAGS,
            struct.pack("B", (0x01 if limited_disc else 0x02) + (0x18 if br_edr else 0x04)),
        )
        # Services are prioritised to go in the advertising data because iOS supports
        # filtering scan results by service only, so services must come first.
        if services:
            for uuid in services:
                b = bytes(uuid)
                # UUID byte length selects the 16/32/128-bit AD type.
                if len(b) == 2:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID16_COMPLETE, b)
                elif len(b) == 4:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID32_COMPLETE, b)
                elif len(b) == 16:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID128_COMPLETE, b)
        if name:
            resp_data = _append(adv_data, resp_data, _ADV_TYPE_NAME, name)
        if appearance:
            # See org.bluetooth.characteristic.gap.appearance.xml
            resp_data = _append(
                adv_data, resp_data, _ADV_TYPE_APPEARANCE, struct.pack("<H", appearance)
            )
        if manufacturer:
            resp_data = _append(
                adv_data,
                resp_data,
                _ADV_TYPE_MANUFACTURER,
                struct.pack("<H", manufacturer[0]) + manufacturer[1],
            )
    # Created lazily so the flag belongs to the running event loop.
    _connect_event = _connect_event or asyncio.ThreadSafeFlag()
    ble.gap_advertise(interval_us, adv_data=adv_data, resp_data=resp_data, connectable=connectable)
    try:
        # Allow optional timeout for a central to connect to us (or just to stop advertising).
        with DeviceTimeout(None, timeout_ms):
            await _connect_event.wait()
            # Get the newly connected connection to the central and start a task
            # to wait for disconnection.
            result = _incoming_connection
            _incoming_connection = None
            # This mirrors what connecting to a central does.
            result._run_task()
            return result
    except asyncio.CancelledError:
        # Something else cancelled this task (to manually stop advertising).
        # NOTE(review): the cancellation is swallowed rather than re-raised —
        # confirm this is intentional for the target uasyncio version.
        ble.gap_advertise(None)
    except asyncio.TimeoutError:
        # DeviceTimeout waiting for connection.
        ble.gap_advertise(None)
        raise

178
ble/aioble/security.py Normal file
View File

@ -0,0 +1,178 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const, schedule
import uasyncio as asyncio
import binascii
import json
from .core import log_info, log_warn, ble, register_irq_handler
from .device import DeviceConnection
_IRQ_ENCRYPTION_UPDATE = const(28)
_IRQ_GET_SECRET = const(29)
_IRQ_SET_SECRET = const(30)
_IRQ_PASSKEY_ACTION = const(31)
_IO_CAPABILITY_DISPLAY_ONLY = const(0)
_IO_CAPABILITY_DISPLAY_YESNO = const(1)
_IO_CAPABILITY_KEYBOARD_ONLY = const(2)
_IO_CAPABILITY_NO_INPUT_OUTPUT = const(3)
_IO_CAPABILITY_KEYBOARD_DISPLAY = const(4)
_PASSKEY_ACTION_INPUT = const(2)
_PASSKEY_ACTION_DISP = const(3)
_PASSKEY_ACTION_NUMCMP = const(4)
_DEFAULT_PATH = "ble_secrets.json"

# Mapping of (sec_type, key_bytes) -> value_bytes holding pairing secrets.
_secrets = {}
# True when _secrets has changed since the last flush to storage.
_modified = False
# Path of the JSON secrets file (set on first load/save).
_path = None


# Must call this before stack startup.
def load_secrets(path=None):
    """Load pairing/bonding secrets from a JSON file into memory.

    :param path: optional path to the secrets file; defaults to the
        previously-used path, then to _DEFAULT_PATH.
    """
    global _path, _secrets
    # Use path if specified, otherwise use previous path, otherwise use
    # default path.
    _path = path or _path or _DEFAULT_PATH
    # Reset old secrets.
    _secrets = {}
    try:
        with open(_path, "r") as f:
            entries = json.load(f)
            for sec_type, key, value in entries:
                # Keys and values are stored base64-encoded (see _save_secrets).
                _secrets[sec_type, binascii.a2b_base64(key)] = binascii.a2b_base64(value)
    # Narrowed from a bare `except:`: a missing file raises OSError and a
    # corrupt file raises ValueError (json/binascii); anything else should
    # propagate rather than be silently swallowed.
    except (OSError, ValueError):
        log_warn("No secrets available")
# Call this whenever the secrets dict changes.
def _save_secrets(arg=None):
    """Persist the in-memory secrets to the JSON file, if modified.

    :param arg: unused; present because micropython.schedule passes one
        positional argument to the scheduled callback.
    """
    global _modified, _path
    _path = _path or _DEFAULT_PATH
    if not _modified:
        # Only save if the secrets changed.
        return
    with open(_path, "w") as f:
        # Encode the bytes keys/values as base64 so they round-trip via JSON.
        # NOTE(review): b2a_base64 returns bytes, not str — MicroPython's json
        # serializes them, but CPython's json.dump would raise TypeError;
        # confirm the target runtime if this module is reused off-device.
        json_secrets = [
            (sec_type, binascii.b2a_base64(key), binascii.b2a_base64(value))
            for (sec_type, key), value in _secrets.items()
        ]
        json.dump(json_secrets, f)
    _modified = False
def _security_irq(event, data):
    """Handler for pairing/bonding IRQ events (registered via register_irq_handler).

    Returns a value only for the secret get/set events, which the stack
    consumes synchronously; all other events return None.
    """
    global _modified
    if event == _IRQ_ENCRYPTION_UPDATE:
        # Connection has updated (usually due to pairing).
        conn_handle, encrypted, authenticated, bonded, key_size = data
        log_info("encryption update", conn_handle, encrypted, authenticated, bonded, key_size)
        if connection := DeviceConnection._connected.get(conn_handle, None):
            connection.encrypted = encrypted
            connection.authenticated = authenticated
            connection.bonded = bonded
            connection.key_size = key_size
            # TODO: Handle failure.
            if encrypted and connection._pair_event:
                # Wake the pair() coroutine waiting on this connection.
                connection._pair_event.set()
    elif event == _IRQ_SET_SECRET:
        sec_type, key, value = data
        # Copy the memoryviews: they are only valid during the IRQ.
        key = sec_type, bytes(key)
        value = bytes(value) if value else None
        log_info("set secret:", key, value)
        if value is None:
            # Delete secret.
            if key not in _secrets:
                return False
            del _secrets[key]
        else:
            # Save secret.
            _secrets[key] = value
        # Queue up a save (don't synchronously write to flash).
        _modified = True
        schedule(_save_secrets, None)
        return True
    elif event == _IRQ_GET_SECRET:
        sec_type, index, key = data
        log_info("get secret:", sec_type, index, bytes(key) if key else None)
        if key is None:
            # Return the index'th secret of this type.
            i = 0
            for (t, _key), value in _secrets.items():
                if t == sec_type:
                    if i == index:
                        return value
                    i += 1
            return None
        else:
            # Return the secret for this key (or None).
            key = sec_type, bytes(key)
            return _secrets.get(key, None)
    elif event == _IRQ_PASSKEY_ACTION:
        conn_handle, action, passkey = data
        log_info("passkey action", conn_handle, action, passkey)
        # Passkey handling is not implemented; the sketch below shows the
        # intended response for each action type.
        # if action == _PASSKEY_ACTION_NUMCMP:
        #     # TODO: Show this passkey and confirm accept/reject.
        #     accept = 1
        #     self._ble.gap_passkey(conn_handle, action, accept)
        # elif action == _PASSKEY_ACTION_DISP:
        #     # TODO: Generate and display a passkey so the remote device can enter it.
        #     passkey = 123456
        #     self._ble.gap_passkey(conn_handle, action, passkey)
        # elif action == _PASSKEY_ACTION_INPUT:
        #     # TODO: Ask the user to enter the passkey shown on the remote device.
        #     passkey = 123456
        #     self._ble.gap_passkey(conn_handle, action, passkey)
        # else:
        #     log_warn("unknown passkey action")
def _security_shutdown():
    """Clear all cached pairing state when the BLE stack is shut down."""
    global _secrets, _modified, _path
    _path = None
    _modified = False
    _secrets = {}
register_irq_handler(_security_irq, _security_shutdown)
# Use device.pair() rather than calling this directly.
async def pair(
    connection,
    bond=True,
    le_secure=True,
    mitm=False,
    io=_IO_CAPABILITY_NO_INPUT_OUTPUT,
    timeout_ms=20000,
):
    """Initiate pairing on a connection and wait for encryption to come up.

    :param connection: DeviceConnection to pair.
    :param bond: persist keys (bonding).
    :param le_secure: require LE Secure Connections.
    :param mitm: require man-in-the-middle protection.
    :param io: one of the _IO_CAPABILITY_* constants.
    :param timeout_ms: give up after this long (raises via connection.timeout).
    """
    ble.config(bond=bond, le_secure=le_secure, mitm=mitm, io=io)
    with connection.timeout(timeout_ms):
        # _pair_event is set by _security_irq on _IRQ_ENCRYPTION_UPDATE.
        connection._pair_event = asyncio.ThreadSafeFlag()
        ble.gap_pair(connection._conn_handle)
        await connection._pair_event.wait()
        # TODO: Allow the passkey action to return to here and
        # invoke a callback or task to process the action.

344
ble/aioble/server.py Normal file
View File

@ -0,0 +1,344 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
from collections import deque
import bluetooth
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
GattError,
)
from .device import DeviceConnection, DeviceTimeout
_registered_characteristics = {}
_IRQ_GATTS_WRITE = const(3)
_IRQ_GATTS_READ_REQUEST = const(4)
_IRQ_GATTS_INDICATE_DONE = const(20)
_FLAG_READ = const(0x0002)
_FLAG_WRITE_NO_RESPONSE = const(0x0004)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
_FLAG_INDICATE = const(0x0020)
_FLAG_READ_ENCRYPTED = const(0x0200)
_FLAG_READ_AUTHENTICATED = const(0x0400)
_FLAG_READ_AUTHORIZED = const(0x0800)
_FLAG_WRITE_ENCRYPTED = const(0x1000)
_FLAG_WRITE_AUTHENTICATED = const(0x2000)
_FLAG_WRITE_AUTHORIZED = const(0x4000)
_FLAG_WRITE_CAPTURE = const(0x10000)
_FLAG_DESC_READ = const(1)
_FLAG_DESC_WRITE = const(2)
_WRITE_CAPTURE_QUEUE_LIMIT = const(10)
def _server_irq(event, data):
    """Route GATT-server IRQ events to the Characteristic class handlers."""
    if event == _IRQ_GATTS_WRITE:
        conn_handle, attr_handle = data
        Characteristic._remote_write(conn_handle, attr_handle)
    elif event == _IRQ_GATTS_READ_REQUEST:
        conn_handle, attr_handle = data
        # The return value (from on_read) is consumed synchronously by the stack.
        return Characteristic._remote_read(conn_handle, attr_handle)
    elif event == _IRQ_GATTS_INDICATE_DONE:
        conn_handle, value_handle, status = data
        Characteristic._indicate_done(conn_handle, value_handle, status)
def _server_shutdown():
    """Drop registered characteristics and tear down the write-capture machinery."""
    global _registered_characteristics
    _registered_characteristics = {}
    if hasattr(BaseCharacteristic, "_capture_task"):
        # The capture attributes are created lazily by _init_capture();
        # deleting them lets a later _init_capture() recreate them cleanly.
        BaseCharacteristic._capture_task.cancel()
        del BaseCharacteristic._capture_queue
        del BaseCharacteristic._capture_write_event
        del BaseCharacteristic._capture_consumed_event
        del BaseCharacteristic._capture_task
register_irq_handler(_server_irq, _server_shutdown)
class Service:
    """A GATT service: a UUID plus the characteristics registered under it."""

    def __init__(self, uuid):
        self.characteristics = []
        self.uuid = uuid

    def _tuple(self):
        """Return the (uuid, characteristics) tuple for gatts_register_services."""
        char_tuples = tuple(char._tuple() for char in self.characteristics)
        return (self.uuid, char_tuples)
class BaseCharacteristic:
    """Shared behaviour for Characteristic and Descriptor: local value
    storage, remote read/write IRQ handling, and the optional shared
    write-capture queue.
    """

    def _register(self, value_handle):
        """Bind this characteristic to its stack-assigned value handle and
        flush any value written before registration."""
        self._value_handle = value_handle
        _registered_characteristics[value_handle] = self
        if self._initial is not None:
            self.write(self._initial)
            self._initial = None

    # Read value from local db.
    def read(self):
        if self._value_handle is None:
            # Not registered yet: fall back to the staged initial value.
            return self._initial or b""
        else:
            return ble.gatts_read(self._value_handle)

    # Write value to local db, and optionally notify/indicate subscribers.
    def write(self, data, send_update=False):
        if self._value_handle is None:
            # Not registered yet: stage the value for _register() to flush.
            self._initial = data
        else:
            ble.gatts_write(self._value_handle, data, send_update)

    # When a capture-enabled characteristic is created, create the
    # necessary events (if not already created).
    @staticmethod
    def _init_capture():
        if hasattr(BaseCharacteristic, "_capture_queue"):
            # Already initialised by a previous capture characteristic.
            return
        BaseCharacteristic._capture_queue = deque((), _WRITE_CAPTURE_QUEUE_LIMIT)
        BaseCharacteristic._capture_write_event = asyncio.ThreadSafeFlag()
        BaseCharacteristic._capture_consumed_event = asyncio.ThreadSafeFlag()
        BaseCharacteristic._capture_task = asyncio.create_task(
            BaseCharacteristic._run_capture_task()
        )

    # Monitor the shared queue for incoming characteristic writes and forward
    # them sequentially to the individual characteristic events.
    @staticmethod
    async def _run_capture_task():
        write = BaseCharacteristic._capture_write_event
        consumed = BaseCharacteristic._capture_consumed_event
        q = BaseCharacteristic._capture_queue
        while True:
            if len(q):
                conn, data, characteristic = q.popleft()
                # Let the characteristic waiting in `written()` know that it
                # can proceed.
                characteristic._write_data = (conn, data)
                characteristic._write_event.set()
                # Wait for the characteristic to complete `written()` before
                # continuing.
                await consumed.wait()
            if not len(q):
                await write.wait()

    # Wait for a write on this characteristic. Returns the connection that did
    # the write, or a tuple of (connection, value) if capture is enabled for
    # this characteristics.
    async def written(self, timeout_ms=None):
        """Await the next remote write; optionally bounded by timeout_ms.

        :return: the writing DeviceConnection, or (connection, data) when
            capture is enabled, or None for non-writable characteristics.
        """
        if not hasattr(self, "_write_event"):
            # Not a writable characteristic.
            return
        # If no write has been seen then we need to wait. If the event has
        # already been set this will clear the event and continue
        # immediately. In regular mode, this is set by the write IRQ
        # directly (in _remote_write). In capture mode, this is set when it's
        # our turn by _capture_task.
        with DeviceTimeout(None, timeout_ms):
            await self._write_event.wait()
        # Return the write data and clear the stored copy.
        # In default usage this will be just the connection handle.
        # In capture mode this will be a tuple of (connection_handle, received_data)
        data = self._write_data
        self._write_data = None
        if self.flags & _FLAG_WRITE_CAPTURE:
            # Notify the shared queue monitor that the event has been consumed
            # by the caller to `written()` and another characteristic can now
            # proceed.
            BaseCharacteristic._capture_consumed_event.set()
        return data

    def on_read(self, connection):
        # Subclasses may override to veto/authorise reads; the returned code
        # is handed back to the stack by _server_irq.
        return 0

    def _remote_write(conn_handle, value_handle):
        """IRQ-side entry for _IRQ_GATTS_WRITE (note: no self — called with
        the raw handles from _server_irq)."""
        if characteristic := _registered_characteristics.get(value_handle, None):
            # If we've gone from empty to one item, then wake something
            # blocking on `await char.written()`.
            conn = DeviceConnection._connected.get(conn_handle, None)
            if characteristic.flags & _FLAG_WRITE_CAPTURE:
                # For capture, we append the connection and the written value
                # value to the shared queue along with the matching characteristic object.
                # The deque will enforce the max queue len.
                data = characteristic.read()
                BaseCharacteristic._capture_queue.append((conn, data, characteristic))
                BaseCharacteristic._capture_write_event.set()
            else:
                # Store the write connection handle to be later used to retrieve the data
                # then set event to handle in written() task.
                characteristic._write_data = conn
                characteristic._write_event.set()

    def _remote_read(conn_handle, value_handle):
        """IRQ-side entry for _IRQ_GATTS_READ_REQUEST."""
        if characteristic := _registered_characteristics.get(value_handle, None):
            return characteristic.on_read(DeviceConnection._connected.get(conn_handle, None))
class Characteristic(BaseCharacteristic):
    """A GATT characteristic attached to a Service.

    Flags are accumulated from the boolean keyword arguments; per-feature
    state (write events, indicate bookkeeping) is only created when the
    corresponding feature is enabled.
    """

    def __init__(
        self,
        service,
        uuid,
        read=False,
        write=False,
        write_no_response=False,
        notify=False,
        indicate=False,
        initial=None,
        capture=False,
    ):
        service.characteristics.append(self)
        self.descriptors = []
        flags = 0
        if read:
            flags |= _FLAG_READ
        if write or write_no_response:
            flags |= (_FLAG_WRITE if write else 0) | (
                _FLAG_WRITE_NO_RESPONSE if write_no_response else 0
            )
            if capture:
                # Capture means that we keep track of all writes, and capture
                # their values (and connection) in a queue. Otherwise we just
                # track the connection of the most recent write.
                flags |= _FLAG_WRITE_CAPTURE
                BaseCharacteristic._init_capture()
            # Set when this characteristic has a value waiting in self._write_data.
            self._write_event = asyncio.ThreadSafeFlag()
            # The connection of the most recent write, or a tuple of
            # (connection, data) if capture is enabled.
            self._write_data = None
        if notify:
            flags |= _FLAG_NOTIFY
        if indicate:
            flags |= _FLAG_INDICATE
            # TODO: This should probably be a dict of connection to (ev, status).
            # Right now we just support a single indication at a time.
            self._indicate_connection = None
            self._indicate_event = asyncio.ThreadSafeFlag()
            self._indicate_status = None
        self.uuid = uuid
        self.flags = flags
        self._value_handle = None
        self._initial = initial

    # Generate tuple for gatts_register_services.
    def _tuple(self):
        if self.descriptors:
            return (self.uuid, self.flags, tuple(d._tuple() for d in self.descriptors))
        else:
            # Workaround: v1.19 and below can't handle an empty descriptor tuple.
            return (self.uuid, self.flags)

    def notify(self, connection, data=None):
        """Send a notification (optionally updating the value) to one connection.

        :raises ValueError: if the characteristic was not created with notify=True.
        """
        if not (self.flags & _FLAG_NOTIFY):
            raise ValueError("Not supported")
        ble.gatts_notify(connection._conn_handle, self._value_handle, data)

    async def indicate(self, connection, timeout_ms=1000):
        """Send an indication and wait for the central's acknowledgement.

        Only one indication may be in flight at a time (see TODO in __init__).

        :raises ValueError: if indications are unsupported, already in
            progress, or the connection is down.
        :raises GattError: if the central reports a non-zero status.
        """
        if not (self.flags & _FLAG_INDICATE):
            raise ValueError("Not supported")
        if self._indicate_connection is not None:
            raise ValueError("In progress")
        if not connection.is_connected():
            raise ValueError("Not connected")
        self._indicate_connection = connection
        self._indicate_status = None
        try:
            with connection.timeout(timeout_ms):
                ble.gatts_indicate(connection._conn_handle, self._value_handle)
                # Wait for the IRQ (_indicate_done) to report completion.
                await self._indicate_event.wait()
                if self._indicate_status != 0:
                    raise GattError(self._indicate_status)
        finally:
            self._indicate_connection = None

    def _indicate_done(conn_handle, value_handle, status):
        """IRQ-side entry for _IRQ_GATTS_INDICATE_DONE (no self — raw handles)."""
        if characteristic := _registered_characteristics.get(value_handle, None):
            if connection := DeviceConnection._connected.get(conn_handle, None):
                if not characteristic._indicate_connection:
                    # Timeout.
                    return
                # See TODO in __init__ to support multiple concurrent indications.
                assert connection == characteristic._indicate_connection
                characteristic._indicate_status = status
                characteristic._indicate_event.set()
class BufferedCharacteristic(Characteristic):
    """A read-only characteristic with a custom-sized local value buffer.

    :param max_len: size of the stack-side buffer for this value.
    :param append: passed to gatts_set_buffer — presumably makes successive
        writes append rather than overwrite; confirm against the
        BLE.gatts_set_buffer documentation.
    """

    def __init__(self, service, uuid, max_len=20, append=False):
        super().__init__(service, uuid, read=True)
        self._max_len = max_len
        self._append = append

    def _register(self, value_handle):
        # Resize the buffer only once the stack has assigned a handle.
        super()._register(value_handle)
        ble.gatts_set_buffer(value_handle, self._max_len, self._append)
class Descriptor(BaseCharacteristic):
    """A GATT descriptor attached to a characteristic."""

    def __init__(self, characteristic, uuid, read=False, write=False, initial=None):
        characteristic.descriptors.append(self)
        # Workaround for https://github.com/micropython/micropython/issues/6864
        mode = _FLAG_DESC_READ if read else 0
        if write:
            mode |= _FLAG_DESC_WRITE
            # Writable descriptors track the most recent remote write.
            self._write_event = asyncio.ThreadSafeFlag()
            self._write_data = None
        self.uuid = uuid
        self.flags = mode
        self._value_handle = None
        self._initial = initial

    def _tuple(self):
        """Return the (uuid, flags) registration tuple."""
        return (self.uuid, self.flags)
# Turn the Service/Characteristic/Descriptor classes into a registration tuple
# and then extract their value handles.
def register_services(*services):
    """Register services with the BLE stack and bind the returned handles.

    :param services: Service instances whose characteristics and descriptors
        receive their stack-assigned value handles via _register().
    """
    ensure_active()
    # Registering invalidates any previous handle -> characteristic mapping.
    _registered_characteristics.clear()
    handles = ble.gatts_register_services(tuple(s._tuple() for s in services))
    # Each service yields a flat tuple of handles: one per characteristic,
    # immediately followed by one per descriptor of that characteristic.
    # zip + an explicit iterator replaces the original index loop and
    # manually-incremented counter.
    for service, service_handles in zip(services, handles):
        handle_iter = iter(service_handles)
        for characteristic in service.characteristics:
            characteristic._register(next(handle_iter))
            for descriptor in characteristic.descriptors:
                descriptor._register(next(handle_iter))

137
ble/mainDongle.py Normal file
View File

@ -0,0 +1,137 @@
import sys
sys.path.append("")
from micropython import const
import json
import uasyncio as asyncio
import aioble
import bluetooth
import struct
_SERVICE_UUID = bluetooth.UUID(0x1234)
_CHAR_UUID = bluetooth.UUID(0x1235)
MAX_MSG_DATA_LENGTH = const(18)
_COMMAND_DONE = const(0)
_COMMAND_SENDDATA = const(1)
_COMMAND_SENDCHUNK = const(2) # send chunk of data, use _COMMAND_SENDDATA for last chunk
_COMMAND_SENDBYTESDATA = const(3)
_COMMAND_SENDBYTESCHUNK = const(4) # send chunk of bytes, use _COMMAND_SENDBYTESDATA for last chunk
class ManageDongle:
    """Bridge between the PC COM port (stdout/stdin JSON lines) and one BLE
    peripheral: forwards chunked messages in both directions.
    """

    def __init__(self, device):
        # :param device: aioble scan-result device to connect to.
        self._device = device
        self._connection = None
        # Per-packet sequence number included in every outgoing command.
        self._seq = 1

    async def connect(self):
        """Connect, discover the service/characteristic, and start the BLE
        reader task. Reports 'connected' to the PC on success."""
        try:
            print("Connecting to", self._device)
            self._connection = await self._device.connect()
        except asyncio.TimeoutError:
            print("Timeout during connection")
            return
        try:
            print("Discovering...")
            service = await self._connection.service(_SERVICE_UUID)
            #uuids = []
            #async for char in service.characteristics():
            #    uuids.append(char.uuid)
            #print('uuids', uuids)
            self._characteristic = await service.characteristic(_CHAR_UUID)
        except asyncio.TimeoutError:
            print("Timeout discovering services/characteristics")
            return
        asyncio.create_task(self.readFromBle())
        await asyncio.sleep(0.1)
        self.sendDictToCom({'type':'connected'})

    async def _command(self, cmd, data):
        """Write one packet: <cmd:uint8><seq:uint8><data> and return its seq."""
        send_seq = self._seq
        await self._characteristic.write(struct.pack("<BB", cmd, send_seq) + data)
        #print('sent packet num', send_seq)
        self._seq += 1
        return send_seq

    async def readFromBle(self):
        """Reassemble chunked notifications from the robot and forward each
        complete message to the PC as a JSON line."""
        msgChunk = ''
        while True:
            read = await self._characteristic.notified()
            # message format is <command><data>
            cmd = read[0]
            #print('received from BLE', read)
            self.sendDictToCom({'type':'debug', 'from':'fromBle','cmd':cmd, 'data':read[1:]})
            if cmd in [_COMMAND_SENDCHUNK, _COMMAND_SENDBYTESCHUNK]:
                # Intermediate chunk: accumulate until the final DATA packet.
                #self.sendDictToCom({'type':'debug', 'from':'chunkFromBle','string':msgChunk})
                msgChunk += read[1:].decode()
            elif cmd in [_COMMAND_SENDDATA, _COMMAND_SENDBYTESDATA]:
                # message to send to computer over COM port
                msgFormat = 'base64' if cmd == _COMMAND_SENDBYTESDATA else 'str'
                msg = msgChunk + read[1:].decode()
                self.sendDictToCom({'type':'msgFromBle', 'format':msgFormat, 'string':msg})
                msgChunk = ''

    async def sendData(self, data:str, base64:bool=False):
        """Send a string over BLE, splitting into MAX_MSG_DATA_LENGTH chunks.

        :param data: string to send (plain str or base64-formatted)
        :param base64: if True, data is a base64-formatted string
        """
        # Chunks use the *CHUNK command; the final packet uses *DATA so the
        # receiver knows the message is complete.
        sendMsgType = _COMMAND_SENDBYTESCHUNK if base64 else _COMMAND_SENDCHUNK
        while len(data) > MAX_MSG_DATA_LENGTH:
            chunk = data[:MAX_MSG_DATA_LENGTH]
            self.sendDictToCom({'type':'debug', 'from':'sendChunkToBle','string':chunk})
            await self._command(sendMsgType, chunk.encode())
            data = data[MAX_MSG_DATA_LENGTH:]
        sendMsgType = _COMMAND_SENDBYTESDATA if base64 else _COMMAND_SENDDATA
        #self.sendDictToCom({'type':'msgType', 'strOrBase64':sendMsgType, 'sentdata':data})
        await self._command(sendMsgType, data.encode())
        self.sendDictToCom({'type':'sentMessage'})

    async def disconnect(self):
        """Close the BLE connection if one is open."""
        if self._connection:
            await self._connection.disconnect()

    def sendDictToCom(self, data:dict):
        """Emit one JSON line on stdout — the PC side reads it from the COM port."""
        print(json.dumps(data))
async def main():
    """Read JSON commands from the COM port (stdin) and drive the BLE link.

    Supported command types: 'connect' (scan for the named peripheral),
    'disconnect', and 'msg' (forward a str/base64 payload to the robot).
    """
    print('start dongle')
    # FIX: `client` used to be referenced before assignment — a 'disconnect'
    # or 'msg' command arriving before a successful 'connect' raised
    # NameError and killed the loop. Initialise it and guard those branches.
    client = None
    while True:
        try:
            line = input()
        except KeyboardInterrupt:
            # when ctrl-C is sent to dongle, we receive a KeyboardInterrupt
            sys.exit(0)
        receivedMsgDict = json.loads(line)
        if receivedMsgDict['type'] == 'connect':
            # => start BLE scan and connect on this peripheral
            peripheralName = receivedMsgDict['name']
            async with aioble.scan(5000, 30000, 30000, active=True) as scanner:
                async for result in scanner:
                    print('scan', result.name(), result.rssi, result.services())
                    if result.name() == peripheralName and _SERVICE_UUID in result.services():
                        device = result.device
                        break
                else:
                    # for/else: the scan finished without finding the peripheral.
                    print("Server not found")
                    return
            client = ManageDongle(device)
            await client.connect()
        elif receivedMsgDict['type'] == 'disconnect':
            if client:
                await client.disconnect()
        elif receivedMsgDict['type'] == 'msg':
            if client is None:
                print('not connected')
                continue
            if 'format' not in receivedMsgDict or receivedMsgDict['format'] not in ['str', 'base64']:
                client.sendDictToCom({'type':'badMessage', 'error':'invalid format', 'received':receivedMsgDict})
                continue
            msgFormat = receivedMsgDict['format'] == 'base64'
            await client.sendData(receivedMsgDict['string'], base64=msgFormat)
        else:
            print('unknown message type', receivedMsgDict)
asyncio.run(main())

85
ble/mainPcTestBLE.py Normal file
View File

@ -0,0 +1,85 @@
# python -m serial.tools.list_ports
# python mainPcTestBLE.py -p <port com>
# In this example, PC will send some messages to robot,
# and verify it receives checksum of these messages from robot
# Note: if message from PC to robot exceeds 18 characters, it will be split in
# several BLE messages, then merged at robot side to get original message
import sys
import binascii
import time
import argparse
import random
import ComWithDongle
robotName = 'myTeamName'
randCharRange = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
expectedToReceive = []


def onMsgFromRobot(data: str):
    """Handle a checksum message echoed back by the robot.

    Removes the checksum from the pending list; an unexpected checksum
    dumps the pending list and aborts the test with exit code 1.

    :param data: message
    """
    print('received msg', data, flush=True)
    print('compair to', expectedToReceive, flush=True)
    if data not in expectedToReceive:
        print('bad message received', data)
        print('expected to receive')
        for pending in expectedToReceive:
            print(' ', pending)
        exit(1)
    expectedToReceive.remove(data)
    print('-not received yet', len(expectedToReceive), expectedToReceive, flush=True)
# Command-line interface for the PC-side BLE round-trip test.
parser = argparse.ArgumentParser(
    description='Script to communicate with STM32WB55 dongle connected on computer')
parser.add_argument('-p', '--portcom', type=str, help='id of com port used')
parser.add_argument('-d', '--debug', action='store_true', help='display debug messages')
parser.add_argument('-l', '--length', type=int, default=16,
                    help='number of characters to send over BLE, in each message')
parser.add_argument('-n', '--number', type=int, default=5, help='number of messages to send over BLE')
parser.add_argument('-b', '--bytes', action='store_true', help='send bytes instead of string')
args = parser.parse_args()
try:
    print('start main')
    # wait BLE connection is established
    com = ComWithDongle.ComWithDongle(comPort=args.portcom, peripheralName=robotName,
                                      onMsgReceived=onMsgFromRobot, debug=args.debug)
    print('connected to', robotName)
    msgId = 0
    while True:
        if args.bytes:
            data = random.randbytes(args.length)
        else:
            data = ''.join([random.choice(randCharRange) for _ in range(args.length)])
        print('send data', len(data), msgId, data, flush=True)
        # FIX: binascii.crc32 requires a bytes-like object on CPython, so the
        # str branch above used to raise TypeError. Encode strings (ASCII
        # charset, so the bytes match what is sent over BLE) before hashing.
        checksum = binascii.crc32(data if isinstance(data, bytes) else data.encode())
        expectedToReceive.append(str(checksum))
        com.sendMsg(data)
        print('+not received yet', len(expectedToReceive), expectedToReceive, flush=True)
        msgId += 1
        if msgId >= args.number:
            break
        time.sleep(0.2)
    # all messages sent, wait while we receive some messages
    com.sendMsg('END')
    nbMissing = len(expectedToReceive)
    lastNbMissing = 0
    # Keep nudging the robot with 'END' until no progress is made between rounds.
    while nbMissing != lastNbMissing:
        if nbMissing == 0:
            print('all messages received')
            exit(0)
        print('missing', expectedToReceive, flush=True)
        lastNbMissing = nbMissing
        com.sendMsg('END')
        time.sleep(2)
        nbMissing = len(expectedToReceive)
except KeyboardInterrupt:
    pass
com.disconnect()
exit(0)

41
ble/mainRobotTestBLE.py Normal file
View File

@ -0,0 +1,41 @@
# to know COM port used when connected on PC:
# python -m serial.tools.list_ports
# in this example, robot will send back to PC the checksum of each message received
import binascii
import uasyncio as asyncio
import RobotBleServer
robotName = 'myTeamName'
toSend = []
def onMsgToRobot(data:str|bytes):
"""Function to call when a message sent by PC is received
:param data: message received"""
checksum = binascii.crc32(data)
print('received', data, '=>', checksum)
toSend.append(str(checksum))
async def robotMainTask(bleConnection):
    """Main function for robot activities.

    Polls every 100 ms and, while a BLE connection is up, drains the
    pending `toSend` queue in FIFO order.

    :param bleConnection: object to check BLE connection and send messages
    """
    while True:
        await asyncio.sleep(0.1)
        #print('connection', bleConnection.connection)
        if bleConnection.connection and toSend:
            while toSend:
                msg = toSend.pop(0)
                bleConnection.sendMessage(msg)
                print('sent', msg)
# Run tasks
async def main():
    """Start the BLE server, spawn the robot task, and run communication."""
    print('Start main')
    server = RobotBleServer.RobotBleServer(robotName=robotName, onMsgReceived=onMsgToRobot)
    asyncio.create_task(robotMainTask(server))
    await server.communicationTask()
asyncio.run(main())

11
ble/toDongle.sh Normal file
View File

@ -0,0 +1,11 @@
#!/usr/bin/bash
# Copy the aioble library and the dongle firmware onto a mounted
# MicroPython drive. Usage: ./toDongle.sh <drive-mount-point>
drive=$1
# -z is the idiomatic empty-string test; quoting protects paths with spaces.
if [[ -z "$drive" ]]; then
    echo "missing drive"
    exit 1
fi
cp -r aioble "$drive/"
cp mainDongle.py "$drive/main.py"
sync

12
ble/toRobot.sh Normal file
View File

@ -0,0 +1,12 @@
#!/usr/bin/bash
# Copy the aioble library, the BLE server module, and the robot firmware
# onto a mounted MicroPython drive. Usage: ./toRobot.sh <drive-mount-point>
drive=$1
# -z is the idiomatic empty-string test; quoting protects paths with spaces.
if [[ -z "$drive" ]]; then
    echo "missing drive"
    exit 1
fi
cp -r aioble "$drive/"
cp RobotBleServer.py "$drive"
cp mainRobotTestBLE.py "$drive/main.py"
sync

BIN
capture_app_sii++.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

213
main.py
View File

@ -1 +1,212 @@
# main.py -- put your code here!
import machine
import utime, sys
from stm32_ssd1306 import SSD1306, SSD1306_I2C
from stm32_alphabot_v2 import AlphaBot_v2
import neopixel
import _thread
import os
import buzzer
import binascii
import uasyncio as asyncio
import RobotBleServer
from Interpreteur import StartCPU
motorSpeedFactor = 50
motorDCompensation = 1.04
alphabot = AlphaBot_v2()
oled = SSD1306_I2C(128, 64, alphabot.i2c)
oled.fill(0)
oled.show()
bitmapSII = bytes([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0xc0,
0xc0, 0xc0, 0xe0, 0xe0, 0xe0, 0xf0, 0xf0, 0xf0, 0xf8, 0xf8, 0xf8, 0xf8, 0x7c, 0xfc, 0xfc, 0xfc,
0xfc, 0xfc, 0xfc, 0x7c, 0x7e, 0x7e, 0x7e, 0x7c, 0x7c, 0x7c, 0x7c, 0xfc, 0xfc, 0xf8, 0xf8, 0xf0,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xc0, 0xe0, 0xe0,
0xe0, 0xc0, 0x80, 0x20, 0x10, 0x10, 0x18, 0x08, 0x0c, 0x0e, 0x06, 0x07, 0x7f, 0x7f, 0x3f, 0x3f,
0x1f, 0x0f, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x7f, 0x3f,
0x3f, 0x1f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x01, 0x01, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x80, 0xc0, 0xf0, 0xf8, 0xfc, 0xfe, 0xfe, 0xff, 0x7f, 0x3f, 0x0f, 0x07, 0x07,
0x03, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xf0, 0xf8, 0xf8, 0xfc,
0xfe, 0xfe, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xf8, 0xf8,
0xfc, 0xfe, 0xfe, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xf8, 0xfc, 0xfc, 0xfc, 0xc0, 0xc0, 0xc0,
0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x0f, 0x1f, 0x3f, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc,
0xf8, 0xe0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x03, 0x03, 0x03, 0x03, 0x3f, 0x3f, 0x3f, 0x1f,
0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x03, 0x03, 0x03, 0x1f, 0x3f, 0x3f, 0x3f, 0x03, 0x03, 0x03,
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x0f, 0x7f, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xe0, 0xfc, 0xff, 0xff, 0xff, 0xff,
0xff, 0x7f, 0x1f, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x0f, 0x07, 0x07,
0x03, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x20, 0x30, 0x3c, 0x3e, 0x3f, 0x3f, 0x1f, 0x0f, 0x07, 0x07, 0x03, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x03, 0x01,
0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
oled.write_data(bitmapSII)
class FoursNeoPixel():
    """Driver for a strip of four WS2812-style RGB LEDs.

    NOTE(review): the strip appears to expect colours in BGR order
    (see set_led) -- confirm against the LED hardware in use.
    """

    def __init__(self, pin_number):
        self._pin = pin_number
        self._max_leds = 4
        self._leds = neopixel.NeoPixel(self._pin, 4)

    def set_led(self, addr, red, green, blue):
        """Stage one LED colour from RGB components; out-of-range addr is ignored."""
        if 0 <= addr < self._max_leds:
            # Hardware expects BGR ordering.
            self._leds[addr] = (blue, green, red)

    def set_led2(self, addr, rgb):
        """Stage one LED colour from a pre-ordered tuple (passed through as-is)."""
        if 0 <= addr < self._max_leds:
            # Caller is responsible for supplying BGR ordering.
            self._leds[addr] = rgb

    def show(self):
        """Push the staged colours out to the strip."""
        self._leds.write()

    def clear(self):
        """Switch every LED off and refresh the strip."""
        for index in range(self._max_leds):
            self.set_led(index, 0, 0, 0)
        self.show()
# Global LED strip driven by registerCallback below.
leds = FoursNeoPixel(alphabot.pin_RGB)

def motorCallback(motG, motD):
    """Apply a 4-bit motor command to each wheel.

    :param motG: left motor nibble  -- bit 3 = direction, bits 0-2 = speed (0..7)
    :param motD: right motor nibble -- same encoding
    """
    # (v >> 2) & 0b10 isolates bit 3 as 0 or 2, so sMot is +1 (forward)
    # or -1 (reverse).
    sMotG = 1 - ((motG >> 2) & 0b10)
    sMotD = 1 - ((motD >> 2) & 0b10)
    # Scale the 0..7 magnitude to the configured speed range; the right motor
    # gets an extra calibration factor (motorDCompensation).
    motG = ((motG & 0b0111) * sMotG) * motorSpeedFactor / 7
    motD = (((motD & 0b0111) * sMotD) * motorSpeedFactor / 7) * motorDCompensation
    # print("Mot G :", motG)
    # print("Mot D :", motD)
    alphabot.setMotors(left=motG, right=motD)
def registerCallback(register, value):
    """Mirror a CPU register change onto the matching RGB LED.

    Registers 0-2 drive LEDs 0-2; any other register drives LED 3.
    The three colour channels are derived from the 0..255 value so the
    hue shifts as the register value changes.
    """
    global leds
    # print(f"Register R{register} changed to {value}")
    target = register if register in (0, 1, 2) else 3
    leds.set_led(target, value, abs(127 - value), 255 - value)
    leds.show()
# to know COM port used when connected on PC:
# python -m serial.tools.list_ports
# in this example, robot will send back to PC the checksum of each message received
# BLE advertising name of this robot.
robotName = 'Nogard'
# Outgoing message queue, drained by robotMainTask once a BLE connection is up.
toSend = []
def onMsgToRobot(data:str|bytes):
    """Function to call when a message sent by PC is received.

    Messages prefixed with '#' are control commands (LED/MOTOR ON/OFF);
    anything else is treated as a binary program and run on the CPU with
    the callbacks selected by the current feature flags.

    :param data: message received
    """
    global useLED, useMotor
    # Lazy-initialise the feature flags on the first message received.
    if "useLED" not in globals():
        useLED = False
    if "useMotor" not in globals():
        useMotor = True
    # NOTE(review): crc32 requires bytes; a str message would raise here --
    # confirm the BLE layer always delivers bytes or decode upstream.
    checksum = binascii.crc32(data)
    print('received', data, '=>', checksum)
    print(data)
    # Strip the command marker, if present, before matching commands.
    if (data[0] == "#"):
        data = data[1:]
    if (data == "LED ON"):
        useLED = True
    elif (data == "LED OFF"):
        useLED = False
    elif (data == "MOTOR ON"):
        useMotor = True
    elif (data == "MOTOR OFF"):
        useMotor = False
    else:
        # Not a command: execute the payload as a CPU program, then make
        # sure the motors are stopped once the program finishes.
        StartCPU(data, motorCallback if useMotor else None, registerCallback if useLED else None)
        alphabot.stop()
async def robotMainTask(bleConnection):
    """Main function for robot activities.

    Polls the outgoing queue every 100 ms and forwards any pending
    messages to the PC while a BLE connection is established.

    :param bleConnection: object to check BLE connection and send messages"""
    while True:
        await asyncio.sleep(0.1)
        #print('connection', bleConnection.connection)
        if not bleConnection.connection:
            continue
        if not toSend:
            continue
        while toSend:
            msg = toSend.pop(0)
            bleConnection.sendMessage(msg)
            print('sent', msg)
# Run tasks
async def main():
    """Entry point: start the BLE server and the background robot task."""
    print('Start main')
    server = RobotBleServer.RobotBleServer(robotName=robotName, onMsgReceived=onMsgToRobot)
    asyncio.create_task(robotMainTask(server))
    await server.communicationTask()

asyncio.run(main())

View File

@ -1,7 +1,27 @@
DB : vvvv vvvv
CALL : 0000 0000 aaaa aaaa
RET : 1000 0000
JMP : 0100 0000 aaaa aaaa
JLT : 1100 0000 aaaa aaaa
JEQ : 0010 0000 aaaa aaaa
PUSH : 1010 00xx
POP : 0110 00xx
MOV v : 1110 00xx vvvv vvvv
SUB v : 0001 00xx vvvv vvvv
CMP v : 1001 00xx vvvv vvvv
MOV r : 0101 xxyy
SUB r : 1101 xxyy
CMP r : 0011 xxyy
LDR : 1011 xxyy aaaa aaaa
STR : 0111 xxyy aaaa aaaa
OUT : 1111 00xx
TIM : 1111 1000 mvvv vvvv
a = 0
b = 1
c = 1
c = ?
print a
@ -124,3 +144,42 @@ OUT Rx ;génération binaire 111100xx avec xx = R0, R1,R2,R3
TIM valeur ;génération binaire 11111000 mvvvvvvv
;met dans le registre de Timer la valeur mvvvvvvv
;le processeur se met en pause pendant multiplicateur*(vvvvvvv+1)
; en millisecondes
;vvvvvvv valeur de 0x0 (représente 1) à 0x7F (représente 128)
;m=0 (multiplicateur = 1) ou m=1 (multiplicateur = 100)
Connexion
{"type": "connect", "name": "Nogard"}
Commandes annexes
{"type": "msg", "format": "str", "string": "#LED ON"}
{"type": "msg", "format": "str", "string": "#LED OFF"}
{"type": "msg", "format": "str", "string": "#MOTOR ON"}
{"type": "msg", "format": "str", "string": "#MOTOR OFF"}
Fibbo 8bits
{"type": "msg", "format": "base64", "string": "4ADhAfDx4gBc2d5RVzTAEkAF4EGA"}
Fibbo 16bits
{"type": "msg", "format": "base64", "string": "4AHV2t/y8/LzoKGh1dbRYTLAFUAXE/+g0NPUYDfAIUAjQCdjYkAHY2KA"}
LED
{"type": "msg", "format": "base64", "string": "0NXa3xD1+AGQ3MAEEfX4AZHcwAwS9fgBktzAFBP1+AGT3MAcgA=="}
Demi tour + fuite
{"type": "msg", "format": "base64", "string": "4Dvw+ID4MuAA8Phk4Mzw+KXgAPDgAPD4ZIA="}
Parcours
{"type": "msg", "format": "base64", "string": "4BHw+ILgIvD4guAz8Pil4DHw+ILgM/D4mOAT8PiC4DPw+IjgAPCA4ADw+GSA"}
{"type": "msg", "format": "base64", "string": "4BHw+ILgIvD4guAz8Pik4DHw+IPgM/D4k+AT8PiD4DPw+IrgAPD4oOCi8PiH4ADw+IzgKvD4h+AA8Pig4Pfw+ILgf/D4guD38Pi04ADwgOAA8PhkgA=="}

1
out.bin Normal file
View File

@ -0,0 +1 @@
àÕÚßòó ¡¡ÕÖÑa2À@ÿ ÐÓÔ`7À@!@%cb@cb€

View File

@ -1,92 +0,0 @@
; Windows USB CDC ACM Setup File
; Based on INF files which were:
; Copyright (c) 2000 Microsoft Corporation
; Copyright (C) 2007 Microchip Technology Inc.
; Likely to be covered by the MLPL as found at:
; <http://msdn.microsoft.com/en-us/cc300389.aspx#MLPL>.
[Version]
Signature="$Windows NT$"
Class=Ports
ClassGuid={4D36E978-E325-11CE-BFC1-08002BE10318}
Provider=%MFGNAME%
LayoutFile=layout.inf
DriverVer=03/11/2010,5.1.2600.3
[Manufacturer]
%MFGNAME%=DeviceList, NTamd64
[DestinationDirs]
DefaultDestDir=12
;---------------------------------------------------------------------
; Windows 2000/XP/Server2003/Vista/Server2008/7 - 32bit Sections
[DriverInstall.nt]
include=mdmcpq.inf
CopyFiles=DriverCopyFiles.nt
AddReg=DriverInstall.nt.AddReg
[DriverCopyFiles.nt]
usbser.sys,,,0x20
[DriverInstall.nt.AddReg]
HKR,,DevLoader,,*ntkern
HKR,,NTMPDriver,,usbser.sys
HKR,,EnumPropPages32,,"MsPorts.dll,SerialPortPropPageProvider"
[DriverInstall.nt.Services]
AddService=usbser, 0x00000002, DriverService.nt
[DriverService.nt]
DisplayName=%SERVICE%
ServiceType=1
StartType=3
ErrorControl=1
ServiceBinary=%12%\usbser.sys
;---------------------------------------------------------------------
; Windows XP/Server2003/Vista/Server2008/7 - 64bit Sections
[DriverInstall.NTamd64]
include=mdmcpq.inf
CopyFiles=DriverCopyFiles.NTamd64
AddReg=DriverInstall.NTamd64.AddReg
[DriverCopyFiles.NTamd64]
usbser.sys,,,0x20
[DriverInstall.NTamd64.AddReg]
HKR,,DevLoader,,*ntkern
HKR,,NTMPDriver,,usbser.sys
HKR,,EnumPropPages32,,"MsPorts.dll,SerialPortPropPageProvider"
[DriverInstall.NTamd64.Services]
AddService=usbser, 0x00000002, DriverService.NTamd64
[DriverService.NTamd64]
DisplayName=%SERVICE%
ServiceType=1
StartType=3
ErrorControl=1
ServiceBinary=%12%\usbser.sys
;---------------------------------------------------------------------
; Vendor and Product ID Definitions
[SourceDisksFiles]
[SourceDisksNames]
[DeviceList]
%DESCRIPTION%=DriverInstall, USB\VID_f055&PID_9800&MI_00, USB\VID_f055&PID_9800&MI_01, USB\VID_f055&PID_9801&MI_00, USB\VID_f055&PID_9801&MI_01, USB\VID_f055&PID_9802
[DeviceList.NTamd64]
%DESCRIPTION%=DriverInstall, USB\VID_f055&PID_9800&MI_00, USB\VID_f055&PID_9800&MI_01, USB\VID_f055&PID_9801&MI_00, USB\VID_f055&PID_9801&MI_01, USB\VID_f055&PID_9802
;---------------------------------------------------------------------
; String Definitions
[Strings]
MFGFILENAME="pybcdc"
MFGNAME="MicroPython"
DESCRIPTION="Pyboard USB Comm Port"
SERVICE="USB Serial Driver"

32
robot/aioble/__init__.py Normal file
View File

@ -0,0 +1,32 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const

from .device import Device, DeviceDisconnectedError
from .core import log_info, log_warn, log_error, GattError, config, stop

# Optional sub-modules: each feature is enabled only when its file is present
# on the device, so a missing module is expected and merely logged. Catch
# ImportError specifically -- a bare except would also hide real errors raised
# while importing a module that IS present.
try:
    from .peripheral import advertise
except ImportError:
    log_info("Peripheral support disabled")
try:
    from .central import scan
except ImportError:
    log_info("Central support disabled")
try:
    from .server import (
        Service,
        Characteristic,
        BufferedCharacteristic,
        Descriptor,
        register_services,
    )
except ImportError:
    log_info("GATT server support disabled")

# Address types for Device().
ADDR_PUBLIC = const(0)
ADDR_RANDOM = const(1)

297
robot/aioble/central.py Normal file
View File

@ -0,0 +1,297 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import bluetooth
import struct
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
)
from .device import Device, DeviceConnection, DeviceTimeout
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_ADV_IND = const(0)
_ADV_DIRECT_IND = const(1)
_ADV_SCAN_IND = const(2)
_ADV_NONCONN_IND = const(3)
_SCAN_RSP = const(4)
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)
_ADV_TYPE_SHORT_NAME = const(0x08)
_ADV_TYPE_UUID16_INCOMPLETE = const(0x2)
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_INCOMPLETE = const(0x4)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_INCOMPLETE = const(0x6)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_APPEARANCE = const(0x19)
_ADV_TYPE_MANUFACTURER = const(0xFF)
# Keep track of the active scanner so IRQs can be delivered to it.
_active_scanner = None
# Set of devices that are waiting for the peripheral connect IRQ.
_connecting = set()
def _central_irq(event, data):
    """BLE IRQ dispatcher for central-role events (scanning + peripheral connect/disconnect)."""
    # Send results and done events to the active scanner instance.
    if event == _IRQ_SCAN_RESULT:
        addr_type, addr, adv_type, rssi, adv_data = data
        if not _active_scanner:
            return
        # Copy addr/adv_data with bytes(): the IRQ buffers are reused by the stack.
        _active_scanner._queue.append((addr_type, bytes(addr), adv_type, rssi, bytes(adv_data)))
        _active_scanner._event.set()
    elif event == _IRQ_SCAN_DONE:
        if not _active_scanner:
            return
        _active_scanner._done = True
        _active_scanner._event.set()
    # Peripheral connect must be in response to a pending connection, so find
    # it in the pending connection set.
    elif event == _IRQ_PERIPHERAL_CONNECT:
        conn_handle, addr_type, addr = data
        for d in _connecting:
            if d.addr_type == addr_type and d.addr == addr:
                # Allow connect() to complete.
                connection = d._connection
                connection._conn_handle = conn_handle
                connection._event.set()
                break
    # Find the active device connection for this connection handle.
    elif event == _IRQ_PERIPHERAL_DISCONNECT:
        conn_handle, _, _ = data
        if connection := DeviceConnection._connected.get(conn_handle, None):
            # Tell the device_task that it should terminate.
            connection._event.set()
def _central_shutdown():
    """Reset module-level central state; called via core.stop()."""
    global _active_scanner, _connecting
    _active_scanner = None
    _connecting = set()

# Hook central-role IRQ routing and shutdown into the core dispatcher.
register_irq_handler(_central_irq, _central_shutdown)
# Cancel an in-progress scan.
async def _cancel_pending():
    """Stop the currently active scanner (if any) before starting another operation."""
    scanner = _active_scanner
    if scanner:
        await scanner.cancel()
# Start connecting to a peripheral.
# Call device.connect() rather than using method directly.
async def _connect(connection, timeout_ms):
    """Initiate a GAP connection and wait for the connect IRQ (or timeout).

    :param connection: DeviceConnection being established
    :param timeout_ms: maximum wait, or None for no timeout
    """
    device = connection.device
    if device in _connecting:
        return
    # Enable BLE and cancel in-progress scans.
    ensure_active()
    await _cancel_pending()
    # Allow the connected IRQ to find the device by address.
    _connecting.add(device)
    # Event will be set in the connected IRQ, and then later
    # re-used to notify disconnection.
    connection._event = connection._event or asyncio.ThreadSafeFlag()
    try:
        with DeviceTimeout(None, timeout_ms):
            ble.gap_connect(device.addr_type, device.addr)
            # Wait for the connected IRQ.
            await connection._event.wait()
            assert connection._conn_handle is not None
            # Register connection handle -> device.
            DeviceConnection._connected[connection._conn_handle] = connection
    finally:
        # After timeout, don't hold a reference and ignore future events.
        _connecting.remove(device)
# Represents a single device that has been found during a scan. The scan
# iterator will return the same ScanResult instance multiple times as its data
# changes (i.e. changing RSSI or advertising data).
class ScanResult:
    def __init__(self, device):
        # Device this result belongs to (shared across repeated yields).
        self.device = device
        # Most recent advertising / scan-response payloads (bytes or None).
        self.adv_data = None
        self.resp_data = None
        # Most recent RSSI (None until the first result arrives).
        self.rssi = None
        # True once a connectable advertisement (_ADV_IND) has been seen.
        self.connectable = False
    # New scan result available, return true if it changes our state.
    def _update(self, adv_type, rssi, adv_data):
        updated = False
        if rssi != self.rssi:
            self.rssi = rssi
            updated = True
        if adv_type in (_ADV_IND, _ADV_NONCONN_IND):
            if adv_data != self.adv_data:
                self.adv_data = adv_data
                self.connectable = adv_type == _ADV_IND
                updated = True
        elif adv_type == _ADV_SCAN_IND:
            # Only reported as a change when a scan response is already held;
            # the payload itself is always remembered.
            if adv_data != self.adv_data and self.resp_data:
                updated = True
            self.adv_data = adv_data
        elif adv_type == _SCAN_RSP and adv_data:
            if adv_data != self.resp_data:
                self.resp_data = adv_data
                updated = True
        return updated
    def __str__(self):
        return "Scan result: {} {}".format(self.device, self.rssi)
    # Gets all the fields for the specified types.
    def _decode_field(self, *adv_type):
        """Yield the raw data of every AD structure whose type byte is in *adv_type*."""
        # Advertising payloads are repeated packets of the following form:
        #   1 byte data length (N + 1)
        #   1 byte type (see constants below)
        #   N bytes type-specific data
        for payload in (self.adv_data, self.resp_data):
            if not payload:
                continue
            i = 0
            while i + 1 < len(payload):
                if payload[i + 1] in adv_type:
                    yield payload[i + 2 : i + payload[i] + 1]
                i += 1 + payload[i]
    # Returns the value of the complete (or shortened) advertised name, if available.
    def name(self):
        for n in self._decode_field(_ADV_TYPE_NAME, _ADV_TYPE_SHORT_NAME):
            return str(n, "utf-8") if n else ""
    # Generator that enumerates the service UUIDs that are advertised.
    def services(self):
        for u in self._decode_field(_ADV_TYPE_UUID16_INCOMPLETE, _ADV_TYPE_UUID16_COMPLETE):
            yield bluetooth.UUID(struct.unpack("<H", u)[0])
        for u in self._decode_field(_ADV_TYPE_UUID32_INCOMPLETE, _ADV_TYPE_UUID32_COMPLETE):
            yield bluetooth.UUID(struct.unpack("<I", u)[0])
        for u in self._decode_field(_ADV_TYPE_UUID128_INCOMPLETE, _ADV_TYPE_UUID128_COMPLETE):
            yield bluetooth.UUID(u)
    # Generator that returns (manufacturer_id, data) tuples.
    def manufacturer(self, filter=None):
        for u in self._decode_field(_ADV_TYPE_MANUFACTURER):
            if len(u) < 2:
                continue
            m = struct.unpack("<H", u[0:2])[0]
            if filter is None or m == filter:
                yield (m, u[2:])
# Use with:
#     async with aioble.scan(...) as scanner:
#         async for result in scanner:
#             ...
class scan:
    def __init__(self, duration_ms, interval_us=None, window_us=None, active=False):
        # Raw results delivered by the scan IRQ, drained by __anext__.
        self._queue = []
        self._event = asyncio.ThreadSafeFlag()
        self._done = False
        # Keep track of what we've already seen.
        self._results = set()
        # Ideally we'd start the scan here and avoid having to save these
        # values, but we need to stop any previous scan first via awaiting
        # _cancel_pending(), but __init__ isn't async.
        self._duration_ms = duration_ms
        self._interval_us = interval_us or 1280000
        self._window_us = window_us or 11250
        self._active = active
    async def __aenter__(self):
        """Become the active scanner and start the hardware scan."""
        global _active_scanner
        ensure_active()
        await _cancel_pending()
        _active_scanner = self
        ble.gap_scan(self._duration_ms, self._interval_us, self._window_us, self._active)
        return self
    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        # Cancel the current scan if we're still the active scanner. This will
        # happen if the loop breaks early before the scan duration completes.
        if _active_scanner == self:
            await self.cancel()
    def __aiter__(self):
        assert _active_scanner == self
        return self
    async def __anext__(self):
        """Yield the next new-or-updated ScanResult; stop when the scan ends."""
        global _active_scanner
        if _active_scanner != self:
            # The scan has been canceled (e.g. a connection was initiated).
            raise StopAsyncIteration
        while True:
            while self._queue:
                addr_type, addr, adv_type, rssi, adv_data = self._queue.pop()
                # Try to find an existing ScanResult for this device.
                for r in self._results:
                    if r.device.addr_type == addr_type and r.device.addr == addr:
                        result = r
                        break
                else:
                    # New device, create a new Device & ScanResult.
                    device = Device(addr_type, addr)
                    result = ScanResult(device)
                    self._results.add(result)
                # Add the new information from this event.
                if result._update(adv_type, rssi, adv_data):
                    # It's new information, so re-yield this result.
                    return result
            if self._done:
                # _IRQ_SCAN_DONE event was fired.
                _active_scanner = None
                raise StopAsyncIteration
            # Wait for either done or result IRQ.
            await self._event.wait()
    # Cancel any in-progress scan. We need to do this before starting any other operation.
    async def cancel(self):
        """Stop the scan and wait for the done IRQ before returning."""
        if self._done:
            return
        ble.gap_scan(None)
        while not self._done:
            await self._event.wait()
        global _active_scanner
        _active_scanner = None

456
robot/aioble/client.py Normal file
View File

@ -0,0 +1,456 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
from collections import deque
import uasyncio as asyncio
import struct
import bluetooth
from .core import ble, GattError, register_irq_handler
from .device import DeviceConnection
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)
_IRQ_GATTC_DESCRIPTOR_DONE = const(14)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
_IRQ_GATTC_WRITE_DONE = const(17)
_IRQ_GATTC_NOTIFY = const(18)
_IRQ_GATTC_INDICATE = const(19)
_CCCD_UUID = const(0x2902)
_CCCD_NOTIFY = const(1)
_CCCD_INDICATE = const(2)
_FLAG_READ = const(0x0002)
_FLAG_WRITE_NO_RESPONSE = const(0x0004)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
_FLAG_INDICATE = const(0x0020)
# Forward IRQs directly to static methods on the type that handles them and
# knows how to map handles to instances. Note: We copy all uuid and data
# params here for safety, but a future optimisation might be able to avoid
# these copies in a few places.
def _client_irq(event, data):
    """Route GATT-client IRQs to ClientDiscover / ClientCharacteristic."""
    if event == _IRQ_GATTC_SERVICE_RESULT:
        conn_handle, start_handle, end_handle, uuid = data
        ClientDiscover._discover_result(
            conn_handle, start_handle, end_handle, bluetooth.UUID(uuid)
        )
    elif event == _IRQ_GATTC_SERVICE_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
        conn_handle, end_handle, value_handle, properties, uuid = data
        ClientDiscover._discover_result(
            conn_handle, end_handle, value_handle, properties, bluetooth.UUID(uuid)
        )
    elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_DESCRIPTOR_RESULT:
        conn_handle, dsc_handle, uuid = data
        ClientDiscover._discover_result(conn_handle, dsc_handle, bluetooth.UUID(uuid))
    elif event == _IRQ_GATTC_DESCRIPTOR_DONE:
        conn_handle, status = data
        ClientDiscover._discover_done(conn_handle, status)
    elif event == _IRQ_GATTC_READ_RESULT:
        conn_handle, value_handle, char_data = data
        ClientCharacteristic._read_result(conn_handle, value_handle, bytes(char_data))
    elif event == _IRQ_GATTC_READ_DONE:
        conn_handle, value_handle, status = data
        ClientCharacteristic._read_done(conn_handle, value_handle, status)
    elif event == _IRQ_GATTC_WRITE_DONE:
        conn_handle, value_handle, status = data
        ClientCharacteristic._write_done(conn_handle, value_handle, status)
    elif event == _IRQ_GATTC_NOTIFY:
        conn_handle, value_handle, notify_data = data
        ClientCharacteristic._on_notify(conn_handle, value_handle, bytes(notify_data))
    elif event == _IRQ_GATTC_INDICATE:
        conn_handle, value_handle, indicate_data = data
        ClientCharacteristic._on_indicate(conn_handle, value_handle, bytes(indicate_data))

# GATT client needs no shutdown hook; state lives on DeviceConnection.
register_irq_handler(_client_irq, None)
# Async generator for discovering services, characteristics, descriptors.
class ClientDiscover:
    def __init__(self, connection, disc_type, parent, timeout_ms, *args):
        self._connection = connection
        # Each result IRQ will append to this.
        self._queue = []
        # This will be set by the done IRQ.
        self._status = None
        # Tell the generator to process new events.
        self._event = asyncio.ThreadSafeFlag()
        # Must implement the _start_discovery static method. Instances of this
        # type are returned by __anext__.
        self._disc_type = disc_type
        # This will be the connection for a service discovery, and the service for a characteristic discovery.
        self._parent = parent
        # Timeout for the discovery process.
        # TODO: Not implemented.
        self._timeout_ms = timeout_ms
        # Additional arguments to pass to the _start_discovery method on disc_type.
        self._args = args
    async def _start(self):
        """Begin the discovery; only one discovery may run per connection at a time."""
        if self._connection._discover:
            # TODO: cancel existing? (e.g. perhaps they didn't let the loop run to completion)
            raise ValueError("Discovery in progress")
        # Tell the connection that we're the active discovery operation (the IRQ only gives us conn_handle).
        self._connection._discover = self
        # Call the appropriate ubluetooth.BLE method.
        self._disc_type._start_discovery(self._parent, *self._args)
    def __aiter__(self):
        return self
    async def __anext__(self):
        """Return the next discovered item, or raise StopAsyncIteration after the done IRQ."""
        if self._connection._discover != self:
            # Start the discovery if necessary.
            await self._start()
        # Keep returning items from the queue until the status is set by the
        # done IRQ.
        while True:
            while self._queue:
                return self._disc_type(self._parent, *self._queue.pop())
            if self._status is not None:
                self._connection._discover = None
                raise StopAsyncIteration
            # Wait for more results to be added to the queue.
            await self._event.wait()
    # Tell the active discovery instance for this connection to add a new result
    # to the queue.
    def _discover_result(conn_handle, *args):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if discover := connection._discover:
                discover._queue.append(args)
                discover._event.set()
    # Tell the active discovery instance for this connection that it is complete.
    def _discover_done(conn_handle, status):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if discover := connection._discover:
                discover._status = status
                discover._event.set()
# Represents a single service supported by a connection. Do not construct this
# class directly, instead use `async for service in connection.services([uuid])` or
# `await connection.service(uuid)`.
class ClientService:
    def __init__(self, connection, start_handle, end_handle, uuid):
        self.connection = connection
        # Used for characteristic discovery.
        self._start_handle = start_handle
        self._end_handle = end_handle
        # Allows comparison to a known uuid.
        self.uuid = uuid
    def __str__(self):
        return "Service: {} {} {}".format(self._start_handle, self._end_handle, self.uuid)
    # Search for a specific characteristic by uuid.
    async def characteristic(self, uuid, timeout_ms=2000):
        """Return the first characteristic matching *uuid*, or None if not found."""
        result = None
        # Make sure loop runs to completion.
        async for characteristic in self.characteristics(uuid, timeout_ms):
            if not result and characteristic.uuid == uuid:
                # Keep first result.
                result = characteristic
        return result
    # Search for all services (optionally by uuid).
    # Use with `async for`, e.g.
    #     async for characteristic in service.characteristics():
    # Note: must allow the loop to run to completion.
    def characteristics(self, uuid=None, timeout_ms=2000):
        return ClientDiscover(self.connection, ClientCharacteristic, self, timeout_ms, uuid)
    # For ClientDiscover
    def _start_discovery(connection, uuid=None):
        ble.gattc_discover_services(connection._conn_handle, uuid)
class BaseClientCharacteristic:
    """Shared read/write machinery for ClientCharacteristic and ClientDescriptor."""
    def __init__(self, value_handle, properties, uuid):
        # Used for read/write/notify ops.
        self._value_handle = value_handle
        # Which operations are supported.
        self.properties = properties
        # Allows comparison to a known uuid.
        self.uuid = uuid
        if properties & _FLAG_READ:
            # Fired for each read result and read done IRQ.
            self._read_event = None
            self._read_data = None
            # Used to indicate that the read is complete.
            self._read_status = None
        if (properties & _FLAG_WRITE) or (properties & _FLAG_WRITE_NO_RESPONSE):
            # Fired for the write done IRQ.
            self._write_event = None
            # Used to indicate that the write is complete.
            self._write_status = None
    # Register this value handle so events can find us.
    def _register_with_connection(self):
        self._connection()._characteristics[self._value_handle] = self
    # Map an incoming IRQ to an registered characteristic.
    def _find(conn_handle, value_handle):
        if connection := DeviceConnection._connected.get(conn_handle, None):
            if characteristic := connection._characteristics.get(value_handle, None):
                return characteristic
            else:
                # IRQ for a characteristic that we weren't expecting. e.g.
                # notification when we're not waiting on notified().
                # TODO: This will happen on btstack, which doesn't give us
                # value handle for the done event.
                return None
    def _check(self, flag):
        """Raise ValueError unless this characteristic supports *flag*."""
        if not (self.properties & flag):
            raise ValueError("Unsupported")
    # Issue a read to the characteristic.
    async def read(self, timeout_ms=1000):
        """Read and return the remote value; raises GattError on a non-zero status."""
        self._check(_FLAG_READ)
        # Make sure this conn_handle/value_handle is known.
        self._register_with_connection()
        # This will be set by the done IRQ.
        self._read_status = None
        # This will be set by the result and done IRQs. Re-use if possible.
        self._read_event = self._read_event or asyncio.ThreadSafeFlag()
        # Issue the read.
        ble.gattc_read(self._connection()._conn_handle, self._value_handle)
        with self._connection().timeout(timeout_ms):
            # The event will be set for each read result, then a final time for done.
            while self._read_status is None:
                await self._read_event.wait()
            if self._read_status != 0:
                raise GattError(self._read_status)
            return self._read_data
    # Map an incoming result IRQ to a registered characteristic.
    def _read_result(conn_handle, value_handle, data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._read_data = data
            characteristic._read_event.set()
    # Map an incoming read done IRQ to a registered characteristic.
    def _read_done(conn_handle, value_handle, status):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._read_status = status
            characteristic._read_event.set()
    async def write(self, data, response=None, timeout_ms=1000):
        """Write *data*; when *response* is true, waits for completion and checks status."""
        self._check(_FLAG_WRITE | _FLAG_WRITE_NO_RESPONSE)
        # If the response arg is unset, then default it to true if we only support write-with-response.
        if response is None:
            p = self.properties
            response = (p & _FLAG_WRITE) and not (p & _FLAG_WRITE_NO_RESPONSE)
        if response:
            # Same as read.
            self._register_with_connection()
            self._write_status = None
            self._write_event = self._write_event or asyncio.ThreadSafeFlag()
        # Issue the write.
        ble.gattc_write(self._connection()._conn_handle, self._value_handle, data, response)
        if response:
            with self._connection().timeout(timeout_ms):
                # The event will be set for the write done IRQ.
                await self._write_event.wait()
            if self._write_status != 0:
                raise GattError(self._write_status)
    # Map an incoming write done IRQ to a registered characteristic.
    def _write_done(conn_handle, value_handle, status):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._write_status = status
            characteristic._write_event.set()
# Represents a single characteristic supported by a service. Do not construct
# this class directly, instead use `async for characteristic in
# service.characteristics([uuid])` or `await service.characteristic(uuid)`.
class ClientCharacteristic(BaseClientCharacteristic):
    def __init__(self, service, end_handle, value_handle, properties, uuid):
        self.service = service
        self.connection = service.connection
        # Used for descriptor discovery. If available, otherwise assume just
        # past the value handle (enough for two descriptors without risking
        # going into the next characteristic).
        self._end_handle = end_handle if end_handle > value_handle else value_handle + 2
        super().__init__(value_handle, properties, uuid)
        if properties & _FLAG_NOTIFY:
            # Fired when a notification arrives.
            self._notify_event = asyncio.ThreadSafeFlag()
            # Data for the most recent notification.
            self._notify_queue = deque((), 1)
        if properties & _FLAG_INDICATE:
            # Same for indications.
            self._indicate_event = asyncio.ThreadSafeFlag()
            self._indicate_queue = deque((), 1)
    def __str__(self):
        return "Characteristic: {} {} {} {}".format(
            self._end_handle, self._value_handle, self.properties, self.uuid
        )
    def _connection(self):
        return self.service.connection
    # Search for a specific descriptor by uuid.
    async def descriptor(self, uuid, timeout_ms=2000):
        """Return the first descriptor matching *uuid*, or None if not found."""
        result = None
        # Make sure loop runs to completion.
        async for descriptor in self.descriptors(timeout_ms):
            if not result and descriptor.uuid == uuid:
                # Keep first result.
                result = descriptor
        return result
    # Search for all services (optionally by uuid).
    # Use with `async for`, e.g.
    #     async for descriptor in characteristic.descriptors():
    # Note: must allow the loop to run to completion.
    def descriptors(self, timeout_ms=2000):
        return ClientDiscover(self.connection, ClientDescriptor, self, timeout_ms)
    # For ClientDiscover
    def _start_discovery(service, uuid=None):
        ble.gattc_discover_characteristics(
            service.connection._conn_handle,
            service._start_handle,
            service._end_handle,
            uuid,
        )
    # Helper for notified() and indicated().
    async def _notified_indicated(self, queue, event, timeout_ms):
        # Ensure that events for this connection can route to this characteristic.
        self._register_with_connection()
        # If the queue is empty, then we need to wait. However, if the queue
        # has a single item, we also need to do a no-op wait in order to
        # clear the event flag (because the queue will become empty and
        # therefore the event should be cleared).
        if len(queue) <= 1:
            with self._connection().timeout(timeout_ms):
                await event.wait()
        # Either we started > 1 item, or the wait completed successfully, return
        # the front of the queue.
        return queue.popleft()
    # Wait for the next notification.
    # Will return immediately if a notification has already been received.
    async def notified(self, timeout_ms=None):
        self._check(_FLAG_NOTIFY)
        return await self._notified_indicated(self._notify_queue, self._notify_event, timeout_ms)
    def _on_notify_indicate(self, queue, event, data):
        """Enqueue incoming notify/indicate data and wake any waiter (IRQ context)."""
        # If we've gone from empty to one item, then wake something
        # blocking on `await char.notified()` (or `await char.indicated()`).
        wake = len(queue) == 0
        # Append the data. By default this is a deque with max-length==1, so it
        # replaces. But if capture is enabled then it will append.
        queue.append(data)
        if wake:
            # Queue is now non-empty. If something is waiting, it will be
            # worken. If something isn't waiting right now, then a future
            # caller to `await char.written()` will see the queue is
            # non-empty, and wait on the event if it's going to empty the
            # queue.
            event.set()
    # Map an incoming notify IRQ to a registered characteristic.
    def _on_notify(conn_handle, value_handle, notify_data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._on_notify_indicate(
                characteristic._notify_queue, characteristic._notify_event, notify_data
            )
    # Wait for the next indication.
    # Will return immediately if an indication has already been received.
    async def indicated(self, timeout_ms=None):
        self._check(_FLAG_INDICATE)
        return await self._notified_indicated(
            self._indicate_queue, self._indicate_event, timeout_ms
        )
    # Map an incoming indicate IRQ to a registered characteristic.
    def _on_indicate(conn_handle, value_handle, indicate_data):
        if characteristic := ClientCharacteristic._find(conn_handle, value_handle):
            characteristic._on_notify_indicate(
                characteristic._indicate_queue, characteristic._indicate_event, indicate_data
            )
    # Write to the Client Characteristic Configuration to subscribe to
    # notify/indications for this characteristic.
    async def subscribe(self, notify=True, indicate=False):
        """Enable (or disable) notifications/indications via the CCCD descriptor."""
        # Ensure that the generated notifications are dispatched in case the app
        # hasn't awaited on notified/indicated yet.
        self._register_with_connection()
        if cccd := await self.descriptor(bluetooth.UUID(_CCCD_UUID)):
            await cccd.write(struct.pack("<H", _CCCD_NOTIFY * notify + _CCCD_INDICATE * indicate))
        else:
            raise ValueError("CCCD not found")
# Represents a single descriptor supported by a characteristic. Do not construct
# this class directly, instead use `async for descriptors in
# characteristic.descriptors([uuid])` or `await characteristic.descriptor(uuid)`.
class ClientDescriptor(BaseClientCharacteristic):
    def __init__(self, characteristic, dsc_handle, uuid):
        self.characteristic = characteristic
        # Descriptors are treated as readable and writable-without-response.
        super().__init__(dsc_handle, _FLAG_READ | _FLAG_WRITE_NO_RESPONSE, uuid)
    def __str__(self):
        return "Descriptor: {} {} {}".format(self._value_handle, self.properties, self.uuid)
    def _connection(self):
        return self.characteristic.service.connection
    # For ClientDiscover
    def _start_discovery(characteristic, uuid=None):
        ble.gattc_discover_descriptors(
            characteristic._connection()._conn_handle,
            characteristic._value_handle,
            characteristic._end_handle,
        )

78
robot/aioble/core.py Normal file
View File

@ -0,0 +1,78 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
import bluetooth
# Logging verbosity: 0 = silent, 1 = errors, 2 = +warnings, 3 = +info.
log_level = 1


def log_error(*args):
    """Print an error-level message (enabled at log_level >= 1)."""
    if log_level <= 0:
        return
    print("[aioble] E:", *args)


def log_warn(*args):
    """Print a warning-level message (enabled at log_level >= 2)."""
    if log_level <= 1:
        return
    print("[aioble] W:", *args)


def log_info(*args):
    """Print an info-level message (enabled at log_level >= 3)."""
    if log_level <= 2:
        return
    print("[aioble] I:", *args)
class GattError(Exception):
    """Raised when a GATT operation completes with a non-zero status code."""

    def __init__(self, status):
        # Raw status value from the BLE stack (not passed to Exception.__init__).
        self._status = status
def ensure_active():
    """Activate the BLE stack if it is not already active.

    Pairing secrets are loaded (best-effort) before activation so the
    stack can use previously stored bonding keys.
    """
    if not ble.active():
        try:
            # security.py is optional -- it may not be present in the build.
            from .security import load_secrets

            load_secrets()
        except Exception:
            # Best-effort: a missing module or unreadable secrets file must
            # not prevent startup. Narrowed from a bare `except:` so that
            # KeyboardInterrupt / SystemExit still propagate.
            pass
        ble.active(True)
def config(*args, **kwargs):
    """Proxy for ble.config() that first ensures the stack is active."""
    ensure_active()
    return ble.config(*args, **kwargs)
# Because different functionality is enabled by which files are available the
# different modules can register their IRQ handlers and shutdown handlers
# dynamically.
_irq_handlers = []
_shutdown_handlers = []


def register_irq_handler(irq, shutdown):
    """Register a module's IRQ and/or shutdown hook (either may be None)."""
    for hook, registry in ((irq, _irq_handlers), (shutdown, _shutdown_handlers)):
        if hook:
            registry.append(hook)
def stop():
    """Deactivate the BLE stack and let each registered module clean up."""
    ble.active(False)
    # Run each module's shutdown hook (see register_irq_handler).
    for handler in _shutdown_handlers:
        handler()
# Dispatch IRQs to the registered sub-modules.
def ble_irq(event, data):
    """Fan an IRQ out to registered handlers; first non-None reply wins."""
    log_info(event, data)
    for dispatch in _irq_handlers:
        outcome = dispatch(event, data)
        if outcome is not None:
            # Some events (e.g. read requests) expect a value back.
            return outcome
# TODO: Allow this to be injected.
# Singleton BLE instance shared by all aioble modules; every stack IRQ is
# routed through ble_irq above.
ble = bluetooth.BLE()
ble.irq(ble_irq)

295
robot/aioble/device.py Normal file
View File

@ -0,0 +1,295 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import uasyncio as asyncio
import binascii
from .core import ble, register_irq_handler, log_error
# IRQ event code for a completed MTU exchange.
_IRQ_MTU_EXCHANGED = const(21)


# Raised by `with device.timeout()`.
class DeviceDisconnectedError(Exception):
    """The peer disconnected while an operation was in progress."""

    pass
def _device_irq(event, data):
    """IRQ hook: record the negotiated MTU on the matching connection."""
    # Only the MTU-exchange event is handled at the device level.
    if event != _IRQ_MTU_EXCHANGED:
        return
    conn_handle, mtu = data
    connection = DeviceConnection._connected.get(conn_handle, None)
    if connection is None:
        return
    connection.mtu = mtu
    # Wake any exchange_mtu() call waiting on this connection.
    if connection._mtu_event:
        connection._mtu_event.set()


register_irq_handler(_device_irq, None)
# Context manager to allow an operation to be cancelled by timeout or device
# disconnection. Don't use this directly -- use `with connection.timeout(ms):`
# instead.
class DeviceTimeout:
    def __init__(self, connection, timeout_ms):
        # connection: DeviceConnection or None; timeout_ms: int or None.
        self._connection = connection
        self._timeout_ms = timeout_ms
        # We allow either (or both) connection and timeout_ms to be None. This
        # allows this to be used either as a just-disconnect, just-timeout, or
        # no-op.
        # This task is active while the operation is in progress. It sleeps
        # until the timeout, and then cancels the working task. If the working
        # task completes, __exit__ will cancel the sleep.
        self._timeout_task = None
        # This is the task waiting for the actual operation to complete.
        # Usually this is waiting on an event that will be set() by an IRQ
        # handler.
        self._task = asyncio.current_task()
        # Tell the connection that if it disconnects, it should cancel this
        # operation (by cancelling self._task).
        if connection:
            connection._timeouts.append(self)

    async def _timeout_sleep(self):
        """Background sleeper: cancel the working task when the timeout fires."""
        try:
            await asyncio.sleep_ms(self._timeout_ms)
        except asyncio.CancelledError:
            # The operation completed successfully and this timeout task was
            # cancelled by __exit__.
            return
        # The sleep completed, so we should trigger the timeout. Set
        # self._timeout_task to None so that we can tell the difference
        # between a disconnect and a timeout in __exit__.
        self._timeout_task = None
        self._task.cancel()

    def __enter__(self):
        if self._timeout_ms:
            # Schedule the timeout waiter.
            self._timeout_task = asyncio.create_task(self._timeout_sleep())

    def __exit__(self, exc_type, exc_val, exc_traceback):
        # One of five things happened:
        #   1 - The operation completed successfully.
        #   2 - The operation timed out.
        #   3 - The device disconnected.
        #   4 - The operation failed for a different exception.
        #   5 - The task was cancelled by something else.
        # Don't need the connection to tell us about disconnection anymore.
        if self._connection:
            self._connection._timeouts.remove(self)
        try:
            if exc_type == asyncio.CancelledError:
                # Case 2, we started a timeout and it's completed.
                # (_timeout_sleep cleared _timeout_task before cancelling us.)
                if self._timeout_ms and self._timeout_task is None:
                    raise asyncio.TimeoutError
                # Case 3, we have a disconnected device.
                if self._connection and self._connection._conn_handle is None:
                    raise DeviceDisconnectedError
                # Case 5, something else cancelled us.
                # Allow the cancellation to propagate.
                return
            # Case 1 & 4. Either way, just stop the timeout task and let the
            # exception (if case 4) propagate.
        finally:
            # In all cases, if the timeout is still running, cancel it.
            if self._timeout_task:
                self._timeout_task.cancel()
class Device:
    """A BLE peer identified by address type plus address.

    The address may be given as 6 raw bytes or as a colon-separated hex
    string such as "01:02:03:04:05:06".
    """

    def __init__(self, addr_type, addr):
        # Public properties
        self.addr_type = addr_type
        if len(addr) == 6:
            self.addr = addr
        else:
            # Hex-string form: strip the colons and convert to raw bytes.
            self.addr = binascii.unhexlify(addr.replace(":", ""))
        self._connection = None

    def __eq__(self, rhs):
        return (self.addr_type, self.addr) == (rhs.addr_type, rhs.addr)

    def __hash__(self):
        return hash((self.addr_type, self.addr))

    def __str__(self):
        kind = "ADDR_PUBLIC" if self.addr_type == 0 else "ADDR_RANDOM"
        suffix = ", CONNECTED" if self._connection else ""
        return f"Device({kind}, {self.addr_hex()}{suffix})"

    def addr_hex(self):
        # Colon-separated lowercase hex, e.g. "01:02:03:04:05:06".
        return binascii.hexlify(self.addr, ":").decode()

    async def connect(self, timeout_ms=10000):
        """Connect to this device (central role); returns a DeviceConnection."""
        if self._connection:
            return self._connection
        # Forward to implementation in central.py.
        from .central import _connect

        # The DeviceConnection constructor assigns itself to self._connection.
        await _connect(DeviceConnection(self), timeout_ms)
        # Start the device task that will clean up after disconnection.
        self._connection._run_task()
        return self._connection
class DeviceConnection:
    """An active connection to a Device (either role).

    Instances are registered in the class-level `_connected` map so that
    IRQ handlers can route events by connection handle.
    """

    # Global map of connection handle to active devices (for IRQ mapping).
    _connected = {}

    def __init__(self, device):
        self.device = device
        device._connection = self
        # Security state; updated by the encryption-update IRQ in security.py.
        self.encrypted = False
        self.authenticated = False
        self.bonded = False
        self.key_size = False
        # Negotiated MTU; set by the MTU-exchange IRQ (None until exchanged).
        self.mtu = None
        self._conn_handle = None
        # This event is fired by the IRQ both for connection and disconnection
        # and controls the device_task.
        self._event = None
        # If we're waiting for a pending MTU exchange.
        self._mtu_event = None
        # In-progress client discovery instance (e.g. services, chars,
        # descriptors) used for IRQ mapping.
        self._discover = None
        # Map of value handle to characteristic (so that IRQs with
        # conn_handle,value_handle can route to them). See
        # ClientCharacteristic._find for where this is used.
        self._characteristics = {}
        self._task = None
        # DeviceTimeout instances that are currently waiting on this device
        # and need to be notified if disconnection occurs.
        self._timeouts = []
        # Fired by the encryption update event.
        self._pair_event = None
        # Active L2CAP channel for this device.
        # TODO: Support more than one concurrent channel.
        self._l2cap_channel = None

    # While connected, this tasks waits for disconnection then cleans up.
    async def device_task(self):
        assert self._conn_handle is not None
        # Wait for the (either central or peripheral) disconnected irq.
        await self._event.wait()
        # Mark the device as disconnected.
        del DeviceConnection._connected[self._conn_handle]
        self._conn_handle = None
        self.device._connection = None
        # Cancel any in-progress operations on this device.
        for t in self._timeouts:
            t._task.cancel()

    def _run_task(self):
        # Event will be already created this if we initiated connection.
        self._event = self._event or asyncio.ThreadSafeFlag()
        self._task = asyncio.create_task(self.device_task())

    async def disconnect(self, timeout_ms=2000):
        """Actively disconnect and wait for cleanup to finish."""
        await self.disconnected(timeout_ms, disconnect=True)

    async def disconnected(self, timeout_ms=60000, disconnect=False):
        """Wait for disconnection (optionally initiating it first)."""
        if not self.is_connected():
            return
        # The task must have been created after successful connection.
        assert self._task
        if disconnect:
            try:
                ble.gap_disconnect(self._conn_handle)
            except OSError as e:
                # Best-effort: still wait for the device task below.
                log_error("Disconnect", e)
        with DeviceTimeout(None, timeout_ms):
            await self._task

    # Retrieve a single service matching this uuid.
    async def service(self, uuid, timeout_ms=2000):
        result = None
        # Make sure loop runs to completion.
        async for service in self.services(uuid, timeout_ms):
            # Keep only the first match; continue iterating regardless.
            if not result and service.uuid == uuid:
                result = service
        return result

    # Search for all services (optionally by uuid).
    # Use with `async for`, e.g.
    #     async for service in device.services():
    # Note: must allow the loop to run to completion.
    # TODO: disconnection / timeout
    def services(self, uuid=None, timeout_ms=2000):
        # Deferred import: client.py is optional in the build.
        from .client import ClientDiscover, ClientService

        return ClientDiscover(self, ClientService, self, timeout_ms, uuid)

    async def pair(self, *args, **kwargs):
        # Forward to implementation in security.py.
        from .security import pair

        await pair(self, *args, **kwargs)

    def is_connected(self):
        return self._conn_handle is not None

    # Use with `with` to simplify disconnection and timeout handling.
    def timeout(self, timeout_ms):
        return DeviceTimeout(self, timeout_ms)

    async def exchange_mtu(self, mtu=None, timeout_ms=1000):
        """Initiate an MTU exchange; returns the negotiated MTU."""
        if not self.is_connected():
            raise ValueError("Not connected")
        if mtu:
            ble.config(mtu=mtu)
        self._mtu_event = self._mtu_event or asyncio.ThreadSafeFlag()
        ble.gattc_exchange_mtu(self._conn_handle)
        # _device_irq sets self.mtu and fires the event.
        with self.timeout(timeout_ms):
            await self._mtu_event.wait()
        return self.mtu

    # Wait for a connection on an L2CAP connection-oriented-channel.
    async def l2cap_accept(self, psm, mtu, timeout_ms=None):
        from .l2cap import accept

        return await accept(self, psm, mtu, timeout_ms)

    # Attempt to connect to a listening device.
    async def l2cap_connect(self, psm, mtu, timeout_ms=1000):
        from .l2cap import connect

        return await connect(self, psm, mtu, timeout_ms)

    # Context manager -- automatically disconnect.
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        await self.disconnect()

214
robot/aioble/l2cap.py Normal file
View File

@ -0,0 +1,214 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import uasyncio as asyncio
from .core import ble, log_error, register_irq_handler
from .device import DeviceConnection
# IRQ event codes for the L2CAP connection-oriented-channel events.
_IRQ_L2CAP_ACCEPT = const(22)
_IRQ_L2CAP_CONNECT = const(23)
_IRQ_L2CAP_DISCONNECT = const(24)
_IRQ_L2CAP_RECV = const(25)
_IRQ_L2CAP_SEND_READY = const(26)

# Once we start listening we're listening forever. (Limitation in NimBLE)
_listening = False
def _l2cap_irq(event, data):
    """IRQ hook: update the active channel's state and wake its waiter."""
    if event not in (
        _IRQ_L2CAP_CONNECT,
        _IRQ_L2CAP_DISCONNECT,
        _IRQ_L2CAP_RECV,
        _IRQ_L2CAP_SEND_READY,
    ):
        return
    # All the L2CAP events start with (conn_handle, cid, ...)
    if connection := DeviceConnection._connected.get(data[0], None):
        if channel := connection._l2cap_channel:
            # Expect to match the cid for this conn handle (unless we're
            # waiting for connection in which case channel._cid is None).
            if channel._cid is not None and channel._cid != data[1]:
                return
            # Update the channel object with new information.
            if event == _IRQ_L2CAP_CONNECT:
                _, channel._cid, _, channel.our_mtu, channel.peer_mtu = data
            elif event == _IRQ_L2CAP_DISCONNECT:
                _, _, psm, status = data
                channel._status = status
                # _cid = None marks the channel as disconnected.
                channel._cid = None
                connection._l2cap_channel = None
            elif event == _IRQ_L2CAP_RECV:
                channel._data_ready = True
            elif event == _IRQ_L2CAP_SEND_READY:
                channel._stalled = False
            # Notify channel.
            channel._event.set()


def _l2cap_shutdown():
    """Shutdown hook: forget the listening state."""
    global _listening
    _listening = False


register_irq_handler(_l2cap_irq, _l2cap_shutdown)
# The channel was disconnected during a send/recvinto/flush.
class L2CAPDisconnectedError(Exception):
    pass


# Failed to connect to connection (argument is status).
class L2CAPConnectionError(Exception):
    pass
class L2CAPChannel:
    """A connection-oriented L2CAP channel on an existing connection.

    Only one channel per connection is supported (see the TODO on
    DeviceConnection._l2cap_channel).
    """

    def __init__(self, connection):
        if not connection.is_connected():
            raise ValueError("Not connected")
        if connection._l2cap_channel:
            raise ValueError("Already has channel")
        connection._l2cap_channel = self
        self._connection = connection
        # Maximum size that the other side can send to us.
        self.our_mtu = 0
        # Maximum size that we can send.
        self.peer_mtu = 0
        # Set back to None on disconnection.
        self._cid = None
        # Set during disconnection.
        self._status = 0
        # If true, must wait for _IRQ_L2CAP_SEND_READY IRQ before sending.
        self._stalled = False
        # Has received a _IRQ_L2CAP_RECV since the buffer was last emptied.
        self._data_ready = False
        # Single event shared by connect/disconnect/recv/send-ready waits.
        self._event = asyncio.ThreadSafeFlag()

    def _assert_connected(self):
        # _cid is None both before connect and after disconnect.
        if self._cid is None:
            raise L2CAPDisconnectedError

    async def recvinto(self, buf, timeout_ms=None):
        """Receive into buf; returns the number of bytes read."""
        self._assert_connected()
        # Wait until the data_ready flag is set. This flag is only ever set by
        # the event and cleared by this function.
        with self._connection.timeout(timeout_ms):
            while not self._data_ready:
                await self._event.wait()
                # The event also fires on disconnect -- re-check each wake.
                self._assert_connected()
        self._assert_connected()
        # Extract up to len(buf) bytes from the channel buffer.
        n = ble.l2cap_recvinto(self._connection._conn_handle, self._cid, buf)
        # Check if there's still remaining data in the channel buffers.
        self._data_ready = ble.l2cap_recvinto(self._connection._conn_handle, self._cid, None) > 0
        return n

    # Synchronously see if there's data ready.
    def available(self):
        self._assert_connected()
        return self._data_ready

    # Waits until the channel is free and then sends buf.
    # If the buffer is larger than the MTU it will be sent in chunks.
    async def send(self, buf, timeout_ms=None, chunk_size=None):
        self._assert_connected()
        offset = 0
        chunk_size = min(self.our_mtu * 2, self.peer_mtu, chunk_size or self.peer_mtu)
        # memoryview avoids copying each chunk out of buf.
        mv = memoryview(buf)
        while offset < len(buf):
            if self._stalled:
                await self.flush(timeout_ms)
            # l2cap_send returns True if you can send immediately.
            self._stalled = not ble.l2cap_send(
                self._connection._conn_handle,
                self._cid,
                mv[offset : offset + chunk_size],
            )
            offset += chunk_size

    async def flush(self, timeout_ms=None):
        """Wait until the stack is ready to accept more outgoing data."""
        self._assert_connected()
        # Wait for the _stalled flag to be cleared by the IRQ.
        with self._connection.timeout(timeout_ms):
            while self._stalled:
                await self._event.wait()
                # The event also fires on disconnect -- re-check each wake.
                self._assert_connected()

    async def disconnect(self, timeout_ms=1000):
        if self._cid is None:
            # Already disconnected (or never connected).
            return
        # Wait for the cid to be cleared by the disconnect IRQ.
        ble.l2cap_disconnect(self._connection._conn_handle, self._cid)
        await self.disconnected(timeout_ms)

    async def disconnected(self, timeout_ms=1000):
        """Wait until the disconnect IRQ clears the channel id."""
        with self._connection.timeout(timeout_ms):
            while self._cid is not None:
                await self._event.wait()

    # Context manager -- automatically disconnect.
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_traceback):
        await self.disconnect()
# Use connection.l2cap_accept() instead of calling this directly.
async def accept(connection, psm, mtu, timeout_ms):
    """Listen for and accept one incoming L2CAP channel on `connection`."""
    global _listening
    channel = L2CAPChannel(connection)
    # Start the stack listening if necessary.
    # (Listening is permanent once started -- NimBLE limitation.)
    if not _listening:
        ble.l2cap_listen(psm, mtu)
        _listening = True
    # Wait for the connect irq from the remote connection.
    with connection.timeout(timeout_ms):
        await channel._event.wait()
    return channel
# Use connection.l2cap_connect() instead of calling this directly.
async def connect(connection, psm, mtu, timeout_ms):
    """Open an L2CAP channel to a listening peer.

    Raises L2CAPConnectionError (with the stack status) on failure.
    """
    if _listening:
        # NimBLE limitation: can't be both L2CAP client and server.
        raise ValueError("Can't connect while listening")
    channel = L2CAPChannel(connection)
    with connection.timeout(timeout_ms):
        ble.l2cap_connect(connection._conn_handle, psm, mtu)
        # Wait for the connect irq from the remote connection.
        # If the connection fails, we get a disconnect event (with status) instead.
        await channel._event.wait()
    # A successful connect IRQ populates _cid; a disconnect leaves it None.
    if channel._cid is not None:
        return channel
    else:
        raise L2CAPConnectionError(channel._status)

179
robot/aioble/peripheral.py Normal file
View File

@ -0,0 +1,179 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
import bluetooth
import struct
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
)
from .device import Device, DeviceConnection, DeviceTimeout
# IRQ event codes for central connect/disconnect (peripheral role).
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)

# Advertising-data (AD) field type codes.
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_UUID16_MORE = const(0x2)
_ADV_TYPE_UUID32_MORE = const(0x4)
_ADV_TYPE_UUID128_MORE = const(0x6)
_ADV_TYPE_APPEARANCE = const(0x19)
_ADV_TYPE_MANUFACTURER = const(0xFF)

# Maximum size of a single advertising / scan-response payload.
_ADV_PAYLOAD_MAX_LEN = const(31)

# Connection handed from the connect IRQ to advertise(); signalled via
# _connect_event.
_incoming_connection = None
_connect_event = None
def _peripheral_irq(event, data):
    """IRQ hook: handle central connect/disconnect while advertising."""
    global _incoming_connection
    if event == _IRQ_CENTRAL_CONNECT:
        conn_handle, addr_type, addr = data
        # Create, initialise, and register the device.
        # bytes(addr) copies the memoryview the IRQ hands us.
        device = Device(addr_type, bytes(addr))
        _incoming_connection = DeviceConnection(device)
        _incoming_connection._conn_handle = conn_handle
        DeviceConnection._connected[conn_handle] = _incoming_connection
        # Signal advertise() to return the connected device.
        _connect_event.set()
    elif event == _IRQ_CENTRAL_DISCONNECT:
        conn_handle, _, _ = data
        if connection := DeviceConnection._connected.get(conn_handle, None):
            # Tell the device_task that it should terminate.
            connection._event.set()


def _peripheral_shutdown():
    """Shutdown hook: reset module state for a clean stack restart."""
    global _incoming_connection, _connect_event
    _incoming_connection = None
    _connect_event = None


register_irq_handler(_peripheral_irq, _peripheral_shutdown)
# Advertising payloads are repeated packets of the following form:
#   1 byte data length (N + 1)
#   1 byte type (see constants below)
#   N bytes type-specific data
def _append(adv_data, resp_data, adv_type, value):
    """Append one AD structure, preferring adv_data, overflowing to resp_data.

    adv_data is mutated in place; the (possibly newly created) resp_data
    is returned. Raises ValueError if the field fits in neither payload.
    """
    field = struct.pack("BB", len(value) + 1, adv_type) + value
    # Try the primary advertising payload first.
    if len(field) + len(adv_data) < _ADV_PAYLOAD_MAX_LEN:
        adv_data.extend(field)
        return resp_data
    # Fall back to the scan-response payload, creating it on first overflow.
    used = len(resp_data) if resp_data else 0
    if len(field) + used < _ADV_PAYLOAD_MAX_LEN:
        if not resp_data:
            # Overflow into resp_data for the first time.
            resp_data = bytearray()
        resp_data.extend(field)
        return resp_data
    raise ValueError("Advertising payload too long")
async def advertise(
    interval_us,
    adv_data=None,
    resp_data=None,
    connectable=True,
    limited_disc=False,
    br_edr=False,
    name=None,
    services=None,
    appearance=0,
    manufacturer=None,
    timeout_ms=None,
):
    """Advertise and (if connectable) wait for a central to connect.

    Returns the new DeviceConnection, or raises asyncio.TimeoutError if
    timeout_ms elapses first. Cancelling the task stops advertising.
    """
    global _incoming_connection, _connect_event
    ensure_active()
    if not adv_data and not resp_data:
        # If the user didn't manually specify adv_data / resp_data then
        # construct them from the kwargs. Keep adding fields to adv_data,
        # overflowing to resp_data if necessary.
        # TODO: Try and do better bin-packing than just concatenating in
        # order?
        adv_data = bytearray()
        resp_data = _append(
            adv_data,
            resp_data,
            _ADV_TYPE_FLAGS,
            struct.pack("B", (0x01 if limited_disc else 0x02) + (0x18 if br_edr else 0x04)),
        )
        # Services are prioritised to go in the advertising data because iOS supports
        # filtering scan results by service only, so services must come first.
        if services:
            for uuid in services:
                b = bytes(uuid)
                # UUID length selects the AD field type (16/32/128-bit).
                if len(b) == 2:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID16_COMPLETE, b)
                elif len(b) == 4:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID32_COMPLETE, b)
                elif len(b) == 16:
                    resp_data = _append(adv_data, resp_data, _ADV_TYPE_UUID128_COMPLETE, b)
        if name:
            resp_data = _append(adv_data, resp_data, _ADV_TYPE_NAME, name)
        if appearance:
            # See org.bluetooth.characteristic.gap.appearance.xml
            resp_data = _append(
                adv_data, resp_data, _ADV_TYPE_APPEARANCE, struct.pack("<H", appearance)
            )
        if manufacturer:
            # manufacturer is (company_id, payload_bytes).
            resp_data = _append(
                adv_data,
                resp_data,
                _ADV_TYPE_MANUFACTURER,
                struct.pack("<H", manufacturer[0]) + manufacturer[1],
            )
    _connect_event = _connect_event or asyncio.ThreadSafeFlag()
    ble.gap_advertise(interval_us, adv_data=adv_data, resp_data=resp_data, connectable=connectable)
    try:
        # Allow optional timeout for a central to connect to us (or just to stop advertising).
        with DeviceTimeout(None, timeout_ms):
            await _connect_event.wait()
        # Get the newly connected connection to the central and start a task
        # to wait for disconnection.
        result = _incoming_connection
        _incoming_connection = None
        # This mirrors what connecting to a central does.
        result._run_task()
        return result
    except asyncio.CancelledError:
        # Something else cancelled this task (to manually stop advertising).
        ble.gap_advertise(None)
    except asyncio.TimeoutError:
        # DeviceTimeout waiting for connection.
        ble.gap_advertise(None)
        raise

178
robot/aioble/security.py Normal file
View File

@ -0,0 +1,178 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const, schedule
import uasyncio as asyncio
import binascii
import json
from .core import log_info, log_warn, ble, register_irq_handler
from .device import DeviceConnection
# IRQ event codes for the security/pairing events.
_IRQ_ENCRYPTION_UPDATE = const(28)
_IRQ_GET_SECRET = const(29)
_IRQ_SET_SECRET = const(30)
_IRQ_PASSKEY_ACTION = const(31)

# IO capability levels advertised to the peer during pairing.
_IO_CAPABILITY_DISPLAY_ONLY = const(0)
_IO_CAPABILITY_DISPLAY_YESNO = const(1)
_IO_CAPABILITY_KEYBOARD_ONLY = const(2)
_IO_CAPABILITY_NO_INPUT_OUTPUT = const(3)
_IO_CAPABILITY_KEYBOARD_DISPLAY = const(4)

# Passkey actions the stack may request of us.
_PASSKEY_ACTION_INPUT = const(2)
_PASSKEY_ACTION_DISP = const(3)
_PASSKEY_ACTION_NUMCMP = const(4)

# File where pairing secrets are persisted as JSON.
_DEFAULT_PATH = "ble_secrets.json"

# In-memory secret store: (sec_type, key_bytes) -> value_bytes.
_secrets = {}
# True when _secrets has unsaved changes (see _save_secrets).
_modified = False
# Active secrets file path (None until load_secrets/_save_secrets runs).
_path = None
# Must call this before stack startup.
def load_secrets(path=None):
    """Load pairing secrets from a JSON file into the in-memory store."""
    global _path, _secrets
    # Use path if specified, otherwise use previous path, otherwise use
    # default path.
    _path = path or _path or _DEFAULT_PATH
    # Reset old secrets.
    _secrets = {}
    try:
        with open(_path, "r") as f:
            entries = json.load(f)
            for sec_type, key, value in entries:
                # Decode bytes from base64 (the format _save_secrets writes).
                _secrets[sec_type, binascii.a2b_base64(key)] = binascii.a2b_base64(value)
    except:
        # Missing or malformed file: start with an empty store.
        log_warn("No secrets available")
# Call this whenever the secrets dict changes.
def _save_secrets(arg=None):
    """Persist the in-memory secrets to _path as JSON, if modified.

    The unused `arg` parameter exists because this is invoked via
    micropython.schedule from the IRQ handler.
    """
    global _modified, _path
    _path = _path or _DEFAULT_PATH
    if not _modified:
        # Only save if the secrets changed.
        return
    with open(_path, "w") as f:
        # Encode bytes as base64 text. The .decode() is required because
        # b2a_base64 returns bytes, which json cannot serialize as a
        # string; load_secrets round-trips these with a2b_base64 (which
        # tolerates the trailing newline b2a_base64 appends).
        json_secrets = [
            (sec_type, binascii.b2a_base64(key).decode(), binascii.b2a_base64(value).decode())
            for (sec_type, key), value in _secrets.items()
        ]
        json.dump(json_secrets, f)
    _modified = False
def _security_irq(event, data):
    """IRQ hook: track encryption state and service the secret store."""
    global _modified
    if event == _IRQ_ENCRYPTION_UPDATE:
        # Connection has updated (usually due to pairing).
        conn_handle, encrypted, authenticated, bonded, key_size = data
        log_info("encryption update", conn_handle, encrypted, authenticated, bonded, key_size)
        if connection := DeviceConnection._connected.get(conn_handle, None):
            connection.encrypted = encrypted
            connection.authenticated = authenticated
            connection.bonded = bonded
            connection.key_size = key_size
            # TODO: Handle failure.
            if encrypted and connection._pair_event:
                # Wake a pending pair() call.
                connection._pair_event.set()
    elif event == _IRQ_SET_SECRET:
        sec_type, key, value = data
        # Copy out of the IRQ-provided memoryviews.
        key = sec_type, bytes(key)
        value = bytes(value) if value else None
        log_info("set secret:", key, value)
        if value is None:
            # Delete secret.
            if key not in _secrets:
                return False
            del _secrets[key]
        else:
            # Save secret.
            _secrets[key] = value
        # Queue up a save (don't synchronously write to flash).
        _modified = True
        schedule(_save_secrets, None)
        return True
    elif event == _IRQ_GET_SECRET:
        sec_type, index, key = data
        log_info("get secret:", sec_type, index, bytes(key) if key else None)
        if key is None:
            # Return the index'th secret of this type.
            i = 0
            for (t, _key), value in _secrets.items():
                if t == sec_type:
                    if i == index:
                        return value
                    i += 1
            return None
        else:
            # Return the secret for this key (or None).
            key = sec_type, bytes(key)
            return _secrets.get(key, None)
    elif event == _IRQ_PASSKEY_ACTION:
        # Passkey handling is not implemented yet -- just log the request.
        conn_handle, action, passkey = data
        log_info("passkey action", conn_handle, action, passkey)
        # if action == _PASSKEY_ACTION_NUMCMP:
        #     # TODO: Show this passkey and confirm accept/reject.
        #     accept = 1
        #     self._ble.gap_passkey(conn_handle, action, accept)
        # elif action == _PASSKEY_ACTION_DISP:
        #     # TODO: Generate and display a passkey so the remote device can enter it.
        #     passkey = 123456
        #     self._ble.gap_passkey(conn_handle, action, passkey)
        # elif action == _PASSKEY_ACTION_INPUT:
        #     # TODO: Ask the user to enter the passkey shown on the remote device.
        #     passkey = 123456
        #     self._ble.gap_passkey(conn_handle, action, passkey)
        # else:
        #     log_warn("unknown passkey action")


def _security_shutdown():
    """Shutdown hook: drop all in-memory security state."""
    global _secrets, _modified, _path
    _secrets = {}
    _modified = False
    _path = None


register_irq_handler(_security_irq, _security_shutdown)
# Use device.pair() rather than calling this directly.
async def pair(
    connection,
    bond=True,
    le_secure=True,
    mitm=False,
    io=_IO_CAPABILITY_NO_INPUT_OUTPUT,
    timeout_ms=20000,
):
    """Initiate pairing on `connection` and wait for encryption to come up.

    The _pair_event is set by the encryption-update IRQ in _security_irq.
    """
    # Configure the stack's pairing parameters before initiating.
    ble.config(bond=bond, le_secure=le_secure, mitm=mitm, io=io)
    with connection.timeout(timeout_ms):
        connection._pair_event = asyncio.ThreadSafeFlag()
        ble.gap_pair(connection._conn_handle)
        await connection._pair_event.wait()
        # TODO: Allow the passkey action to return to here and
        # invoke a callback or task to process the action.

344
robot/aioble/server.py Normal file
View File

@ -0,0 +1,344 @@
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
from micropython import const
from collections import deque
import bluetooth
import uasyncio as asyncio
from .core import (
ensure_active,
ble,
log_info,
log_error,
log_warn,
register_irq_handler,
GattError,
)
from .device import DeviceConnection, DeviceTimeout
# Map of value handle -> server-side characteristic/descriptor, used to
# route GATTS IRQs to the owning object.
_registered_characteristics = {}

# IRQ event codes for the server (GATTS) role.
_IRQ_GATTS_WRITE = const(3)
_IRQ_GATTS_READ_REQUEST = const(4)
_IRQ_GATTS_INDICATE_DONE = const(20)

# Characteristic property/permission flag bits (match the bluetooth module).
_FLAG_READ = const(0x0002)
_FLAG_WRITE_NO_RESPONSE = const(0x0004)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
_FLAG_INDICATE = const(0x0020)
_FLAG_READ_ENCRYPTED = const(0x0200)
_FLAG_READ_AUTHENTICATED = const(0x0400)
_FLAG_READ_AUTHORIZED = const(0x0800)
_FLAG_WRITE_ENCRYPTED = const(0x1000)
_FLAG_WRITE_AUTHENTICATED = const(0x2000)
_FLAG_WRITE_AUTHORIZED = const(0x4000)

# aioble-internal flag (outside the bluetooth module's range): queue every
# write's value rather than only tracking the most recent writer.
_FLAG_WRITE_CAPTURE = const(0x10000)

# Descriptor permission bits.
_FLAG_DESC_READ = const(1)
_FLAG_DESC_WRITE = const(2)

# Max pending captured writes shared across all capture characteristics.
_WRITE_CAPTURE_QUEUE_LIMIT = const(10)
def _server_irq(event, data):
    """IRQ hook: route server (GATTS) events to the Characteristic helpers."""
    if event == _IRQ_GATTS_WRITE:
        conn_handle, attr_handle = data
        Characteristic._remote_write(conn_handle, attr_handle)
    elif event == _IRQ_GATTS_READ_REQUEST:
        conn_handle, attr_handle = data
        # The returned value is passed back to the stack by ble_irq.
        return Characteristic._remote_read(conn_handle, attr_handle)
    elif event == _IRQ_GATTS_INDICATE_DONE:
        conn_handle, value_handle, status = data
        Characteristic._indicate_done(conn_handle, value_handle, status)


def _server_shutdown():
    """Shutdown hook: clear registrations and the shared capture machinery."""
    global _registered_characteristics
    _registered_characteristics = {}
    if hasattr(BaseCharacteristic, "_capture_task"):
        BaseCharacteristic._capture_task.cancel()
        # Remove the class-level state so _init_capture() recreates it.
        del BaseCharacteristic._capture_queue
        del BaseCharacteristic._capture_write_event
        del BaseCharacteristic._capture_consumed_event
        del BaseCharacteristic._capture_task


register_irq_handler(_server_irq, _server_shutdown)
class Service:
    """A locally-hosted GATT service: a UUID plus its characteristics."""

    def __init__(self, uuid):
        self.uuid = uuid
        # Populated by the Characteristic constructor.
        self.characteristics = []

    # Generate tuple for gatts_register_services.
    def _tuple(self):
        registration = tuple(c._tuple() for c in self.characteristics)
        return (self.uuid, registration)
class BaseCharacteristic:
    """Shared behavior for server-side characteristics and descriptors."""

    def _register(self, value_handle):
        # Called during server registration once the stack assigns a handle.
        self._value_handle = value_handle
        _registered_characteristics[value_handle] = self
        # Flush any value that was written before registration into the db.
        if self._initial is not None:
            self.write(self._initial)
            self._initial = None

    # Read value from local db.
    def read(self):
        if self._value_handle is None:
            # Not registered yet -- fall back to the pending initial value.
            return self._initial or b""
        else:
            return ble.gatts_read(self._value_handle)

    # Write value to local db, and optionally notify/indicate subscribers.
    def write(self, data, send_update=False):
        if self._value_handle is None:
            # Not registered yet -- defer until _register().
            self._initial = data
        else:
            ble.gatts_write(self._value_handle, data, send_update)

    # When the a capture-enabled characteristic is created, create the
    # necessary events (if not already created).
    @staticmethod
    def _init_capture():
        if hasattr(BaseCharacteristic, "_capture_queue"):
            # Already initialised by an earlier capture characteristic.
            return
        BaseCharacteristic._capture_queue = deque((), _WRITE_CAPTURE_QUEUE_LIMIT)
        BaseCharacteristic._capture_write_event = asyncio.ThreadSafeFlag()
        BaseCharacteristic._capture_consumed_event = asyncio.ThreadSafeFlag()
        BaseCharacteristic._capture_task = asyncio.create_task(
            BaseCharacteristic._run_capture_task()
        )

    # Monitor the shared queue for incoming characteristic writes and forward
    # them sequentially to the individual characteristic events.
    @staticmethod
    async def _run_capture_task():
        write = BaseCharacteristic._capture_write_event
        consumed = BaseCharacteristic._capture_consumed_event
        q = BaseCharacteristic._capture_queue
        while True:
            if len(q):
                conn, data, characteristic = q.popleft()
                # Let the characteristic waiting in `written()` know that it
                # can proceed.
                characteristic._write_data = (conn, data)
                characteristic._write_event.set()
                # Wait for the characteristic to complete `written()` before
                # continuing.
                await consumed.wait()
            if not len(q):
                await write.wait()

    # Wait for a write on this characteristic. Returns the connection that did
    # the write, or a tuple of (connection, value) if capture is enabled for
    # this characteristics.
    async def written(self, timeout_ms=None):
        if not hasattr(self, "_write_event"):
            # Not a writable characteristic.
            return
        # If no write has been seen then we need to wait. If the event has
        # already been set this will clear the event and continue
        # immediately. In regular mode, this is set by the write IRQ
        # directly (in _remote_write). In capture mode, this is set when it's
        # our turn by _capture_task.
        with DeviceTimeout(None, timeout_ms):
            await self._write_event.wait()
        # Return the write data and clear the stored copy.
        # In default usage this will be just the connection handle.
        # In capture mode this will be a tuple of (connection_handle, received_data)
        data = self._write_data
        self._write_data = None
        if self.flags & _FLAG_WRITE_CAPTURE:
            # Notify the shared queue monitor that the event has been consumed
            # by the caller to `written()` and another characteristic can now
            # proceed.
            BaseCharacteristic._capture_consumed_event.set()
        return data

    def on_read(self, connection):
        # Subclass hook invoked from the read-request IRQ; 0 allows the read.
        return 0

    def _remote_write(conn_handle, value_handle):
        # Note: invoked without an instance (via the IRQ dispatcher).
        if characteristic := _registered_characteristics.get(value_handle, None):
            # If we've gone from empty to one item, then wake something
            # blocking on `await char.written()`.
            conn = DeviceConnection._connected.get(conn_handle, None)
            if characteristic.flags & _FLAG_WRITE_CAPTURE:
                # For capture, we append the connection and the written value
                # value to the shared queue along with the matching characteristic object.
                # The deque will enforce the max queue len.
                data = characteristic.read()
                BaseCharacteristic._capture_queue.append((conn, data, characteristic))
                BaseCharacteristic._capture_write_event.set()
            else:
                # Store the write connection handle to be later used to retrieve the data
                # then set event to handle in written() task.
                characteristic._write_data = conn
                characteristic._write_event.set()

    def _remote_read(conn_handle, value_handle):
        # Note: invoked without an instance (via the IRQ dispatcher).
        if characteristic := _registered_characteristics.get(value_handle, None):
            return characteristic.on_read(DeviceConnection._connected.get(conn_handle, None))
class Characteristic(BaseCharacteristic):
    """Server-side GATT characteristic.

    Flags are assembled from the boolean capability arguments. Client
    writes are surfaced through the BaseCharacteristic machinery
    (self._write_event / self._write_data); values are pushed to the
    client with notify() / indicate().
    """

    def __init__(
        self,
        service,
        uuid,
        read=False,
        write=False,
        write_no_response=False,
        notify=False,
        indicate=False,
        initial=None,
        capture=False,
    ):
        # Attach to the parent service so register_services() can build
        # the registration tuple later.
        service.characteristics.append(self)
        self.descriptors = []
        flags = 0
        if read:
            flags |= _FLAG_READ
        if write or write_no_response:
            flags |= (_FLAG_WRITE if write else 0) | (
                _FLAG_WRITE_NO_RESPONSE if write_no_response else 0
            )
            if capture:
                # Capture means that we keep track of all writes, and capture
                # their values (and connection) in a queue. Otherwise we just
                # track the connection of the most recent write.
                flags |= _FLAG_WRITE_CAPTURE
                BaseCharacteristic._init_capture()
            # Set when this characteristic has a value waiting in self._write_data.
            self._write_event = asyncio.ThreadSafeFlag()
            # The connection of the most recent write, or a tuple of
            # (connection, data) if capture is enabled.
            self._write_data = None
        if notify:
            flags |= _FLAG_NOTIFY
        if indicate:
            flags |= _FLAG_INDICATE
            # TODO: This should probably be a dict of connection to (ev, status).
            # Right now we just support a single indication at a time.
            self._indicate_connection = None
            self._indicate_event = asyncio.ThreadSafeFlag()
            self._indicate_status = None
        self.uuid = uuid
        self.flags = flags
        self._value_handle = None
        self._initial = initial

    # Generate tuple for gatts_register_services.
    def _tuple(self):
        if self.descriptors:
            return (self.uuid, self.flags, tuple(d._tuple() for d in self.descriptors))
        else:
            # Workaround: v1.19 and below can't handle an empty descriptor tuple.
            return (self.uuid, self.flags)

    def notify(self, connection, data=None):
        """Send a notification (no acknowledgement) to a connected client."""
        if not (self.flags & _FLAG_NOTIFY):
            raise ValueError("Not supported")
        ble.gatts_notify(connection._conn_handle, self._value_handle, data)

    async def indicate(self, connection, timeout_ms=1000):
        """Send an indication and wait (up to timeout_ms) for the client ack.

        Raises ValueError when indications are unsupported, already in
        progress, or the connection is down; raises GattError on a
        non-zero completion status from the stack.
        """
        if not (self.flags & _FLAG_INDICATE):
            raise ValueError("Not supported")
        if self._indicate_connection is not None:
            raise ValueError("In progress")
        if not connection.is_connected():
            raise ValueError("Not connected")
        self._indicate_connection = connection
        self._indicate_status = None
        try:
            with connection.timeout(timeout_ms):
                ble.gatts_indicate(connection._conn_handle, self._value_handle)
                # _indicate_done() sets this flag from the IRQ path.
                await self._indicate_event.wait()
                if self._indicate_status != 0:
                    raise GattError(self._indicate_status)
        finally:
            # Always release the single-indication slot (see TODO above).
            self._indicate_connection = None
def _indicate_done(conn_handle, value_handle, status):
    # IRQ handler: complete a pending indicate() with the stack's status.
    characteristic = _registered_characteristics.get(value_handle, None)
    if characteristic is None:
        return
    connection = DeviceConnection._connected.get(conn_handle, None)
    if connection is None:
        return
    if not characteristic._indicate_connection:
        # Timeout: indicate() already gave up and cleared the slot.
        return
    # See TODO in __init__ to support multiple concurrent indications.
    assert connection == characteristic._indicate_connection
    characteristic._indicate_status = status
    characteristic._indicate_event.set()
class BufferedCharacteristic(Characteristic):
    """Read-only characteristic whose GATTS buffer can exceed the default
    size and optionally accumulate (append) successive writes."""

    def __init__(self, service, uuid, max_len=20, append=False):
        super().__init__(service, uuid, read=True)
        self._append = append
        self._max_len = max_len

    def _register(self, value_handle):
        super()._register(value_handle)
        # Resize the underlying stack buffer once the handle is known.
        ble.gatts_set_buffer(value_handle, self._max_len, self._append)
class Descriptor(BaseCharacteristic):
    """Server-side GATT descriptor attached to a characteristic."""

    def __init__(self, characteristic, uuid, read=False, write=False, initial=None):
        characteristic.descriptors.append(self)
        # Workaround for https://github.com/micropython/micropython/issues/6864
        flags = _FLAG_DESC_READ if read else 0
        if write:
            flags |= _FLAG_DESC_WRITE
            # Writable descriptors share the write-event machinery of
            # characteristics (set by the IRQ handler on client write).
            self._write_event = asyncio.ThreadSafeFlag()
            self._write_data = None
        self.uuid = uuid
        self.flags = flags
        self._value_handle = None
        self._initial = initial

    def _tuple(self):
        """Build the (uuid, flags) entry used by gatts_register_services."""
        return (self.uuid, self.flags)
# Turn the Service/Characteristic/Descriptor classes into a registration tuple
# and then extract their value handles.
def register_services(*services):
    """Register the given services with the BLE stack and assign the
    returned value handles to each characteristic and descriptor, in the
    same order they were declared."""
    ensure_active()
    _registered_characteristics.clear()
    handles = ble.gatts_register_services(tuple(s._tuple() for s in services))
    for service, value_handles in zip(services, handles):
        # The stack hands back one flat tuple of handles per service:
        # characteristic, then its descriptors, then the next characteristic.
        handle_iter = iter(value_handles)
        for characteristic in service.characteristics:
            characteristic._register(next(handle_iter))
            for descriptor in characteristic.descriptors:
                descriptor._register(next(handle_iter))

30
robot/buzzer.py Normal file
View File

@ -0,0 +1,30 @@
import sys
import utime
class Buzzer:
    """Square-wave tone generator for the AlphaBot's on-board buzzer.

    The buzzer has no PWM channel here; tones are produced by bit-banging
    the PCF8574 buzzer pin via robot.controlBuzzer().
    """

    def __init__(self):
        pass

    # ------------------------------------
    # Vittascience
    # Example for playing sound
    # ------------------------------------
    def _pitch(self, robot, noteFrequency, noteDuration, silence_ms=10):
        """Play one note of noteFrequency Hz for noteDuration ms, then rest.

        A frequency of 0 encodes a silence of noteDuration ms.
        """
        # BUGFIX: was "noteFrequency is not 0" — identity comparison with an
        # int literal is implementation-dependent; use a value comparison.
        if noteFrequency != 0:
            microsecondsPerWave = 1e6 / noteFrequency
            millisecondsPerCycle = 1000 / (microsecondsPerWave * 2)
            # BUGFIX: range() requires an int; the float product previously
            # raised TypeError for every audible note.
            loopTime = int(noteDuration * millisecondsPerCycle)
            for _ in range(loopTime):
                # Buzzer high: 0
                robot.controlBuzzer(0)
                utime.sleep_us(int(microsecondsPerWave))
                # buzzer low: 1
                robot.controlBuzzer(1)
                utime.sleep_us(int(microsecondsPerWave))
        else:
            utime.sleep_ms(int(noteDuration))
        # Short gap so consecutive notes are distinguishable.
        utime.sleep_ms(silence_ms)

    def pitch(self, robot, noteFrequency, noteDuration, silence_ms=10):
        """Public entry point: play one note on the given robot's buzzer."""
        #print("[DEBUG][pitch]: Frequency {:5} Hz, Duration {:4} ms, silence {:4} ms".format(noteFrequency, noteDuration, silence_ms))
        self._pitch(robot, noteFrequency, noteDuration, silence_ms)

497
robot/main.py Normal file
View File

@ -0,0 +1,497 @@
import machine
import utime, sys
import json
from stm32_alphabot_v2 import AlphaBot_v2
import gc
from stm32_ssd1306 import SSD1306, SSD1306_I2C
from stm32_vl53l0x import VL53L0X
from stm32_nec import NEC_8, NEC_16
import neopixel
import _thread
import os
#import bluetooth
#from stm32_ble_uart import BLEUART
import buzzer
# variable:
# Global hardware handles; populated by the INIT section below (None on failure).
alphabot = oled = vl53l0x = None
# Name of the most recent IR remote button, set by remoteNEC_callback.
ir_current_remote_code = None
# Base frequencies (Hz) of octave-1 notes for music_play(); 'S-' is a rest.
# NOTE(review): 'E#' and 'F-' share 43.65 Hz — looks like a typo; confirm.
dict_base=dict([('C-',32.70),('C#',34.65),('D-',36.71),('D#',38.89),('E-',41.20),('E#',43.65),('F-',43.65),('F#',46.35),('G-',49.00),('G#',51.91),('A-',55.00),('A#',58.27),('B-',61.74),('S-',0)])
# -------------------------------
# neopixel
# -------------------------------
class FoursNeoPixel():
    """Thin wrapper around neopixel.NeoPixel for the AlphaBot's RGB LEDs."""

    def __init__(self, pin_number, max_leds=4):
        # max_leds generalizes the previously hard-coded strip length; the
        # default of 4 keeps full backward compatibility. The constructor
        # also now uses _max_leds consistently instead of a literal 4.
        self._pin = pin_number
        self._max_leds = max_leds
        self._leds = neopixel.NeoPixel(self._pin, self._max_leds)

    def set_led(self, addr, red, green, blue):
        """Stage one LED's colour (call show() to latch it)."""
        if addr >= 0 and addr < self._max_leds:
            # coded on BGR
            self._leds[addr] = (blue, green, red)

    def set_led2(self, addr, rgb):
        """Stage one LED from a pre-built tuple (must already be BGR order)."""
        if addr >= 0 and addr < self._max_leds:
            # coded on BGR
            self._leds[addr] = rgb

    def show(self):
        """Push the staged colours out to the strip."""
        self._leds.write()

    def clear(self):
        """Turn every LED off immediately."""
        for i in range(0, self._max_leds):
            self.set_led(i, 0, 0, 0)
        self.show()
def neo_french_flag_threaded(leds):
    """Endlessly cycle all four LEDs through red, white, blue (1 s each),
    then 2 s off — the French flag. Runs forever; meant for a worker thread.

    Refactored: the three copy-pasted per-colour stanzas collapse into one
    data-driven loop with identical behaviour.
    """
    colors = ((250, 0, 0), (250, 250, 250), (0, 0, 250))
    while True:
        for red, green, blue in colors:
            for addr in range(4):
                leds.set_led(addr, red, green, blue)
            leds.show()
            utime.sleep(1)
        leds.clear()
        utime.sleep(2)
def neo_french_flag(fours_rgb_leds):
    """Start the French-flag LED animation on a background thread."""
    # BUGFIX: start_new_thread's args parameter must be a tuple; a list
    # ([fours_rgb_leds]) was passed before, which CPython rejects with
    # TypeError and MicroPython only tolerates by accident.
    _thread.start_new_thread(neo_french_flag_threaded, (fours_rgb_leds,))
# ----------------------------
# Remote Control
# ----------------------------
# Remote control Correlation table
# |-----------------| |----------------------|
# | | | | | | | |
# | CH- | CH | CH+ | | Vol- | Play | Vol+ |
# | | | | | | Pause | |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | |<< | >>| | >|| | | Setup | Up | Stop |
# | | | | | | | Mode |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | - | + | EQ | | Left | Enter | Right|
# | | | | | | Save | |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | 0 |100+ | 200+| <==> | 0 | Down | Back |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | 1 | 2 | 3 | | 1 | 2 | 3 |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | 4 | 5 | 6 | | 4 | 5 | 6 |
# | | | | | | | |
# |-----------------| |----------------------|
# | | | | | | | |
# | 7 | 8 | 9 | | 7 | 8 | 9 |
# | | | | | | | |
# |-----------------| |----------------------|
#
def remoteNEC_basicBlack_getButton(hexCode):
    """Translate a NEC scan code from the basic black remote into a button
    name (see the correlation table above). Unknown codes return the
    sentinel string "NEC remote code error"."""
    buttons = {
        0x0c: "1", 0x18: "2", 0x5e: "3",
        0x08: "4", 0x1c: "5", 0x5a: "6",
        0x42: "7", 0x52: "8", 0x4a: "9",
        0x16: "0",
        0x40: "up", 0x19: "down",
        0x07: "left", 0x09: "right",
        0x15: "enter_save", 0x0d: "back",
        0x45: "volMinus", 0x47: "volPlus",
        0x46: "play_pause", 0x44: "setup", 0x43: "stop_mode",
    }
    return buttons.get(hexCode, "NEC remote code error")
def remoteNEC_callback(data, addr, ctrl):
    """IR receiver callback: decode a NEC frame and publish the button name
    through the ir_current_remote_code global (read by the main loop).

    A negative data value is the NEC repeat code and is ignored.
    """
    global ir_current_remote_code
    # BUGFIX: removed a leftover debug print ("coucou") that spammed the
    # console on every IR frame.
    if data < 0:  # NEC protocol sends repeat codes.
        print('Repeat code.')
    else:
        #print('Data {:02x} Addr {:04x} Ctrl {:02x}'.format(data, addr, ctrl))
        ir_current_remote_code = remoteNEC_basicBlack_getButton(data)
        print('Data {:02x} Addr {:04x} Ctrl {:02x} {}'.format(data, addr, ctrl, ir_current_remote_code))
# ----------------------------
# play music
# ----------------------------
def music_play():
    """Play an ascending C-major scale (octave 3) on the buzzer.

    Each score entry is [note, beats]; one beat is 125 ms. The note string
    encodes pitch class (first two chars, key into dict_base) and octave
    (third char).
    """
    score = [['C-3', 4 ], ['D-3', 4 ], ['E-3', 4 ], ['F-3', 4 ], ['G-3', 4 ], ['A-3', 4 ], ['B-3', 4 ]]
    buz = buzzer.Buzzer()
    for note, beats in score:
        # Octave-1 base frequency doubled per octave above 1.
        frequency = dict_base[note[:2]] * 2 ** (int(note[2]) - 1)
        duration_ms = int(beats) * 125
        buz.pitch(alphabot, frequency, duration_ms, 50)
# ----------------------------
# Follow line
# ----------------------------
# Sensor threshold used by the line followers to decide whether a channel
# sees the line (readings below the limit count as "on the line").
_BLACKLIMIT = 650
# Set to 0 to silence the OLED/serial line-tracking debug output.
DISPLAY_LINE_TRACKING_INFO = 1
def _motor_left_right(ml, mr):
    """Apply left/right motor speeds and echo them on the OLED when the
    line-tracking debug display is enabled."""
    alphabot.setMotors(left=ml, right=mr)
    # Delegates the (identical) debug display to the shared helper.
    show_motor_left_right(ml, mr)
def show_motor_left_right(ml, mr):
    """Draw the current motor speeds on the OLED (debug display only)."""
    if not DISPLAY_LINE_TRACKING_INFO:
        return
    oled.text('L {}'.format(ml), 64, 0)
    oled.text('R {}'.format(mr), 64, 16)
def isSensorAboveLine(robot, sensorName, blackLimit = 300):
    """Return True when the named IR line sensor ('IR1'..'IR5') reads below
    blackLimit, i.e. the sensor is over the line.

    Raises ValueError for names not containing 'IR'; unknown IR names
    simply return False (matching the original fall-through behaviour).
    """
    readings = robot.TRSensors_readLine(sensor=0)  # all sensors values
    if 'IR' not in sensorName:
        raise ValueError("name '" + sensorName + "' is not a sensor option")
    index = {'IR1': 0, 'IR2': 1, 'IR3': 2, 'IR4': 3, 'IR5': 4}.get(sensorName)
    if index is None:
        return False
    return readings[index] < blackLimit
# Default drive speed for line_follower() — presumably a PWM duty percent
# (setMotors accepts -100..100); TODO confirm units.
SPEED_MOTOR=13
def line_follower(limit=_BLACKLIMIT):
    """One iteration of the three-sensor (IR2/IR3/IR4) line follower.

    Stops when an obstacle is closer than 5 cm; otherwise dispatches on
    the on/off-line pattern of the three central sensors.
    """
    oled.fill(0)
    if alphabot.readUltrasonicDistance() <= 5:
        alphabot.stop()
        #music_play()
        return
    # IMPROVEMENT: sample each sensor once per pass. Every
    # isSensorAboveLine() call re-reads the whole sensor bank, so the
    # original per-branch calls were slow and could observe inconsistent
    # values within a single decision.
    ir2 = isSensorAboveLine(alphabot, 'IR2', blackLimit=limit)
    ir3 = isSensorAboveLine(alphabot, 'IR3', blackLimit=limit)
    ir4 = isSensorAboveLine(alphabot, 'IR4', blackLimit=limit)
    # NOTE(review): setMotorLeft/setMotorRight are not defined in the
    # AlphaBot_v2 class shown in this repo — confirm they exist.
    if not ir2 and not ir3 and not ir4:
        # Line lost: back up to find it again.
        oled.fill(0)
        oled.show()
        oled.text('En arriere', 0, 0)
        oled.show()
        alphabot.moveBackward(SPEED_MOTOR)
    if not ir2 and not ir3 and ir4:
        # Line far right of robot: pivot (labelled 'A Gauche' on screen).
        oled.fill(0)
        oled.show()
        oled.text('A Gauche', 0, 0)
        oled.show()
        alphabot.setMotorLeft(SPEED_MOTOR)
        alphabot.setMotorRight(0)
    if not ir2 and ir3 and not ir4:
        # Centred on the line: straight ahead.
        oled.fill(0)
        oled.show()
        oled.text('Tout Droit', 0, 0)
        oled.show()
        alphabot.setMotorLeft(SPEED_MOTOR)
        alphabot.setMotorRight(SPEED_MOTOR)
    if not ir2 and ir3 and ir4:
        # Slight drift: ease right.
        alphabot.setMotorLeft(SPEED_MOTOR)
        alphabot.setMotorRight(5)
    if ir2 and not ir3 and not ir4:
        oled.fill(0)
        oled.show()
        oled.text('A droite', 0, 0)
        oled.show()
        alphabot.setMotorLeft(0)
        alphabot.setMotorRight(SPEED_MOTOR)
    if ir2 and not ir3 and ir4:
        # Contradictory reading (both edges, no centre): back up.
        alphabot.moveBackward(SPEED_MOTOR)
    if ir2 and ir3 and not ir4:
        alphabot.setMotorLeft(5)
        # BUGFIX: this was a second setMotorLeft(SPEED_MOTOR) (copy-paste);
        # the mirror of the ir3-and-ir4 branch above needs the RIGHT motor
        # at full speed to ease left.
        alphabot.setMotorRight(SPEED_MOTOR)
    if ir2 and ir3 and ir4:
        # Everything dark (crossing or end marker): back up.
        alphabot.moveBackward(SPEED_MOTOR)
def line_follower_simple(limit=_BLACKLIMIT):
    # One iteration of a simple bang-bang line follower using the three
    # central calibrated sensor channels; call repeatedly from the main loop.
    oled.fill(0)
    if alphabot.readUltrasonicDistance() <= 5:
        # Obstacle closer than 5 cm: halt and tell the user.
        alphabot.stop()
        oled.text('Obstacle', 4*8, 0)
        oled.text('detected', 4*8, 16)
        oled.text('STOPPED!', 4*8, 32)
        #music_play()
    else:
        # get the light detection measurement on one time
        line_detection = alphabot.TRSensors_readLine()
        if DISPLAY_LINE_TRACKING_INFO:
            print("readline:", line_detection)
        if DISPLAY_LINE_TRACKING_INFO: oled.text('{:.02f}'.format(line_detection[1]), 0, 0)
        if DISPLAY_LINE_TRACKING_INFO: oled.text('{:.02f}'.format(line_detection[2]), 0, 16)
        if DISPLAY_LINE_TRACKING_INFO: oled.text('{:.02f}'.format(line_detection[3]), 0, 32)
        if line_detection[2] < limit:
            # we are on the line
            alphabot.setMotors(right=22, left=22)
            show_motor_left_right(22, 22)
        elif line_detection[1] < limit:
            # Line drifted to the left sensor: pivot left for 50 ms.
            #alphabot.turnLeft(65, 25)
            alphabot.turnLeft(65, duration_ms=50)
            show_motor_left_right(65, 0)
        elif line_detection[3] < limit:
            # Line drifted to the right sensor: pivot right for 50 ms.
            #alphabot.turnRight(65, 25)
            alphabot.turnRight(65, duration_ms=50)
            show_motor_left_right(0, 65)
        elif line_detection[2] > limit:
            # Line lost under all three sensors: back up briefly.
            alphabot.moveBackward(35, duration_ms=50)
            show_motor_left_right(-35, -35)
        # Earlier tuning variant kept for reference:
        # if (line_detection[1] < limit):
        #     alphabot.turnLeft(65, 25)
        #     show_motor_left_right(65, 0)
        # elif line_detection[3] < limit:
        #     alphabot.turnRight(65, 25)
        #     show_motor_left_right(0, 65)
        # elif line_detection[2] > limit:
        #     alphabot.setMotors(right=22, left=22)
        #     show_motor_left_right(22, 22)
        # elif line_detection[2] < limit:
        #     alphabot.moveBackward(35, duration_ms=50)
        #     show_motor_left_right(-35, -35)
    # Time-of-flight distance readout, when the VL53L0X initialised.
    if vl53l0x is not None:
        oled.text('tof {:4.0f}mm'.format(vl53l0x.getRangeMillimeters()), 0, 48)
    oled.show()
# ------------------------------------------------
# PID state for line_follower2(), shared across calls via globals.
last_proportional = 0  # previous error term (for the derivative)
integral = 0  # accumulated error term
maximum = 100  # motor power cap, also the base speed
derivative = 0  # rate of change of the error
# Algo from: https://www.waveshare.com/w/upload/7/74/AlphaBot2.tar.gz
def line_follower2():
    # One iteration of the PID line follower (ported from Waveshare's
    # AlphaBot2 demo). Position 2000 is dead centre of the 5-sensor array.
    oled.fill(0)
    if alphabot.readUltrasonicDistance() <= 5:
        print("Obstacle!!!!!")
        alphabot.stop()
        #music_play()
    else:
        global last_proportional
        global integral
        global derivative
        # get the light detection measurement on one time
        position,sensors_line = alphabot.TRSensors_position_readLine()
        if (sensors_line[0] > 900 and sensors_line[1] > 900 and sensors_line[2] > 900 and sensors_line[3] > 900 and sensors_line[4] > 900):
            # All five channels read bright: no line anywhere — stop and bail.
            _motor_left_right(0, 0)
            return
        if DISPLAY_LINE_TRACKING_INFO:
            print("readline:", position, sensors_line)
            oled.text('{:.02f}'.format(sensors_line[1]), 0, 0)
            oled.text('{:.02f}'.format(sensors_line[2]), 0, 16)
            oled.text('{:.02f}'.format(sensors_line[3]), 0, 32)
        # The "proportional" term should be 0 when we are on the line.
        proportional = position - 2000
        # Compute the derivative (change) and integral (sum) of the position.
        derivative = proportional - last_proportional
        integral += proportional
        # Remember the last position.
        last_proportional = proportional
        '''
        // Compute the difference between the two motor power settings,
        // m1 - m2. If this is a positive number the robot will turn
        // to the right. If it is a negative number, the robot will
        // turn to the left, and the magnitude of the number determines
        // the sharpness of the turn. You can adjust the constants by which
        // the proportional, integral, and derivative terms are multiplied to
        // improve performance.
        '''
        power_difference = proportional/30 + integral/10000 + derivative*2
        # Clamp the correction to the motor power range.
        if (power_difference > maximum):
            power_difference = maximum
        if (power_difference < - maximum):
            power_difference = - maximum
        print("Line follower: ", position, power_difference)
        # Steer by slowing one side; the other side runs at full power.
        if (power_difference < 0):
            _motor_left_right(maximum + power_difference, maximum)
        else:
            _motor_left_right(maximum, maximum - power_difference)
    # Let the motors run briefly, then stop and refresh the display.
    utime.sleep_ms(100)
    alphabot.stop()
    oled.show()
# ----------------------------
# Motor move
# ----------------------------
def move_right(t=30):
    """Pivot right at speed 20 for t milliseconds."""
    alphabot.turnRight(20, duration_ms=t)
def move_left(t=30):
    """Pivot left at speed 20 for t milliseconds."""
    alphabot.turnLeft(20, duration_ms=t)
def move_forward(t=200):
    """Drive forward at speed 20 for t ms, unless an obstacle is within 10 cm."""
    if alphabot.readUltrasonicDistance() <= 10:
        # Too close to something ahead: refuse to move.
        alphabot.stop()
        return
    alphabot.moveForward(20)
    utime.sleep_ms(t)
    alphabot.stop()
def move_backward(t=200):
    """Drive backward at speed 20 for t milliseconds, then stop."""
    # moveBackward itself sleeps for duration_ms and stops the motors,
    # which is exactly the drive/sleep/stop sequence written out before.
    alphabot.moveBackward(20, duration_ms=t)
def move_circumvention():
    """Dodge an obstacle: step left, drive past it in three forward legs,
    and turn back onto the original heading."""
    maneuver = (
        (move_left, 450),
        (move_forward, 400),
        (move_right, 450),
        (move_forward, 400),
        (move_right, 450),
        (move_forward, 400),
        (move_left, 450),
    )
    for action, duration in maneuver:
        action(duration)
# ----------------------------
# BLE UART
# ----------------------------
# m: move
# b: move back
# l: left
# r: right
# s: stop
# M: music
# q: quit
# m: move
# b: move back
# l: left
# r: right
# s: stop
# M: music
# q: quit
def bluetooth_serial_processing(ble_uart):
    """Poll the BLE UART every 200 ms and drive the robot from
    single-letter commands (see the legend above). 'q' returns.

    BUGFIX: command matching used "'r'.find(data) + 1 == 1", which is true
    for the empty string as well ("".find returns 0), so a blank or
    whitespace-only read silently triggered move_right(). Plain equality
    comparisons fix that.
    """
    actions = {
        'r': move_right,
        'l': move_left,
        'm': move_forward,
        'b': move_backward,
        's': alphabot.stop,
        'M': music_play,
    }
    while True:
        utime.sleep_ms(200)
        if not ble_uart.any():
            continue
        bluetoothData = ble_uart.read().decode().strip()
        print(str(bluetoothData))
        if bluetoothData == 'q':
            break
        action = actions.get(bluetoothData)
        if action is not None:
            action()
# ----------------------------
# INIT
# ----------------------------
# init Alphabot
# Each peripheral is initialised best-effort: on failure the handle stays
# None and dependent peripherals are skipped.
try:
    alphabot = AlphaBot_v2()
except Exception as e:
    print('alphabot exception occurred: {}'.format(e))
    alphabot = None
try:
    if alphabot is not None:
        oled = SSD1306_I2C(128, 64, alphabot.i2c)
except Exception as e:
    print('OLED exception occurred: {}'.format(e))
    oled = None
try:
    if alphabot is not None:
        vl53l0x = VL53L0X(i2c=alphabot.i2c)
except Exception as e:
    print('vl53l0x exception occurred: {}'.format(e))
    vl53l0x = None
try:
    # NEC_8 decodes the stock remote; switch to classes[1] for NEC_16.
    classes = (NEC_8, NEC_16)
    if alphabot is not None:
        ir_remote = classes[0](alphabot.pin_IR, remoteNEC_callback)
    else:
        ir_remote = None
except Exception as e:
    print('ir_remote exception occurred: {}'.format(e))
    ir_remote = None
# NOTE(review): unlike the guarded initialisations above, this line
# dereferences alphabot unconditionally and raises if alphabot is None.
neopixel_leds = FoursNeoPixel(alphabot.pin_RGB)
#ble = bluetooth.BLE()
#uart = BLEUART(ble)
### Print system
print()
print(f"Platform: {sys.platform}")
print(f"MicroPython ver: {os.uname().release} ({os.uname().version})")
print(f"Machine ID: {os.uname().machine}")
print(f"CPU Frequency: {machine.freq()} Hz")
print()
# NOTE(review): oled may still be None here if its init failed above.
oled.text("Martian", 4*8, 0)
oled.show()
print("Ready to drive on Mars")
neo_french_flag(neopixel_leds)
print("We drive on Mars")
while True:
    # Main dispatch loop: poll the last IR remote button (set by
    # remoteNEC_callback) every 20 ms and act on it.
    # IR
    # enter_save aka + : robot stop
    # up aka >> : robot forward
    # down aka 100+ : robot backward
    # left aka - : robot go to left
    # right aka EQ : robot go to right
    # play_pause aka CH : follow line
    # setup aka << : bluetooth uart
    # 9 aka 9 : play music
    utime.sleep_ms(20)
    gc.collect()
    if ir_current_remote_code == "enter_save":
        alphabot.stop()
    elif ir_current_remote_code == "up":
        move_forward()
    elif ir_current_remote_code == "down":
        move_backward()
    elif ir_current_remote_code == "left":
        move_left()
    elif ir_current_remote_code == "right":
        move_right()
    elif ir_current_remote_code == "play_pause":
        line_follower_simple()
    elif ir_current_remote_code == "setup":
        # NOTE(review): bluetooth_serial_processing requires a ble_uart
        # argument, and the BLE uart above is commented out — pressing
        # "setup" raises TypeError here. Confirm intended wiring.
        bluetooth_serial_processing()
    elif ir_current_remote_code == "9":
        music_play()
    else:
        # NOTE(review): ir_current_remote_code is never cleared, so this
        # default branch only runs until the first button press; after a
        # button is seen, its action repeats every iteration.
        line_follower_simple()

45
robot/neopixel.py Normal file
View File

@ -0,0 +1,45 @@
# NeoPixel driver for MicroPython
# MIT license; Copyright (c) 2016 Damien P. George, 2021 Jim Mussared
from machine import bitstream
class NeoPixel:
    """WS2812 ("NeoPixel") strip driver.

    Pixels are stored in a flat byte buffer in hardware channel order and
    pushed out with machine.bitstream() on write().
    """

    # Maps logical (R, G, B[, W]) channel index to its byte position
    # within a pixel: the wire format is G R B W.
    ORDER = (1, 0, 2, 3)

    def __init__(self, pin, n, bpp=3, timing=1):
        self.pin = pin
        self.n = n
        self.bpp = bpp
        self.buf = bytearray(n * bpp)
        self.pin.init(pin.OUT)
        # Timing arg can either be 1 for 800kHz or 0 for 400kHz,
        # or a user-specified timing ns tuple (high_0, low_0, high_1, low_1).
        if isinstance(timing, int):
            self.timing = (400, 850, 800, 450) if timing else (800, 1700, 1600, 900)
        else:
            self.timing = timing

    def __len__(self):
        return self.n

    def __setitem__(self, index, color):
        # (Renamed the loop variable: the original reused the pixel index.)
        base = index * self.bpp
        for channel in range(self.bpp):
            self.buf[base + self.ORDER[channel]] = color[channel]

    def __getitem__(self, index):
        base = index * self.bpp
        return tuple(self.buf[base + self.ORDER[channel]] for channel in range(self.bpp))

    def fill(self, color):
        # Write each channel value across the whole buffer in one stride.
        buf = self.buf
        bpp = self.bpp
        for channel in range(bpp):
            value = color[channel]
            for pos in range(self.ORDER[channel], len(buf), bpp):
                buf[pos] = value

    def write(self):
        # BITSTREAM_TYPE_HIGH_LOW = 0
        bitstream(self.pin, 0, self.timing, self.buf)

199
robot/stm32_TRsensors.py Normal file
View File

@ -0,0 +1,199 @@
"""
QTRSensors.h - Originally Arduino Library for using Pololu QTR reflectance sensors and reflectance sensor arrays
MIT Licence
Copyright (c) 2008-2012 waveshare Corporation. For more information, see
https://www.waveshare.com/wiki/AlphaBot2-Ar
Copyright (c) 2021 leomlr (Léo Meillier). For more information, see
https://github.com/vittascience/stm32-libraries
https://vittascience.com/stm32/
You may freely modify and share this code, as long as you keep this notice intact.
Disclaimer: To the extent permitted by law, waveshare provides this work
without any warranty. It might be defective, in which case you agree
to be responsible for all resulting costs and damages.
Author: Léo Meillier (leomlr)
Date: 07/2021
Note: library adapted in micropython for using 5 QTR sensors on Alphabot v2 robot controlled by STM32 board.
"""
import pyb
from micropython import const
import utime
PIN_CS = 'D10'
PIN_DOUT = 'D11'
PIN_ADDR = 'D12'
PIN_CLK = 'D13'
NUMSENSORS = const(5)
QTR_EMITTERS_OFF = const(0x00)
QTR_EMITTERS_ON = const(0x01)
QTR_EMITTERS_ON_AND_OFF = const(0x02)
QTR_NO_EMITTER_PIN = const(0xff)
QTR_MAX_SENSORS = const(16)
class TRSensors(object):
""" Base class data member initialization (called by derived class init()). """
def __init__(self, cs=PIN_CS, dout=PIN_DOUT, addr=PIN_ADDR, clk=PIN_CLK):
self._cs = pyb.Pin(cs, pyb.Pin.OUT)
self._dout = pyb.Pin(dout, pyb.Pin.IN)
self._addr = pyb.Pin(addr, pyb.Pin.OUT)
self._clk = pyb.Pin(clk, pyb.Pin.OUT)
self._numSensors = NUMSENSORS
self.calibratedMin = [0] * self._numSensors
self.calibratedMax = [1023] * self._numSensors
self.last_value = 0
""" Reads the sensor values using TLC1543 ADC chip into an array.
The values returned are a measure of the reflectance in abstract units,
with higher values corresponding to lower reflectance (e.g. a black
surface or a void). """
def analogRead(self):
value = [0]* (self._numSensors+1)
#Read Channel0~channel4 AD value
for j in range(0, self._numSensors+1):
self._cs.off()
for i in range(0,4):
#sent 4-bit Address
if (j >> (3 - i)) & 0x01:
self._addr.on()
else:
self._addr.off()
#read MSB 4-bit data
value[j] <<= 1
if self._dout.value():
value[j] |= 0x01
self._clk.on()
self._clk.off()
for i in range(0, self._numSensors+1):
#read LSB 8-bit data
value[j] <<= 1
if self._dout.value():
value[j] |= 0x01
self._clk.on()
self._clk.off()
#no mean ,just delay
#for i in range(0,6):
# self._clk.on()
# self._clk.off()
utime.sleep_us(100)
self._cs.on()
return value[1:]
""" Reads the sensors 10 times and uses the results for
calibration. The sensor values are not returned instead, the
maximum and minimum values found over time are stored internally
and used for the readCalibrated() method. """
def calibrate(self):
sensor_values = []
max_sensor_values = [0]*self._numSensors
min_sensor_values = [0]*self._numSensors
for j in range(0, 10):
sensor_values = self.analogRead()
for i in range(0, self._numSensors):
# set the max we found THIS time
if j == 0 or max_sensor_values[i] < sensor_values[i]:
max_sensor_values[i] = sensor_values[i]
# set the min we found THIS time
if j == 0 or min_sensor_values[i] > sensor_values[i]:
min_sensor_values[i] = sensor_values[i]
# record the min and max calibration values
for i in range(0, self._numSensors):
if min_sensor_values[i] > self.calibratedMax[i]:
self.calibratedMax[i] = min_sensor_values[i]
if max_sensor_values[i] < self.calibratedMin[i]:
self.calibratedMin[i] = max_sensor_values[i]
""" Returns values calibrated to a value between 0 and 1000, where
0 corresponds to the minimum value read by calibrate() and 1000
corresponds to the maximum value. Calibration values are
stored separately for each sensor, so that differences in the
sensors are accounted for automatically. """
def readCalibrated(self):
# read the needed values
sensor_values = self.analogRead()
for i in range(self._numSensors):
denominator = self.calibratedMax[i] - self.calibratedMin[i]
value = 0
if denominator is not 0:
value = (sensor_values[i] - self.calibratedMin[i]) * 1000 / denominator
if value < 0:
value = 0
elif value > 1000:
value = 1000
sensor_values[i] = value
return sensor_values
""" Operates the same as read calibrated, but also returns an
estimated position of the robot with respect to a line. The
estimate is made using a weighted average of the sensor indices
multiplied by 1000, so that a return value of 0 indicates that
the line is directly below sensor 0, a return value of 1000
indicates that the line is directly below sensor 1, 2000
indicates that it's below sensor 2000, etc. Intermediate
values indicate that the line is between two sensors. The
formula is:
0*value0 + 1000*value1 + 2000*value2 + ...
--------------------------------------------
value0 + value1 + value2 + ...
By default, this function assumes a dark line (high values)
surrounded by white (low values). If your line is light on
black, set the optional second argument white_line to true. In
this case, each sensor value will be replaced by (1000-value)
before the averaging. """
def readLine(self, white_line = 0):
sensor_values = self.readCalibrated()
avg = 0
sum = 0
on_line = 0
for i in range(0, self._numSensors):
value = sensor_values[i]
if white_line:
value = 1000-value
# keep track of whether we see the line at all
if value > 200:
on_line = 1
# only average in values that are above a noise threshold
if value > 50:
avg += value * (i * 1000) # this is for the weighted total,
sum += value # this is for the denominator
if on_line != 1:
# If it last read to the left of center, return 0.
if self.last_value < (self._numSensors - 1)*1000/2:
#print("left")
self.last_value = 0
# If it last read to the right of center, return the max.
else:
#print("right")
self.last_value = (self._numSensors - 1)*1000
else:
self.last_value = avg/sum
return self.last_value,sensor_values;

265
robot/stm32_alphabot_v2.py Normal file
View File

@ -0,0 +1,265 @@
"""
MicroPython for AlphaBot2-Ar from Waveshare.
https://github.com/vittascience/stm32-libraries
https://www.waveshare.com/wiki/AlphaBot2-Ar
MIT License
Copyright (c) 2021 leomlr (Léo Meillier)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.0.0-auto.0"
__repo__ = "show"
from stm32_TRsensors import TRSensors
from stm32_pcf8574 import PCF8574
import machine
import pyb
import utime
ALPHABOT_V2_PIN_AIN2 = 'A0'
ALPHABOT_V2_PIN_AIN1 = 'A1'
ALPHABOT_V2_PIN_BIN1 = 'A2'
ALPHABOT_V2_PIN_BIN2 = 'A3'
ALPHABOT_V2_PIN_ECHO = 'D2'
ALPHABOT_V2_PIN_TRIG = 'D3'
ALPHABOT_V2_PIN_IR = 'D4'
ALPHABOT_V2_PIN_PWMB = 'D5'
ALPHABOT_V2_PIN_PWMA = 'D6'
ALPHABOT_V2_PIN_RGB = 'D7'
ALPHABOT_V2_PIN_OLED_D_C = 'D8'
ALPHABOT_V2_PIN_OLED_RESET = 'D9'
ALPHABOT_V2_PIN_TRS_CS = 'D10'
ALPHABOT_V2_PIN_TRS_DOUT = 'D11'
ALPHABOT_V2_PIN_TRS_ADDR = 'D12'
ALPHABOT_V2_PIN_TRS_CLK = 'D13'
ALPHABOT_V2_PCF8574_I2C_ADDR = 0x20
ALPHABOT_V2_OLED_I2C_ADDR_DC_OFF = 0x3c
ALPHABOT_V2_OLED_I2C_ADDR_DC_ON = 0x3d
class AlphaBot_v2(object):
def __init__(self):
self.ain1 = pyb.Pin(ALPHABOT_V2_PIN_AIN1, pyb.Pin.OUT)
self.ain2 = pyb.Pin(ALPHABOT_V2_PIN_AIN2, pyb.Pin.OUT)
self.bin1 = pyb.Pin(ALPHABOT_V2_PIN_BIN1, pyb.Pin.OUT)
self.bin2 = pyb.Pin(ALPHABOT_V2_PIN_BIN2, pyb.Pin.OUT)
self.pin_PWMA = pyb.Pin(ALPHABOT_V2_PIN_PWMA, pyb.Pin.OUT_PP)
tim_A = pyb.Timer(1, freq=500)
self.PWMA = tim_A.channel(1, pyb.Timer.PWM, pin=self.pin_PWMA)
self.pin_PWMB = pyb.Pin(ALPHABOT_V2_PIN_PWMB, pyb.Pin.OUT_PP)
tim_B = pyb.Timer(2, freq=500)
self.PWMB = tim_B.channel(1, pyb.Timer.PWM, pin=self.pin_PWMB)
self.stop()
print('[Alpha_INFO]: Motors initialised')
self.trig = pyb.Pin(ALPHABOT_V2_PIN_TRIG, pyb.Pin.OUT)
self.echo = pyb.Pin(ALPHABOT_V2_PIN_ECHO, pyb.Pin.IN)
self.pin_RGB = pyb.Pin(ALPHABOT_V2_PIN_RGB, pyb.Pin.OUT)
self.tr_sensors = TRSensors(
cs = ALPHABOT_V2_PIN_TRS_CS,
dout = ALPHABOT_V2_PIN_TRS_DOUT,
addr = ALPHABOT_V2_PIN_TRS_ADDR,
clk = ALPHABOT_V2_PIN_TRS_CLK
)
print('[Alpha_INFO]: TR sensors initialised')
self.i2c = machine.I2C(1)
self.LEFT_OBSTACLE = 'L'
self.RIGHT_OBSTACLE = 'R'
self.BOTH_OBSTACLE = 'B'
self.NO_OBSTACLE = 'N'
self.JOYSTICK_UP = 'up'
self.JOYSTICK_RIGHT = 'right'
self.JOYSTICK_LEFT = 'left'
self.JOYSTICK_DOWN = 'down'
self.JOYSTICK_CENTER = 'center'
print('[Alpha_INFO]: IR detectors initialised (for obstacles)')
self.pin_IR = pyb.Pin(ALPHABOT_V2_PIN_IR, pyb.Pin.IN)
print('[Alpha_INFO]: IR receiver initialised (for remotes)')
self.pin_oled_reset = pyb.Pin(ALPHABOT_V2_PIN_OLED_RESET, pyb.Pin.OUT)
self.pin_oled_reset.off()
utime.sleep_ms(10)
self.pin_oled_reset.on()
self.pin_DC = pyb.Pin(ALPHABOT_V2_PIN_OLED_D_C, pyb.Pin.OUT)
print('[Alpha_INFO]: OLED screen initialised')
self._pcf8574 = PCF8574(self.i2c, addr=ALPHABOT_V2_PCF8574_I2C_ADDR)
def setPWMA(self, value):
self.PWMA.pulse_width_percent(value)
def setPWMB(self, value):
self.PWMB.pulse_width_percent(value)
def setMotors(self, left=None, right=None):
if left is not None:
if left >= 0 and left <= 100:
self.ain1.off()
self.ain2.on()
self.setPWMA(left)
elif left >= -100 and left < 0:
self.ain1.on()
self.ain2.off()
self.setPWMA(-left)
if right is not None:
if right >= 0 and right <= 100:
self.bin1.off()
self.bin2.on()
self.setPWMB(right)
elif right >= -100 and right < 0:
self.bin1.on()
self.bin2.off()
self.setPWMB(-right)
def stop(self):
self.setMotors(left=0, right=0)
def moveForward(self, speed, duration_ms=0):
self.setMotors(left=speed, right=speed)
if duration_ms:
utime.sleep_ms(duration_ms)
self.stop()
def moveBackward(self, speed, duration_ms=0):
self.setMotors(left=-speed, right=-speed)
if duration_ms:
utime.sleep_ms(duration_ms)
self.stop()
def turnLeft(self, speed, duration_ms=0):
if speed < 20:
self.setMotors(left=speed, right=50-speed)
else:
self.setMotors(left=30-speed, right=speed)
if duration_ms:
utime.sleep_ms(duration_ms)
self.stop()
def turnRight(self, speed, duration_ms=0):
if speed < 20:
self.setMotors(left=50-speed, right=speed)
else:
self.setMotors(left=speed, right=30-speed)
if duration_ms:
utime.sleep_ms(duration_ms)
self.stop()
def calibrateLineFinder(self):
print("[Alpha_INFO]: TR sensors calibration ...\\n")
for i in range(0, 100):
if i<25 or i>= 75:
self.turnRight(15)
else:
self.turnLeft(15)
self.TRSensors_calibrate()
self.stop()
print("Calibration done.\\n")
print(str(self.tr_sensors.calibratedMin) + '\\n')
print(str(self.tr_sensors.calibratedMax) + '\\n')
utime.sleep_ms(500)
def TRSensors_calibrate(self):
self.tr_sensors.calibrate()
def TRSensors_read(self, sensor = 0):
return self.tr_sensors.analogRead()
def TRSensors_readLine(self, sensor = 0):
position, sensor_values = self.tr_sensors.readLine()
if sensor is 0:
return sensor_values
else:
return sensor_values[sensor-1]
def TRSensors_position_readLine(self, sensor = 0):
return self.tr_sensors.readLine()
def readUltrasonicDistance(self, length=15, timeout_us = 30000):
measurements = 0
for i in range(length):
self.trig.off()
utime.sleep_us(2)
self.trig.on()
utime.sleep_us(10)
self.trig.off()
self.echo.value()
measurements += machine.time_pulse_us(self.echo, 1, timeout_us)/1e6 # t_echo in seconds
duration = measurements/length
return 343 * duration/2 * 100
def getOLEDaddr(self):
if self.pin_DC.value():
return ALPHABOT_V2_OLED_I2C_ADDR_DC_ON
else:
return ALPHABOT_V2_OLED_I2C_ADDR_DC_OFF
# Drivers for PCF8574T
def controlBuzzer(self, state):
    """Drive the buzzer via PCF8574 expander pin 5; `state` is truthy=on."""
    self._pcf8574.pin(5, state)
def getJoystickValue(self):
    """Poll the five joystick inputs on the PCF8574 (active low).

    Returns one of the JOYSTICK_* constants for the first pressed
    direction found (pins 0..4 = up/right/left/down/center), or None
    when nothing is pressed.
    """
    pressed = None
    for idx in range(5):
        if not self._pcf8574.pin(idx):  # active low: 0 means pressed
            pressed = idx
            break
    if pressed is None:
        return None
    directions = (
        self.JOYSTICK_UP,
        self.JOYSTICK_RIGHT,
        self.JOYSTICK_LEFT,
        self.JOYSTICK_DOWN,
        self.JOYSTICK_CENTER,
    )
    return directions[pressed]
def readInfrared(self):
    """Report which IR obstacle sensors trigger (PCF8574 pins 7/6, active low)."""
    left_hit = not self._pcf8574.pin(7)
    right_hit = not self._pcf8574.pin(6)
    if left_hit and right_hit:
        return self.BOTH_OBSTACLE
    if left_hit:
        return self.LEFT_OBSTACLE
    if right_hit:
        return self.RIGHT_OBSTACLE
    return self.NO_OBSTACLE

View File

@ -0,0 +1,91 @@
# Exemple pour générer des trames d'advertising pour le BLE
from micropython import const
import struct
import bluetooth
# Les trames d'advertising sont sont des paquets répétés ayant la structure suivante :
# 1 octet indiquant la taille des données (N + 1)
# 1 octet indiquant le type de données (voir les constantes ci-dessous)
# N octets de données du type indiqué
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_UUID16_MORE = const(0x2)
_ADV_TYPE_UUID32_MORE = const(0x4)
_ADV_TYPE_UUID128_MORE = const(0x6)
_ADV_TYPE_APPEARANCE = const(0x19)
_ADV_TYPE_MANUFACTURER = const(0xFF)
# Génère une trame qui sera passée à la méthode gap_advertise(adv_data=...).
def adv_payload(
    limited_disc=False,
    br_edr=False,
    name=None,
    services=None,
    appearance=0,
    manufacturer=0,
):
    """Build a BLE advertising payload for gap_advertise(adv_data=...).

    Each advertising data (AD) structure is: one length byte (N + 1),
    one type byte, then N payload bytes.
    """
    def _field(adv_type, value):
        # One AD structure: length, type, payload.
        return struct.pack("BB", len(value) + 1, adv_type) + value

    flags = (0x01 if limited_disc else 0x02) + (0x00 if br_edr else 0x04)
    payload = bytearray(_field(_ADV_TYPE_FLAGS, struct.pack("B", flags)))
    if name:
        payload += _field(_ADV_TYPE_NAME, name)
    if services:
        uuid_type_by_size = {
            2: _ADV_TYPE_UUID16_COMPLETE,
            4: _ADV_TYPE_UUID32_COMPLETE,
            16: _ADV_TYPE_UUID128_COMPLETE,
        }
        for uuid in services:
            raw = bytes(uuid)
            adv_type = uuid_type_by_size.get(len(raw))
            if adv_type is not None:
                payload += _field(adv_type, raw)
    if appearance:
        # See org.bluetooth.characteristic.gap.appearance.xml
        payload += _field(_ADV_TYPE_APPEARANCE, struct.pack("<h", appearance))
    if manufacturer:
        payload += _field(_ADV_TYPE_MANUFACTURER, manufacturer)
    return payload
def decode_field(payload, adv_type):
    """Collect the data bytes of every AD structure of `adv_type` in `payload`.

    Returns a list of byte slices (possibly empty).
    """
    found = []
    pos = 0
    while pos + 1 < len(payload):
        field_len = payload[pos]
        if payload[pos + 1] == adv_type:
            found.append(payload[pos + 2 : pos + field_len + 1])
        pos += 1 + field_len
    return found
def decode_name(payload):
    """Extract the advertised complete local name, or "" when absent."""
    names = decode_field(payload, _ADV_TYPE_NAME)
    if not names:
        return ""
    return str(names[0], "utf-8")
def decode_services(payload):
    """Decode every complete service UUID (16/32/128-bit) advertised in payload.

    Bug fixes: 16-bit UUIDs were unpacked with the signed "<h" format
    (UUIDs >= 0x8000 came out negative) and 32-bit UUIDs with "<d" -- an
    8-byte double format that raises struct.error on a 4-byte field.
    Both now use the correct little-endian unsigned formats.
    """
    services = []
    for u in decode_field(payload, _ADV_TYPE_UUID16_COMPLETE):
        services.append(bluetooth.UUID(struct.unpack("<H", u)[0]))
    for u in decode_field(payload, _ADV_TYPE_UUID32_COMPLETE):
        services.append(bluetooth.UUID(struct.unpack("<I", u)[0]))
    for u in decode_field(payload, _ADV_TYPE_UUID128_COMPLETE):
        services.append(bluetooth.UUID(u))
    return services

118
robot/stm32_ble_uart.py Normal file
View File

@ -0,0 +1,118 @@
# Objet du script : mise en oeuvre du service UART BLE de Nordic Semiconductors (NUS pour
# "Nordic UART Service").
# Sources :
# https://github.com/micropython/micropython/blob/master/examples/bluetooth/ble_uart_peripheral.py
# Attente active, envoi de l'adresse MAC et réception continue de chaines de caractères
import bluetooth # Classes "primitives du BLE"
from stm32_bleAdvertising import adv_payload # Pour construire la trame d'advertising
from binascii import hexlify # Convertit une donnée binaire en sa représentation hexadécimale
# Constantes requises pour construire le service BLE UART
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_GATTS_WRITE = const(3)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
# Définition du service UART avec ses deux caractéristiques RX et TX
_UART_UUID = bluetooth.UUID("6E400001-B5A3-F393-E0A9-E50E24DCCA9E")
_UART_TX = (
bluetooth.UUID("6E400003-B5A3-F393-E0A9-E50E24DCCA9E"),
_FLAG_NOTIFY, # Cette caractéristique notifiera le central des modifications que lui apportera le périphérique
)
_UART_RX = (
bluetooth.UUID("6E400002-B5A3-F393-E0A9-E50E24DCCA9E"),
_FLAG_WRITE, # Le central pourra écrire dans cette caractéristique
)
_UART_SERVICE = (
_UART_UUID,
(_UART_TX, _UART_RX),
)
# org.bluetooth.characteristic.gap.appearance.xml
_ADV_APPEARANCE_GENERIC_COMPUTER = const(128)
# Nombre maximum d'octets qui peuvent être échangés par la caractéristique RX
_MAX_NB_BYTES = const(100)
ascii_mac = None
class BLEUART:
    """BLE peripheral implementing the Nordic UART Service (NUS).

    Exposes a TX characteristic (peripheral notifies the central) and an
    RX characteristic (central writes to the peripheral), plus a buffered
    read/write API on top of them.
    """
    # Initialization.
    def __init__(self, ble, name="WB55-UART", rxbuf=_MAX_NB_BYTES):
        self._ble = ble
        self._ble.active(True)
        self._ble.irq(self._irq)
        # Register the UART service and keep the TX/RX value handles.
        ((self._tx_handle, self._rx_handle),) = self._ble.gatts_register_services((_UART_SERVICE,))
        # Increase the RX buffer size and enable "append" mode so
        # successive writes accumulate instead of overwriting.
        self._ble.gatts_set_buffer(self._rx_handle, rxbuf, True)
        self._connections = set()
        self._rx_buffer = bytearray()
        self._handler = None
        # Advertise the service.
        # services=[_UART_UUID] could be added as an option, but that
        # risks making the advertising payload too long.
        self._payload = adv_payload(name=name, appearance=_ADV_APPEARANCE_GENERIC_COMPUTER)
        self._advertise()
        # Display the device's MAC address (also kept in the module-level
        # ascii_mac for other code to read).
        dummy, byte_mac = self._ble.config('mac')
        hex_mac = hexlify(byte_mac)
        global ascii_mac
        ascii_mac = hex_mac.decode("ascii")
        print("Adresse MAC : %s" %ascii_mac)
    # Install the user handler invoked when RX data arrives.
    def irq(self, handler):
        self._handler = handler
    # BLE event dispatcher: tracks connections and buffers RX writes.
    def _irq(self, event, data):
        # A central connected.
        if event == _IRQ_CENTRAL_CONNECT:
            conn_handle, _, _ = data
            self._connections.add(conn_handle)
        # A central disconnected.
        elif event == _IRQ_CENTRAL_DISCONNECT:
            conn_handle, _, _ = data
            if conn_handle in self._connections:
                self._connections.remove(conn_handle)
            # Restart advertising to allow new connections.
            self._advertise()
        # A client wrote into a characteristic exposed by this server
        # (reception of data from the central).
        elif event == _IRQ_GATTS_WRITE:
            conn_handle, value_handle = data
            if conn_handle in self._connections and value_handle == self._rx_handle:
                self._rx_buffer += self._ble.gatts_read(self._rx_handle)
                if self._handler:
                    self._handler()
    # Number of bytes waiting to be read from RX.
    def any(self):
        return len(self._rx_buffer)
    # Return (and consume) up to sz received bytes; all of them when sz
    # is None or 0.
    def read(self, sz=None):
        if not sz:
            sz = len(self._rx_buffer)
        result = self._rx_buffer[0:sz]
        self._rx_buffer = self._rx_buffer[sz:]
        return result
    # Write a message to TX, notifying every connected central.
    def write(self, data):
        for conn_handle in self._connections:
            self._ble.gatts_notify(conn_handle, self._tx_handle, data)
    # Terminate every connection to the simulated serial port.
    def close(self):
        for conn_handle in self._connections:
            self._ble.gap_disconnect(conn_handle)
        self._connections.clear()
    # Start advertising; connectable=True lets a central connect.
    def _advertise(self, interval_us=500000):
        self._ble.gap_advertise(interval_us, adv_data=self._payload, connectable = True)

View File

@ -0,0 +1,69 @@
# ir_rx __init__.py Decoder for IR remote control using synchronous code
# IR_RX abstract base class for IR receivers.
# Author: Peter Hinch
# Copyright Peter Hinch 2020-2021 Released under the MIT license
from machine import Timer, Pin
from array import array
from utime import ticks_us
# Save RAM
# from micropython import alloc_emergency_exception_buf
# alloc_emergency_exception_buf(100)
# On 1st edge start a block timer. While the timer is running, record the time
# of each edge. When the timer times out decode the data. Duration must exceed
# the worst case block transmission time, but be less than the interval between
# a block start and a repeat code start (~108ms depending on protocol)
class IR_RX():
    """Abstract base class for interrupt-driven IR remote receivers.

    On the 1st edge a one-shot block timer is started.  While the timer is
    running, the time of each edge is recorded in self._times.  When the
    timer times out, the protocol-specific decode() (bound to self.cb by
    subclasses) interprets the recorded data.  The timer duration must
    exceed the worst-case block transmission time, but be less than the
    interval between a block start and a repeat-code start (~108 ms
    depending on protocol).
    """
    # Result/error codes
    # Repeat button code
    REPEAT = -1
    # Error codes
    BADSTART = -2
    BADBLOCK = -3
    BADREP = -4
    OVERRUN = -5
    BADDATA = -6
    BADADDR = -7
    def __init__(self, pin, nedges, tblock, callback, *args):  # Optional args for callback
        self._pin = pin
        self._nedges = nedges
        self._tblock = tblock
        self.callback = callback
        self.args = args
        self._errf = lambda _ : None  # default error handler: ignore
        self.verbose = False
        self._times = array('i', (0 for _ in range(nedges + 1)))  # +1 for overrun
        # Record every edge, both falling and rising.
        pin.irq(handler = self._cb_pin, trigger = (Pin.IRQ_FALLING | Pin.IRQ_RISING))
        self.edge = 0
        self.tim = Timer(-1)  # Software timer
        self.cb = self.decode  # subclass supplies decode()
    # Pin interrupt. Save time of each edge for later decode.
    def _cb_pin(self, line):
        t = ticks_us()
        # On overrun ignore pulses until software timer times out
        if self.edge <= self._nedges:  # Allow 1 extra pulse to record overrun
            if not self.edge:  # First edge received: start the block timer
                self.tim.init(period=self._tblock , mode=Timer.ONE_SHOT, callback=self.cb)
            self._times[self.edge] = t
            self.edge += 1
    def do_callback(self, cmd, addr, ext, thresh=0):
        """Reset the edge counter, then run the user callback for a valid
        code (cmd >= thresh) or the error function otherwise."""
        self.edge = 0
        if cmd >= thresh:
            self.callback(cmd, addr, ext, *self.args)
        else:
            self._errf(cmd)
    def error_function(self, func):
        # Install a handler called with the error code on decode failure.
        self._errf = func
    def close(self):
        # Detach the pin interrupt and stop the block timer.
        self._pin.irq(handler = None)
        self.tim.deinit()

62
robot/stm32_nec.py Normal file
View File

@ -0,0 +1,62 @@
# nec.py Decoder for IR remote control using synchronous code
# Supports NEC protocol.
# For a remote using NEC see https://www.adafruit.com/products/389
# Author: Peter Hinch
# Copyright Peter Hinch 2020 Released under the MIT license
from utime import ticks_us, ticks_diff
from stm32_ir_receiver import IR_RX
class NEC_ABC(IR_RX):
    """Abstract NEC IR protocol decoder (8- or 16-bit addressing).

    A block lasts <= 80 ms (extended mode) and has 68 edges; decode() is
    invoked by the base-class block timer with the recorded edge times.
    """
    def __init__(self, pin, extended, callback, *args):
        # Block lasts <= 80ms (extended mode) and has 68 edges
        super().__init__(pin, 68, 80, callback, *args)
        self._extended = extended
        self._addr = 0  # last successfully decoded address (reused for REPEAT)
    def decode(self, _):
        """Decode the edge-time array into (cmd, addr); errors are routed
        through do_callback() as negative codes."""
        try:
            if self.edge > 68:
                raise RuntimeError(self.OVERRUN)
            width = ticks_diff(self._times[1], self._times[0])
            if width < 4000:  # 9ms leading mark for all valid data
                raise RuntimeError(self.BADSTART)
            width = ticks_diff(self._times[2], self._times[1])
            if width > 3000:  # 4.5ms space for normal data
                if self.edge < 68:  # Haven't received the correct number of edges
                    raise RuntimeError(self.BADBLOCK)
                # Time spaces only (marks are always 562.5µs)
                # Space is 1.6875ms (1) or 562.5µs (0)
                # Skip last bit which is always 1
                val = 0
                for edge in range(3, 68 - 2, 2):
                    val >>= 1
                    if ticks_diff(self._times[edge + 1], self._times[edge]) > 1120:
                        val |= 0x80000000
            elif width > 1700:  # 2.5ms space for a repeat code. Should have exactly 4 edges.
                raise RuntimeError(self.REPEAT if self.edge == 4 else self.BADREP)  # Treat REPEAT as error.
            else:
                raise RuntimeError(self.BADSTART)
            addr = val & 0xff  # 8 bit addr
            cmd = (val >> 16) & 0xff
            if cmd != (val >> 24) ^ 0xff:
                raise RuntimeError(self.BADDATA)
            if addr != ((val >> 8) ^ 0xff) & 0xff:  # 8 bit addr doesn't match check
                if not self._extended:
                    raise RuntimeError(self.BADADDR)
                addr |= val & 0xff00  # pass assumed 16 bit address to callback
            self._addr = addr
        except RuntimeError as e:
            cmd = e.args[0]
            addr = self._addr if cmd == self.REPEAT else 0  # REPEAT uses last address
        # Set up for new data burst and run user callback
        self.do_callback(cmd, addr, 0, self.REPEAT)
class NEC_8(NEC_ABC):
    """NEC decoder for remotes using standard 8-bit addressing."""
    def __init__(self, pin, callback, *args):
        super().__init__(pin, False, callback, *args)
class NEC_16(NEC_ABC):
    """NEC decoder for remotes using extended 16-bit addressing."""
    def __init__(self, pin, callback, *args):
        super().__init__(pin, True, callback, *args)

49
robot/stm32_pcf8574.py Normal file
View File

@ -0,0 +1,49 @@
class PCF8574:
    """Driver for the PCF8574 8-bit I2C port expander.

    Keeps a one-byte shadow of the port; reads refresh the shadow from
    the device, writes push the shadow to the device.
    """

    def __init__(self, i2c, addr=0x20):
        self._i2c = i2c
        detected = self._i2c.scan()
        if addr not in detected:
            error = "Unable to find module 'PCF8574' at address " + str(hex(addr)) + ". Please check connections with the board.\n"
            error += "[Info] I2C address.es detected: " + str([hex(a) for a in detected])
            raise ValueError(error)
        self._addr = addr
        self._port = bytearray(1)

    @property
    def port(self):
        """Current 8-bit port value (refreshed from the device)."""
        self._read()
        return self._port[0]

    @port.setter
    def port(self, value):
        self._port[0] = value & 0xff
        self._write()

    def pin(self, pin, value=None):
        """Read one pin (value=None) or drive it high/low (truthy/falsy)."""
        pin = self.validate_pin(pin)
        if value is None:
            self._read()
            return (self._port[0] >> pin) & 1
        mask = 1 << pin
        if value:
            self._port[0] |= mask
        else:
            self._port[0] &= ~mask
        self._write()

    def toggle(self, pin):
        """Invert the state of one pin."""
        pin = self.validate_pin(pin)
        self._port[0] ^= (1 << pin)
        self._write()

    def validate_pin(self, pin):
        """Raise ValueError unless pin is in the valid range 0..7."""
        if not 0 <= pin <= 7:
            raise ValueError('Invalid pin {}. Use 0-7.'.format(pin))
        return pin

    def _read(self):
        # Refresh the shadow byte from the device.
        self._i2c.readfrom_into(self._addr, self._port)

    def _write(self):
        # Push the shadow byte to the device.
        self._i2c.writeto(self._addr, self._port)

131
robot/stm32_ssd1306.py Normal file
View File

@ -0,0 +1,131 @@
# MicroPython SSD1306 OLED I2C driver
from micropython import const
import framebuf
import utime
SSD1306_I2C_ADDR = 0x3C
# register definitions
SET_CONTRAST = const(0x81)
SET_ENTIRE_ON = const(0xA4)
SET_NORM_INV = const(0xA6)
SET_DISP = const(0xAE)
SET_MEM_ADDR = const(0x20)
SET_COL_ADDR = const(0x21)
SET_PAGE_ADDR = const(0x22)
SET_DISP_START_LINE = const(0x40)
SET_SEG_REMAP = const(0xA0)
SET_MUX_RATIO = const(0xA8)
SET_COM_OUT_DIR = const(0xC0)
SET_DISP_OFFSET = const(0xD3)
SET_COM_PIN_CFG = const(0xDA)
SET_DISP_CLK_DIV = const(0xD5)
SET_PRECHARGE = const(0xD9)
SET_VCOM_DESEL = const(0xDB)
SET_CHARGE_PUMP = const(0x8D)
# Subclassing FrameBuffer provides support for graphics primitives
# http://docs.micropython.org/en/latest/pyboard/library/framebuf.html
class SSD1306(framebuf.FrameBuffer):
    """SSD1306 OLED driver core.

    Subclassing FrameBuffer provides graphics primitives; transports
    (I2C/SPI) subclass this and supply write_cmd()/write_data().
    http://docs.micropython.org/en/latest/pyboard/library/framebuf.html
    """
    def __init__(self, width, height, external_vcc):
        # width/height in pixels; external_vcc selects charge-pump tuning.
        self.width = width
        self.height = height
        self.external_vcc = external_vcc
        self.pages = self.height // 8  # one page = 8 vertical pixels
        self.buffer = bytearray(self.pages * self.width)
        super().__init__(self.buffer, self.width, self.height, framebuf.MONO_VLSB)
        self.init_display()
    def init_display(self):
        """Send the power-up command sequence, then clear and show."""
        for cmd in (
            SET_DISP,  # display off
            # address setting
            SET_MEM_ADDR,
            0x00,  # horizontal
            # resolution and layout
            SET_DISP_START_LINE,  # start at line 0
            SET_SEG_REMAP | 0x01,  # column addr 127 mapped to SEG0
            SET_MUX_RATIO,
            self.height - 1,
            SET_COM_OUT_DIR | 0x08,  # scan from COM[N] to COM0
            SET_DISP_OFFSET,
            0x00,
            SET_COM_PIN_CFG,
            0x02 if self.width > 2 * self.height else 0x12,
            # timing and driving scheme
            SET_DISP_CLK_DIV,
            0x80,
            SET_PRECHARGE,
            0x22 if self.external_vcc else 0xF1,
            SET_VCOM_DESEL,
            0x30,  # 0.83*Vcc
            # display
            SET_CONTRAST,
            0xFF,  # maximum
            SET_ENTIRE_ON,  # output follows RAM contents
            SET_NORM_INV,  # not inverted
            # charge pump
            SET_CHARGE_PUMP,
            0x10 if self.external_vcc else 0x14,
            SET_DISP | 0x01,  # display on
        ):
            self.write_cmd(cmd)
        self.fill(0)
        self.show()
    def poweroff(self):
        """Turn the display panel off (RAM contents are kept)."""
        self.write_cmd(SET_DISP)
    def poweron(self):
        """Turn the display panel back on."""
        self.write_cmd(SET_DISP | 0x01)
    def contrast(self, contrast):
        """Set display contrast, 0..255."""
        self.write_cmd(SET_CONTRAST)
        self.write_cmd(contrast)
    def invert(self, invert):
        """Invert (truthy) or restore (falsy) the display colors."""
        self.write_cmd(SET_NORM_INV | (invert & 1))
    def rotate(self, rotate):
        """Rotate the display 180 degrees (truthy) or restore orientation."""
        self.write_cmd(SET_COM_OUT_DIR | ((rotate & 1) << 3))
        self.write_cmd(SET_SEG_REMAP | (rotate & 1))
    def show(self):
        """Copy the local frame buffer to the display RAM."""
        x0 = 0
        x1 = self.width - 1
        if self.width == 64:
            # displays with width of 64 pixels are shifted by 32
            x0 += 32
            x1 += 32
        self.write_cmd(SET_COL_ADDR)
        self.write_cmd(x0)
        self.write_cmd(x1)
        self.write_cmd(SET_PAGE_ADDR)
        self.write_cmd(0)
        self.write_cmd(self.pages - 1)
        self.write_data(self.buffer)
class SSD1306_I2C(SSD1306):
    """SSD1306 transport over I2C: supplies write_cmd()/write_data()."""
    def __init__(self, width, height, i2c, addr=SSD1306_I2C_ADDR, external_vcc=False):
        if i2c == None:
            raise ValueError("I2C object 'SSD1306' needed as argument!")
        self._i2c = i2c
        # Give the display time to power up before probing the bus.
        utime.sleep_ms(200)
        i2cModules = self._i2c.scan()
        if addr not in i2cModules:
            error = "Unable to find module 'SSD1306' at address " + str(hex(addr)) + ". Please check connections with the board.\n"
            error += "[Info] I2C address.es detected: " + str([hex(a) for a in i2cModules])
            raise ValueError(error)
        self._addr = addr
        self.temp = bytearray(2)  # reusable 2-byte command frame
        self.write_list = [b"\x40", None]  # Co=0, D/C#=1
        super().__init__(width, height, external_vcc)
    def write_cmd(self, cmd):
        # Command frame: control byte (Co=1, D/C#=0) followed by the command.
        self.temp[0] = 0x80  # Co=1, D/C#=0
        self.temp[1] = cmd
        self._i2c.writeto(self._addr, self.temp)
    def write_data(self, buf):
        # Data frame: control byte 0x40 then the buffer, as a vectored write.
        self.write_list[1] = buf
        self._i2c.writevto(self._addr, self.write_list)

525
robot/stm32_vl53l0x.py Normal file
View File

@ -0,0 +1,525 @@
"""
MicroPython for Grove Time Of Flight VL53L0X sensor (I2C).
https://github.com/vittascience/stm32-libraries
https://wiki.seeedstudio.com/Grove-Time_of_Flight_Distance_Sensor-VL53L0X/
MIT License
Copyright (c) 2020 leomlr (Léo Meillier)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/Vittascience/stm32-libraries"
from micropython import const
import utime
import math
_VL53L0X_IIC_ADDR = const(0x29)
# Configuration constants:
_SYSRANGE_START = const(0x00)
_SYSTEM_THRESH_HIGH = const(0x0C)
_SYSTEM_THRESH_LOW = const(0x0E)
_SYSTEM_SEQUENCE_CONFIG = const(0x01)
_SYSTEM_RANGE_CONFIG = const(0x09)
_SYSTEM_INTERMEASUREMENT_PERIOD = const(0x04)
_SYSTEM_INTERRUPT_CONFIG_GPIO = const(0x0A)
_GPIO_HV_MUX_ACTIVE_HIGH = const(0x84)
_SYSTEM_INTERRUPT_CLEAR = const(0x0B)
_RESULT_INTERRUPT_STATUS = const(0x13)
_RESULT_RANGE_STATUS = const(0x14)
_RESULT_CORE_AMBIENT_WINDOW_EVENTS_RTN = const(0xBC)
_RESULT_CORE_RANGING_TOTAL_EVENTS_RTN = const(0xC0)
_RESULT_CORE_AMBIENT_WINDOW_EVENTS_REF = const(0xD0)
_RESULT_CORE_RANGING_TOTAL_EVENTS_REF = const(0xD4)
_RESULT_PEAK_SIGNAL_RATE_REF = const(0xB6)
_ALGO_PART_TO_PART_RANGE_OFFSET_MM = const(0x28)
_I2C_SLAVE_DEVICE_ADDRESS = const(0x8A)
_MSRC_CONFIG_CONTROL = const(0x60)
_PRE_RANGE_CONFIG_MIN_SNR = const(0x27)
_PRE_RANGE_CONFIG_VALID_PHASE_LOW = const(0x56)
_PRE_RANGE_CONFIG_VALID_PHASE_HIGH = const(0x57)
_PRE_RANGE_MIN_COUNT_RATE_RTN_LIMIT = const(0x64)
_FINAL_RANGE_CONFIG_MIN_SNR = const(0x67)
_FINAL_RANGE_CONFIG_VALID_PHASE_LOW = const(0x47)
_FINAL_RANGE_CONFIG_VALID_PHASE_HIGH = const(0x48)
_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT = const(0x44)
_PRE_RANGE_CONFIG_SIGMA_THRESH_HI = const(0x61)
_PRE_RANGE_CONFIG_SIGMA_THRESH_LO = const(0x62)
_PRE_RANGE_CONFIG_VCSEL_PERIOD = const(0x50)
_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI = const(0x51)
_PRE_RANGE_CONFIG_TIMEOUT_MACROP_LO = const(0x52)
_SYSTEM_HISTOGRAM_BIN = const(0x81)
_HISTOGRAM_CONFIG_INITIAL_PHASE_SELECT = const(0x33)
_HISTOGRAM_CONFIG_READOUT_CTRL = const(0x55)
_FINAL_RANGE_CONFIG_VCSEL_PERIOD = const(0x70)
_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI = const(0x71)
_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_LO = const(0x72)
_CROSSTALK_COMPENSATION_PEAK_RATE_MCPS = const(0x20)
_MSRC_CONFIG_TIMEOUT_MACROP = const(0x46)
_SOFT_RESET_GO2_SOFT_RESET_N = const(0xBF)
_IDENTIFICATION_MODEL_ID = const(0xC0)
_IDENTIFICATION_REVISION_ID = const(0xC2)
_OSC_CALIBRATE_VAL = const(0xF8)
_GLOBAL_CONFIG_VCSEL_WIDTH = const(0x32)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_0 = const(0xB0)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_1 = const(0xB1)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_2 = const(0xB2)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_3 = const(0xB3)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_4 = const(0xB4)
_GLOBAL_CONFIG_SPAD_ENABLES_REF_5 = const(0xB5)
_GLOBAL_CONFIG_REF_EN_START_SELECT = const(0xB6)
_DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD = const(0x4E)
_DYNAMIC_SPAD_REF_EN_START_OFFSET = const(0x4F)
_POWER_MANAGEMENT_GO1_POWER_FORCE = const(0x80)
_VHV_CONFIG_PAD_SCL_SDA__EXTSUP_HV = const(0x89)
_ALGO_PHASECAL_LIM = const(0x30)
_ALGO_PHASECAL_CONFIG_TIMEOUT = const(0x30)
_VCSEL_PERIOD_PRE_RANGE = const(0)
_VCSEL_PERIOD_FINAL_RANGE = const(1)
def _decode_timeout(val):
# format: "(LSByte * 2^MSByte) + 1"
return float(val & 0xFF) * math.pow(2.0, ((val & 0xFF00) >> 8)) + 1
def _encode_timeout(timeout_mclks):
# format: "(LSByte * 2^MSByte) + 1"
timeout_mclks = int(timeout_mclks) & 0xFFFF
ls_byte = 0
ms_byte = 0
if timeout_mclks > 0:
ls_byte = timeout_mclks - 1
while ls_byte > 255:
ls_byte >>= 1
ms_byte += 1
return ((ms_byte << 8) | (ls_byte & 0xFF)) & 0xFFFF
return 0
def _timeout_mclks_to_microseconds(timeout_period_mclks, vcsel_period_pclks):
macro_period_ns = ((2304 * (vcsel_period_pclks) * 1655) + 500) // 1000
return ((timeout_period_mclks * macro_period_ns) + (macro_period_ns // 2)) // 1000
def _timeout_microseconds_to_mclks(timeout_period_us, vcsel_period_pclks):
macro_period_ns = ((2304 * (vcsel_period_pclks) * 1655) + 500) // 1000
return ((timeout_period_us * 1000) + (macro_period_ns // 2)) // macro_period_ns
class VL53L0X:
"""Driver for the VL53L0X distance sensor."""
def __init__(self, i2c, address=_VL53L0X_IIC_ADDR, io_timeout_s=0):
# pylint: disable=too-many-statements
self._i2c = i2c
self._addr = address
self.io_timeout_s = io_timeout_s
# Check identification registers for expected values.
# From section 3.2 of the datasheet.
if (
self._read_u8(0xC0) is not 0xEE
or self._read_u8(0xC1) is not 0xAA
or self._read_u8(0xC2) is not 0x10
):
raise RuntimeError("Failed to find expected ID register values. Check wiring!")
# Initialize access to the sensor. This is based on the logic from:
# https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
# Set I2C standard mode.
for pair in ((0x88, 0x00), (0x80, 0x01), (0xFF, 0x01), (0x00, 0x00)):
self._write_u8(pair[0], pair[1])
self._stop_variable = self._read_u8(0x91)
for pair in ((0x00, 0x01), (0xFF, 0x00), (0x80, 0x00)):
self._write_u8(pair[0], pair[1])
# disable SIGNAL_RATE_MSRC (bit 1) and SIGNAL_RATE_PRE_RANGE (bit 4)
# limit checks
config_control = self._read_u8(_MSRC_CONFIG_CONTROL) | 0x12
self._write_u8(_MSRC_CONFIG_CONTROL, config_control)
# set final range signal rate limit to 0.25 MCPS (million counts per
# second)
self.signal_rate_limit = 0.25
self._write_u8(_SYSTEM_SEQUENCE_CONFIG, 0xFF)
spad_count, spad_is_aperture = self._get_spad_info()
# The SPAD map (RefGoodSpadMap) is read by
# VL53L0X_get_info_from_device() in the API, but the same data seems to
# be more easily readable from GLOBAL_CONFIG_SPAD_ENABLES_REF_0 through
# _6, so read it from there.
ref_spad_map = bytearray(1)
ref_spad_map[0] = _GLOBAL_CONFIG_SPAD_ENABLES_REF_0
self._i2c.writeto(self._addr, ref_spad_map)
buf = bytearray(6)
self._i2c.readfrom_mem_into(self._addr, ref_spad_map[0], buf)
ref_spad_map.extend(buf)
for pair in (
(0xFF, 0x01),
(_DYNAMIC_SPAD_REF_EN_START_OFFSET, 0x00),
(_DYNAMIC_SPAD_NUM_REQUESTED_REF_SPAD, 0x2C),
(0xFF, 0x00),
(_GLOBAL_CONFIG_REF_EN_START_SELECT, 0xB4),
):
self._write_u8(pair[0], pair[1])
first_spad_to_enable = 12 if spad_is_aperture else 0
spads_enabled = 0
for i in range(48):
if i < first_spad_to_enable or spads_enabled == spad_count:
# This bit is lower than the first one that should be enabled,
# or (reference_spad_count) bits have already been enabled, so
# zero this bit.
ref_spad_map[1 + (i // 8)] &= ~(1 << (i % 8))
elif (ref_spad_map[1 + (i // 8)] >> (i % 8)) & 0x1 > 0:
spads_enabled += 1
self._i2c.writeto(self._addr, ref_spad_map)
for pair in (
(0xFF, 0x01),
(0x00, 0x00),
(0xFF, 0x00),
(0x09, 0x00),
(0x10, 0x00),
(0x11, 0x00),
(0x24, 0x01),
(0x25, 0xFF),
(0x75, 0x00),
(0xFF, 0x01),
(0x4E, 0x2C),
(0x48, 0x00),
(0x30, 0x20),
(0xFF, 0x00),
(0x30, 0x09),
(0x54, 0x00),
(0x31, 0x04),
(0x32, 0x03),
(0x40, 0x83),
(0x46, 0x25),
(0x60, 0x00),
(0x27, 0x00),
(0x50, 0x06),
(0x51, 0x00),
(0x52, 0x96),
(0x56, 0x08),
(0x57, 0x30),
(0x61, 0x00),
(0x62, 0x00),
(0x64, 0x00),
(0x65, 0x00),
(0x66, 0xA0),
(0xFF, 0x01),
(0x22, 0x32),
(0x47, 0x14),
(0x49, 0xFF),
(0x4A, 0x00),
(0xFF, 0x00),
(0x7A, 0x0A),
(0x7B, 0x00),
(0x78, 0x21),
(0xFF, 0x01),
(0x23, 0x34),
(0x42, 0x00),
(0x44, 0xFF),
(0x45, 0x26),
(0x46, 0x05),
(0x40, 0x40),
(0x0E, 0x06),
(0x20, 0x1A),
(0x43, 0x40),
(0xFF, 0x00),
(0x34, 0x03),
(0x35, 0x44),
(0xFF, 0x01),
(0x31, 0x04),
(0x4B, 0x09),
(0x4C, 0x05),
(0x4D, 0x04),
(0xFF, 0x00),
(0x44, 0x00),
(0x45, 0x20),
(0x47, 0x08),
(0x48, 0x28),
(0x67, 0x00),
(0x70, 0x04),
(0x71, 0x01),
(0x72, 0xFE),
(0x76, 0x00),
(0x77, 0x00),
(0xFF, 0x01),
(0x0D, 0x01),
(0xFF, 0x00),
(0x80, 0x01),
(0x01, 0xF8),
(0xFF, 0x01),
(0x8E, 0x01),
(0x00, 0x01),
(0xFF, 0x00),
(0x80, 0x00),
):
self._write_u8(pair[0], pair[1])
self._write_u8(_SYSTEM_INTERRUPT_CONFIG_GPIO, 0x04)
gpio_hv_mux_active_high = self._read_u8(_GPIO_HV_MUX_ACTIVE_HIGH)
self._write_u8(
_GPIO_HV_MUX_ACTIVE_HIGH, gpio_hv_mux_active_high & ~0x10
) # active low
self._write_u8(_SYSTEM_INTERRUPT_CLEAR, 0x01)
self._measurement_timing_budget_us = self.measurement_timing_budget
self._write_u8(_SYSTEM_SEQUENCE_CONFIG, 0xE8)
self.measurement_timing_budget = self._measurement_timing_budget_us
self._write_u8(_SYSTEM_SEQUENCE_CONFIG, 0x01)
self._perform_single_ref_calibration(0x40)
self._write_u8(_SYSTEM_SEQUENCE_CONFIG, 0x02)
self._perform_single_ref_calibration(0x00)
# "restore the previous Sequence Config"
self._write_u8(_SYSTEM_SEQUENCE_CONFIG, 0xE8)
def _read_u8(self, address):
# Read an 8-bit unsigned value from the specified 8-bit address.
buf = self._i2c.readfrom_mem(self._addr, address, 1)
return buf[0]
def _read_u16(self, address):
# Read a 16-bit BE unsigned value from the specified 8-bit address.
buf = self._i2c.readfrom_mem(self._addr, address, 2)
return (buf[0] << 8) | buf[1]
def _write_u8(self, address, val):
# Write an 8-bit unsigned value to the specified 8-bit address.
self._i2c.writeto(self._addr, bytearray([address & 0xFF, val & 0xFF]))
def _write_u16(self, address, val):
# Write a 16-bit BE unsigned value to the specified 8-bit address.
self._i2c.writeto(self._addr, bytearray([address & 0xFF, (val >> 8) & 0xFF, val & 0xFF]))
def _get_spad_info(self):
# Get reference SPAD count and type, returned as a 2-tuple of
# count and boolean is_aperture. Based on code from:
# https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
for pair in ((0x80, 0x01), (0xFF, 0x01), (0x00, 0x00), (0xFF, 0x06)):
self._write_u8(pair[0], pair[1])
self._write_u8(0x83, self._read_u8(0x83) | 0x04)
for pair in (
(0xFF, 0x07),
(0x81, 0x01),
(0x80, 0x01),
(0x94, 0x6B),
(0x83, 0x00),
):
self._write_u8(pair[0], pair[1])
start = utime.gmtime()
while self._read_u8(0x83) == 0x00:
if (
self.io_timeout_s > 0
and (utime.gmtime() - start) >= self.io_timeout_s
):
raise RuntimeError("Timeout waiting for VL53L0X!")
self._write_u8(0x83, 0x01)
tmp = self._read_u8(0x92)
count = tmp & 0x7F
is_aperture = ((tmp >> 7) & 0x01) == 1
for pair in ((0x81, 0x00), (0xFF, 0x06)):
self._write_u8(pair[0], pair[1])
self._write_u8(0x83, self._read_u8(0x83) & ~0x04)
for pair in ((0xFF, 0x01), (0x00, 0x01), (0xFF, 0x00), (0x80, 0x00)):
self._write_u8(pair[0], pair[1])
return (count, is_aperture)
def _perform_single_ref_calibration(self, vhv_init_byte):
# based on VL53L0X_perform_single_ref_calibration() from ST API.
self._write_u8(_SYSRANGE_START, 0x01 | vhv_init_byte & 0xFF)
start = utime.gmtime()
while (self._read_u8(_RESULT_INTERRUPT_STATUS) & 0x07) == 0:
if (
self.io_timeout_s > 0
and (utime.gmtime() - start) >= self.io_timeout_s
):
raise RuntimeError("Timeout waiting for VL53L0X!")
self._write_u8(_SYSTEM_INTERRUPT_CLEAR, 0x01)
self._write_u8(_SYSRANGE_START, 0x00)
def _get_vcsel_pulse_period(self, vcsel_period_type):
# pylint: disable=no-else-return
# Disable should be removed when refactor can be tested
if vcsel_period_type == _VCSEL_PERIOD_PRE_RANGE:
val = self._read_u8(_PRE_RANGE_CONFIG_VCSEL_PERIOD)
return (((val) + 1) & 0xFF) << 1
elif vcsel_period_type == _VCSEL_PERIOD_FINAL_RANGE:
val = self._read_u8(_FINAL_RANGE_CONFIG_VCSEL_PERIOD)
return (((val) + 1) & 0xFF) << 1
return 255
def _get_sequence_step_enables(self):
# based on VL53L0X_GetSequenceStepEnables() from ST API
sequence_config = self._read_u8(_SYSTEM_SEQUENCE_CONFIG)
tcc = (sequence_config >> 4) & 0x1 > 0
dss = (sequence_config >> 3) & 0x1 > 0
msrc = (sequence_config >> 2) & 0x1 > 0
pre_range = (sequence_config >> 6) & 0x1 > 0
final_range = (sequence_config >> 7) & 0x1 > 0
return (tcc, dss, msrc, pre_range, final_range)
def _get_sequence_step_timeouts(self, pre_range):
# based on get_sequence_step_timeout() from ST API but modified by
# pololu here:
# https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
pre_range_vcsel_period_pclks = self._get_vcsel_pulse_period(
_VCSEL_PERIOD_PRE_RANGE
)
msrc_dss_tcc_mclks = (self._read_u8(_MSRC_CONFIG_TIMEOUT_MACROP) + 1) & 0xFF
msrc_dss_tcc_us = _timeout_mclks_to_microseconds(
msrc_dss_tcc_mclks, pre_range_vcsel_period_pclks
)
pre_range_mclks = _decode_timeout(
self._read_u16(_PRE_RANGE_CONFIG_TIMEOUT_MACROP_HI)
)
pre_range_us = _timeout_mclks_to_microseconds(
pre_range_mclks, pre_range_vcsel_period_pclks
)
final_range_vcsel_period_pclks = self._get_vcsel_pulse_period(
_VCSEL_PERIOD_FINAL_RANGE
)
final_range_mclks = _decode_timeout(
self._read_u16(_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI)
)
if pre_range:
final_range_mclks -= pre_range_mclks
final_range_us = _timeout_mclks_to_microseconds(
final_range_mclks, final_range_vcsel_period_pclks
)
return (
msrc_dss_tcc_us,
pre_range_us,
final_range_us,
final_range_vcsel_period_pclks,
pre_range_mclks,
)
@property
def signal_rate_limit(self):
"""The signal rate limit in mega counts per second."""
val = self._read_u16(_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT)
# Return value converted from 16-bit 9.7 fixed point to float.
return val / (1 << 7)
@signal_rate_limit.setter
def signal_rate_limit(self, val):
assert 0.0 <= val <= 511.99
# Convert to 16-bit 9.7 fixed point value from a float.
val = int(val * (1 << 7))
self._write_u16(_FINAL_RANGE_CONFIG_MIN_COUNT_RATE_RTN_LIMIT, val)
@property
def measurement_timing_budget(self):
"""The measurement timing budget in microseconds."""
budget_us = 1910 + 960 # Start overhead + end overhead.
tcc, dss, msrc, pre_range, final_range = self._get_sequence_step_enables()
step_timeouts = self._get_sequence_step_timeouts(pre_range)
msrc_dss_tcc_us, pre_range_us, final_range_us, _, _ = step_timeouts
if tcc:
budget_us += msrc_dss_tcc_us + 590
if dss:
budget_us += 2 * (msrc_dss_tcc_us + 690)
elif msrc:
budget_us += msrc_dss_tcc_us + 660
if pre_range:
budget_us += pre_range_us + 660
if final_range:
budget_us += final_range_us + 550
self._measurement_timing_budget_us = budget_us
return budget_us
@measurement_timing_budget.setter
def measurement_timing_budget(self, budget_us):
# pylint: disable=too-many-locals
assert budget_us >= 20000
used_budget_us = 1320 + 960 # Start (diff from get) + end overhead
tcc, dss, msrc, pre_range, final_range = self._get_sequence_step_enables()
step_timeouts = self._get_sequence_step_timeouts(pre_range)
msrc_dss_tcc_us, pre_range_us, _ = step_timeouts[:3]
final_range_vcsel_period_pclks, pre_range_mclks = step_timeouts[3:]
if tcc:
used_budget_us += msrc_dss_tcc_us + 590
if dss:
used_budget_us += 2 * (msrc_dss_tcc_us + 690)
elif msrc:
used_budget_us += msrc_dss_tcc_us + 660
if pre_range:
used_budget_us += pre_range_us + 660
if final_range:
used_budget_us += 550
# "Note that the final range timeout is determined by the timing
# budget and the sum of all other timeouts within the sequence.
# If there is no room for the final range timeout, then an error
# will be set. Otherwise the remaining time will be applied to
# the final range."
if used_budget_us > budget_us:
raise ValueError("Requested timeout too big.")
final_range_timeout_us = budget_us - used_budget_us
final_range_timeout_mclks = _timeout_microseconds_to_mclks(
final_range_timeout_us, final_range_vcsel_period_pclks
)
if pre_range:
final_range_timeout_mclks += pre_range_mclks
self._write_u16(
_FINAL_RANGE_CONFIG_TIMEOUT_MACROP_HI,
_encode_timeout(final_range_timeout_mclks),
)
self._measurement_timing_budget_us = budget_us
def getRangeMillimeters(self):
    """Perform a single reading of the range for an object in front of
    the sensor and return the distance in millimeters.

    :return: measured distance in millimeters (``int``).
    :raises RuntimeError: if the sensor does not become ready within
        ``io_timeout_s`` seconds (only when that attribute is > 0).
    """
    # Adapted from readRangeSingleMillimeters &
    # readRangeContinuousMillimeters in pololu code at:
    # https://github.com/pololu/vl53l0x-arduino/blob/master/VL53L0X.cpp
    for pair in (
        (0x80, 0x01),
        (0xFF, 0x01),
        (0x00, 0x00),
        (0x91, self._stop_variable),
        (0x00, 0x01),
        (0xFF, 0x00),
        (0x80, 0x00),
        (_SYSRANGE_START, 0x01),
    ):
        self._write_u8(pair[0], pair[1])
    # BUGFIX: utime.gmtime() returns a time *tuple* on MicroPython, so the
    # old "utime.gmtime() - start" raised TypeError as soon as a timeout
    # was configured.  Use ticks_ms()/ticks_diff(), which are the
    # wraparound-safe MicroPython idiom for measuring elapsed time.
    timeout_ms = self.io_timeout_s * 1000
    start = utime.ticks_ms()
    while (self._read_u8(_SYSRANGE_START) & 0x01) > 0:
        if timeout_ms > 0 and utime.ticks_diff(utime.ticks_ms(), start) >= timeout_ms:
            raise RuntimeError("Timeout waiting for VL53L0X!")
    start = utime.ticks_ms()
    while (self._read_u8(_RESULT_INTERRUPT_STATUS) & 0x07) == 0:
        if timeout_ms > 0 and utime.ticks_diff(utime.ticks_ms(), start) >= timeout_ms:
            raise RuntimeError("Timeout waiting for VL53L0X!")
    # Assumptions: Linearity Corrective Gain is 1000 (default) and
    # fractional ranging is not enabled.
    range_mm = self._read_u16(_RESULT_RANGE_STATUS + 10)
    self._write_u8(_SYSTEM_INTERRUPT_CLEAR, 0x01)
    return range_mm
def set_address(self, new_address):
    """Assign a new I2C address to the instantiated object. This is only
    called when using multiple VL53L0X sensors on the same I2C bus
    (SDA & SCL pins). See also the
    `example <examples.html#multiple-vl53l0x-on-same-i2c-bus>`_ for proper usage.

    :param int new_address: The 7-bit `int` that is to be assigned to the
        VL53L0X sensor. The address that is assigned should NOT be already
        in use by another device on the I2C bus.

    .. important:: To properly set the address to an individual VL53L0X sensor,
       you must first ensure that all other VL53L0X sensors (using the default
       address of ``0x29``) on the same I2C bus are in their off state by
       pulling the "SHDN" pins LOW. When the "SHDN" pin is pulled HIGH again
       the default I2C address is ``0x29``.
    """
    # Mask to 7 bits before writing the device-address register.
    masked_address = new_address & 0x7F
    self._i2c.write(_I2C_SLAVE_DEVICE_ADDRESS, masked_address)

267
simulateur_front.py Normal file
View File

@ -0,0 +1,267 @@
from tkinter import *
import tkinter as tk
import Epreuve3 as e3
import subprocess
import sys
# --- Main application window -------------------------------------------------
root = tk.Tk()
root.title("Simulateur Epreuve 3 - 24H du Code 2026")
image = tk.PhotoImage(file="SII++.png")
root.configure(bg="#89B4E1")

# Main frame
frame = tk.Frame(root, width=800, height=400, background="#89B4E1")
frame.pack(padx=10, pady=10)

# Body frames: instructions/stack on the left, CPU state on the right
instructions_frame = tk.Frame(frame, width=400, height=400, bg="#89B4E1")
instructions_frame.pack(padx=5, pady=5, side=tk.LEFT, fill=tk.Y)
infos_frames = tk.Frame(frame, width=400, height=400, bg="#89B4E1")
infos_frames.pack(padx=5, pady=5, side=tk.RIGHT)


def _scrolled_list(parent, title, height, padx, pady):
    """Build a titled, vertically-scrollable Listbox and return the Listbox."""
    container = tk.Frame(parent, width=400, height=height)
    container.pack(padx=padx, pady=pady, side=tk.TOP)
    tk.Label(container, text=title).pack(pady=5)
    scroll = tk.Scrollbar(container)
    scroll.pack(side=tk.RIGHT, fill=tk.Y)
    listbox = tk.Listbox(container, yscrollcommand=scroll.set, width=50)
    listbox.pack(side=tk.LEFT, fill=tk.BOTH)
    scroll.config(command=listbox.yview)
    return listbox


def _value_box(parent, title):
    """Build an orange titled box and return the Label that shows its value."""
    box = tk.Frame(parent, width=100, height=100, bg="orange")
    box.pack(padx=10, pady=0, side=tk.LEFT)
    tk.Label(box, text=title, width=15, bg="orange").pack(pady=5)
    value_label = tk.Label(box, bg="orange")
    value_label.pack(pady=5)
    return value_label


# Stack display (scrollable)
mylist = _scrolled_list(instructions_frame, "Stack", height=300, padx=5, pady=5)

# Current-instruction display
single_inst_frame = tk.Frame(instructions_frame, width=400, height=100, bg="#89B4E1")
single_inst_frame.pack(padx=5, pady=5, side=tk.TOP)
tk.Label(single_inst_frame, text="Instruction").pack(pady=5, side=tk.LEFT)
single_instr = tk.Label(single_inst_frame, text="instruction+cycle", width=30, bg="white")
single_instr.pack(pady=5, side=tk.LEFT)

# Logo (shrunk 10x to fit the frame)
image_frame = tk.Frame(instructions_frame, width=400, height=100, bg="#89B4E1")
image_frame.pack(padx=5, pady=5, side=tk.TOP)
display_image = image.subsample(10, 10)
tk.Label(image_frame, image=display_image, bg="#89B4E1").pack(padx=5, pady=5)

# General-purpose registers R0..R3
registres_frames = tk.Frame(infos_frames, width=400, height=100, bg="#89B4E1")
registres_frames.pack(padx=10, pady=10, side=tk.TOP)
label_registre_1 = _value_box(registres_frames, "R0")
label_registre_2 = _value_box(registres_frames, "R1")
label_registre_3 = _value_box(registres_frames, "R2")
label_registre_4 = _value_box(registres_frames, "R3")

# Flags and pointers: EQ/LT comparison flags, program counter, stack pointer
annexes_frames = tk.Frame(infos_frames, width=400, height=100, bg="#89B4E1")
annexes_frames.pack(padx=10, pady=10, side=tk.TOP)
label_eq = _value_box(annexes_frames, "EQ")
label_lt = _value_box(annexes_frames, "LT")
label_pc = _value_box(annexes_frames, "PC")
label_sp = _value_box(annexes_frames, "SP")

# RAM dump display (scrollable)
myRamList = _scrolled_list(infos_frames, "RAM", height=100, padx=10, pady=10)

# OUT stream display (scrollable)
myOutList = _scrolled_list(infos_frames, "OUT", height=100, padx=10, pady=10)

# Last seen stack-pointer value; None until the first simulator step.
previous_sp = None
def update_gui():
    """Advance the simulator by one step and refresh every widget.

    Pulls the next state dict from the global ``sim_iter`` generator, updates
    the instruction/register/flag labels, mirrors stack pushes and pops in the
    stack listbox (inferred from SP movements), redraws the RAM dump, and
    appends any new OUT value.  Re-schedules itself every 200 ms until the
    generator is exhausted.
    """
    global previous_sp, sim_iter
    # Keep the try body minimal: only next() is expected to raise.
    try:
        state = next(sim_iter)
    except StopIteration:
        print("Simulation terminée")
        return
    # Current instruction and its cycle cost
    single_instr.config(
        text=state["instr"] + " Cycle(s): " + str(state["cycles_added"])
    )
    label_registre_1.config(text=f"{state['regs'][0]:02X}")
    label_registre_2.config(text=f"{state['regs'][1]:02X}")
    label_registre_3.config(text=f"{state['regs'][2]:02X}")
    label_registre_4.config(text=f"{state['regs'][3]:02X}")
    label_eq.config(text=f"{state['eq']}")
    label_lt.config(text=f"{state['lt']}")
    label_pc.config(text=f"{state['pc']:02X}")
    label_sp.config(text=f"{state['sp']:02X}")
    # Track SP movement: a decrement means a push, an increment means a pop.
    if previous_sp is not None:
        if state["sp"] < previous_sp:
            mylist.insert(tk.END, state["sp"])
        elif state["sp"] > previous_sp and mylist.size() > 0:
            mylist.delete(tk.END)  # pop: drop the most recent entry
    previous_sp = state["sp"]
    # BUGFIX: redraw the RAM dump from scratch each tick.  The old code only
    # inserted, so the listbox grew by 32 rows every 200 ms.
    myRamList.delete(0, tk.END)
    ram = state["ram"]
    for addr in range(0, 256, 8):
        chunk = ram[addr:addr + 8]
        hex_values = " ".join(f"{b:02X}" for b in chunk)
        myRamList.insert(tk.END, f"{addr:02X}: {hex_values}")
    if state["out"] is not None:
        myOutList.insert(tk.END, state["out"])
    # Schedule the next simulator step.
    root.after(200, update_gui)
#Frame RAM
# ---------------------------------------------------------
# LECTURE D'UN FICHIER .bin ET LANCEMENT
# ---------------------------------------------------------
# ---------------------------------------------------------
# READ A .bin FILE AND START THE SIMULATION
# ---------------------------------------------------------
if __name__ == "__main__":
    # The simulator expects the path of a compiled .bin program as the
    # first command-line argument.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        print("filename: " + filename)
        with open(filename, "rb") as f:
            program = f.read()
        sim = e3.Simulator(program)
        # sim.run() is a generator yielding one state dict per instruction.
        sim_iter = sim.run()
        update_gui()
        root.mainloop()
    else:
        print("Needs *.bin as parameter")

Binary file not shown.

View File

@ -0,0 +1 @@
<EFBFBD>˙a“z<E2809C>BX0“9€

BIN
test_bin_epreuve3/db1.bin Normal file

Binary file not shown.

View File

@ -0,0 +1 @@
abcdefghijklmnopqrstuvwxyz€

View File

@ -0,0 +1 @@
ABCDEFGHIJKLMNOPQRSTUVWXYZ€

View File

@ -0,0 +1 @@
0123456789€

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
@@@@ €€

Binary file not shown.

View File

@ -0,0 +1 @@
PQRSTUVWXYZ[\]^_€

View File

@ -0,0 +1 @@
<EFBFBD><01><><EFBFBD>a<EFBFBD>z<EFBFBD>B<EFBFBD>X<EFBFBD>0<EFBFBD>9<EFBFBD>

View File

@ -0,0 +1 @@
πρςσ€

View File

@ -0,0 +1 @@
`abc<62>

Binary file not shown.

View File

@ -0,0 +1 @@
ΠΡ<EFBFBD>ΣΤΥΦΧΨΩΪΫάέήί€

View File

@ -0,0 +1 @@
˙azBX09€

BIN
test_bin_epreuve3/tim.bin Normal file

Binary file not shown.