Compare commits

...

57 Commits

Author SHA1 Message Date
wangjiyu 392ec2416b debug 2025-05-04 11:18:59 +08:00
wangjiyu a9d531c6fc debug 2025-05-04 11:14:22 +08:00
wangjiyu 00bd8c4c64 debug 2025-05-04 11:03:44 +08:00
wangjiyu e70e728b98 debug 2025-05-04 10:58:59 +08:00
wangjiyu 7ed30e6edc debug 2025-05-04 10:48:24 +08:00
wangjiyu 0843f56fa9 debug 2025-05-04 10:27:31 +08:00
wangjiyu 3944db37fd debug 2025-05-04 10:05:49 +08:00
wangjiyu 396f808877 debug 2025-05-04 10:01:24 +08:00
wangjiyu 98c7b8e7ad debug 2025-05-04 09:47:49 +08:00
wangjiyu 6e6e7f4337 debug 2025-05-03 23:32:21 +08:00
wangjiyu a4713448d7 debug 2025-05-03 23:20:21 +08:00
wangjiyu f5840b46d2 debug 2025-05-03 23:03:33 +08:00
wangjiyu b6a33bcfbd debug 2025-05-03 23:02:06 +08:00
wangjiyu 0228402b5b debug 2025-05-03 22:58:13 +08:00
wangjiyu 39c474fa22 debug 2025-05-03 22:55:10 +08:00
wangjiyu ceff0bd694 use pybind 2025-05-03 21:51:02 +08:00
wangjiyu f688c690aa add demo:play audio in python after migu rtc 2025-04-24 16:38:10 +08:00
wangjiyu 95969ffcc8 add demo:send audio to app 2025-04-24 10:31:26 +08:00
wangjiyu 756e782ef1 debug 2025-04-16 19:46:35 +08:00
wangjiyu dcb81f9915 debug 2025-04-16 19:43:18 +08:00
wangjiyu b34a1956ee debug 2025-04-16 19:40:25 +08:00
wangjiyu f00ed230a0 debug 2025-04-16 19:38:34 +08:00
wangjiyu af0a29f3dc debug 2025-04-16 19:30:53 +08:00
wangjiyu d910833a9c debug 2025-04-16 19:09:51 +08:00
wangjiyu 3157894c1a debug 2025-04-16 19:07:53 +08:00
wangjiyu ad48b2e55a debug 2025-04-16 17:53:08 +08:00
wangjiyu 0bee448294 debug 2025-04-16 17:46:00 +08:00
wangjiyu df30aa53c9 debug 2025-04-16 17:44:03 +08:00
wangjiyu 815956c01c debug 2025-04-16 17:29:00 +08:00
wangjiyu 3fbe05cf92 debug 2025-04-16 17:24:57 +08:00
wangjiyu 5c5228c3b2 debug 2025-04-16 17:04:32 +08:00
wangjiyu 6cb5d60eb5 debug 2025-04-16 16:55:10 +08:00
wangjiyu a6e6f7fdde debug 2025-04-16 16:29:16 +08:00
wangjiyu e7fc174bc7 debug 2025-04-16 16:19:22 +08:00
wangjiyu 9ef5758116 debug 2025-04-16 16:18:24 +08:00
wangjiyu 29f14acd5f debug 2025-04-16 16:04:25 +08:00
wangjiyu 7c11d681f9 debug 2025-04-16 15:59:45 +08:00
wangjiyu e089162220 debug 2025-04-16 15:44:43 +08:00
wangjiyu fe9c640c5b debug 2025-04-16 15:40:46 +08:00
wangjiyu b04c665ac6 debug 2025-04-16 15:31:43 +08:00
wangjiyu 0042f506c2 debug 2025-04-16 15:30:55 +08:00
wangjiyu 088f373770 debug 2025-04-16 11:34:51 +08:00
wangjiyu 6f24ab5105 debug 2025-04-16 11:26:13 +08:00
wangjiyu 847aad603e debug 2025-04-16 11:23:04 +08:00
wangjiyu f3fbcf94f7 debug 2025-04-16 11:18:33 +08:00
wangjiyu 80fe99160f debug 2025-04-16 11:15:19 +08:00
wangjiyu c668b06ef8 debug 2025-04-16 11:05:29 +08:00
wangjiyu a98aa02dce debug 2025-04-16 10:53:36 +08:00
wangjiyu c1130200b8 debug 2025-04-16 10:47:29 +08:00
wangjiyu c58d016357 debug 2025-04-16 10:45:48 +08:00
wangjiyu 8a2389bf68 debug 2025-04-16 10:39:45 +08:00
wangjiyu b9a981b57a debug 2025-04-15 23:43:13 +08:00
wangjiyu f53fc89531 debug 2025-04-15 23:36:59 +08:00
wangjiyu c8c3a25fe9 debug 2025-04-15 23:31:07 +08:00
wangjiyu 418654abf4 debug 2025-04-15 23:23:35 +08:00
wangjiyu 715687e85c debug 2025-04-15 23:21:48 +08:00
wangjiyu 6936dfd292 debug 2025-04-15 23:13:54 +08:00
9 changed files with 341 additions and 699 deletions

View File

@@ -1,17 +1,3 @@
g++ -shared -fPIC \
-I/usr/include/python3.10 -I/usr/include/python3.10/numpy -I./include \
-L./lib -L/usr/lib/x86_64-linux-gnu \
-DRTC_NUMPY_IMPL \
rtc_plugins.cpp util/RTCContext.cpp \
-lMRTCEngine -lboost_python310 -lboost_numpy310 -lpython3.10 \
-Wl,-rpath='$ORIGIN/lib' \
-o rtc_plugins.so
#g++ -shared -fPIC \
# -I/usr/include/python3.10 -I/usr/include/python3.10/numpy -I./include \
# -I/usr/include/python3.10 -I/usr/include/numpy \
# -Wl,-rpath='$ORIGIN/lib' \
# -lboost_python310 -lpython3.10 \
# -lMRTCEngine -lboost_python310 -lpython3.10 \
# -L$(python3 -c "import numpy; print(numpy.get_include())") \
# rtc_plugins.cpp util/RTCContext.cpp \
# -o rtc_plugins.so
#g++ -shared -fPIC -std=c++17 $(python3 -m pybind11 --includes) -I./include -L./lib -L/usr/lib/x86_64-linux-gnu -lMRTCEngine -lpython3.10 -Wl,-rpath='$ORIGIN/lib' -o rtc_plugins$(python3-config --extension-suffix) rtc_plugins.cpp util/RTCContext.cpp
g++ -shared -fPIC -std=c++17 $(python3 -m pybind11 --includes) -I./include -L./lib -L/usr/lib/x86_64-linux-gnu -Wl,--no-as-needed -lMRTCEngine -Wl,--as-needed -lpython3.10 -Wl,-rpath='$ORIGIN/lib' -o rtc_plugins$(python3-config --extension-suffix) rtc_plugins.cpp util/RTCContext.cpp
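The final g++ line above is the one actually used: it pulls in the pybind11 include flags and keeps libMRTCEngine linked via -Wl,--no-as-needed. A minimal smoke test of the resulting module might look like the sketch below (an assumption-laden example, not part of this change: it assumes the built rtc_plugins*.so sits in the working directory and uses the init/initSend/sendCustomAudioData bindings defined in rtc_plugins.cpp below; the IDs and channel index are placeholders).

# smoke_test.py -- hypothetical quick check that the extension loads and binds
import numpy as np
import rtc_plugins

if rtc_plugins.init("srcUser", "srcDisplayName", "srcRoom") != 0:
    raise SystemExit("init failed")
if rtc_plugins.initSend("srcRoom", "srcRoom", 47, 1) != 0:
    raise SystemExit("initSend failed")

# one second of a 440 Hz tone as int16, matching the expected sample format
tone = (np.sin(2 * np.pi * 440 * np.arange(16000) / 16000) * 3000).astype(np.int16)
print("sendCustomAudioData returned",
      rtc_plugins.sendCustomAudioData(47, tone, 16000, 1, len(tone)))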

View File

@@ -1,24 +1,11 @@
// rtc_plugins.cpp
#include "util/RTCContext.h"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h> // pybind11 NumPy support
#define IMPLEMENT_NUMPY_API // marks this as the implementation file
#include "util/numpyStub.h"
namespace py = pybind11;
#include "util/RTCContext.h"
// expose the conversion interface
void** get_numpy_api() {
return (void**)RTC_PLUGINS_ARRAY_API;
}
namespace py = boost::python;
int init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId, boost::python::object callback) {
if (!PyArray_API) {
std::cout << "PyArray_API is null in outer init" << std::endl;
} else {
std::cout << "PyArray_API is not null in outer init" << std::endl;
}
RTCContext::instance().setPyCallback(callback);
int init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId) {
bool res = RTCContext::instance().init(selfUserId, selfDisplayName, selfRoomId);
if (res) {
return 0;
@@ -27,11 +14,6 @@ int init(const char* selfUserId, const char* selfDisplayName, const char* selfRo
}
}
int initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex) {
if (!PyArray_API) {
std::cout << "PyArray_API is null in outer initRecv" << std::endl;
} else {
std::cout << "PyArray_API is not null in outer initRecv" << std::endl;
}
bool res = RTCContext::instance().initRecv(destRoomId, srcUserId, destChannelIndex);
if (res) {
return 0;
@@ -39,164 +21,92 @@ int initRecv(const char* destRoomId, const char* srcUserId, const int16_t destCh
return -1;
}
}
int initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex) {
bool res = RTCContext::instance().initSend(srcRoomId, destRoomId, destChannelIndex);
int initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, const int16_t channelNum) {
bool res = RTCContext::instance().initSend(srcRoomId, destRoomId, destChannelIndex, channelNum);
if (res) {
return 0;
} else {
return -1;
}
}
py::object create_int16_array() {
// 1. Define the array dimensions: 1-D, length 4
npy_intp dims[1] = {4};
// 2. Create a native C array (int16_t data)
int16_t data[4] = {1, 2, -3, 4}; // sample data
// 3. Create a PyObject* through the NumPy C API
PyObject* py_array = PyArray_SimpleNewFromData(
1, // number of dimensions
dims, // size of each dimension
NPY_INT16, // data type (np.int16)
data // data pointer
// NumPy data exchange (key change)
py::array_t<int16_t> getNumpyData() {
return py::array_t<int16_t>(
RTCContext::instance().getNumpyData() // assumed to already be a NumPy array
);
if (!py_array) {
throw std::runtime_error("Failed to create NumPy array");
}
// 4. Convert to py::object (reference count managed automatically)
return py::object(py::handle<>(py_array));
}
int sendCustomAudioData(int16_t destChannelIndex, py::object pD,
int sendCustomAudioData(int16_t destChannelIndex, py::array_t<int16_t> inputArray,
int32_t sampleRate, uint64_t channelNum, uint64_t dataLen) {
try {
// force conversion to a contiguous int16 array
PyObject* py_array = PyArray_FROM_OTF(
pD.ptr(),
NPY_INT16,
NPY_ARRAY_IN_ARRAY | NPY_ARRAY_FORCECAST
);
if (!py_array) {
throw std::runtime_error("Failed to convert input to int16 array");
}
//py::gil_scoped_release release;
// fix: use brace initialization
py::object arr{py::handle<>(py_array)};
// check the data length
PyArrayObject* npArray = reinterpret_cast<PyArrayObject*>(arr.ptr());
if (PyArray_SIZE(npArray) != static_cast<npy_intp>(dataLen)) {
Py_DECREF(py_array);
throw std::runtime_error("Array length does not match dataLen");
}
// process the data...
void* dataPtr = PyArray_DATA(npArray);
int ret = RTCContext::instance().sendCustomAudioData(
destChannelIndex, dataPtr, sampleRate, channelNum, dataLen
);
Py_DECREF(py_array); // release the temporary array
return ret;
} catch (...) {
PyErr_SetString(PyExc_RuntimeError, "Invalid audio data");
return -1;
py::array_t<int16_t> contiguous = py::array::ensure(inputArray);
if (!contiguous) throw py::value_error("Array conversion failed");
auto buf = contiguous.request();
if (buf.size != dataLen) {
throw py::value_error("Array length does not match dataLen");
}
std::vector<int16_t> localCopy(static_cast<int16_t*>(buf.ptr),
static_cast<int16_t*>(buf.ptr) + buf.size);
return RTCContext::instance().sendCustomAudioData(
destChannelIndex, localCopy.data(), sampleRate, channelNum, dataLen
);
//return RTCContext::instance().sendCustomAudioData(
// destChannelIndex, buf.ptr, sampleRate, channelNum, dataLen
//);
}
py::list getListData() {
return RTCContext::instance().getListData();
}
int getSize() {
return RTCContext::instance().getSize();
}
RetAudioFrame getData() {
return RTCContext::instance().getData();
}
int16_t getDataCount() {
return RTCContext::instance().getDataCount();
}
/*
int sendCustomAudioData(const int16_t destChannelIndex, py::object pyData, int32_t sampleRate, uint64_t channelNum,
uint64_t dataLen) {
try {
//py::object pyData = create_int16_array();
std::cout << "step 1" << std::endl;
// 1. Validate the input
if (pyData.ptr() == nullptr) {
throw std::runtime_error("Input data is NULL");
}
PYBIND11_MODULE(rtc_plugins, m) {
// optional: expose the RetAudioFrame class (requires extra binding)
py::class_<RetAudioFrame>(m, "RetAudioFrame")
// define the data property (getter and setter)
.def_property("data",
// getter: return a NumPy array
[](RetAudioFrame& self) {
return py::array_t<short>(
{self.dataCount}, // array shape
{sizeof(short)}, // stride
self.data.get() // data pointer (raw memory)
);
},
// setter: copy data from a NumPy array
[](RetAudioFrame& self, py::array_t<short> arr) {
auto buf = arr.request(); // get the array info
self.data.reset(new short[buf.size]); // reallocate memory
std::memcpy(
self.data.get(), // destination pointer
buf.ptr, // source data pointer
buf.size * sizeof(short) // data size in bytes
);
self.dataCount = buf.size; // update the data length
}
)
.def_readwrite("dataCount", &RetAudioFrame::dataCount)
.def_readwrite("sampleRate", &RetAudioFrame::sampleRate)
.def_readwrite("numChannels", &RetAudioFrame::numChannels)
.def_readwrite("channelIndex", &RetAudioFrame::channelIndex);
m.def("init", &init);
m.def("initRecv", &initRecv);
m.def("initSend", &initSend);
m.def("sendCustomAudioData", &sendCustomAudioData);
m.def("getSize", &getSize);
m.def("getData", &getData);
m.def("getNumpyData", &getNumpyData);
m.def("getListData", &getListData);
m.def("getDataCount", &getDataCount);
std::cout << "step 2" << std::endl;
std::cout << "pyData ptr is:" << pyData.ptr() << std::endl;
if (!pyData.ptr() || !Py_IsInitialized() || !PyObject_TypeCheck(pyData.ptr(), &PyBaseObject_Type)) {
throw std::runtime_error("Invalid Python object");
}
std::cout << "step 2" << std::endl;
// 2. Check whether it is a numpy array
if (!PyArray_Check(pyData.ptr())) {
std::cout << "input is not a numpy array" << std::endl;
throw std::runtime_error("Input is not a numpy array");
}
std::cout << "step 3" << std::endl;
// 3. Cast to PyArrayObject
PyArrayObject* npArray = reinterpret_cast<PyArrayObject*>(pyData.ptr());
std::cout << "step 4" << std::endl;
// 4. Check that the dtype is int16
if (PyArray_TYPE(npArray) != NPY_INT16) {
throw std::runtime_error("Array must be of type int16 (np.int16)");
}
std::cout << "step 5" << std::endl;
// 5. Check that the data is contiguous
if (!PyArray_ISCONTIGUOUS(npArray)) {
throw std::runtime_error("Array must be contiguous in memory");
}
std::cout << "step 6" << std::endl;
// 6. Get the data pointer
void* dataPtr = PyArray_DATA(npArray);
if (dataPtr == nullptr) {
throw std::runtime_error("Invalid data pointer");
}
std::cout << "step 7" << std::endl;
return RTCContext::instance().sendCustomAudioData(destChannelIndex, dataPtr, sampleRate, channelNum, dataLen);
} catch (const std::exception& e) {
std::cout << "error:" << e.what() << std::endl;
return -1;
}
}
*/
void init_numpy() {
// call the low-level function directly to work around the macro issue
if (_import_array() < 0) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
throw std::runtime_error("NumPy initialization failed");
}
std::cout << "NumPy API addr: " << PyArray_API << std::endl;
}
BOOST_PYTHON_MODULE(rtc_plugins) {
try {
init_numpy();
void** numpyApi = (void**)PyArray_API;
if (!numpyApi || !numpyApi[93]) { // 93 is the offset of PyArray_SimpleNew
std::cout << "NumPy API corrupt! Key functions missing." << std::endl;
PyErr_Print();
throw std::runtime_error("Invalid NumPy API state");
} else {
RTCContext::instance().setNumpyApi(numpyApi);
std::cout << "set numpyApi succ:" << numpyApi[93] << std::endl;
}
/*
if (!PyArray_API) {
std::cout << "PyArray_API is null" << std::endl;
} else {
std::cout << "PyArray_API is not null" << std::endl;
}
*/
py::def("init", &init);
py::def("initRecv", &initRecv);
py::def("initSend", &initSend);
py::def("sendCustomAudioData", &sendCustomAudioData);
} catch (...) {
PyErr_SetString(PyExc_RuntimeError, "Module initialization failed");
}
}
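The PYBIND11_MODULE block above also exposes RetAudioFrame with a NumPy-backed data property. A hedged consumption sketch (assuming the receive path has already been set up as in the test scripts that follow, and that getData() returns an empty frame when the ring buffer is empty):

# hypothetical consumer of the RetAudioFrame binding
import numpy as np
import rtc_plugins

frame = rtc_plugins.getData()          # RetAudioFrame bound via py::class_
if frame.dataCount > 0:
    samples = np.asarray(frame.data)   # the data getter yields an int16 NumPy array
    print(frame.sampleRate, frame.numChannels, frame.channelIndex, samples[:5])
else:
    print("ring buffer empty")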

45
test_audio_r.py Normal file
View File

@@ -0,0 +1,45 @@
import rtc_plugins
import time
import sounddevice as sd
import numpy as np
import mmap
import os
from ctypes import c_int16
import struct
srcUserId = "srcUser12"
destUserId = "destUser12"
srcDisplayName = "srcDisplayName12"
destDisplayName = "destDisplayName12"
srcRoomId = "srcRoom12"
#destRoomId = "destRoomId12"
destRoomId = srcRoomId
srcChannelIndex = 46
destChannelIndex = 47
def my_callback_r(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
print(f"my_callback_r, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
print(f"data:{shmName}")
print("after my_callback_r")
ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback_r)
if ret != 0:
print(f"init fail, ret:{ret}")
exit(1)
ret = rtc_plugins.initRecv(destRoomId, srcUserId, destChannelIndex)
if ret != 0:
print(f"initRecv fail, ret:{ret}")
exit(1)
#ret = rtc_plugins.initSend(destRoomId, srcRoomId, srcChannelIndex, 1)
#if ret != 0:
# print(f"initSend fail, ret:{ret}")
# exit(1)
sampleRate = 16000
while True:
frame = rtc_plugins.getListData()
sd.play(frame, sampleRate)
sd.wait()
print(f"get frame:{frame}")
time.sleep(0.005)

51
test_audio_s.py Normal file
View File

@@ -0,0 +1,51 @@
import rtc_plugins
import time
import numpy as np
from scipy.io import wavfile
srcUserId = "srcUser12"
destUserId = "destUser12"
srcDisplayName = "srcDisplayName12"
destDisplayName = "destDisplayName12"
srcRoomId = "srcRoom12"
#destRoomId = "destRoomId12"
destRoomId = srcRoomId
srcChannelIndex = 46
destChannelIndex = 47
ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId)
if ret != 0:
print(f"init fail, ret:{ret}")
exit(1)
ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
if ret != 0:
print(f"initSend fail, ret:{ret}")
exit(1)
#audioData = np.array([0, 1, -1, 0], dtype=np.int16)
sampleRate, audioData = wavfile.read("xusample1.wav")
print(f"sampleRate:{sampleRate} HZ")
print(f"shape:{audioData.shape}")
print(f"type:{audioData.dtype}")
if audioData.dtype != np.int16:
audioData = (audioData * 32767).astype(np.int16)
ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, sampleRate, 1, len(audioData))
if ret != 0:
print(f"send fail, ret:{ret}")
print("send succ")
ret = rtc_plugins.initRecv(srcRoomId, srcUserId, srcChannelIndex)
if ret != 0:
print(f"initRecv fail, ret:{ret}")
exit(1)
for i in range(100):
ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, sampleRate, 1, len(audioData))
if ret != 0:
print(f"send fail, ret:{ret}")
#size = rtc_plugins.getSize()
#print(f"data size:{size}")
#frame = rtc_plugins.getListData()
#print(f"get frame:{frame}")
#dataCount = rtc_plugins.getDataCount()
#print(f"data count:{dataCount}")
time.sleep(3)
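test_audio_s.py pushes the whole WAV file in a single sendCustomAudioData call per iteration. As an illustration only (not part of this change), the same binding could instead be fed fixed-size frames at roughly real-time pace, which is closer to a live-audio producer; this sketch reuses the variables defined in the script above:

# hypothetical chunked sender built on the same bindings and variables
frameLen = sampleRate // 50                      # 20 ms per frame
for start in range(0, len(audioData), frameLen):
    chunk = np.ascontiguousarray(audioData[start:start + frameLen])
    ret = rtc_plugins.sendCustomAudioData(destChannelIndex, chunk, sampleRate, 1, len(chunk))
    if ret != 0:
        print(f"send fail, ret:{ret}")
    time.sleep(frameLen / sampleRate)            # pace at roughly real time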

View File

@@ -4,6 +4,7 @@ import numpy as np
import mmap
import os
from ctypes import c_int16
import struct
srcUserId = "srcUser12"
destUserId = "destUser12"
@@ -15,26 +16,12 @@ srcRoomId = "srcRoom12"
destRoomId = srcRoomId
srcChannelIndex = 46
destChannelIndex = 47
def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
print(f"dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
def my_callback_r(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
print(f"my_callback_r, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
print(f"data:{shmName}")
#fd = os.open(shmName, os.O_RDONLY)
#if fd == -1:
# raise RuntimeError(f"无法打开共享内存 {shmName}")
print("after my_callback_r")
## 2. 创建内存映射
#shm = mmap.mmap(fd, dataSize, mmap.MAP_SHARED, mmap.PROT_READ)
## 3. 转换为numpy数组 (零拷贝)
#audio_data = np.frombuffer(shm, dtype=c_int16, count=dataCount)
#print(f" 前5个采样点: {audio_data[:5]}")
#audioData = np.array([0, 1, -1, 0], dtype=np.int16)
#ret = rtc_plugins.sendCustomAudioData(srcChannelIndex, audioData, 48000, 1, len(audioData))
#if ret != 0:
# print(f"resend fail, ret:{ret}")
#else:
# print("resend succ")
ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback)
ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback_r)
if ret != 0:
print(f"init fail, ret:{ret}")
exit(1)
@@ -42,7 +29,7 @@ ret = rtc_plugins.initRecv(destRoomId, srcUserId, destChannelIndex)
if ret != 0:
print(f"initRecv fail, ret:{ret}")
exit(1)
ret = rtc_plugins.initSend(destRoomId, srcRoomId, srcChannelIndex)
ret = rtc_plugins.initSend(destRoomId, srcRoomId, srcChannelIndex, 1)
if ret != 0:
print(f"initSend fail, ret:{ret}")
exit(1)
@@ -55,4 +42,11 @@ while True:
print(f"resend fail, ret:{ret}")
else:
print("resend succ")
time.sleep(30)
size = rtc_plugins.getSize()
print(f"data size:{size}")
#frame = rtc_plugins.getNumpyData()
frame = rtc_plugins.getListData()
print(f"get frame:{frame}")
dataCount = rtc_plugins.getDataCount()
print(f"data count:{dataCount}")
time.sleep(0.005)

View File

@@ -12,13 +12,15 @@ srcRoomId = "srcRoom12"
destRoomId = srcRoomId
srcChannelIndex = 46
destChannelIndex = 47
def my_callback(npData, dataCount, sampleRate, numChannels, channelIndex):
print(f"dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
print(f"my_callback, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
print(f"data:{shmName}")
print("after my_callback_r")
ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
if ret != 0:
print(f"init fail, ret:{ret}")
exit(1)
ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex)
ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
if ret != 0:
print(f"initSend fail, ret:{ret}")
exit(1)
@@ -34,4 +36,11 @@ for i in range(100):
ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, 48000, 1, len(audioData))
if ret != 0:
print(f"send fail, ret:{ret}")
time.sleep(30)
size = rtc_plugins.getSize()
print(f"data size:{size}")
frame = rtc_plugins.getListData()
print(f"get frame:{frame}")
dataCount = rtc_plugins.getDataCount()
print(f"data count:{dataCount}")
time.sleep(3)

View File

@@ -7,10 +7,30 @@ void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& room
isOnRoom_ = true;
}
void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << "," << consumerInfo.displayName << "," << consumerInfo.channelIndex;
//std::cout << "RTCContext::onConsumer()" << std::endl;
std::cout << "RTCContext::onConsumer():msgId:" << msgId << ", roomId:" << consumerInfo.roomId << ", displayName:"
<< consumerInfo.displayName << ", channelIndex" << (int)consumerInfo.channelIndex;
std::lock_guard<std::mutex> lock(mutex_);
isOnConsumer_ = true;
//std::cout << "RTCContext::onConsumer()" << std::endl;
std::cout << "registerSoundLevelListener" << std::endl;
int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, roomId,
peerId, consumerInfo.channelIndex, this);
if (0 != ret1)
{
std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1;
return;
}
std::cout << "muteAudio" << std::endl;
int16_t ret2 = rtcEngine_->muteAudio(roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
false, consumerInfo.channelIndex);
if (0 != ret2)
{
std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2;
return;
}
std::cout << "init recv succ" << std::endl;
}
void RTCContext::onRender(const char* roomId, const char* peerId,
RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
@@ -20,11 +40,11 @@ void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
std::lock_guard<std::mutex> lock(mutex_);
if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
std::cout << "receive join multi room callback" << msgId;
std::cout << "receive join multi room callback" << msgId << std::endl;
isJoinMultiRoom_ = true;
}
std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg;
std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
//std::cout << "RTCContext::onCallBackMessage()" << std::endl;
}
void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
@@ -36,408 +56,26 @@ void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint
{
std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}
/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
mrtc::MRTCAudioFrame& audioFrame,
mrtc::MRTCAudioSourceType audioSourceType)
{
namespace py = boost::python;
PyGILState_STATE gstate = PyGILState_Ensure();
void printTimestamp() {
// get the current system time point
auto now = std::chrono::system_clock::now();
try {
std::cout << "-----------------------------------" << std::endl;
std::cout << "dataCount:" << audioFrame.dataCount << std::endl;
std::cout << "dataCount value: " << audioFrame.dataCount
<< " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
// convert to a timestamp (seconds + milliseconds)
auto timestamp = std::chrono::duration_cast<std::chrono::seconds>(
now.time_since_epoch()).count();
auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(
now.time_since_epoch()).count() % 1000;
std::cout << "onAudioProcess, numpyApi_:" << numpyApi_[93] << std::endl;
if (!numpyApi_ || !numpyApi_[93]) { // 93 is the offset of PyArray_SimpleNew
std::cout << "numpyApi_ is null in onAudioProcess" << std::endl;
} else {
std::cout << "numpyApi_ is not null in onAudioProcess:" << numpyApi_[93] << std::endl;
}
//auto numpyApi = RTCContext::numpy_api();
std::cout << "step1" << std::endl;
if (!numpyApi_) {
PyGILState_Release(gstate);
throw std::runtime_error("NumPy C-API not initialized. Call import_array() in module init");
}
std::cout << "step2" << std::endl;
using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
void* func_ptr = numpyApi_[93];
std::cout << "Raw function pointer: " << func_ptr << std::endl;
auto ptmp = (PyObject*(*)(int, npy_intp*, int))numpyApi_[93];
std::cout << "ptmp is:" << ptmp << std::endl;
std::cout << "Pointer sizes:\n"
<< "void*: " << sizeof(void*) << "\n"
<< "FunctionPtr: " << sizeof(PyObject*(*)(int, npy_intp*, int)) << std::endl;
// 2. Use memcpy to avoid compiler optimization issues
PyArray_SimpleNew_t PyArray_SimpleNew;
static_assert(sizeof(func_ptr) == sizeof(PyArray_SimpleNew),
"Pointer size mismatch");
std::cout << "step3" << std::endl;
memcpy(&PyArray_SimpleNew, &func_ptr, sizeof(func_ptr));
//auto PyArray_SimpleNew = reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
std::cout << "step4, PyArray_SimpleNew:" << PyArray_SimpleNew << std::endl;
// 3. Strictly validate the input data
if (!audioFrame.data || audioFrame.dataCount <= 0) {
PyGILState_Release(gstate);
throw std::invalid_argument("Invalid audio frame data");
}
std::cout << "step5" << std::endl;
// 4. Safely create the dims array (with bounds checking)
if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
PyGILState_Release(gstate);
throw std::overflow_error("Audio frame size exceeds maximum limit");
}
std::cout << "step6" << std::endl;
npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
std::cout << "step7" << std::endl;
// 5. Create the NumPy array (with memory protection)
PyObject* pyArray = nullptr;
pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
std::cout << "step8" << std::endl;
if (!pyArray) {
PyGILState_Release(gstate);
throw std::bad_alloc();
}
std::cout << "step9" << std::endl;
// 6. Safely copy the data (with alignment check)
if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
Py_DECREF(pyArray);
PyGILState_Release(gstate);
throw std::runtime_error("Unaligned audio data pointer");
}
std::cout << "step10" << std::endl;
std::memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(pyArray)),
audioFrame.data,
audioFrame.dataCount * sizeof(int16_t));
std::cout << "step11" << std::endl;
// 7. Invoke the callback (with refcount protection)
if (!pyCallback_.is_none()) {
try {
pyCallback_(
py::handle<>(pyArray), // reference managed automatically
audioFrame.dataCount,
audioFrame.sampleRate,
audioFrame.numChannels,
audioFrame.channelIndex
);
} catch (...) {
Py_DECREF(pyArray);
throw; // rethrow the exception
}
}
std::cout << "step12" << std::endl;
// 8. Release resources
Py_DECREF(pyArray);
std::cout << "step13" << std::endl;
PyGILState_Release(gstate);
std::cout << "step14" << std::endl;
} catch (const std::exception& e) {
std::cerr << "Audio process error: " << e.what() << std::endl;
PyErr_Print();
}
exit(0);
// convert to local time (human-readable format)
std::time_t time = std::chrono::system_clock::to_time_t(now);
std::cout << "Timestamp: " << timestamp << "." << milliseconds << std::endl;
}
*/
/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
mrtc::MRTCAudioFrame& audioFrame,
mrtc::MRTCAudioSourceType audioSourceType)
{
namespace py = boost::python;
std::cout << "=== 开始音频处理 ===" << std::endl;
// 1. 获取GIL
std::cout << "[1] 获取GIL锁..." << std::endl;
PyGILState_STATE gstate = PyGILState_Ensure();
try {
// 2. Validate input parameters
std::cout << "[2] Checking input parameters..." << std::endl;
std::cout << " dataCount: " << audioFrame.dataCount
<< " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
if (!audioFrame.data || audioFrame.dataCount <= 0) {
std::cout << "[ERROR] 无效音频数据指针或长度" << std::endl;
throw std::invalid_argument("Invalid audio frame data");
}
if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
std::cout << "[ERROR] 数据长度超过最大值" << std::endl;
throw std::overflow_error("Audio frame size exceeds maximum limit");
}
// 3. Prepare the array dimensions
std::cout << "[3] Preparing array dimensions..." << std::endl;
npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
std::cout << " dimensions set: [" << dims[0] << "]" << std::endl;
// 4. Check the NumPy API state
std::cout << "[4] Checking NumPy API state..." << std::endl;
std::cout << " numpyApi_ address: " << numpyApi_ << std::endl;
if (!numpyApi_) {
throw std::runtime_error("NumPy C-API not initialized");
}
// 5. Obtain the PyArray_SimpleNew function
std::cout << "[5] Fetching PyArray_SimpleNew..." << std::endl;
using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
PyArray_SimpleNew_t PyArray_SimpleNew =
reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
std::cout << " 函数地址: " << (void*)PyArray_SimpleNew << std::endl;
std::cout << "[5.1] 验证函数指针..." << std::endl;
void* func_ptr = numpyApi_[93];
if (reinterpret_cast<uintptr_t>(func_ptr) < 0x1000) { // 检查是否为合法地址
std::cerr << "非法函数指针: " << func_ptr << std::endl;
throw std::runtime_error("Invalid PyArray_SimpleNew pointer");
}
// 6. Create the NumPy array
std::cout << "[6] Creating NumPy array..." << std::endl;
PyObject* pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
std::cout << " array address: " << pyArray << std::endl;
if (!pyArray) {
throw std::bad_alloc();
}
// 7. Check memory alignment
std::cout << "[7] Checking memory alignment..." << std::endl;
std::cout << " audio data address: " << (void*)audioFrame.data
<< " alignment requirement: " << alignof(int16_t) << std::endl;
if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
Py_DECREF(pyArray);
throw std::runtime_error("Unaligned audio data pointer");
}
// 8. Copy the data
std::cout << "[8] Copying audio data..." << std::endl;
std::cout << " destination address: " << PyArray_DATA((PyArrayObject*)pyArray)
<< " bytes: " << audioFrame.dataCount * sizeof(int16_t) << std::endl;
std::memcpy(PyArray_DATA((PyArrayObject*)pyArray),
audioFrame.data,
audioFrame.dataCount * sizeof(int16_t));
// 9. Invoke the callback
if (!pyCallback_.is_none()) {
std::cout << "[9] About to invoke the Python callback..." << std::endl;
try {
pyCallback_(
py::handle<>(pyArray),
audioFrame.dataCount,
audioFrame.sampleRate,
audioFrame.numChannels,
audioFrame.channelIndex
);
std::cout << " 回调执行成功" << std::endl;
} catch (...) {
std::cout << "[ERROR] 回调执行失败" << std::endl;
Py_DECREF(pyArray);
throw;
}
} else {
std::cout << "[9] 无回调函数设置" << std::endl;
}
// 10. Release resources
std::cout << "[10] Releasing resources..." << std::endl;
Py_DECREF(pyArray);
std::cout << "[11] Releasing the GIL..." << std::endl;
PyGILState_Release(gstate);
std::cout << "=== Audio processing finished ===" << std::endl;
} catch (const std::exception& e) {
std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl;
PyGILState_Release(gstate);
PyErr_Print();
std::cerr << "Audio process error: " << e.what() << std::endl;
}
}
*/
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
mrtc::MRTCAudioFrame& audioFrame,
mrtc::MRTCAudioSourceType audioSourceType)
{
namespace py = boost::python;
std::cout << "=== 开始音频处理(共享内存版) ===" << std::endl;
std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << "," <<
audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;
// 1. 获取GIL
std::cout << "[1] 获取GIL锁..." << std::endl;
PyGILState_STATE gstate = PyGILState_Ensure();
try {
// 2. Validate input parameters
std::cout << "[2] Checking input parameters..." << std::endl;
std::cout << " dataCount: " << audioFrame.dataCount
<< " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
if (!audioFrame.data || audioFrame.dataCount <= 0) {
std::cout << "[ERROR] Invalid audio data pointer or length" << std::endl;
throw std::invalid_argument("Invalid audio frame data");
}
const size_t data_size = audioFrame.dataCount * sizeof(int16_t);
// 3. Create the shared memory
std::cout << "[3] Creating shared memory..." << std::endl;
char shm_name[32];
snprintf(shm_name, sizeof(shm_name), "/audio_shm_%d", getpid());
int fd = shm_open(shm_name, O_CREAT | O_RDWR, 0666);
if (fd == -1) {
std::cout << "[ERROR] shm_open失败: " << strerror(errno) << std::endl;
throw std::runtime_error("Failed to create shared memory");
}
std::cout << " 共享内存fd: " << fd << " 名称: " << shm_name << std::endl;
// 4. 设置共享内存大小
std::cout << "[4] 设置共享内存大小..." << std::endl;
if (ftruncate(fd, data_size) == -1) {
close(fd);
std::cout << "[ERROR] ftruncate失败: " << strerror(errno) << std::endl;
throw std::runtime_error("Failed to resize shared memory");
}
std::cout << " 内存大小: " << data_size << " bytes" << std::endl;
// 5. 内存映射
std::cout << "[5] 内存映射..." << std::endl;
void* ptr = mmap(NULL, data_size, PROT_WRITE, MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED) {
close(fd);
std::cout << "[ERROR] mmap失败: " << strerror(errno) << std::endl;
throw std::runtime_error("Failed to map shared memory");
}
std::cout << " 映射地址: " << ptr << std::endl;
namespace py = boost::python;
namespace np = boost::python::numpy;
// 6. Copy the data into shared memory
std::cout << "[6] Copying audio data into shared memory..." << std::endl;
memcpy(ptr, audioFrame.data, data_size);
std::cout << "step1" << std::endl;
npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
std::cout << "step2" << std::endl;
np::dtype dtype = np::dtype::get_builtin<int16_t>();
std::cout << "step3" << std::endl;
np::ndarray audioArray = np::from_data(
audioFrame.data, // data pointer
dtype, // data type (int16)
py::make_tuple(shape[0]), // shape (1-D)
py::make_tuple(sizeof(int16_t)), // stride
py::object() // owner (managed by Python)
);
std::cout << " data copy complete" << std::endl;
// 7. Invoke the callback
if (!pyCallback_.is_none()) {
std::cout << "[7] About to invoke the Python callback..." << std::endl;
// bump the refcount to prevent premature release
Py_INCREF(pyCallback_.ptr());
try {
std::cout << " pyCallback_ type: " << Py_TYPE(pyCallback_.ptr())->tp_name << std::endl;
PyObject* repr = PyObject_Repr(pyCallback_.ptr());
if (repr) {
std::cout << " pyCallback_ repr: " << PyUnicode_AsUTF8(repr) << std::endl;
Py_DECREF(repr); // must be released manually
}
/*
// pass the shared memory info
pyCallback_(
py::str(shm_name), // shared memory name
data_size, // data size
audioFrame.dataCount,
audioFrame.sampleRate,
audioFrame.numChannels,
audioFrame.channelIndex
);
*/
pyCallback_(
audioArray, // numpy array
data_size, // data size
audioFrame.dataCount,
audioFrame.sampleRate,
audioFrame.numChannels,
audioFrame.channelIndex
);
std::cout << " after callback" << std::endl;
if (PyErr_Occurred()) {
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
if (value) {
PyObject* str = PyObject_Str(value);
if (str) {
std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
Py_DECREF(str);
}
}
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
//PyErr_Print();
throw std::runtime_error("Python callback error");
}
std::cout << " 回调执行成功" << std::endl;
} catch (const py::error_already_set& e) {
std::cerr << "[PYTHON ERROR] ";
PyErr_Print(); // automatically prints to stderr
// optional: fetch more detailed error information
if (PyErr_Occurred()) {
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
std::cerr << "Details: "
<< PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
PyErr_Restore(type, value, traceback);
}
Py_DECREF(pyCallback_.ptr());
} catch (...) {
std::cout << "[ERROR] 回调执行失败" << std::endl;
munmap(ptr, data_size);
close(fd);
shm_unlink(shm_name);
Py_DECREF(pyCallback_.ptr());
throw;
}
Py_DECREF(pyCallback_.ptr());
} else {
std::cout << "[7] 无回调函数设置" << std::endl;
}
// 8. 释放资源
std::cout << "[8] 释放共享内存资源..." << std::endl;
munmap(ptr, data_size);
close(fd);
shm_unlink(shm_name);
std::cout << "[9] 释放GIL..." << std::endl;
PyGILState_Release(gstate);
std::cout << "=== 音频处理完成 ===" << std::endl;
} catch (const std::exception& e) {
std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl;
PyGILState_Release(gstate);
std::cerr << "Audio process error: " << e.what() << std::endl;
}
setData(audioFrame);
}
void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
@@ -447,12 +85,6 @@ void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
}
bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId)
{
std::cout << "init, numpyApi_:" << numpyApi_[93] << std::endl;
if (!numpyApi_ || !numpyApi_[93]) { // 93 is the offset of PyArray_SimpleNew
std::cout << "numpyApi_ is null in init" << std::endl;
} else {
std::cout << "numpyApi_ is not null in init" << std::endl;
}
mrtc::IMRTCEngineFactory * rtcFactory = mrtc::getMRTCEngineFactory();
if (!rtcFactory)
{
@@ -498,48 +130,21 @@ bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const
std::cout << "RTCContext::instance().registerListener() failed" << std::endl;
return false;
}
namespace py = boost::python;
namespace np = boost::python::numpy;
Py_Initialize(); // initialize Python
np::initialize();
return true;
}
bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
{
std::cout << "initRecv, numpyApi_:" << numpyApi_[93] << std::endl;
if (!numpyApi_ || !numpyApi_[93]) { // 93 is the offset of PyArray_SimpleNew
std::cout << "numpyApi_ is null in initRecv" << std::endl;
} else {
std::cout << "numpyApi_ is not null in initRecv" << std::endl;
}
while (!isOnConsumer_)
{
std::cout << "wait for OnConsumer" << std::endl;
sleep(3);
}
std::cout << "registerSoundLevelListener" << std::endl;
int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, destRoomId,
srcUserId, destChannelIndex, this);
if (0 != ret1)
{
std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1;
return false;
}
std::cout << "muteAudio" << std::endl;
int16_t ret2 = rtcEngine_->muteAudio(destRoomId, srcUserId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM, false, destChannelIndex);
if (0 != ret2)
{
std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2;
return false;
}
std::cout << "init recv succ" << std::endl;
return true;
}
bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex)
bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex,
const uint8_t channelNum)
{
while (!isOnRoom_)
{
@@ -564,6 +169,7 @@ bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const i
strcpy(option.dstRoomId, destRoomId);
}
option.channelIndex = destChannelIndex;
option.channel = channelNum;
std::cout << "startCustomAudio" << std::endl;
int16_t ret2 = rtcEngine_->startCustomAudio(option);
if (ret2 != 0)
@@ -618,12 +224,74 @@ void RTCContext::setpData(void* pData)
std::lock_guard<std::mutex> lock(mutex_);
pData_ = pData;
}
void RTCContext::setPyCallback(boost::python::object callback) {
std::lock_guard<std::mutex> lock(mutex_);
pyCallback_ = callback;
void RTCContext::setData(const mrtc::MRTCAudioFrame& frame) {
std::lock_guard<std::mutex> lock(dataMutex_);
if (dataSize_ == totalSize_) {
bottom_ = (bottom_ + 1) % totalSize_;
dataSize_--;
}
RetAudioFrame newFrame;
newFrame.dataCount = frame.dataCount;
newFrame.sampleRate = frame.sampleRate;
newFrame.numChannels = frame.numChannels;
newFrame.channelIndex = frame.channelIndex;
newFrame.data = std::make_unique<int16_t[]>(frame.dataCount);
std::memcpy(newFrame.data.get(), frame.data, frame.dataCount* sizeof(int16_t));
data_[head_] = std::move(newFrame);
head_ = (head_ + 1) % totalSize_;
dataSize_++;
}
void RTCContext::setNumpyApi(void **numpyApi) {
std::lock_guard<std::mutex> lock(mutex_);
numpyApi_ = numpyApi;
std::cout << "setNupyApi, numpyApi_:" << numpyApi_[93] << std::endl;
RetAudioFrame RTCContext::getData() {
//std::lock_guard<std::mutex> lock(dataMutex_);
if (dataSize_ > 0) {
RetAudioFrame frame = std::move(data_[bottom_]); // move instead of copy
bottom_ = (bottom_ + 1) % totalSize_;
dataSize_--;
return frame; // return value optimization (RVO) applies
}
return {}; // return an empty frame
}
py::array_t<int16_t> RTCContext::getNumpyData() {
std::cout << "step1" << std::endl;
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
std::cout << "step2" << std::endl;
int16_t* dataPtr = frame.data.get(); // data pointer
std::cout << "step3" << std::endl;
size_t length = frame.dataCount; // data length
std::cout << "step4" << std::endl;
if (!dataPtr || length == 0) {
return py::array_t<int16_t>({0}); // return an empty array
}
// construct the pybind11 NumPy array directly (memory managed automatically)
py::array_t<int16_t> result({static_cast<py::ssize_t>(length)});
auto buf = result.mutable_unchecked();
for (size_t i = 0; i < length; i++) {
buf[i] = dataPtr[i];
}
return result;
}
py::list RTCContext::getListData() {
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
py::list result;
if (frame.data) {
for (int i = 0; i < frame.dataCount; i++) {
result.append(frame.data.get()[i]);
}
}
return result;
}
int16_t RTCContext::getDataCount() {
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
return frame.dataCount;
}
int16_t RTCContext::getSize() {
std::lock_guard<std::mutex> lock(dataMutex_);
return dataSize_;
}
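Since setData() overwrites the oldest frame once dataSize_ reaches totalSize_ (100), a consumer needs to drain frames faster than they arrive. A hedged Python-side sketch using only the getSize/getListData bindings exposed above:

# hypothetical drain loop for the fixed-size ring buffer (totalSize_ = 100)
import time
import rtc_plugins

while True:
    for _ in range(rtc_plugins.getSize()):
        frame = rtc_plugins.getListData()   # pops one queued frame as a Python list
        print(f"drained {len(frame)} samples")
    time.sleep(0.005)                       # poll frequently so frames are not dropped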

View File

@@ -1,9 +1,6 @@
// RTCContext.h
#pragma once
//#include "numpyConfig.h"
#include "numpyStub.h"
#include "IMRTCEngine.hpp"
#include "MRTCEngineDefine.hpp"
#include "IMRTCEngineFactory.hpp"
@@ -18,23 +15,26 @@
#include <sys/stat.h>
#include <Python.h>
#include <boost/python.hpp>
#include <boost/python/detail/wrap_python.hpp>
#include <boost/python/numpy.hpp>
#include <boost/python/detail/prefix.hpp>
#include <boost/python/module.hpp>
#include <boost/python/def.hpp>
#include <numpy/ndarrayobject.h>
#include <numpy/arrayobject.h>
// pybind11 headers
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>
namespace py = pybind11;
//#include <numpy/arrayobject.h>
// the external variable must be declared (critical!)
//#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
namespace fs = std::filesystem;
#define ENV_PRODUCT
//#define SEND_MODE
// audio data frame
struct RetAudioFrame
{
std::unique_ptr<int16_t[]> data;
int dataCount = 0;
int sampleRate = 48000;
int numChannels = 1;
int channelIndex = 0;
};
class RTCContext :
public RTCENGINE_NAMESPACE::IMRTCRoomCallBack,
public RTCENGINE_NAMESPACE::IMRTCConsumerCallBack,
@@ -60,36 +60,22 @@ public:
static RTCContext instance;
return instance;
}
static void** numpy_api() {
static void** api = [](){
// force NumPy initialization
if (_import_array() < 0) {
PyErr_Print();
throw std::runtime_error("NumPy initialization failed");
}
void** ptr = reinterpret_cast<void**>(RTC_PLUGINS_ARRAY_API);
std::cout << "ptr:" << ptr << std::endl;
if (!ptr || !ptr[93]) {
std::cerr << "NumPy API corrupt! Expected at 93: "
<< (ptr ? (void*)ptr[93] : nullptr) << std::endl;
abort();
}
return ptr;
}();
return api;
}
RTCContext(const RTCContext&) = delete;
RTCContext& operator=(const RTCContext&) = delete;
mrtc::IMRTCEngine* getRtcEngine() const;
bool init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId);
bool initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex);
bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex);
bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, const uint8_t channelNum);
int16_t getSize();
void setData(const mrtc::MRTCAudioFrame& frame);
RetAudioFrame getData();
py::array_t<int16_t> getNumpyData();
py::list getListData();
int16_t getDataCount();
void* getpData() const;
void setpData(void* pData);
void setPyCallback(boost::python::object callback);
void setNumpyApi(void** numpyApi);
int16_t sendAudioData(uint8_t channelIndex = 0, const void* pData = nullptr, int32_t nSampleRate = 48000, uint64_t nNumberOfChannels = 2, uint64_t dataLength = 0);
int16_t sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate,
@@ -99,6 +85,7 @@ public:
private:
RTCContext()
{
data_.resize(totalSize_);
}
mutable std::mutex mutex_;
mrtc::IMRTCEngine * rtcEngine_ = nullptr;
@@ -107,8 +94,12 @@ private:
bool isOnConsumer_ = false;
bool isJoinMultiRoom_ = false;
bool isMultiRoom_ = false;
boost::python::object pyCallback_;
void ** numpyApi_;
std::vector<RetAudioFrame> data_;
mutable std::mutex dataMutex_;
const int16_t totalSize_ = 100;
int16_t dataSize_ = 0;
int16_t bottom_ = 0;
int16_t head_= 0;
void onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo);
void onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo);
void onRender(const char* roomId, const char* peerId,

View File

@@ -1,12 +0,0 @@
// numpy_interface.h (new file)
#pragma once
#ifdef IMPLEMENT_NUMPY_API
// implementation path for the main module
#define PY_ARRAY_UNIQUE_SYMBOL RTC_PLUGINS_ARRAY_API
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#else
// header path for consumers
extern void* RTC_PLUGINS_ARRAY_API[]; // must strictly match the NumPy type
#endif