#include "RTCContext.h"

// Standard headers used directly in this file (most are likely already pulled in
// through RTCContext.h; listed here so the .cpp stands on its own).
#include <chrono>
#include <cstring>
#include <ctime>
#include <iostream>
#include <mutex>
#include <unistd.h> // sleep()

void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
    //std::cout << "RTCContext::onRoom():" << roomInfo.roomId << "," << roomInfo.displayName << "," << roomInfo.userId << "," << roomInfo.message;
    std::cout << "RTCContext::onRoom()" << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnRoom_ = true;
}
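
// onConsumer() fires when a remote audio consumer becomes available. Besides flagging
// isOnConsumer_ (which initRecv() busy-waits on), it registers this object as a
// sound-level listener for the custom audio source and unmutes that stream.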
void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
    //std::cout << "RTCContext::onConsumer()" << std::endl;
    std::cout << "RTCContext::onConsumer():msgId:" << msgId << ", roomId:" << consumerInfo.roomId << ", displayName:"
              << consumerInfo.displayName << ", channelIndex:" << (int)consumerInfo.channelIndex << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnConsumer_ = true;

    std::cout << "registerSoundLevelListener" << std::endl;
    int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, roomId,
        peerId, consumerInfo.channelIndex, this);
    if (0 != ret1)
    {
        std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1 << std::endl;
        return;
    }

    std::cout << "muteAudio" << std::endl;
    int16_t ret2 = rtcEngine_->muteAudio(roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
        false, consumerInfo.channelIndex);
    if (0 != ret2)
    {
        std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2 << std::endl;
        return;
    }

    std::cout << "init recv succ" << std::endl;
}

void RTCContext::onRender(const char* roomId, const char* peerId,
    RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
    std::cout << "RTCContext::onRender()" << std::endl;
}

void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
        std::cout << "receive join multi room callback, msgId:" << msgId << std::endl;
        isJoinMultiRoom_ = true;
    }
    std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
    //std::cout << "RTCContext::onCallBackMessage()" << std::endl;
}

void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
    //std::cout << "RTCContext::onCallBackCustomData(), obj:" << object.peerId << "," << object.data << "," << object.data_length;
    std::cout << "RTCContext::onCallBackCustomData()" << std::endl;
}

void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint16_t audioSourceType,
    uint8_t channelIndex, uint16_t volume, int32_t vad)
{
    std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}

/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                                mrtc::MRTCAudioFrame& audioFrame,
                                mrtc::MRTCAudioSourceType audioSourceType)
{
    namespace py = boost::python;

    PyGILState_STATE gstate = PyGILState_Ensure();

    try {
        std::cout << "-----------------------------------" << std::endl;
        std::cout << "dataCount:" << audioFrame.dataCount << std::endl;
        std::cout << "dataCount value: " << audioFrame.dataCount
                  << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;

        std::cout << "onAudioProcess, numpyApi_:" << numpyApi_[93] << std::endl;
        if (!numpyApi_ || !numpyApi_[93]) { // index 93 is the offset of PyArray_SimpleNew in the NumPy C-API table
            std::cout << "numpyApi_ is null in onAudioProcess" << std::endl;
        } else {
            std::cout << "numpyApi_ is not null in onAudioProcess:" << numpyApi_[93] << std::endl;
        }

        //auto numpyApi = RTCContext::numpy_api();
        std::cout << "step1" << std::endl;
        if (!numpyApi_) {
            PyGILState_Release(gstate);
            throw std::runtime_error("NumPy C-API not initialized. Call import_array() in module init");
        }
        std::cout << "step2" << std::endl;

        using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);

        void* func_ptr = numpyApi_[93];
        std::cout << "Raw function pointer: " << func_ptr << std::endl;

        auto ptmp = (PyObject*(*)(int, npy_intp*, int))numpyApi_[93];
        std::cout << "ptmp is:" << ptmp << std::endl;
        std::cout << "Pointer sizes:\n"
                  << "void*: " << sizeof(void*) << "\n"
                  << "FunctionPtr: " << sizeof(PyObject*(*)(int, npy_intp*, int)) << std::endl;

        // 2. Use memcpy to sidestep compiler optimization issues with the function pointer cast
        PyArray_SimpleNew_t PyArray_SimpleNew;
        static_assert(sizeof(func_ptr) == sizeof(PyArray_SimpleNew),
                      "Pointer size mismatch");
        std::cout << "step3" << std::endl;
        memcpy(&PyArray_SimpleNew, &func_ptr, sizeof(func_ptr));

        //auto PyArray_SimpleNew = reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
        std::cout << "step4, PyArray_SimpleNew:" << PyArray_SimpleNew << std::endl;

        // 3. Strictly validate the input data
        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            PyGILState_Release(gstate);
            throw std::invalid_argument("Invalid audio frame data");
        }
        std::cout << "step5" << std::endl;

        // 4. Safely build the dimension array (with bounds checking)
        if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
            PyGILState_Release(gstate);
            throw std::overflow_error("Audio frame size exceeds maximum limit");
        }
        std::cout << "step6" << std::endl;
        npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};

        std::cout << "step7" << std::endl;
        // 5. Create the NumPy array (with allocation check)
        PyObject* pyArray = nullptr;
        pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
        std::cout << "step8" << std::endl;
        if (!pyArray) {
            PyGILState_Release(gstate);
            throw std::bad_alloc();
        }
        std::cout << "step9" << std::endl;

        // 6. Safely copy the data (with alignment check)
        if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
            Py_DECREF(pyArray);
            PyGILState_Release(gstate);
            throw std::runtime_error("Unaligned audio data pointer");
        }
        std::cout << "step10" << std::endl;
        std::memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(pyArray)),
                    audioFrame.data,
                    audioFrame.dataCount * sizeof(int16_t));

        std::cout << "step11" << std::endl;
        // 7. Invoke the callback (with reference-count protection)
        if (!pyCallback_.is_none()) {
            try {
                pyCallback_(
                    py::handle<>(pyArray), // reference managed automatically
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
            } catch (...) {
                Py_DECREF(pyArray);
                throw; // rethrow the exception
            }
        }
        std::cout << "step12" << std::endl;

        // 8. Release resources
        Py_DECREF(pyArray);
        std::cout << "step13" << std::endl;
        PyGILState_Release(gstate);
        std::cout << "step14" << std::endl;

    } catch (const std::exception& e) {
        std::cerr << "Audio process error: " << e.what() << std::endl;
        PyErr_Print();
    }
    exit(0);
}
*/

/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                                mrtc::MRTCAudioFrame& audioFrame,
                                mrtc::MRTCAudioSourceType audioSourceType)
{
    namespace py = boost::python;
    std::cout << "=== Audio processing started ===" << std::endl;

    // 1. Acquire the GIL
    std::cout << "[1] Acquiring GIL..." << std::endl;
    PyGILState_STATE gstate = PyGILState_Ensure();

    try {
        // 2. Validate input parameters
        std::cout << "[2] Checking input parameters..." << std::endl;
        std::cout << "    dataCount: " << audioFrame.dataCount
                  << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;

        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            std::cout << "[ERROR] Invalid audio data pointer or length" << std::endl;
            throw std::invalid_argument("Invalid audio frame data");
        }

        if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
            std::cout << "[ERROR] Data length exceeds the maximum" << std::endl;
            throw std::overflow_error("Audio frame size exceeds maximum limit");
        }

        // 3. Prepare the array dimensions
        std::cout << "[3] Preparing array dimensions..." << std::endl;
        npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
        std::cout << "    Dimensions set: [" << dims[0] << "]" << std::endl;

        // 4. Check the NumPy C-API state
        std::cout << "[4] Checking NumPy API state..." << std::endl;
        std::cout << "    numpyApi_ address: " << numpyApi_ << std::endl;
        if (!numpyApi_) {
            throw std::runtime_error("NumPy C-API not initialized");
        }

        // 5. Fetch the PyArray_SimpleNew function
        std::cout << "[5] Fetching PyArray_SimpleNew..." << std::endl;
        using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
        PyArray_SimpleNew_t PyArray_SimpleNew =
            reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
        std::cout << "    Function address: " << (void*)PyArray_SimpleNew << std::endl;

        std::cout << "[5.1] Validating the function pointer..." << std::endl;
        void* func_ptr = numpyApi_[93];
        if (reinterpret_cast<uintptr_t>(func_ptr) < 0x1000) { // reject obviously invalid addresses
            std::cerr << "Invalid function pointer: " << func_ptr << std::endl;
            throw std::runtime_error("Invalid PyArray_SimpleNew pointer");
        }

        // 6. Create the NumPy array
        std::cout << "[6] Creating NumPy array..." << std::endl;
        PyObject* pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
        std::cout << "    Array address: " << pyArray << std::endl;

        if (!pyArray) {
            throw std::bad_alloc();
        }

        // 7. Check memory alignment
        std::cout << "[7] Checking memory alignment..." << std::endl;
        std::cout << "    Audio data address: " << (void*)audioFrame.data
                  << " alignment requirement: " << alignof(int16_t) << std::endl;

        if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
            Py_DECREF(pyArray);
            throw std::runtime_error("Unaligned audio data pointer");
        }

        // 8. Copy the data
        std::cout << "[8] Copying audio data..." << std::endl;
        std::cout << "    Destination: " << PyArray_DATA((PyArrayObject*)pyArray)
                  << " bytes: " << audioFrame.dataCount * sizeof(int16_t) << std::endl;

        std::memcpy(PyArray_DATA((PyArrayObject*)pyArray),
                    audioFrame.data,
                    audioFrame.dataCount * sizeof(int16_t));

        // 9. Invoke the callback
        if (!pyCallback_.is_none()) {
            std::cout << "[9] Invoking the Python callback..." << std::endl;
            try {
                pyCallback_(
                    py::handle<>(pyArray),
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
                std::cout << "    Callback succeeded" << std::endl;
            } catch (...) {
                std::cout << "[ERROR] Callback failed" << std::endl;
                Py_DECREF(pyArray);
                throw;
            }
        } else {
            std::cout << "[9] No callback set" << std::endl;
        }

        // 10. Release resources
        std::cout << "[10] Releasing resources..." << std::endl;
        Py_DECREF(pyArray);
        std::cout << "[11] Releasing GIL..." << std::endl;
        PyGILState_Release(gstate);
        std::cout << "=== Audio processing finished ===" << std::endl;

    } catch (const std::exception& e) {
        std::cout << "[EXCEPTION] Caught exception: " << e.what() << std::endl;
        PyGILState_Release(gstate);
        PyErr_Print();
        std::cerr << "Audio process error: " << e.what() << std::endl;
    }
}
*/

void printTimestamp() {
    // Current system time point
    auto now = std::chrono::system_clock::now();

    // Convert to a timestamp (seconds + milliseconds)
    auto timestamp = std::chrono::duration_cast<std::chrono::seconds>(
        now.time_since_epoch()).count();
    auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(
        now.time_since_epoch()).count() % 1000;

    // Local (human-readable) time; currently unused
    std::time_t time = std::chrono::system_clock::to_time_t(now);
    std::cout << "Timestamp: " << timestamp << "." << milliseconds << std::endl;
}
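
// onAudioProcess() is the per-frame audio hook. The earlier variants kept above pushed
// the samples straight into Python (NumPy C-API, then POSIX shared memory); the current
// version only copies the frame into the internal ring buffer via setData(), and Python
// pulls it later through getNumpyData()/getListData().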
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                                mrtc::MRTCAudioFrame& audioFrame,
                                mrtc::MRTCAudioSourceType audioSourceType)
{
    //namespace py = boost::python;
    //std::cout << "=== Audio processing started (shared-memory version) ===" << std::endl;
    //std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << "," <<
    //    audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;

    //printTimestamp();
    setData(audioFrame);

    // 1. Acquire the GIL
    //std::cout << "[1] Acquiring GIL..." << std::endl;
    ////PyGILState_STATE gstate = PyGILState_Ensure();

    //try {
    //    // 2. Validate input parameters
    //    std::cout << "[2] Checking input parameters..." << std::endl;
    //    std::cout << "    dataCount: " << audioFrame.dataCount
    //              << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;

    //    if (!audioFrame.data || audioFrame.dataCount <= 0) {
    //        std::cout << "[ERROR] Invalid audio data pointer or length" << std::endl;
    //        throw std::invalid_argument("Invalid audio frame data");
    //    }

    //    const size_t data_size = audioFrame.dataCount * sizeof(int16_t);

    //    // 3. Create the shared memory segment
    //    std::cout << "[3] Creating shared memory..." << std::endl;
    //    char shm_name[32];
    //    //snprintf(shm_name, sizeof(shm_name), "/audio_shm_%d", getpid());
    //    snprintf(shm_name, sizeof(shm_name), "/audio_shm_test");

    //    int fd = shm_open(shm_name, O_CREAT | O_RDWR, 0666);
    //    if (fd == -1) {
    //        std::cout << "[ERROR] shm_open failed: " << strerror(errno) << std::endl;
    //        throw std::runtime_error("Failed to create shared memory");
    //    }
    //    std::cout << "    Shared memory fd: " << fd << " name: " << shm_name << std::endl;

    //    // 4. Size the shared memory segment
    //    std::cout << "[4] Sizing shared memory..." << std::endl;
    //    if (ftruncate(fd, data_size) == -1) {
    //        close(fd);
    //        std::cout << "[ERROR] ftruncate failed: " << strerror(errno) << std::endl;
    //        throw std::runtime_error("Failed to resize shared memory");
    //    }
    //    std::cout << "    Size: " << data_size << " bytes" << std::endl;

    //    // 5. Map the memory
    //    std::cout << "[5] Mapping memory..." << std::endl;
    //    void* ptr = mmap(NULL, data_size, PROT_WRITE, MAP_SHARED, fd, 0);
    //    if (ptr == MAP_FAILED) {
    //        close(fd);
    //        std::cout << "[ERROR] mmap failed: " << strerror(errno) << std::endl;
    //        throw std::runtime_error("Failed to map shared memory");
    //    }
    //    std::cout << "    Mapped address: " << ptr << std::endl;

    //    namespace py = boost::python;
    //    namespace np = boost::python::numpy;
    //    // 6. Copy the data into shared memory
    //    std::cout << "[6] Copying audio data into shared memory..." << std::endl;
    //    memcpy(ptr, audioFrame.data, data_size);
    //    std::cout << "step1" << std::endl;
    //    /*
    //    npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
    //    std::cout << "step2" << std::endl;
    //    np::dtype dtype = np::dtype::get_builtin<int16_t>();
    //    std::cout << "step3" << std::endl;
    //    np::ndarray audioArray = np::from_data(
    //        audioFrame.data,                 // data pointer
    //        dtype,                           // element type (int16)
    //        py::make_tuple(shape[0]),        // shape (1-D)
    //        py::make_tuple(sizeof(int16_t)), // strides
    //        py::object()                     // owner (managed by Python)
    //    );
    //    */
    //    std::cout << "    Data copy finished" << std::endl;

    //    // 7. Invoke the callback
    //    //if (!pyCallback_.is_none()) {
    //    //    std::cout << "[7] Preparing to invoke the Python callback..." << std::endl;
    //    //    // Bump the reference count so the callback object is not released early
    //    //    Py_INCREF(pyCallback_.ptr());
    //    //    try {
    //    //        std::cout << "    pyCallback_ type: " << Py_TYPE(pyCallback_.ptr())->tp_name << std::endl;
    //    //        PyObject* repr = PyObject_Repr(pyCallback_.ptr());
    //    //        if (repr) {
    //    //            std::cout << "    pyCallback_ repr: " << PyUnicode_AsUTF8(repr) << std::endl;
    //    //            Py_DECREF(repr); // must be released manually
    //    //        }
    //    //        // Pass the shared memory description
    //    //        pyCallback_(
    //    //            py::str(shm_name),    // shared memory name
    //    //            data_size,            // data size in bytes
    //    //            audioFrame.dataCount,
    //    //            audioFrame.sampleRate,
    //    //            audioFrame.numChannels,
    //    //            audioFrame.channelIndex
    //    //        );
    //    //        /*
    //    //        pyCallback_(
    //    //            audioArray,           // numpy array
    //    //            data_size,            // data size in bytes
    //    //            audioFrame.dataCount,
    //    //            audioFrame.sampleRate,
    //    //            audioFrame.numChannels,
    //    //            audioFrame.channelIndex
    //    //        );
    //    //        */
    //    //        std::cout << "    after callback" << std::endl;
    //    //        if (PyErr_Occurred()) {
    //    //            PyObject *type, *value, *traceback;
    //    //            PyErr_Fetch(&type, &value, &traceback);
    //    //            if (value) {
    //    //                PyObject* str = PyObject_Str(value);
    //    //                if (str) {
    //    //                    std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
    //    //                    Py_DECREF(str);
    //    //                }
    //    //            }
    //    //            Py_XDECREF(type);
    //    //            Py_XDECREF(value);
    //    //            Py_XDECREF(traceback);
    //    //            //PyErr_Print();
    //    //            throw std::runtime_error("Python callback error");
    //    //        }
    //    //        std::cout << "    Callback succeeded" << std::endl;

    //    //    } catch (const py::error_already_set& e) {
    //    //        std::cerr << "[PYTHON ERROR] ";
    //    //        PyErr_Print(); // prints to stderr automatically
    //    //        // Optional: fetch more detailed error information
    //    //        if (PyErr_Occurred()) {
    //    //            PyObject *type, *value, *traceback;
    //    //            PyErr_Fetch(&type, &value, &traceback);
    //    //            std::cerr << "Details: "
    //    //                      << PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
    //    //            PyErr_Restore(type, value, traceback);
    //    //        }
    //    //        Py_DECREF(pyCallback_.ptr());
    //    //    } catch (...) {
    //    //        std::cout << "[ERROR] Callback failed" << std::endl;
    //    //        munmap(ptr, data_size);
    //    //        close(fd);
    //    //        shm_unlink(shm_name);
    //    //        Py_DECREF(pyCallback_.ptr());
    //    //        throw;
    //    //    }
    //    //    Py_DECREF(pyCallback_.ptr());
    //    //} else {
    //    //    std::cout << "[7] No callback set" << std::endl;
    //    //}

    //    // 8. Release resources
    //    std::cout << "[8] Releasing shared memory resources..." << std::endl;
    //    munmap(ptr, data_size);
    //    close(fd);
    //    shm_unlink(shm_name);

    //    std::cout << "[9] Releasing GIL..." << std::endl;
    //    //PyGILState_Release(gstate);
    //    std::cout << "=== Audio processing finished ===" << std::endl;

    //} catch (const std::exception& e) {
    //    std::cout << "[EXCEPTION] Caught exception: " << e.what() << std::endl;
    //    //PyGILState_Release(gstate);
    //    std::cerr << "Audio process error: " << e.what() << std::endl;
    //}
}

void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
{
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "RTCContext::onProducer()" << std::endl;
}
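
// init() builds the MRTC engine from the factory, applies the globally configured
// domain/appid/appSecrectKey/port, joins the room, registers the room and consumer
// listeners, and finally initializes the embedded Python interpreter together with
// boost::python::numpy so buffered frames can later be handed to Python.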
bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId)
{
    // Index 93 is the offset of PyArray_SimpleNew in the NumPy C-API table.
    if (!numpyApi_ || !numpyApi_[93]) {
        std::cout << "numpyApi_ is null in init" << std::endl;
    } else {
        std::cout << "numpyApi_ is not null in init, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    mrtc::IMRTCEngineFactory* rtcFactory = mrtc::getMRTCEngineFactory();
    if (!rtcFactory)
    {
        return false;
    }
    rtcEngine_ = rtcFactory->produceMRTCEngine();
    if (!rtcEngine_)
    {
        return false;
    }
    mrtc::MRTCEngineConfig engineConfig;
    strcpy(engineConfig.domain, domain);
    strcpy(engineConfig.applicationId, appid);
    strcpy(engineConfig.appSecrectKey, appSecrectKey);
    engineConfig.port = port;
    if (0 != rtcEngine_->init(engineConfig, this))
    {
        std::cout << "RTCContext::instance().init() failed" << std::endl;
        return false;
    }

    if (0 != rtcEngine_->setUserInfo(selfUserId, selfDisplayName, selfRoomId))
    {
        std::cout << "RTCContext::instance().setUserInfo() failed" << std::endl;
        return false;
    }
    mrtc::MRTCJoinAuthority authority;
    strcpy(authority.applicationId, appid);
    strcpy(authority.appSecretKey, appSecrectKey);
    mrtc::MRTCJoinConfig loginConfig;
    if (0 != rtcEngine_->joinRoom(authority, loginConfig))
    {
        std::cout << "RTCContext::instance().joinRoom() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_ROOM, this))
    {
        std::cout << "RTCContext::instance().registerListener() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_CONSUMER, this))
    {
        std::cout << "RTCContext::instance().registerListener() failed" << std::endl;
        return false;
    }
    namespace py = boost::python;
    namespace np = boost::python::numpy;
    Py_Initialize();   // initialize the embedded Python interpreter
    np::initialize();

    return true;
}
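
// initRecv() only blocks until onConsumer() has fired for the remote stream; the
// registerSoundLevelListener()/muteAudio() calls that used to run here now happen in
// onConsumer(), and the old code is kept below inside a comment for reference.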
bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
{
    // Index 93 is the offset of PyArray_SimpleNew in the NumPy C-API table.
    if (!numpyApi_ || !numpyApi_[93]) {
        std::cout << "numpyApi_ is null in initRecv" << std::endl;
    } else {
        std::cout << "numpyApi_ is not null in initRecv, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    while (!isOnConsumer_)
    {
        std::cout << "wait for OnConsumer" << std::endl;
        sleep(3);
    }
    /*
    std::cout << "registerSoundLevelListener" << std::endl;
    int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, destRoomId,
        srcUserId, destChannelIndex, this);
    if (0 != ret1)
    {
        std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1;
        return false;
    }

    std::cout << "muteAudio" << std::endl;
    int16_t ret2 = rtcEngine_->muteAudio(destRoomId, srcUserId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM, false, destChannelIndex);
    if (0 != ret2)
    {
        std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2;
        return false;
    }

    std::cout << "init recv succ" << std::endl;
    */
    return true;
}

bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex,
    const uint8_t channelNum)
{
    while (!isOnRoom_)
    {
        std::cout << "wait for OnRoom" << std::endl;
        sleep(3);
    }
    if (std::string(srcRoomId) != std::string(destRoomId)) {
        isMultiRoom_ = true;
        std::cout << "join multi room" << std::endl;
        int16_t ret1 = rtcEngine_->joinMultiRoom(destRoomId);
        if (ret1 != 0)
        {
            std::cout << "joinMultiRoom fail, ret:" << ret1 << std::endl;
            return false;
        }
    } else {
        isMultiRoom_ = false;
    }

    mrtc::MRTCAudioOption option;
    if (std::string(srcRoomId) != std::string(destRoomId)) {
        strcpy(option.dstRoomId, destRoomId);
    }
    option.channelIndex = destChannelIndex;
    option.channel = channelNum;
    std::cout << "startCustomAudio" << std::endl;
    int16_t ret2 = rtcEngine_->startCustomAudio(option);
    if (ret2 != 0)
    {
        std::cout << "startCustomAudio fail, ret:" << ret2 << std::endl;
        return false;
    }
    std::cout << "init send succ" << std::endl;
    return true;
}
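
/*
 * Usage sketch (illustrative only; the variable names and parameter values below are
 * assumptions, not part of this file). Assuming the context object is obtained via the
 * RTCContext::instance() singleton suggested by the log messages, a sender would
 * typically drive it as:
 *
 *     auto& ctx = RTCContext::instance();
 *     if (!ctx.init("userA", "User A", "room1")) return;        // join room, register listeners
 *     if (!ctx.initSend("room1", "room1", 0, 1)) return;        // start custom audio, channel 0, mono
 *     ctx.sendCustomAudioData(0, pcmBuffer, 48000, 1, pcmBytes); // blocks until the room is ready
 *     ctx.destorySend(0);                                        // stop the custom audio stream
 *
 * The receive side calls initRecv() and then polls getNumpyData()/getListData() from
 * Python for the frames buffered by onAudioProcess().
 */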

void RTCContext::destorySend(const int16_t selfChannelIndex)
{
    rtcEngine_->stopCustomAudio(selfChannelIndex);
}

int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels, uint64_t dataLength)
{
    std::lock_guard<std::mutex> lock(mutex_);
    if (pData_)
    {
        return rtcEngine_->sendCustomAudioData(channelIndex, pData, nSampleRate, nNumberOfChannels, dataLength);
    }
    return 0;
}

int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate,
    uint64_t channelNum, uint64_t dataLen)
{
    while (!isOnRoom_ || (isMultiRoom_ && !isJoinMultiRoom_)) {
        std::cout << "wait for room and multi room before send" << std::endl;
        sleep(3);
    }
    std::lock_guard<std::mutex> lock(mutex_);
    if (customData == nullptr) {
        std::cout << "customData is null" << std::endl;
        return -1;
    }
    std::cout << "customData addr is:" << customData << std::endl;
    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate, channelNum, dataLen);
}

mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return rtcEngine_;
}

void* RTCContext::getpData() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return pData_;
}

void RTCContext::setpData(void* pData)
{
    std::lock_guard<std::mutex> lock(mutex_);
    pData_ = pData;
}

void RTCContext::setPyCallback(boost::python::object callback) {
    std::lock_guard<std::mutex> lock(mutex_);
    pyCallback_ = callback;
}

void RTCContext::setNumpyApi(void** numpyApi) {
    std::lock_guard<std::mutex> lock(mutex_);
    numpyApi_ = numpyApi;
    std::cout << "setNumpyApi, numpyApi_[93]:" << (numpyApi_ ? numpyApi_[93] : nullptr) << std::endl;
}
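
// setData()/getData() implement a small fixed-size ring buffer of audio frames
// (data_, head_, bottom_, totalSize_, dataSize_, declared in RTCContext.h): when the
// buffer is full the oldest frame is dropped, and each frame's samples are deep-copied
// so the engine's buffer can be reused as soon as onAudioProcess() returns.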
void RTCContext::setData(const mrtc::MRTCAudioFrame& frame) {
    std::lock_guard<std::mutex> lock(dataMutex_);
    if (dataSize_ == totalSize_) {
        bottom_ = (bottom_ + 1) % totalSize_;
        dataSize_--;
    }
    RetAudioFrame newFrame;
    newFrame.dataCount = frame.dataCount;
    newFrame.sampleRate = frame.sampleRate;
    newFrame.numChannels = frame.numChannels;
    newFrame.channelIndex = frame.channelIndex;
    newFrame.data = std::make_unique<int16_t[]>(frame.dataCount);
    std::memcpy(newFrame.data.get(), frame.data, frame.dataCount * sizeof(int16_t));
    data_[head_] = std::move(newFrame);
    head_ = (head_ + 1) % totalSize_;
    dataSize_++;
}
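
// getData() pops the oldest buffered frame. It does not lock dataMutex_ itself: the
// public accessors below (getNumpyData, getListData, getDataCount) already hold the
// lock when they call it, and the mutex is not recursive.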
RetAudioFrame RTCContext::getData() {
    //std::lock_guard<std::mutex> lock(dataMutex_); // callers already hold dataMutex_
    if (dataSize_ > 0) {
        RetAudioFrame frame = std::move(data_[bottom_]); // move instead of copy
        bottom_ = (bottom_ + 1) % totalSize_;
        dataSize_--;
        return frame; // return-value optimization / move applies
    }
    return {}; // empty frame
}
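
// The accessors below hand the oldest buffered frame to Python, either as a
// boost::python::numpy ndarray (copied under the GIL) or as a plain Python list.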
namespace bp = boost::python;
namespace np = boost::python::numpy;

np::ndarray RTCContext::getNumpyData() {
    std::cout << "step1" << std::endl;
    std::lock_guard<std::mutex> lock(dataMutex_);
    RetAudioFrame frame = getData();
    std::cout << "step2" << std::endl;
    int16_t* dataPtr = frame.data.get(); // raw sample pointer
    std::cout << "step3" << std::endl;
    size_t length = frame.dataCount;     // number of samples
    std::cout << "step4" << std::endl;

    PyGILState_STATE gstate = PyGILState_Ensure();
    // np::ndarray has no default constructor, so build a placeholder before the try block.
    np::ndarray result = np::empty(bp::make_tuple(length), np::dtype::get_builtin<int16_t>());
    try {
        if (!dataPtr || length == 0) {
            result = np::zeros(bp::make_tuple(0), np::dtype::get_builtin<int16_t>());
        } else {
            result = np::empty(bp::make_tuple(length), np::dtype::get_builtin<int16_t>());
            std::memcpy(result.get_data(), dataPtr, length * sizeof(int16_t));
        }
    } catch (...) {
        PyGILState_Release(gstate); // release the GIL on error
        throw;
    }
    PyGILState_Release(gstate);
    return result;
}

bp::list RTCContext::getListData() {
    std::cout << "step1" << std::endl;
    std::lock_guard<std::mutex> lock(dataMutex_);
    RetAudioFrame frame = getData();
    std::cout << "step2" << std::endl;
    int16_t* dataPtr = frame.data.get(); // raw sample pointer
    std::cout << "step3" << std::endl;
    size_t length = frame.dataCount;     // number of samples
    std::cout << "step4" << std::endl;
    bp::list result;
    if (dataPtr && length > 0) {
        for (size_t i = 0; i < length; ++i) {
            result.append(dataPtr[i]); // append element by element (by value)
        }
    }
    return result;
}

int16_t RTCContext::getDataCount() {
    std::lock_guard<std::mutex> lock(dataMutex_);
    // Note: this pops (consumes) the oldest frame and reports its sample count.
    RetAudioFrame frame = getData();
    return frame.dataCount;
}

int16_t RTCContext::getSize() {
    std::lock_guard<std::mutex> lock(dataMutex_);
    return dataSize_;
}