#include "RTCContext.h"

/// Room event callback: marks that the local user has entered the room so
/// senders blocked in initSend() can proceed.
void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
    std::cout << "RTCContext::onRoom()" << std::endl;
    std::lock_guard lock(mutex_);
    isOnRoom_ = true;
}

/// Consumer event callback: marks that a remote producer is available so
/// receivers blocked in initRecv() can proceed.
void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
    // Fix: terminate/flush the log line (original had no std::endl).
    std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << ","
              << consumerInfo.displayName << "," << consumerInfo.channelIndex << std::endl;
    std::lock_guard lock(mutex_);
    isOnConsumer_ = true;
}

/// Video render callback — audio-only pipeline here, so this only logs.
void RTCContext::onRender(const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
    std::cout << "RTCContext::onRender()" << std::endl;
}

/// Generic engine message callback; flags multi-room join success so
/// sendCustomAudioData() can start pushing frames.
/// Fix: `msg` may be nullptr — streaming a null `const char*` into an
/// ostream is undefined behavior, so guard it.
void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
    std::lock_guard lock(mutex_);
    if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
        std::cout << "receive join multi room callback" << msgId << std::endl;
        isJoinMultiRoom_ = true;
    }
    std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId
              << ", msg:" << (msg ? msg : "(null)") << std::endl;
}

/// Custom data callback (log-only).
void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
    std::cout << "RTCContext::onCallBackCustomData()" << std::endl;
}

/// Sound-level/VAD update callback (log-only).
void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint16_t audioSourceType, uint8_t channelIndex, uint16_t volume, int32_t vad) {
    std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}

/* void RTCContext::onAudioProcess(const char* roomId, const char* peerId, mrtc::MRTCAudioFrame& audioFrame, mrtc::MRTCAudioSourceType
audioSourceType) { namespace py = boost::python; PyGILState_STATE gstate = PyGILState_Ensure(); try { std::cout << "-----------------------------------" << std::endl; std::cout << "dataCount:" << audioFrame.dataCount << std::endl; std::cout << "dataCount value: " << audioFrame.dataCount << " (max: " << std::numeric_limits::max() << ")" << std::endl; std::cout << "onAudioProcess, numpyApi_:" << numpyApi_[93] << std::endl; if (!numpyApi_ || !numpyApi_[93]) { // 93是PyArray_SimpleNew的偏移量 std::cout << "numpyApi_ is null in onAudioProcess" << std::endl; } else { std::cout << "numpyApi_ is not null in onAudioProcess:" << numpyApi_[93] << std::endl; } //auto numpyApi = RTCContext::numpy_api(); std::cout << "step1" << std::endl; if (!numpyApi_) { PyGILState_Release(gstate); throw std::runtime_error("NumPy C-API not initialized. Call import_array() in module init"); } std::cout << "step2" << std::endl; using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int); void* func_ptr = numpyApi_[93]; std::cout << "Raw function pointer: " << func_ptr << std::endl; auto ptmp = (PyObject*(*)(int, npy_intp*, int))numpyApi_[93]; std::cout << "ptmp is:" << ptmp << std::endl; std::cout << "Pointer sizes:\n" << "void*: " << sizeof(void*) << "\n" << "FunctionPtr: " << sizeof(PyObject*(*)(int, npy_intp*, int)) << std::endl; // 2. 使用memcpy避免编译器优化问题 PyArray_SimpleNew_t PyArray_SimpleNew; static_assert(sizeof(func_ptr) == sizeof(PyArray_SimpleNew), "Pointer size mismatch"); std::cout << "step3" << std::endl; memcpy(&PyArray_SimpleNew, &func_ptr, sizeof(func_ptr)); //auto PyArray_SimpleNew = reinterpret_cast(numpyApi_[93]); std::cout << "step4, PyArray_SimpleNew:" << PyArray_SimpleNew << std::endl; // 3. 严格校验输入数据 if (!audioFrame.data || audioFrame.dataCount <= 0) { PyGILState_Release(gstate); throw std::invalid_argument("Invalid audio frame data"); } std::cout << "step5" << std::endl; // 4. 
安全创建维度数组(带边界检查) if (audioFrame.dataCount > std::numeric_limits::max()) { PyGILState_Release(gstate); throw std::overflow_error("Audio frame size exceeds maximum limit"); } std::cout << "step6" << std::endl; npy_intp dims[1] = {static_cast(audioFrame.dataCount)}; std::cout << "step7" << std::endl; // 5. 创建NumPy数组(带内存保护) PyObject* pyArray = nullptr; pyArray = PyArray_SimpleNew(1, dims, NPY_INT16); std::cout << "step8" << std::endl; if (!pyArray) { PyGILState_Release(gstate); throw std::bad_alloc(); } std::cout << "step9" << std::endl; // 6. 安全拷贝数据(带对齐检查) if (reinterpret_cast(audioFrame.data) % alignof(int16_t) != 0) { Py_DECREF(pyArray); PyGILState_Release(gstate); throw std::runtime_error("Unaligned audio data pointer"); } std::cout << "step10" << std::endl; std::memcpy(PyArray_DATA(reinterpret_cast(pyArray)), audioFrame.data, audioFrame.dataCount * sizeof(int16_t)); std::cout << "step11" << std::endl; // 7. 执行回调(带引用计数保护) if (!pyCallback_.is_none()) { try { pyCallback_( py::handle<>(pyArray), // 自动管理引用 audioFrame.dataCount, audioFrame.sampleRate, audioFrame.numChannels, audioFrame.channelIndex ); } catch (...) { Py_DECREF(pyArray); throw; // 重新抛出异常 } } std::cout << "step12" << std::endl; // 8. 释放资源 Py_DECREF(pyArray); std::cout << "step13" << std::endl; PyGILState_Release(gstate); std::cout << "step14" << std::endl; } catch (const std::exception& e) { std::cerr << "Audio process error: " << e.what() << std::endl; PyErr_Print(); } exit(0); } */ /* void RTCContext::onAudioProcess(const char* roomId, const char* peerId, mrtc::MRTCAudioFrame& audioFrame, mrtc::MRTCAudioSourceType audioSourceType) { namespace py = boost::python; std::cout << "=== 开始音频处理 ===" << std::endl; // 1. 获取GIL std::cout << "[1] 获取GIL锁..." << std::endl; PyGILState_STATE gstate = PyGILState_Ensure(); try { // 2. 输入参数校验 std::cout << "[2] 检查输入参数..." 
<< std::endl; std::cout << " dataCount: " << audioFrame.dataCount << " (max: " << std::numeric_limits::max() << ")" << std::endl; if (!audioFrame.data || audioFrame.dataCount <= 0) { std::cout << "[ERROR] 无效音频数据指针或长度" << std::endl; throw std::invalid_argument("Invalid audio frame data"); } if (audioFrame.dataCount > std::numeric_limits::max()) { std::cout << "[ERROR] 数据长度超过最大值" << std::endl; throw std::overflow_error("Audio frame size exceeds maximum limit"); } // 3. 准备数组维度 std::cout << "[3] 准备数组维度..." << std::endl; npy_intp dims[1] = {static_cast(audioFrame.dataCount)}; std::cout << " 维度设置完成: [" << dims[0] << "]" << std::endl; // 4. 检查NumPy API状态 std::cout << "[4] 检查NumPy API状态..." << std::endl; std::cout << " numpyApi_ 地址: " << numpyApi_ << std::endl; if (!numpyApi_) { throw std::runtime_error("NumPy C-API not initialized"); } // 5. 获取PyArray_SimpleNew函数 std::cout << "[5] 获取PyArray_SimpleNew函数..." << std::endl; using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int); PyArray_SimpleNew_t PyArray_SimpleNew = reinterpret_cast(numpyApi_[93]); std::cout << " 函数地址: " << (void*)PyArray_SimpleNew << std::endl; std::cout << "[5.1] 验证函数指针..." << std::endl; void* func_ptr = numpyApi_[93]; if (reinterpret_cast(func_ptr) < 0x1000) { // 检查是否为合法地址 std::cerr << "非法函数指针: " << func_ptr << std::endl; throw std::runtime_error("Invalid PyArray_SimpleNew pointer"); } // 6. 创建NumPy数组 std::cout << "[6] 创建NumPy数组..." << std::endl; PyObject* pyArray = PyArray_SimpleNew(1, dims, NPY_INT16); std::cout << " 数组地址: " << pyArray << std::endl; if (!pyArray) { throw std::bad_alloc(); } // 7. 检查内存对齐 std::cout << "[7] 检查内存对齐..." << std::endl; std::cout << " 音频数据地址: " << (void*)audioFrame.data << " 对齐要求: " << alignof(int16_t) << std::endl; if (reinterpret_cast(audioFrame.data) % alignof(int16_t) != 0) { Py_DECREF(pyArray); throw std::runtime_error("Unaligned audio data pointer"); } // 8. 拷贝数据 std::cout << "[8] 拷贝音频数据..." 
<< std::endl; std::cout << " 目标地址: " << PyArray_DATA((PyArrayObject*)pyArray) << " 字节数: " << audioFrame.dataCount * sizeof(int16_t) << std::endl; std::memcpy(PyArray_DATA((PyArrayObject*)pyArray), audioFrame.data, audioFrame.dataCount * sizeof(int16_t)); // 9. 执行回调 if (!pyCallback_.is_none()) { std::cout << "[9] 准备执行Python回调..." << std::endl; try { pyCallback_( py::handle<>(pyArray), audioFrame.dataCount, audioFrame.sampleRate, audioFrame.numChannels, audioFrame.channelIndex ); std::cout << " 回调执行成功" << std::endl; } catch (...) { std::cout << "[ERROR] 回调执行失败" << std::endl; Py_DECREF(pyArray); throw; } } else { std::cout << "[9] 无回调函数设置" << std::endl; } // 10. 释放资源 std::cout << "[10] 释放资源..." << std::endl; Py_DECREF(pyArray); std::cout << "[11] 释放GIL..." << std::endl; PyGILState_Release(gstate); std::cout << "=== 音频处理完成 ===" << std::endl; } catch (const std::exception& e) { std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl; PyGILState_Release(gstate); PyErr_Print(); std::cerr << "Audio process error: " << e.what() << std::endl; } } */ void RTCContext::onAudioProcess(const char* roomId, const char* peerId, mrtc::MRTCAudioFrame& audioFrame, mrtc::MRTCAudioSourceType audioSourceType) { namespace py = boost::python; std::cout << "=== 开始音频处理(共享内存版) ===" << std::endl; // 1. 获取GIL std::cout << "[1] 获取GIL锁..." << std::endl; PyGILState_STATE gstate = PyGILState_Ensure(); try { // 2. 输入参数校验 std::cout << "[2] 检查输入参数..." << std::endl; std::cout << " dataCount: " << audioFrame.dataCount << " (max: " << std::numeric_limits::max() << ")" << std::endl; if (!audioFrame.data || audioFrame.dataCount <= 0) { std::cout << "[ERROR] 无效音频数据指针或长度" << std::endl; throw std::invalid_argument("Invalid audio frame data"); } const size_t data_size = audioFrame.dataCount * sizeof(int16_t); // 3. 创建共享内存 std::cout << "[3] 创建共享内存..." 
<< std::endl; char shm_name[32]; snprintf(shm_name, sizeof(shm_name), "/audio_shm_%d", getpid()); int fd = shm_open(shm_name, O_CREAT | O_RDWR, 0666); if (fd == -1) { std::cout << "[ERROR] shm_open失败: " << strerror(errno) << std::endl; throw std::runtime_error("Failed to create shared memory"); } std::cout << " 共享内存fd: " << fd << " 名称: " << shm_name << std::endl; // 4. 设置共享内存大小 std::cout << "[4] 设置共享内存大小..." << std::endl; if (ftruncate(fd, data_size) == -1) { close(fd); std::cout << "[ERROR] ftruncate失败: " << strerror(errno) << std::endl; throw std::runtime_error("Failed to resize shared memory"); } std::cout << " 内存大小: " << data_size << " bytes" << std::endl; // 5. 内存映射 std::cout << "[5] 内存映射..." << std::endl; void* ptr = mmap(NULL, data_size, PROT_WRITE, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) { close(fd); std::cout << "[ERROR] mmap失败: " << strerror(errno) << std::endl; throw std::runtime_error("Failed to map shared memory"); } std::cout << " 映射地址: " << ptr << std::endl; // 6. 拷贝数据到共享内存 std::cout << "[6] 拷贝音频数据到共享内存..." << std::endl; memcpy(ptr, audioFrame.data, data_size); std::cout << " 数据拷贝完成" << std::endl; // 7. 执行回调 if (!pyCallback_.is_none()) { std::cout << "[7] 准备执行Python回调..." << std::endl; try { // 增加引用计数防止提前释放 Py_INCREF(pyCallback_.ptr()); std::cout << " py incr, callback ptr" << pyCallback_ << std::endl; // 传递共享内存信息 pyCallback_( py::str(shm_name), // 共享内存名称 data_size, // 数据大小 audioFrame.dataCount, audioFrame.sampleRate, audioFrame.numChannels, audioFrame.channelIndex ); std::cout << " after callback" << std::endl; if (PyErr_Occurred()) { PyErr_Print(); throw std::runtime_error("Python callback error"); } std::cout << " 回调执行成功" << std::endl; } catch (...) { std::cout << "[ERROR] 回调执行失败" << std::endl; munmap(ptr, data_size); close(fd); shm_unlink(shm_name); throw; } } else { std::cout << "[7] 无回调函数设置" << std::endl; } // 8. 释放资源 std::cout << "[8] 释放共享内存资源..." 
<< std::endl; munmap(ptr, data_size); close(fd); shm_unlink(shm_name); std::cout << "[9] 释放GIL..." << std::endl; PyGILState_Release(gstate); std::cout << "=== 音频处理完成 ===" << std::endl; } catch (const std::exception& e) { std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl; PyGILState_Release(gstate); std::cerr << "Audio process error: " << e.what() << std::endl; } } void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info) { std::cout << "-----------------------------------" << std::endl; std::cout << "RTCContext::onProducer()" << std::endl; } bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId) { std::cout << "init, numpyApi_:" << numpyApi_[93] << std::endl; if (!numpyApi_ || !numpyApi_[93]) { // 93是PyArray_SimpleNew的偏移量 std::cout << "numpyApi_ is null in init" << std::endl; } else { std::cout << "numpyApi_ is not null in init" << std::endl; } mrtc::IMRTCEngineFactory * rtcFactory = mrtc::getMRTCEngineFactory(); if (!rtcFactory) { return false; } rtcEngine_ = rtcFactory->produceMRTCEngine(); if (!rtcEngine_) { return false; } mrtc::MRTCEngineConfig engineConfig; strcpy(engineConfig.domain, domain); strcpy(engineConfig.applicationId, appid); strcpy(engineConfig.appSecrectKey, appSecrectKey); engineConfig.port = port; if (0 != rtcEngine_->init(engineConfig, this)) { std::cout << "RTCContext::instance().init() failed" << std::endl; return false; } if (0 != rtcEngine_->setUserInfo(selfUserId, selfDisplayName, selfRoomId)) { std::cout << "RTCContext::instance().setUserInfo() failed" << std::endl; return false; } mrtc::MRTCJoinAuthority authority; strcpy(authority.applicationId, appid); strcpy(authority.appSecretKey, appSecrectKey); mrtc::MRTCJoinConfig loginConfig; if (0!= rtcEngine_->joinRoom(authority, loginConfig)) { std::cout << "RTCContext::instance().joinRoom() failed" << std::endl; return false; } if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_ROOM, this)) { 
std::cout << "RTCContext::instance().registerListener() failed" << std::endl; return false; } if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_CONSUMER, this)) { std::cout << "RTCContext::instance().registerListener() failed" << std::endl; return false; } return true; } bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex) { std::cout << "initRecv, numpyApi_:" << numpyApi_[93] << std::endl; if (!numpyApi_ || !numpyApi_[93]) { // 93是PyArray_SimpleNew的偏移量 std::cout << "numpyApi_ is null in initRecv" << std::endl; } else { std::cout << "numpyApi_ is not null in initRecv" << std::endl; } while (!isOnConsumer_) { std::cout << "wait for OnConsumer" << std::endl; sleep(3); } std::cout << "registerSoundLevelListener" << std::endl; int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, destRoomId, srcUserId, destChannelIndex, this); if (0 != ret1) { std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1; return false; } std::cout << "muteAudio" << std::endl; int16_t ret2 = rtcEngine_->muteAudio(destRoomId, srcUserId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM, false, destChannelIndex); if (0 != ret2) { std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2; return false; } std::cout << "init recv succ" << std::endl; return true; } bool RTCContext::initSend(const char* destRoomId, const int16_t destChannelIndex) { while (!isOnRoom_) { std::cout << "wait for OnRoom" << std::endl; sleep(3); } std::cout << "join multi room" << std::endl; int16_t ret1 = rtcEngine_->joinMultiRoom(destRoomId); if (ret1 != 0) { std::cout << "joinMultiRoom fail, ret:" << ret1; return false; } mrtc::MRTCAudioOption option; strcpy(option.dstRoomId, destRoomId); option.channelIndex = destChannelIndex; std::cout << "startCustomAudio" << std::endl; int16_t ret2 = rtcEngine_->startCustomAudio(option); if (ret2 != 0) { std::cout << "startCustomAudio fail, 
ret:" << ret2; return false; } std::cout << "init send succ" << std::endl; return true; } void RTCContext::destorySend(const int16_t selfChannelIndex) { rtcEngine_->stopCustomAudio(selfChannelIndex); } int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels, uint64_t dataLength) { std::lock_guard lock(mutex_); if (pData_) { return rtcEngine_->sendCustomAudioData(channelIndex, pData, nSampleRate, nNumberOfChannels, dataLength); } return 0; } int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate, uint64_t channelNum, uint64_t dataLen) { while(!isOnRoom_ || !isJoinMultiRoom_) { std::cout << "wait for room and multi room before send" << std::endl; sleep(3); } std::lock_guard lock(mutex_); if (customData == nullptr) { std::cout << "customData is null" << std::endl; return -1; } std::cout << "customData addr is:" << customData << std::endl; return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate, channelNum, dataLen); } mrtc::IMRTCEngine* RTCContext::getRtcEngine() const { std::lock_guard lock(mutex_); return rtcEngine_; } void* RTCContext::getpData() const { std::lock_guard lock(mutex_); return pData_; } void RTCContext::setpData(void* pData) { std::lock_guard lock(mutex_); pData_ = pData; } void RTCContext::setPyCallback(boost::python::object callback) { std::lock_guard lock(mutex_); pyCallback_ = callback; } void RTCContext::setNumpyApi(void **numpyApi) { std::lock_guard lock(mutex_); numpyApi_ = numpyApi; std::cout << "setNupyApi, numpyApi_:" << numpyApi_[93] << std::endl; }