Compare commits


40 Commits

Author SHA1 Message Date
wangjiyu 95969ffcc8 add demo:send audio to app 2025-04-24 10:31:26 +08:00
wangjiyu 756e782ef1 debug 2025-04-16 19:46:35 +08:00
wangjiyu dcb81f9915 debug 2025-04-16 19:43:18 +08:00
wangjiyu b34a1956ee debug 2025-04-16 19:40:25 +08:00
wangjiyu f00ed230a0 debug 2025-04-16 19:38:34 +08:00
wangjiyu af0a29f3dc debug 2025-04-16 19:30:53 +08:00
wangjiyu d910833a9c debug 2025-04-16 19:09:51 +08:00
wangjiyu 3157894c1a debug 2025-04-16 19:07:53 +08:00
wangjiyu ad48b2e55a debug 2025-04-16 17:53:08 +08:00
wangjiyu 0bee448294 debug 2025-04-16 17:46:00 +08:00
wangjiyu df30aa53c9 debug 2025-04-16 17:44:03 +08:00
wangjiyu 815956c01c debug 2025-04-16 17:29:00 +08:00
wangjiyu 3fbe05cf92 debug 2025-04-16 17:24:57 +08:00
wangjiyu 5c5228c3b2 debug 2025-04-16 17:04:32 +08:00
wangjiyu 6cb5d60eb5 debug 2025-04-16 16:55:10 +08:00
wangjiyu a6e6f7fdde debug 2025-04-16 16:29:16 +08:00
wangjiyu e7fc174bc7 debug 2025-04-16 16:19:22 +08:00
wangjiyu 9ef5758116 debug 2025-04-16 16:18:24 +08:00
wangjiyu 29f14acd5f debug 2025-04-16 16:04:25 +08:00
wangjiyu 7c11d681f9 debug 2025-04-16 15:59:45 +08:00
wangjiyu e089162220 debug 2025-04-16 15:44:43 +08:00
wangjiyu fe9c640c5b debug 2025-04-16 15:40:46 +08:00
wangjiyu b04c665ac6 debug 2025-04-16 15:31:43 +08:00
wangjiyu 0042f506c2 debug 2025-04-16 15:30:55 +08:00
wangjiyu 088f373770 debug 2025-04-16 11:34:51 +08:00
wangjiyu 6f24ab5105 debug 2025-04-16 11:26:13 +08:00
wangjiyu 847aad603e debug 2025-04-16 11:23:04 +08:00
wangjiyu f3fbcf94f7 debug 2025-04-16 11:18:33 +08:00
wangjiyu 80fe99160f debug 2025-04-16 11:15:19 +08:00
wangjiyu c668b06ef8 debug 2025-04-16 11:05:29 +08:00
wangjiyu a98aa02dce debug 2025-04-16 10:53:36 +08:00
wangjiyu c1130200b8 debug 2025-04-16 10:47:29 +08:00
wangjiyu c58d016357 debug 2025-04-16 10:45:48 +08:00
wangjiyu 8a2389bf68 debug 2025-04-16 10:39:45 +08:00
wangjiyu b9a981b57a debug 2025-04-15 23:43:13 +08:00
wangjiyu f53fc89531 debug 2025-04-15 23:36:59 +08:00
wangjiyu c8c3a25fe9 debug 2025-04-15 23:31:07 +08:00
wangjiyu 418654abf4 debug 2025-04-15 23:23:35 +08:00
wangjiyu 715687e85c debug 2025-04-15 23:21:48 +08:00
wangjiyu 6936dfd292 debug 2025-04-15 23:13:54 +08:00
8 changed files with 735 additions and 160 deletions

View File

@@ -1,4 +1,4 @@
-g++ -shared -fPIC \
+g++ -DBOOST_DEBUG_PYTHON -shared -fPIC \
 -I/usr/include/python3.10 -I/usr/include/python3.10/numpy -I./include \
 -L./lib -L/usr/lib/x86_64-linux-gnu \
 -DRTC_NUMPY_IMPL \
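
After this change the library is built with an extra -DBOOST_DEBUG_PYTHON define. A quick smoke test for the rebuilt module, as a hedged sketch (the module name rtc_plugins and the new exports come from the sources below; this check is not part of the commit):

import rtc_plugins

# Fail fast if the new buffer accessors were not exported.
for name in ("getSize", "getData", "getNumpyData", "getListData", "getDataCount"):
    assert hasattr(rtc_plugins, name), f"missing binding: {name}"
print("rtc_plugins loaded OK")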

View File

@@ -13,7 +13,11 @@ void** get_numpy_api() {
 namespace py = boost::python;
 int init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId, boost::python::object callback) {
+if (!PyArray_API) {
+std::cout << "PyArray_API is null in outer init" << std::endl;
+} else {
+std::cout << "PyArray_API is not null in outer init" << std::endl;
+}
 RTCContext::instance().setPyCallback(callback);
 bool res = RTCContext::instance().init(selfUserId, selfDisplayName, selfRoomId);
 if (res) {
@@ -23,7 +27,11 @@ int init(const char* selfUserId, const char* selfDisplayName, const char* selfRo
 }
 }
 int initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex) {
+if (!PyArray_API) {
+std::cout << "PyArray_API is null in outer initRecv" << std::endl;
+} else {
+std::cout << "PyArray_API is not null in outer initRecv" << std::endl;
+}
 bool res = RTCContext::instance().initRecv(destRoomId, srcUserId, destChannelIndex);
 if (res) {
 return 0;
@@ -31,7 +39,7 @@ int initRecv(const char* destRoomId, const char* srcUserId, const int16_t destCh
 return -1;
 }
 }
-int initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, const uint8_t channelNum) {
+int initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, const int16_t channelNum) {
 bool res = RTCContext::instance().initSend(srcRoomId, destRoomId, destChannelIndex, channelNum);
 if (res) {
 return 0;
@@ -39,7 +47,21 @@ int initSend(const char* srcRoomId, const char* destRoomId, const int16_t destCh
 return -1;
 }
 }
int getSize() {
return RTCContext::instance().getSize();
}
namespace bp = boost::python;
namespace np = boost::python::numpy;
np::ndarray getNumpyData() {
return RTCContext::instance().getNumpyData();
}
bp::list getListData() {
return RTCContext::instance().getListData();
}
int16_t getDataCount() {
return RTCContext::instance().getDataCount();
}
 py::object create_int16_array() {
 // 1. 定义数组维度1维长度为 4
 npy_intp dims[1] = {4};
@@ -98,6 +120,65 @@ int sendCustomAudioData(int16_t destChannelIndex, py::object pD,
 return -1;
 }
 }
RetAudioFrame getData() {
return RTCContext::instance().getData();
}
/*
int sendCustomAudioData(const int16_t destChannelIndex, py::object pyData, int32_t sampleRate, uint64_t channelNum,
uint64_t dataLen) {
try {
//py::object pyData = create_int16_array();
std::cout << "step 1" << std::endl;
// 1. 检查输入有效性
if (pyData.ptr() == nullptr) {
throw std::runtime_error("Input data is NULL");
}
std::cout << "step 2" << std::endl;
std::cout << "pyData ptr is:" << pyData.ptr() << std::endl;
if (!pyData.ptr() || !Py_IsInitialized() || !PyObject_TypeCheck(pyData.ptr(), &PyBaseObject_Type)) {
throw std::runtime_error("Invalid Python object");
}
std::cout << "step 2" << std::endl;
// 2. 检查是否是 numpy 数组
if (!PyArray_Check(pyData.ptr())) {
std::cout << "input is notnumpy" << std::endl;
throw std::runtime_error("Input is not a numpy array");
}
std::cout << "step 3" << std::endl;
// 3. 转换为 PyArrayObject
PyArrayObject* npArray = reinterpret_cast<PyArrayObject*>(pyData.ptr());
std::cout << "step 4" << std::endl;
// 4. 检查数据类型是否为 int16
if (PyArray_TYPE(npArray) != NPY_INT16) {
throw std::runtime_error("Array must be of type int16 (np.int16)");
}
std::cout << "step 5" << std::endl;
// 5. 检查数据是否连续
if (!PyArray_ISCONTIGUOUS(npArray)) {
throw std::runtime_error("Array must be contiguous in memory");
}
std::cout << "step 6" << std::endl;
// 6. 获取数据指针
void* dataPtr = PyArray_DATA(npArray);
if (dataPtr == nullptr) {
throw std::runtime_error("Invalid data pointer");
}
std::cout << "step 7" << std::endl;
return RTCContext::instance().sendCustomAudioData(destChannelIndex, dataPtr, sampleRate, channelNum, dataLen);
} catch (const std::exception& e) {
std::cout << "error:" << e.what() << std::endl;
return -1;
}
}
*/
 void init_numpy() {
 // 直接调用底层函数,绕过宏的问题
@@ -120,10 +201,23 @@ BOOST_PYTHON_MODULE(rtc_plugins) {
 std::cout << "set numpyApi succ:" << numpyApi[93] << std::endl;
 }
+/*
+if (!PyArray_API) {
+std::cout << "PyArray_API is null" << std::endl;
+} else {
+std::cout << "PyArray_API is not null" << std::endl;
+}
+*/
 py::def("init", &init);
 py::def("initRecv", &initRecv);
 py::def("initSend", &initSend);
 py::def("sendCustomAudioData", &sendCustomAudioData);
+py::def("getSize", &getSize);
+py::def("getData", &getData);
+py::def("getNumpyData", &getNumpyData);
+py::def("getListData", &getListData);
+py::def("getDataCount", &getDataCount);
 } catch (...) {
 PyErr_SetString(PyExc_RuntimeError, "Module initialization failed");
 }
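
The bindings above add buffer accessors (getSize, getData, getNumpyData, getListData, getDataCount) next to the existing init/send functions. A minimal polling sketch from the Python side, assuming the module is built as rtc_plugins and a session has already been set up with init/initRecv:

import numpy as np
import rtc_plugins

# Drain whatever audio frames the C++ ring buffer currently holds.
while rtc_plugins.getSize() > 0:
    samples = rtc_plugins.getListData()          # plain Python list of int16 sample values
    frame = np.asarray(samples, dtype=np.int16)  # convert for further processing
    print(f"drained frame with {frame.size} samples")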

54
test_audio_s.py Normal file
View File

@@ -0,0 +1,54 @@
import rtc_plugins
import time
import numpy as np
import wave
from scipy.io import wavfile
srcUserId = "srcUser12"
destUserId = "destUser12"
srcDisplayName = "srcDisplayName12"
destDisplayName = "destDisplayName12"
srcRoomId = "srcRoom12"
#destRoomId = "destRoomId12"
destRoomId = srcRoomId
srcChannelIndex = 46
destChannelIndex = 47
def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
print(f"my_callback, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
print(f"data:{shmName}")
print("after my_callback_r")
ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
if ret != 0:
print(f"init fail, ret:{ret}")
exit(1)
ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
if ret != 0:
print(f"initSend fail, ret:{ret}")
exit(1)
#audioData = np.array([0, 1, -1, 0], dtype=np.int16)
sampleRate, audioData = wavfile.read("xusample1.wav")
print(f"sampleRate:{sampleRate} HZ")
print(f"shape:{audioData.shape}")
print(f"type:{audioData.dtype}")
if audioData.dtype != np.int16:
audioData = (audioData * 32767).astype(np.int16)
ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, sampleRate, 1, len(audioData))
if ret != 0:
print(f"send fail, ret:{ret}")
ret = rtc_plugins.initRecv(srcRoomId, srcUserId, srcChannelIndex)
if ret != 0:
print(f"initRecv fail, ret:{ret}")
exit(1)
for i in range(100):
#ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, sampleRate, 1, len(audioData))
#if ret != 0:
# print(f"send fail, ret:{ret}")
#size = rtc_plugins.getSize()
#print(f"data size:{size}")
#frame = rtc_plugins.getListData()
#print(f"get frame:{frame}")
#dataCount = rtc_plugins.getDataCount()
#print(f"data count:{dataCount}")
time.sleep(3)
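
The script above assumes xusample1.wav decodes to mono int16 samples; only the dtype is checked before sendCustomAudioData. A hedged preprocessing sketch for other inputs (the single-channel downmix is an assumption, not part of the committed script, and the 32767 scaling only applies to float data in [-1, 1]):

sampleRate, audioData = wavfile.read("xusample1.wav")
if audioData.ndim > 1:
    audioData = audioData[:, 0]  # keep one channel, since channelNum=1 is passed to the plugin
if audioData.dtype != np.int16:
    audioData = (audioData * 32767).astype(np.int16)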

View File

@@ -17,7 +17,7 @@ ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
 if ret != 0:
     print(f"init fail, ret:{ret}")
     exit(1)
-ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
+ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex)
 if ret != 0:
     print(f"initSend fail, ret:{ret}")
     exit(1)

View File

@@ -4,6 +4,7 @@ import numpy as np
 import mmap
 import os
 from ctypes import c_int16
+import struct
 srcUserId = "srcUser12"
 destUserId = "destUser12"
@@ -15,11 +16,12 @@ srcRoomId = "srcRoom12"
 destRoomId = srcRoomId
 srcChannelIndex = 46
 destChannelIndex = 47
-def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
-    print(f"dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
+def my_callback_r(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
+    print(f"my_callback_r, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
     print(f"data:{shmName}")
+    print("after my_callback_r")
-ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback)
+ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback_r)
 if ret != 0:
     print(f"init fail, ret:{ret}")
     exit(1)
@@ -34,10 +36,17 @@ if ret != 0:
 while True:
     print("recv")
-    #audioData = np.array([0, 1, -1, 0], dtype=np.int16)
-    #ret = rtc_plugins.sendCustomAudioData(srcChannelIndex, audioData, 48000, 1, len(audioData))
-    #if ret != 0:
-    #    print(f"resend fail, ret:{ret}")
-    #else:
-    #    print("resend succ")
-    time.sleep(30)
+    audioData = np.array([0, 1, -1, 0], dtype=np.int16)
+    ret = rtc_plugins.sendCustomAudioData(srcChannelIndex, audioData, 48000, 1, len(audioData))
+    if ret != 0:
+        print(f"resend fail, ret:{ret}")
+    else:
+        print("resend succ")
+    size = rtc_plugins.getSize()
+    print(f"data size:{size}")
+    #frame = rtc_plugins.getNumpyData()
+    frame = rtc_plugins.getListData()
+    print(f"get frame:{frame}")
+    dataCount = rtc_plugins.getDataCount()
+    print(f"data count:{dataCount}")
+    time.sleep(0.005)
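
For offline inspection, the frames drained in this loop can also be concatenated and written out as a WAV file. A minimal sketch, assuming 48 kHz mono int16 audio as used elsewhere in these test scripts (recv_dump.wav is a hypothetical output path):

from scipy.io import wavfile

chunks = []
while rtc_plugins.getSize() > 0:
    chunks.append(np.asarray(rtc_plugins.getListData(), dtype=np.int16))
if chunks:
    wavfile.write("recv_dump.wav", 48000, np.concatenate(chunks))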

View File

@@ -12,8 +12,10 @@ srcRoomId = "srcRoom12"
 destRoomId = srcRoomId
 srcChannelIndex = 46
 destChannelIndex = 47
-def my_callback(npData, dataCount, sampleRate, numChannels, channelIndex):
-    print(f"dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
+def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
+    print(f"my_callback, dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
+    print(f"data:{shmName}")
+    print("after my_callback_r")
 ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
 if ret != 0:
     print(f"init fail, ret:{ret}")
@@ -34,4 +36,11 @@ for i in range(100):
     ret = rtc_plugins.sendCustomAudioData(destChannelIndex, audioData, 48000, 1, len(audioData))
     if ret != 0:
         print(f"send fail, ret:{ret}")
-    time.sleep(30)
+    size = rtc_plugins.getSize()
+    print(f"data size:{size}")
+    frame = rtc_plugins.getListData()
+    print(f"get frame:{frame}")
+    dataCount = rtc_plugins.getDataCount()
+    print(f"data count:{dataCount}")
+    time.sleep(3)

View File

@@ -1,5 +1,4 @@
 #include "RTCContext.h"
-#define GIL
 void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
 //std::cout << "RTCContext::onRoom():" << roomInfo.roomId << "," << roomInfo.displayName << "," << roomInfo.userId << "," << roomInfo.message;
@@ -7,30 +6,31 @@ void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& room
 std::lock_guard<std::mutex> lock(mutex_);
 isOnRoom_ = true;
 }
-void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId,
-RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
-std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << "," << consumerInfo.displayName << ","
-<< consumerInfo.channelIndex << std::endl;
-if (isRecv_) {
+void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
+//std::cout << "RTCContext::onConsumer()" << std::endl;
+std::cout << "RTCContext::onConsumer():msgId:" << msgId << ", roomId:" << consumerInfo.roomId << ", displayName:"
+<< consumerInfo.displayName << ", channelIndex" << (int)consumerInfo.channelIndex;
 std::lock_guard<std::mutex> lock(mutex_);
+isOnConsumer_ = true;
 std::cout << "registerSoundLevelListener" << std::endl;
-int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, consumerInfo.roomId,
+int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, roomId,
 peerId, consumerInfo.channelIndex, this);
-if (0 != ret1) {
+if (0 != ret1)
+{
 std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1;
 return;
 }
 std::cout << "muteAudio" << std::endl;
-int16_t ret2 = rtcEngine_->muteAudio(consumerInfo.roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
+int16_t ret2 = rtcEngine_->muteAudio(roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
 false, consumerInfo.channelIndex);
-if (0 != ret2) {
+if (0 != ret2)
+{
 std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2;
 return;
 }
 std::cout << "init recv succ" << std::endl;
-}
 }
 void RTCContext::onRender(const char* roomId, const char* peerId,
 RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
@@ -40,11 +40,12 @@ void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
 std::lock_guard<std::mutex> lock(mutex_);
 if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
-std::cout << "receive join multi room callback" << msgId << std::endl;
+std::cout << "receive join multi room callback" << msgId;
 isJoinMultiRoom_ = true;
 }
-std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
+std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg;
+//std::cout << "RTCContext::onCallBackMessage()" << std::endl;
 }
 void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
 //std::cout << "RTCContext::onCallBackCustomData(), obj:" << object.peerId << "," << object.data << "," << object.data_length;
@@ -55,24 +56,136 @@ void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint
 {
 std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
 }
/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
mrtc::MRTCAudioFrame& audioFrame,
mrtc::MRTCAudioSourceType audioSourceType)
{
namespace py = boost::python;
PyGILState_STATE gstate = PyGILState_Ensure();
try {
std::cout << "-----------------------------------" << std::endl;
std::cout << "dataCount:" << audioFrame.dataCount << std::endl;
std::cout << "dataCount value: " << audioFrame.dataCount
<< " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
std::cout << "onAudioProcess, numpyApi_:" << numpyApi_[93] << std::endl;
if (!numpyApi_ || !numpyApi_[93]) { // 93是PyArray_SimpleNew的偏移量
std::cout << "numpyApi_ is null in onAudioProcess" << std::endl;
} else {
std::cout << "numpyApi_ is not null in onAudioProcess:" << numpyApi_[93] << std::endl;
}
//auto numpyApi = RTCContext::numpy_api();
std::cout << "step1" << std::endl;
if (!numpyApi_) {
PyGILState_Release(gstate);
throw std::runtime_error("NumPy C-API not initialized. Call import_array() in module init");
}
std::cout << "step2" << std::endl;
using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
void* func_ptr = numpyApi_[93];
std::cout << "Raw function pointer: " << func_ptr << std::endl;
auto ptmp = (PyObject*(*)(int, npy_intp*, int))numpyApi_[93];
std::cout << "ptmp is:" << ptmp << std::endl;
std::cout << "Pointer sizes:\n"
<< "void*: " << sizeof(void*) << "\n"
<< "FunctionPtr: " << sizeof(PyObject*(*)(int, npy_intp*, int)) << std::endl;
// 2. 使用memcpy避免编译器优化问题
PyArray_SimpleNew_t PyArray_SimpleNew;
static_assert(sizeof(func_ptr) == sizeof(PyArray_SimpleNew),
"Pointer size mismatch");
std::cout << "step3" << std::endl;
memcpy(&PyArray_SimpleNew, &func_ptr, sizeof(func_ptr));
//auto PyArray_SimpleNew = reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
std::cout << "step4, PyArray_SimpleNew:" << PyArray_SimpleNew << std::endl;
// 3. 严格校验输入数据
if (!audioFrame.data || audioFrame.dataCount <= 0) {
PyGILState_Release(gstate);
throw std::invalid_argument("Invalid audio frame data");
}
std::cout << "step5" << std::endl;
// 4. 安全创建维度数组(带边界检查)
if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
PyGILState_Release(gstate);
throw std::overflow_error("Audio frame size exceeds maximum limit");
}
std::cout << "step6" << std::endl;
npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
std::cout << "step7" << std::endl;
// 5. 创建NumPy数组带内存保护
PyObject* pyArray = nullptr;
pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
std::cout << "step8" << std::endl;
if (!pyArray) {
PyGILState_Release(gstate);
throw std::bad_alloc();
}
std::cout << "step9" << std::endl;
// 6. 安全拷贝数据(带对齐检查)
if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
Py_DECREF(pyArray);
PyGILState_Release(gstate);
throw std::runtime_error("Unaligned audio data pointer");
}
std::cout << "step10" << std::endl;
std::memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(pyArray)),
audioFrame.data,
audioFrame.dataCount * sizeof(int16_t));
std::cout << "step11" << std::endl;
// 7. 执行回调(带引用计数保护)
if (!pyCallback_.is_none()) {
try {
pyCallback_(
py::handle<>(pyArray), // 自动管理引用
audioFrame.dataCount,
audioFrame.sampleRate,
audioFrame.numChannels,
audioFrame.channelIndex
);
} catch (...) {
Py_DECREF(pyArray);
throw; // 重新抛出异常
}
}
std::cout << "step12" << std::endl;
// 8. 释放资源
Py_DECREF(pyArray);
std::cout << "step13" << std::endl;
PyGILState_Release(gstate);
std::cout << "step14" << std::endl;
} catch (const std::exception& e) {
std::cerr << "Audio process error: " << e.what() << std::endl;
PyErr_Print();
}
exit(0);
}
*/
/*
 void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
 mrtc::MRTCAudioFrame& audioFrame,
 mrtc::MRTCAudioSourceType audioSourceType)
 {
-namespace np = boost::python::numpy;
 namespace py = boost::python;
-Py_Initialize(); // 初始化 Python
-np::initialize();
 std::cout << "=== 开始音频处理 ===" << std::endl;
-std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << "," <<
-audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;
 // 1. 获取GIL
 std::cout << "[1] 获取GIL锁..." << std::endl;
-#ifdef GIL
 PyGILState_STATE gstate = PyGILState_Ensure();
-#endif
 try {
 // 2. 输入参数校验
@@ -85,122 +198,285 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
 throw std::invalid_argument("Invalid audio frame data");
 }
-const size_t data_size = audioFrame.dataCount * sizeof(int16_t);
-std::cout << "step1" << std::endl;
-namespace py = boost::python;
-namespace np = boost::python::numpy;
-npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
-std::cout << "step2" << std::endl;
-// 7. 执行回调
+if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
+std::cout << "[ERROR] 数据长度超过最大值" << std::endl;
+throw std::overflow_error("Audio frame size exceeds maximum limit");
+}
+// 3. 准备数组维度
+std::cout << "[3] 准备数组维度..." << std::endl;
+npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
+std::cout << " 维度设置完成: [" << dims[0] << "]" << std::endl;
+// 4. 检查NumPy API状态
+std::cout << "[4] 检查NumPy API状态..." << std::endl;
+std::cout << " numpyApi_ 地址: " << numpyApi_ << std::endl;
+if (!numpyApi_) {
+throw std::runtime_error("NumPy C-API not initialized");
+}
+// 5. 获取PyArray_SimpleNew函数
+std::cout << "[5] 获取PyArray_SimpleNew函数..." << std::endl;
+using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
+PyArray_SimpleNew_t PyArray_SimpleNew =
+reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
+std::cout << " 函数地址: " << (void*)PyArray_SimpleNew << std::endl;
+std::cout << "[5.1] 验证函数指针..." << std::endl;
+void* func_ptr = numpyApi_[93];
+if (reinterpret_cast<uintptr_t>(func_ptr) < 0x1000) { // 检查是否为合法地址
+std::cerr << "非法函数指针: " << func_ptr << std::endl;
+throw std::runtime_error("Invalid PyArray_SimpleNew pointer");
+}
+// 6. 创建NumPy数组
+std::cout << "[6] 创建NumPy数组..." << std::endl;
+PyObject* pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
+std::cout << " 数组地址: " << pyArray << std::endl;
+if (!pyArray) {
+throw std::bad_alloc();
+}
+// 7. 检查内存对齐
+std::cout << "[7] 检查内存对齐..." << std::endl;
+std::cout << " 音频数据地址: " << (void*)audioFrame.data
+<< " 对齐要求: " << alignof(int16_t) << std::endl;
+if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
+Py_DECREF(pyArray);
+throw std::runtime_error("Unaligned audio data pointer");
+}
+// 8. 拷贝数据
+std::cout << "[8] 拷贝音频数据..." << std::endl;
+std::cout << " 目标地址: " << PyArray_DATA((PyArrayObject*)pyArray)
+<< " 字节数: " << audioFrame.dataCount * sizeof(int16_t) << std::endl;
+std::memcpy(PyArray_DATA((PyArrayObject*)pyArray),
+audioFrame.data,
+audioFrame.dataCount * sizeof(int16_t));
+// 9. 执行回调
 if (!pyCallback_.is_none()) {
-std::cout << "[7] 准备执行Python回调..." << std::endl;
-// 增加引用计数防止提前释放
-//Py_INCREF(pyCallback_.ptr());
+std::cout << "[9] 准备执行Python回调..." << std::endl;
 try {
-//PyGILState_STATE gstate = PyGILState_Ensure();
-std::cout << "data:" << audioFrame.data << std::endl;
-std::cout << "当前线程是否持有 GIL: " << PyGILState_Check() << std::endl;
-np::dtype dtype = np::dtype::get_builtin<int16_t>();
-std::cout << "init dtype" << std::endl;
-if (!Py_IsInitialized()) {
-std::cerr << "Python 解释器未初始化!" << std::endl;
-return;
-}
-try {
-py::object str_repr = py::str(dtype);
-std::cout << "str_repr" << std::endl;
-if(str_repr.ptr() != Py_None) {
-std::cout << "str_repr is not null" << std::endl;
-std::string dtype_str = py::extract<std::string>(str_repr);
-std::cout << "数据类型: " << dtype_str << std::endl;
-} else {
-std::cout << "数据类型: None" << std::endl;
-}
-} catch (const py::error_already_set&) {
-std::cout<< "数据类型转换错误" << std::endl;
-PyErr_Clear();
-}
-std::cout << "数据形状: " << shape[0] << std::endl;
-np::ndarray audioArray = np::from_data(
-audioFrame.data, // 数据指针
-dtype, // 数据类型 (int16)
-py::make_tuple(shape[0]), // 形状 (1D)
-py::make_tuple(sizeof(int16_t)), // 步长
-py::object() // 所有者Python管理
-);
-std::cout << " 数据拷贝完成" << std::endl;
 pyCallback_(
-audioArray, // numpy 数组
-data_size, // 数据大小
+py::handle<>(pyArray),
 audioFrame.dataCount,
 audioFrame.sampleRate,
 audioFrame.numChannels,
 audioFrame.channelIndex
 );
-std::cout << " after callback" << std::endl;
-if (PyErr_Occurred()) {
-PyObject *type, *value, *traceback;
-PyErr_Fetch(&type, &value, &traceback);
-if (value) {
-PyObject* str = PyObject_Str(value);
-if (str) {
-std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
-Py_DECREF(str);
-}
-}
-Py_XDECREF(type);
-Py_XDECREF(value);
-Py_XDECREF(traceback);
-//PyErr_Print();
-throw std::runtime_error("Python callback error");
-}
 std::cout << " 回调执行成功" << std::endl;
-} catch (const py::error_already_set& e) {
-std::cerr << "[PYTHON ERROR] ";
-PyErr_Print(); // 自动打印到stderr
-// 可选:获取更详细的错误信息
-if (PyErr_Occurred()) {
-PyObject *type, *value, *traceback;
-PyErr_Fetch(&type, &value, &traceback);
-std::cerr << "Details: "
-<< PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
-PyErr_Restore(type, value, traceback);
-}
-//Py_DECREF(pyCallback_.ptr());
 } catch (...) {
 std::cout << "[ERROR] 回调执行失败" << std::endl;
-//Py_DECREF(pyCallback_.ptr());
+Py_DECREF(pyArray);
 throw;
 }
-//Py_DECREF(pyCallback_.ptr());
 } else {
-std::cout << "[7] 无回调函数设置" << std::endl;
+std::cout << "[9] 无回调函数设置" << std::endl;
 }
-// 8. 释放资源
-std::cout << "[8] 释放共享内存资源..." << std::endl;
+// 10. 释放资源
+std::cout << "[10] 释放资源..." << std::endl;
+Py_DECREF(pyArray);
-std::cout << "[9] 释放GIL..." << std::endl;
+std::cout << "[11] 释放GIL..." << std::endl;
-#ifdef GIL
 PyGILState_Release(gstate);
-#endif
 std::cout << "=== 音频处理完成 ===" << std::endl;
 } catch (const std::exception& e) {
 std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl;
-#ifdef GIL
 PyGILState_Release(gstate);
-#endif
+PyErr_Print();
 std::cerr << "Audio process error: " << e.what() << std::endl;
 }
-#ifdef GIL
-PyGILState_Release(gstate);
-#endif
+}
+*/
+void printTimestamp() {
// 获取系统当前时间点
auto now = std::chrono::system_clock::now();
// 转换为时间戳(秒 + 毫秒)
auto timestamp = std::chrono::duration_cast<std::chrono::seconds>(
now.time_since_epoch()).count();
auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(
now.time_since_epoch()).count() % 1000;
// 转换为本地时间(可读格式)
std::time_t time = std::chrono::system_clock::to_time_t(now);
std::cout << "Timestamp: " << timestamp << "." << milliseconds << std::endl;
}
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
mrtc::MRTCAudioFrame& audioFrame,
mrtc::MRTCAudioSourceType audioSourceType)
{
//namespace py = boost::python;
//std::cout << "=== 开始音频处理(共享内存版) ===" << std::endl;
//std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << "," <<
// audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;
//printTimestamp();
setData(audioFrame);
// 1. 获取GIL
//std::cout << "[1] 获取GIL锁..." << std::endl;
////PyGILState_STATE gstate = PyGILState_Ensure();
//try {
// // 2. 输入参数校验
// std::cout << "[2] 检查输入参数..." << std::endl;
// std::cout << " dataCount: " << audioFrame.dataCount
// << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
// if (!audioFrame.data || audioFrame.dataCount <= 0) {
// std::cout << "[ERROR] 无效音频数据指针或长度" << std::endl;
// throw std::invalid_argument("Invalid audio frame data");
// }
// const size_t data_size = audioFrame.dataCount * sizeof(int16_t);
// // 3. 创建共享内存
// std::cout << "[3] 创建共享内存..." << std::endl;
// char shm_name[32];
// //snprintf(shm_name, sizeof(shm_name), "/audio_shm_%d", getpid());
// snprintf(shm_name, sizeof(shm_name), "/audio_shm_test");
// int fd = shm_open(shm_name, O_CREAT | O_RDWR, 0666);
// if (fd == -1) {
// std::cout << "[ERROR] shm_open失败: " << strerror(errno) << std::endl;
// throw std::runtime_error("Failed to create shared memory");
// }
// std::cout << " 共享内存fd: " << fd << " 名称: " << shm_name << std::endl;
// // 4. 设置共享内存大小
// std::cout << "[4] 设置共享内存大小..." << std::endl;
// if (ftruncate(fd, data_size) == -1) {
// close(fd);
// std::cout << "[ERROR] ftruncate失败: " << strerror(errno) << std::endl;
// throw std::runtime_error("Failed to resize shared memory");
// }
// std::cout << " 内存大小: " << data_size << " bytes" << std::endl;
// // 5. 内存映射
// std::cout << "[5] 内存映射..." << std::endl;
// void* ptr = mmap(NULL, data_size, PROT_WRITE, MAP_SHARED, fd, 0);
// if (ptr == MAP_FAILED) {
// close(fd);
// std::cout << "[ERROR] mmap失败: " << strerror(errno) << std::endl;
// throw std::runtime_error("Failed to map shared memory");
// }
// std::cout << " 映射地址: " << ptr << std::endl;
// namespace py = boost::python;
// namespace np = boost::python::numpy;
// // 6. 拷贝数据到共享内存
// std::cout << "[6] 拷贝音频数据到共享内存..." << std::endl;
// memcpy(ptr, audioFrame.data, data_size);
// std::cout << "step1" << std::endl;
// /*
// npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
// std::cout << "step2" << std::endl;
// np::dtype dtype = np::dtype::get_builtin<int16_t>();
// std::cout << "step3" << std::endl;
// np::ndarray audioArray = np::from_data(
// audioFrame.data, // 数据指针
// dtype, // 数据类型 (int16)
// py::make_tuple(shape[0]), // 形状 (1D)
// py::make_tuple(sizeof(int16_t)), // 步长
// py::object() // 所有者Python管理
// );
// */
// std::cout << " 数据拷贝完成" << std::endl;
// // 7. 执行回调
// //if (!pyCallback_.is_none()) {
// // std::cout << "[7] 准备执行Python回调..." << std::endl;
// // // 增加引用计数防止提前释放
// // Py_INCREF(pyCallback_.ptr());
// // try {
// // std::cout << " pyCallback_ type: " << Py_TYPE(pyCallback_.ptr())->tp_name << std::endl;
// // PyObject* repr = PyObject_Repr(pyCallback_.ptr());
// // if (repr) {
// // std::cout << " pyCallback_ repr: " << PyUnicode_AsUTF8(repr) << std::endl;
// // Py_DECREF(repr); // 必须手动释放
// // }
// // // 传递共享内存信息
// // pyCallback_(
// // py::str(shm_name), // 共享内存名称
// // data_size, // 数据大小
// // audioFrame.dataCount,
// // audioFrame.sampleRate,
// // audioFrame.numChannels,
// // audioFrame.channelIndex
// // );
// // /*
// // pyCallback_(
// // audioArray, // numpy 数组
// // data_size, // 数据大小
// // audioFrame.dataCount,
// // audioFrame.sampleRate,
// // audioFrame.numChannels,
// // audioFrame.channelIndex
// // );
// // */
// // std::cout << " after callback" << std::endl;
// // if (PyErr_Occurred()) {
// // PyObject *type, *value, *traceback;
// // PyErr_Fetch(&type, &value, &traceback);
// // if (value) {
// // PyObject* str = PyObject_Str(value);
// // if (str) {
// // std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
// // Py_DECREF(str);
// // }
// // }
// // Py_XDECREF(type);
// // Py_XDECREF(value);
// // Py_XDECREF(traceback);
// // //PyErr_Print();
// // throw std::runtime_error("Python callback error");
// // }
// // std::cout << " 回调执行成功" << std::endl;
// // } catch (const py::error_already_set& e) {
// // std::cerr << "[PYTHON ERROR] ";
// // PyErr_Print(); // 自动打印到stderr
// // // 可选:获取更详细的错误信息
// // if (PyErr_Occurred()) {
// // PyObject *type, *value, *traceback;
// // PyErr_Fetch(&type, &value, &traceback);
// // std::cerr << "Details: "
// // << PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
// // PyErr_Restore(type, value, traceback);
// // }
// // Py_DECREF(pyCallback_.ptr());
// // } catch (...) {
// // std::cout << "[ERROR] 回调执行失败" << std::endl;
// // munmap(ptr, data_size);
// // close(fd);
// // shm_unlink(shm_name);
// // Py_DECREF(pyCallback_.ptr());
// // throw;
// // }
// // Py_DECREF(pyCallback_.ptr());
// //} else {
// // std::cout << "[7] 无回调函数设置" << std::endl;
// //}
// // 8. 释放资源
// std::cout << "[8] 释放共享内存资源..." << std::endl;
// munmap(ptr, data_size);
// close(fd);
// shm_unlink(shm_name);
// std::cout << "[9] 释放GIL..." << std::endl;
// //PyGILState_Release(gstate);
// std::cout << "=== 音频处理完成 ===" << std::endl;
//} catch (const std::exception& e) {
// std::cout << "[EXCEPTION] 异常捕获: " << e.what() << std::endl;
// //PyGILState_Release(gstate);
// std::cerr << "Audio process error: " << e.what() << std::endl;
//}
} }
void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info) void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
@@ -261,20 +537,51 @@ bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const
 std::cout << "RTCContext::instance().registerListener() failed" << std::endl;
 return false;
 }
-//namespace py = boost::python;
-//namespace np = boost::python::numpy;
-//Py_Initialize(); // 初始化 Python
-//np::initialize();
+namespace py = boost::python;
+namespace np = boost::python::numpy;
+Py_Initialize(); // 初始化 Python
+np::initialize();
 return true;
 }
 bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
 {
-isRecv_ = true;
+std::cout << "initRecv, numpyApi_:" << numpyApi_[93] << std::endl;
+if (!numpyApi_ || !numpyApi_[93]) { // 93是PyArray_SimpleNew的偏移量
+std::cout << "numpyApi_ is null in initRecv" << std::endl;
+} else {
+std::cout << "numpyApi_ is not null in initRecv" << std::endl;
+}
while (!isOnConsumer_)
{
std::cout << "wait for OnConsumer" << std::endl;
sleep(3);
}
/*
std::cout << "registerSoundLevelListener" << std::endl;
int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, destRoomId,
srcUserId, destChannelIndex, this);
if (0 != ret1)
{
std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1;
return false;
}
std::cout << "muteAudio" << std::endl;
int16_t ret2 = rtcEngine_->muteAudio(destRoomId, srcUserId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM, false, destChannelIndex);
if (0 != ret2)
{
std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2;
return false;
}
std::cout << "init recv succ" << std::endl;
*/
 return true;
 }
-bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum)
+bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex,
+const uint8_t channelNum)
 {
 while (!isOnRoom_)
 {
@@ -295,11 +602,11 @@ bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const i
 }
 mrtc::MRTCAudioOption option;
-option.channel = channelNum;
 if (std::string(srcRoomId) != std::string(destRoomId)) {
 strcpy(option.dstRoomId, destRoomId);
 }
 option.channelIndex = destChannelIndex;
+option.channel = channelNum;
 std::cout << "startCustomAudio" << std::endl;
 int16_t ret2 = rtcEngine_->startCustomAudio(option);
 if (ret2 != 0)
@@ -310,16 +617,12 @@ bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const i
 std::cout << "init send succ" << std::endl;
 return true;
 }
-bool RTCContext::initGIL() {
-isGIL_ = true;
-}
 void RTCContext::destorySend(const int16_t selfChannelIndex)
 {
 rtcEngine_->stopCustomAudio(selfChannelIndex);
 }
-int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels,
-uint64_t dataLength)
+int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels, uint64_t dataLength)
 {
 std::lock_guard<std::mutex> lock(mutex_);
 if (pData_)
@@ -341,8 +644,7 @@ int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* custom
 return -1;
 }
 std::cout << "customData addr is:" << customData << std::endl;
-return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate,
-channelNum, dataLen);
+return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate, channelNum, dataLen);
 }
 mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
 {
@@ -368,3 +670,87 @@ void RTCContext::setNumpyApi(void **numpyApi) {
 numpyApi_ = numpyApi;
 std::cout << "setNupyApi, numpyApi_:" << numpyApi_[93] << std::endl;
 }
void RTCContext::setData(const mrtc::MRTCAudioFrame& frame) {
std::lock_guard<std::mutex> lock(dataMutex_);
if (dataSize_ == totalSize_) {
bottom_ = (bottom_ + 1) % totalSize_;
dataSize_--;
}
RetAudioFrame newFrame;
newFrame.dataCount = frame.dataCount;
newFrame.sampleRate = frame.sampleRate;
newFrame.numChannels = frame.numChannels;
newFrame.channelIndex = frame.channelIndex;
newFrame.data = std::make_unique<int16_t[]>(frame.dataCount);
std::memcpy(newFrame.data.get(), frame.data, frame.dataCount* sizeof(int16_t));
data_[head_] = std::move(newFrame);
head_ = (head_ + 1) % totalSize_;
dataSize_++;
}
RetAudioFrame RTCContext::getData() {
//std::lock_guard<std::mutex> lock(dataMutex_);
if (dataSize_ > 0) {
RetAudioFrame frame = std::move(data_[bottom_]); // 移动而非拷贝
bottom_ = (bottom_ + 1) % totalSize_;
dataSize_--;
return frame; // 返回值优化(RVO)会生效
}
return {}; // 返回空对象
}
namespace bp = boost::python;
namespace np = boost::python::numpy;
np::ndarray RTCContext::getNumpyData() {
std::cout << "step1" << std::endl;
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
std::cout << "step2" << std::endl;
int16_t* dataPtr = frame.data.get(); // 你的数据指针
std::cout << "step3" << std::endl;
size_t length = frame.dataCount; // 数据长度
std::cout << "step4" << std::endl;
PyGILState_STATE gstate = PyGILState_Ensure();
np::ndarray result = np::empty(bp::make_tuple(length), np::dtype::get_builtin<int16_t>());
try {
if (!dataPtr || length == 0) {
result = np::zeros(bp::make_tuple(0), np::dtype::get_builtin<int16_t>());
} else {
result = np::empty(bp::make_tuple(length), np::dtype::get_builtin<int16_t>());
std::memcpy(result.get_data(), dataPtr, length * sizeof(int16_t));
}
} catch (...) {
PyGILState_Release(gstate); // 异常时释放GIL
throw;
}
PyGILState_Release(gstate);
return result;
}
bp::list RTCContext::getListData() {
std::cout << "step1" << std::endl;
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
std::cout << "step2" << std::endl;
int16_t* dataPtr = frame.data.get(); // 你的数据指针
std::cout << "step3" << std::endl;
size_t length = frame.dataCount; // 数据长度
std::cout << "step4" << std::endl;
bp::list result;
if (dataPtr && length > 0) {
for (size_t i = 0; i < length; ++i) {
result.append(dataPtr[i]); // 逐个元素添加(值传递)
}
}
return result;
}
int16_t RTCContext::getDataCount() {
std::lock_guard<std::mutex> lock(dataMutex_);
RetAudioFrame frame = getData();
return frame.dataCount;
}
int16_t RTCContext::getSize() {
std::lock_guard<std::mutex> lock(dataMutex_);
return dataSize_;
}
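
setData/getData above implement a fixed-capacity ring buffer (totalSize_ = 100) guarded by dataMutex_: head_ is the next write slot, bottom_ the oldest frame, and a full buffer drops its oldest entry before writing. A compact Python analogue of the same indexing, purely for illustration:

class RingBuffer:
    def __init__(self, capacity=100):
        self.buf = [None] * capacity
        self.capacity = capacity
        self.size = 0
        self.head = 0     # next write position (head_)
        self.bottom = 0   # oldest element (bottom_)

    def put(self, frame):
        if self.size == self.capacity:  # full: overwrite the oldest frame
            self.bottom = (self.bottom + 1) % self.capacity
            self.size -= 1
        self.buf[self.head] = frame
        self.head = (self.head + 1) % self.capacity
        self.size += 1

    def get(self):
        if self.size == 0:
            return None
        frame = self.buf[self.bottom]
        self.bottom = (self.bottom + 1) % self.capacity
        self.size -= 1
        return frame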

View File

@@ -31,10 +31,22 @@
 // 必须声明外部变量(关键!)
 //#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+namespace bp = boost::python;
 namespace fs = std::filesystem;
+namespace np = boost::python::numpy;
 #define ENV_PRODUCT
 //#define SEND_MODE
+// 音频数据帧
+struct RetAudioFrame final
+{
+std::unique_ptr<int16_t[]> data;
+int dataCount = 0;
+int sampleRate = 48000;
+int numChannels = 1;
+int channelIndex = 0;
+};
 class RTCContext :
 public RTCENGINE_NAMESPACE::IMRTCRoomCallBack,
 public RTCENGINE_NAMESPACE::IMRTCConsumerCallBack,
@@ -84,8 +96,13 @@ public:
 mrtc::IMRTCEngine* getRtcEngine() const;
 bool init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId);
 bool initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex);
-bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum);
-bool initGIL();
+bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, const uint8_t channelNum);
+int16_t getSize();
+void setData(const mrtc::MRTCAudioFrame& frame);
+RetAudioFrame getData();
+np::ndarray getNumpyData();
+bp::list getListData();
+int16_t getDataCount();
 void* getpData() const;
 void setpData(void* pData);
@@ -100,17 +117,23 @@ public:
 private:
 RTCContext()
 {
+data_.resize(totalSize_);
 }
 mutable std::mutex mutex_;
 mrtc::IMRTCEngine * rtcEngine_ = nullptr;
 void* pData_ = nullptr;
 bool isOnRoom_ = false;
-bool isRecv_ = false;
+bool isOnConsumer_ = false;
 bool isJoinMultiRoom_ = false;
 bool isMultiRoom_ = false;
-bool isGIL_ = false;
 boost::python::object pyCallback_;
 void ** numpyApi_;
+std::vector<RetAudioFrame> data_;
+mutable std::mutex dataMutex_;
+const int16_t totalSize_ = 100;
+int16_t dataSize_ = 0;
+int16_t bottom_ = 0;
+int16_t head_= 0;
 void onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo);
 void onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo);
 void onRender(const char* roomId, const char* peerId,