@@ -35,13 +35,12 @@ describe('Plugin', () => {
 
   describe('langchain', () => {
     withVersions('langchain', ['@langchain/core'], version => {
-      beforeEach(() => {
+      before(() => {
         return agent.load('langchain')
       })
 
-      afterEach(() => {
-        // wiping in order to read new env vars for the config each time
-        return agent.close({ ritmReset: false, wipe: true })
+      after(() => {
+        return agent.close({ ritmReset: false })
       })
 
       beforeEach(() => {
@@ -65,84 +64,6 @@ describe('Plugin', () => {
         nock.cleanAll()
       })
 
-      describe('with global configurations', () => {
-        describe('with sampling rate', () => {
-          useEnv({
-            DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE: 0
-          })
-
-          it('does not tag prompt or completion', async () => {
-            stubCall({
-              ...openAiBaseCompletionInfo,
-              response: {
-                model: 'gpt-3.5-turbo-instruct',
-                choices: [{
-                  text: 'The answer is 4',
-                  index: 0,
-                  logprobs: null,
-                  finish_reason: 'length'
-                }],
-                usage: { prompt_tokens: 8, completion_tokens: 12, total_tokens: 20 }
-              }
-            })
-
-            const llm = new langchainOpenai.OpenAI({ model: 'gpt-3.5-turbo-instruct' })
-            const checkTraces = agent
-              .use(traces => {
-                expect(traces[0].length).to.equal(1)
-                const span = traces[0][0]
-
-                expect(span.meta).to.not.have.property('langchain.request.prompts.0.content')
-                expect(span.meta).to.not.have.property('langchain.response.completions.0.text')
-              })
-
-            const result = await llm.generate(['what is 2 + 2?'])
-
-            expect(result.generations[0][0].text).to.equal('The answer is 4')
-
-            await checkTraces
-          })
-        })
-
-        describe('with span char limit', () => {
-          useEnv({
-            DD_LANGCHAIN_SPAN_CHAR_LIMIT: 5
-          })
-
-          it('truncates the prompt and completion', async () => {
-            stubCall({
-              ...openAiBaseCompletionInfo,
-              response: {
-                model: 'gpt-3.5-turbo-instruct',
-                choices: [{
-                  text: 'The answer is 4',
-                  index: 0,
-                  logprobs: null,
-                  finish_reason: 'length'
-                }],
-                usage: { prompt_tokens: 8, completion_tokens: 12, total_tokens: 20 }
-              }
-            })
-
-            const llm = new langchainOpenai.OpenAI({ model: 'gpt-3.5-turbo-instruct' })
-            const checkTraces = agent
-              .use(traces => {
-                expect(traces[0].length).to.equal(1)
-                const span = traces[0][0]
-
-                expect(span.meta).to.have.property('langchain.request.prompts.0.content', 'what ...')
-                expect(span.meta).to.have.property('langchain.response.completions.0.text', 'The a...')
-              })
-
-            const result = await llm.generate(['what is 2 + 2?'])
-
-            expect(result.generations[0][0].text).to.equal('The answer is 4')
-
-            await checkTraces
-          })
-        })
-      })
-
       describe('llm', () => {
         it('does not tag output on error', async () => {
           nock('https://api.openai.com').post('/v1/completions').reply(403)